diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index dc4d9772269..5d8400d1926 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,7 +1,7 @@ Thank you for contributing to LangChain! - [ ] **PR title**: "package: description" - - Where "package" is whichever of langchain, community, core, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes. + - Where "package" is whichever of langchain, community, core, etc. is being modified. Use "docs: ..." for purely docs changes, "infra: ..." for CI changes. - Example: "community: add foobar LLM" diff --git a/.github/scripts/check_diff.py b/.github/scripts/check_diff.py index 068c75ef017..600c7ac06de 100644 --- a/.github/scripts/check_diff.py +++ b/.github/scripts/check_diff.py @@ -307,7 +307,7 @@ if __name__ == "__main__": f"Unknown lib: {file}. check_diff.py likely needs " "an update for this new library!" ) - elif any(file.startswith(p) for p in ["docs/", "templates/", "cookbook/"]): + elif any(file.startswith(p) for p in ["docs/", "cookbook/"]): if file.startswith("docs/"): docs_edited = True dirs_to_run["lint"].add(".") diff --git a/Makefile b/Makefile index 8a1099c5b8e..5b684c94ac9 100644 --- a/Makefile +++ b/Makefile @@ -66,12 +66,12 @@ spell_fix: ## lint: Run linting on the project. lint lint_package lint_tests: - poetry run ruff check docs templates cookbook - poetry run ruff format docs templates cookbook --diff - poetry run ruff check --select I docs templates cookbook - git grep 'from langchain import' docs/docs templates cookbook | grep -vE 'from langchain import (hub)' && exit 1 || exit 0 + poetry run ruff check docs cookbook + poetry run ruff format docs cookbook --diff + poetry run ruff check --select I docs cookbook + git grep 'from langchain import' docs/docs cookbook | grep -vE 'from langchain import (hub)' && exit 1 || exit 0 ## format: Format the project files. format format_diff: - poetry run ruff format docs templates cookbook - poetry run ruff check --select I --fix docs templates cookbook + poetry run ruff format docs cookbook + poetry run ruff check --select I --fix docs cookbook diff --git a/docs/scripts/arxiv_references.py b/docs/scripts/arxiv_references.py index be8ba975df1..3637a38230c 100644 --- a/docs/scripts/arxiv_references.py +++ b/docs/scripts/arxiv_references.py @@ -406,7 +406,9 @@ def _format_api_ref_url(doc_path: str, compact: bool = False) -> str: def _format_template_url(template_name: str) -> str: - return f"https://{LANGCHAIN_PYTHON_URL}/docs/templates/{template_name}" + return ( + f"https://github.com/langchain-ai/langchain/blob/v0.2/templates/{template_name}" + ) def _format_cookbook_url(cookbook_name: str) -> str: diff --git a/docs/vercel.json b/docs/vercel.json index 4f368093c5a..ad2f021cacb 100644 --- a/docs/vercel.json +++ b/docs/vercel.json @@ -76,7 +76,7 @@ }, { "source": "/v0.2/docs/templates/:path(.*/?)*", - "destination": "https://github.com/langchain-ai/langchain/tree/master/templates/:path*" + "destination": "https://github.com/langchain-ai/langchain/tree/v0.2/templates/:path*" }, { "source": "/docs/integrations/providers/mlflow_ai_gateway(/?)", diff --git a/libs/cli/README.md b/libs/cli/README.md index 1dc3bfa5406..f86c6ef69d4 100644 --- a/libs/cli/README.md +++ b/libs/cli/README.md @@ -4,5 +4,3 @@ This package implements the official CLI for LangChain. Right now, it is most us for getting started with LangChain Templates!
[CLI Docs](https://github.com/langchain-ai/langchain/blob/master/libs/cli/DOCS.md) - -[LangServe Templates Quickstart](https://github.com/langchain-ai/langchain/blob/master/templates/README.md) diff --git a/templates/.gitignore b/templates/.gitignore deleted file mode 100644 index 9167bb274a2..00000000000 --- a/templates/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -__pycache__ -.ruff_cache \ No newline at end of file diff --git a/templates/Makefile b/templates/Makefile deleted file mode 100644 index 8d48b4b785f..00000000000 --- a/templates/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -lint lint_diff lint_package lint_tests: - poetry run poe lint - -test: - poetry run poe test - -format: - poetry run poe format diff --git a/templates/README.md b/templates/README.md deleted file mode 100644 index adfe62c1641..00000000000 --- a/templates/README.md +++ /dev/null @@ -1,137 +0,0 @@ -# LangChain Templates - -LangChain Templates are the easiest and fastest way to build a production-ready LLM application. -These templates serve as a set of reference architectures for a wide variety of popular LLM use cases. -They are all in a standard format which make it easy to deploy them with [LangServe](https://github.com/langchain-ai/langserve). - -🚩 We will be releasing a hosted version of LangServe for one-click deployments of LangChain applications. [Sign up here](https://airtable.com/app0hN6sd93QcKubv/shrAjst60xXa6quV2) to get on the waitlist. - -## Quick Start - -To use, first install the LangChain CLI. - -```shell -pip install -U langchain-cli -``` - -Next, create a new LangChain project: - -```shell -langchain app new my-app -``` - -This will create a new directory called `my-app` with two folders: - -- `app`: This is where LangServe code will live -- `packages`: This is where your chains or agents will live - -To pull in an existing template as a package, you first need to go into your new project: - -```shell -cd my-app -``` - -And you can the add a template as a project. -In this getting started guide, we will add a simple `pirate-speak` project. -All this project does is convert user input into pirate speak. - -```shell -langchain app add pirate-speak -``` - -This will pull in the specified template into `packages/pirate-speak` - -You will then be prompted if you want to install it. -This is the equivalent of running `pip install -e packages/pirate-speak`. -You should generally accept this (or run that same command afterwards). -We install it with `-e` so that if you modify the template at all (which you likely will) the changes are updated. - -After that, it will ask you if you want to generate route code for this project. -This is code you need to add to your app to start using this chain. -If we accept, we will see the following code generated: - -```shell -from pirate_speak.chain import chain as pirate_speak_chain - -add_routes(app, pirate_speak_chain, path="/pirate-speak") -``` - -You can now edit the template you pulled down. -You can change the code files in `packages/pirate-speak` to use a different model, different prompt, different logic. -Note that the above code snippet always expects the final chain to be importable as `from pirate_speak.chain import chain`, -so you should either keep the structure of the package similar enough to respect that or be prepared to update that code snippet. - -Once you have done as much of that as you want, it is -In order to have LangServe use this project, you then need to modify `app/server.py`. 
-Specifically, you should add the above code snippet to `app/server.py` so that file looks like: - -```python -from fastapi import FastAPI -from langserve import add_routes -from pirate_speak.chain import chain as pirate_speak_chain - -app = FastAPI() - -add_routes(app, pirate_speak_chain, path="/pirate-speak") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -For this particular application, we will use OpenAI as the LLM, so we need to export our OpenAI API key: - -```shell -export OPENAI_API_KEY=sk-... -``` - -You can then spin up production-ready endpoints, along with a playground, by running: - -```shell -langchain serve -``` - -This now gives a fully deployed LangServe application. -For example, you get a playground out-of-the-box at [http://127.0.0.1:8000/pirate-speak/playground/](http://127.0.0.1:8000/pirate-speak/playground/): - -![Screenshot of the LangServe Playground interface with input and output fields demonstrating pirate speak conversion.](docs/playground.png) "LangServe Playground Interface" - -Access API documentation at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) - -![Screenshot of the API documentation interface showing available endpoints for the pirate-speak application.](docs/docs.png) "API Documentation Interface" - -Use the LangServe python or js SDK to interact with the API as if it were a regular [Runnable](https://python.langchain.com/docs/expression_language/). - -```python -from langserve import RemoteRunnable - -api = RemoteRunnable("http://127.0.0.1:8000/pirate-speak") -api.invoke({"text": "hi"}) -``` - -That's it for the quick start! -You have successfully downloaded your first template and deployed it with LangServe. - - -## Additional Resources - -### [Index of Templates](docs/INDEX.md) - -Explore the many templates available to use - from advanced RAG to agents. - -### [Contributing](docs/CONTRIBUTING.md) - -Want to contribute your own template? It's pretty easy! These instructions walk through how to do that. - -### [Launching LangServe from a Package](docs/LAUNCHING_PACKAGE.md) - -You can also launch LangServe from a package directly (without having to create a new project). -These instructions cover how to do that. diff --git a/templates/anthropic-iterative-search/README.md b/templates/anthropic-iterative-search/README.md deleted file mode 100644 index f1994193834..00000000000 --- a/templates/anthropic-iterative-search/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Anthropic - iterative search - -This template will create a virtual research assistant with the ability to search Wikipedia to find answers to your questions. - -It is heavily inspired by [this notebook](https://github.com/anthropics/anthropic-cookbook/blob/main/long_context/wikipedia-search-cookbook.ipynb). - -## Environment Setup - -Set the `ANTHROPIC_API_KEY` environment variable to access the Anthropic models. 
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package anthropic-iterative-search -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add anthropic-iterative-search -``` - -And add the following code to your `server.py` file: -```python -from anthropic_iterative_search import chain as anthropic_iterative_search_chain - -add_routes(app, anthropic_iterative_search_chain, path="/anthropic-iterative-search") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/anthropic-iterative-search/playground](http://127.0.0.1:8000/anthropic-iterative-search/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/anthropic-iterative-search") -``` \ No newline at end of file diff --git a/templates/anthropic-iterative-search/anthropic_iterative_search/__init__.py b/templates/anthropic-iterative-search/anthropic_iterative_search/__init__.py deleted file mode 100644 index b91b515e409..00000000000 --- a/templates/anthropic-iterative-search/anthropic_iterative_search/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .chain import chain - -__all__ = ["chain"] diff --git a/templates/anthropic-iterative-search/anthropic_iterative_search/agent_scratchpad.py b/templates/anthropic-iterative-search/anthropic_iterative_search/agent_scratchpad.py deleted file mode 100644 index 051253f811a..00000000000 --- a/templates/anthropic-iterative-search/anthropic_iterative_search/agent_scratchpad.py +++ /dev/null @@ -1,16 +0,0 @@ -def _format_docs(docs): - result = "\n".join( - [ - f'\n\n{r}\n\n' - for i, r in enumerate(docs) - ] - ) - return result - - -def format_agent_scratchpad(intermediate_steps): - thoughts = "" - for action, observation in intermediate_steps: - thoughts += action.log - thoughts += "" + _format_docs(observation) - return thoughts diff --git a/templates/anthropic-iterative-search/anthropic_iterative_search/chain.py b/templates/anthropic-iterative-search/anthropic_iterative_search/chain.py deleted file mode 100644 index 8c9a04d789b..00000000000 --- a/templates/anthropic-iterative-search/anthropic_iterative_search/chain.py +++ /dev/null @@ -1,37 +0,0 @@ -from langchain_anthropic import ChatAnthropic -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import ConfigurableField - -from .prompts import answer_prompt -from .retriever_agent import executor - -prompt = 
ChatPromptTemplate.from_template(answer_prompt) - -model = ChatAnthropic( - model="claude-3-sonnet-20240229", temperature=0, max_tokens_to_sample=1000 -) - -chain = ( - {"query": lambda x: x["query"], "information": executor | (lambda x: x["output"])} - | prompt - | model - | StrOutputParser() -) - -# Add typing for the inputs to be used in the playground - - -class Inputs(BaseModel): - query: str - - -chain = chain.with_types(input_type=Inputs) - -chain = chain.configurable_alternatives( - ConfigurableField(id="chain"), - default_key="response", - # This adds a new option, with name `openai` that is equal to `ChatOpenAI()` - retrieve=executor, -) diff --git a/templates/anthropic-iterative-search/anthropic_iterative_search/output_parser.py b/templates/anthropic-iterative-search/anthropic_iterative_search/output_parser.py deleted file mode 100644 index 23697894cad..00000000000 --- a/templates/anthropic-iterative-search/anthropic_iterative_search/output_parser.py +++ /dev/null @@ -1,37 +0,0 @@ -import re - -from langchain_core.agents import AgentAction, AgentFinish - -from .agent_scratchpad import _format_docs - - -def extract_between_tags(tag: str, string: str, strip: bool = True) -> str: - ext_list = re.findall(f"<{tag}\s?>(.+?)", string, re.DOTALL) - if strip: - ext_list = [e.strip() for e in ext_list] - if ext_list: - if len(ext_list) != 1: - raise ValueError - # Only return the first one - return ext_list[0] - - -def parse_output(outputs): - partial_completion = outputs["partial_completion"] - steps = outputs["intermediate_steps"] - search_query = extract_between_tags( - "search_query", partial_completion + "" - ) - if search_query is None: - docs = [] - str_output = "" - for action, observation in steps: - docs.extend(observation) - str_output += action.log - str_output += "" + _format_docs(observation) - str_output += partial_completion - return AgentFinish({"docs": docs, "output": str_output}, log=partial_completion) - else: - return AgentAction( - tool="search", tool_input=search_query, log=partial_completion - ) diff --git a/templates/anthropic-iterative-search/anthropic_iterative_search/prompts.py b/templates/anthropic-iterative-search/anthropic_iterative_search/prompts.py deleted file mode 100644 index fe46574cb44..00000000000 --- a/templates/anthropic-iterative-search/anthropic_iterative_search/prompts.py +++ /dev/null @@ -1,7 +0,0 @@ -retrieval_prompt = """{retriever_description} Before beginning to research the user's question, first think for a moment inside tags about what information is necessary for a well-informed answer. If the user's question is complex, you may need to decompose the query into multiple subqueries and execute them individually. Sometimes the search engine will return empty search results, or the search results may not contain the information you need. In such cases, feel free to try again with a different query. - -After each call to the Search Engine Tool, reflect briefly inside tags about whether you now have enough information to answer, or whether more information is needed. If you have all the relevant information, write it in tags, WITHOUT actually answering the question. Otherwise, issue a new search. - -Here is the user's question: {query} Remind yourself to make short queries in your scratchpad as you plan out your strategy.""" # noqa: E501 - -answer_prompt = "Here is a user query: {query}. Here is some relevant information: {information}. Please answer the question using the relevant information." 
# noqa: E501 diff --git a/templates/anthropic-iterative-search/anthropic_iterative_search/retriever.py b/templates/anthropic-iterative-search/anthropic_iterative_search/retriever.py deleted file mode 100644 index 2dba68eae68..00000000000 --- a/templates/anthropic-iterative-search/anthropic_iterative_search/retriever.py +++ /dev/null @@ -1,17 +0,0 @@ -from langchain.retrievers import WikipediaRetriever -from langchain.tools import tool - -# This is used to tell the model how to best use the retriever. - -retriever_description = """You will be asked a question by a human user. You have access to the following tool to help answer the question. Search Engine Tool * The search engine will exclusively search over Wikipedia for pages similar to your query. It returns for each page its title and full page content. Use this tool if you want to get up-to-date and comprehensive information on a topic to help answer queries. Queries should be as atomic as possible -- they only need to address one part of the user's question. For example, if the user's query is "what is the color of a basketball?", your search query should be "basketball". Here's another example: if the user's question is "Who created the first neural network?", your first query should be "neural network". As you can see, these queries are quite short. Think keywords, not phrases. * At any time, you can make a call to the search engine using the following syntax: query_word. * You'll then get results back in tags.""" # noqa: E501 - -retriever = WikipediaRetriever() - -# This should be the same as the function name below -RETRIEVER_TOOL_NAME = "search" - - -@tool -def search(query): - """Search with the retriever.""" - return retriever.invoke(query) diff --git a/templates/anthropic-iterative-search/anthropic_iterative_search/retriever_agent.py b/templates/anthropic-iterative-search/anthropic_iterative_search/retriever_agent.py deleted file mode 100644 index 61ac305e569..00000000000 --- a/templates/anthropic-iterative-search/anthropic_iterative_search/retriever_agent.py +++ /dev/null @@ -1,43 +0,0 @@ -from langchain.agents import AgentExecutor -from langchain_anthropic import ChatAnthropic -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -from .agent_scratchpad import format_agent_scratchpad -from .output_parser import parse_output -from .prompts import retrieval_prompt -from .retriever import retriever_description, search - -prompt = ChatPromptTemplate.from_messages( - [ - ("user", retrieval_prompt), - ("ai", "{agent_scratchpad}"), - ] -) -prompt = prompt.partial(retriever_description=retriever_description) - -model = ChatAnthropic( - model="claude-3-sonnet-20240229", temperature=0, max_tokens_to_sample=1000 -) - -chain = ( - RunnablePassthrough.assign( - agent_scratchpad=lambda x: format_agent_scratchpad(x["intermediate_steps"]) - ) - | prompt - | model.bind(stop_sequences=[""]) - | StrOutputParser() -) - -agent_chain = ( - RunnableParallel( - { - "partial_completion": chain, - "intermediate_steps": lambda x: x["intermediate_steps"], - } - ) - | parse_output -) - -executor = AgentExecutor(agent=agent_chain, tools=[search], verbose=True) diff --git a/templates/anthropic-iterative-search/main.py b/templates/anthropic-iterative-search/main.py deleted file mode 100644 index 27b7aa1aa6a..00000000000 --- a/templates/anthropic-iterative-search/main.py +++ /dev/null @@ -1,12 +0,0 @@ -from 
anthropic_iterative_search import final_chain - -if __name__ == "__main__": - query = ( - "Which movie came out first: Oppenheimer, or " - "Are You There God It's Me Margaret?" - ) - print( - final_chain.with_config(configurable={"chain": "retrieve"}).invoke( - {"query": query} - ) - ) diff --git a/templates/anthropic-iterative-search/pyproject.toml b/templates/anthropic-iterative-search/pyproject.toml deleted file mode 100644 index 8566e330273..00000000000 --- a/templates/anthropic-iterative-search/pyproject.toml +++ /dev/null @@ -1,29 +0,0 @@ -[tool.poetry] -name = "anthropic-iterative-search" -version = "0.0.1" -description = "A virtual research assistant with the ability to search Wikipedia and answer questions" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -wikipedia = "^1.4.0" -langchain-anthropic = "^0.1.4" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "anthropic_iterative_search" -export_attr = "chain" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["Anthropic", "Wikipedia"] -tags = ["research", "agents"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/anthropic-iterative-search/tests/__init__.py b/templates/anthropic-iterative-search/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/basic-critique-revise/LICENSE b/templates/basic-critique-revise/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/basic-critique-revise/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/basic-critique-revise/README.md b/templates/basic-critique-revise/README.md deleted file mode 100644 index 8f3b9bc205a..00000000000 --- a/templates/basic-critique-revise/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Basic critique revise - -Iteratively generate schema candidates and revise them based on errors. - -## Environment Setup - -This template uses `OpenAI function calling`, so you will need to set the `OPENAI_API_KEY` environment variable in order to use this template. 
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U "langchain-cli[serve]" -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package basic-critique-revise -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add basic-critique-revise -``` - -And add the following code to your `server.py` file: -```python -from basic_critique_revise import chain as basic_critique_revise_chain - -add_routes(app, basic_critique_revise_chain, path="/basic-critique-revise") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/basic-critique-revise/playground](http://127.0.0.1:8000/basic-critique-revise/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/basic-critique-revise") -``` \ No newline at end of file diff --git a/templates/basic-critique-revise/basic_critique_revise/__init__.py b/templates/basic-critique-revise/basic_critique_revise/__init__.py deleted file mode 100644 index 7e4ef80c999..00000000000 --- a/templates/basic-critique-revise/basic_critique_revise/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from basic_critique_revise.chain import chain - -__all__ = ["chain"] diff --git a/templates/basic-critique-revise/basic_critique_revise/chain.py b/templates/basic-critique-revise/basic_critique_revise/chain.py deleted file mode 100644 index 8d16bce45e1..00000000000 --- a/templates/basic-critique-revise/basic_critique_revise/chain.py +++ /dev/null @@ -1,143 +0,0 @@ -import json -from datetime import datetime -from enum import Enum -from operator import itemgetter -from typing import Any, Dict, Sequence - -from langchain.chains.openai_functions import convert_to_openai_function -from langchain_community.chat_models import ChatOpenAI -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel, Field, ValidationError, conint -from langchain_core.runnables import ( - Runnable, - RunnableBranch, - RunnableLambda, - RunnablePassthrough, -) - - -class TaskType(str, Enum): - call = "Call" - message = "Message" - todo = "Todo" - in_person_meeting = "In-Person Meeting" - email = "Email" - mail = "Mail" - text = "Text" - open_house = "Open House" - - -class Task(BaseModel): - title: str = Field(..., description="The title of the tasks, reminders and alerts") - due_date: datetime = Field( - ..., description="Due date. 
Must be a valid ISO date string with timezone" - ) - task_type: TaskType = Field(None, description="The type of task") - - -class Tasks(BaseModel): - """JSON definition for creating tasks, reminders and alerts""" - - tasks: Sequence[Task] - - -template = """Respond to the following user query to the best of your ability: - -{query}""" - -generate_prompt = ChatPromptTemplate.from_template(template) - -function_args = {"functions": [convert_to_openai_function(Tasks)]} - -task_function_call_model = ChatOpenAI(model="gpt-3.5-turbo").bind(**function_args) - -output_parser = RunnableLambda( - lambda x: json.loads( - x.additional_kwargs.get("function_call", {}).get("arguments", '""') - ) -) - - -revise_template = """ -Based on the provided context, fix the incorrect result of the original prompt -and the provided errors. Only respond with an answer that satisfies the -constraints laid out in the original prompt and fixes the Pydantic errors. - -Hint: Datetime fields must be valid ISO date strings. - - - -{original_prompt} - - -{completion} - - -{error} - -""" - -revise_prompt = ChatPromptTemplate.from_template(revise_template) - -revise_chain = revise_prompt | task_function_call_model | output_parser - - -def output_validator(output): - try: - Tasks.validate(output["completion"]) - except ValidationError as e: - return str(e) - - return None - - -class IntermediateType(BaseModel): - error: str - completion: Dict - original_prompt: str - max_revisions: int - - -validation_step = RunnablePassthrough().assign(error=RunnableLambda(output_validator)) - - -def revise_loop(input: IntermediateType) -> IntermediateType: - revise_step = RunnablePassthrough().assign(completion=revise_chain) - - else_step: Runnable[IntermediateType, IntermediateType] = RunnableBranch( - (lambda x: x["error"] is None, RunnablePassthrough()), - revise_step | validation_step, - ).with_types(input_type=IntermediateType) - - for _ in range(max(0, input["max_revisions"] - 1)): - else_step = RunnableBranch( - (lambda x: x["error"] is None, RunnablePassthrough()), - revise_step | validation_step | else_step, - ) - return else_step - - -revise_lambda = RunnableLambda(revise_loop) - - -class InputType(BaseModel): - query: str - max_revisions: conint(ge=1, le=10) = 5 - - -chain: Runnable[Any, Any] = ( - { - "original_prompt": generate_prompt, - "max_revisions": itemgetter("max_revisions"), - } - | RunnablePassthrough().assign( - completion=( - RunnableLambda(itemgetter("original_prompt")) - | task_function_call_model - | output_parser - ) - ) - | validation_step - | revise_lambda - | RunnableLambda(itemgetter("completion")) -).with_types(input_type=InputType) diff --git a/templates/basic-critique-revise/pyproject.toml b/templates/basic-critique-revise/pyproject.toml deleted file mode 100644 index ed5b75b8d4c..00000000000 --- a/templates/basic-critique-revise/pyproject.toml +++ /dev/null @@ -1,30 +0,0 @@ -[tool.poetry] -name = "basic_critique_revise" -version = "0.0.1" -description = "Iteratively generate schema candidates and revise based on errors" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "basic_critique_revise" -export_attr = "chain" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["OpenAI", "Function Calling", "Pydantic"] -tags = ["research", 
"function-calling"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/basic-critique-revise/tests/__init__.py b/templates/basic-critique-revise/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/bedrock-jcvd/README.md b/templates/bedrock-jcvd/README.md deleted file mode 100644 index 60406c04163..00000000000 --- a/templates/bedrock-jcvd/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# Bedrock - JCVD 🕺🥋 - -## Overview - -LangChain template that uses [Anthropic's Claude on Amazon Bedrock](https://aws.amazon.com/bedrock/claude/) -to behave like `Jean-Claude Van Damme` (`JCVD`). - -> I am the Fred Astaire of Chatbots! 🕺 - -![Animated GIF of Jean-Claude Van Damme dancing.](https://media.tenor.com/CVp9l7g3axwAAAAj/jean-claude-van-damme-jcvd.gif) "Jean-Claude Van Damme Dancing" - -## Environment Setup - -### AWS Credentials - -This template uses [Boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html), the AWS SDK for Python, to call [Amazon Bedrock](https://aws.amazon.com/bedrock/). You **must** configure both AWS credentials *and* an AWS Region in order to make requests. - -> For information on how to do this, see [AWS Boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html) (Developer Guide > Credentials). - -### Foundation Models - -By default, this template uses [Anthropic's Claude v2](https://aws.amazon.com/about-aws/whats-new/2023/08/claude-2-foundation-model-anthropic-amazon-bedrock/) (`anthropic.claude-v2`). - -> To request access to a specific model, check out the [Amazon Bedrock User Guide](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html) (Model access) - -To use a different model, set the environment variable `BEDROCK_JCVD_MODEL_ID`. A list of base models is available in the [Amazon Bedrock User Guide](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids-arns.html) (Use the API > API operations > Run inference > Base Model IDs). - -> The full list of available models (including base and [custom models](https://docs.aws.amazon.com/bedrock/latest/userguide/custom-models.html)) is available in the [Amazon Bedrock Console](https://docs.aws.amazon.com/bedrock/latest/userguide/using-console.html) under **Foundation Models** or by calling [`aws bedrock list-foundation-models`](https://docs.aws.amazon.com/cli/latest/reference/bedrock/list-foundation-models.html). - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package bedrock-jcvd -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add bedrock-jcvd -``` - -And add the following code to your `server.py` file: -```python -from bedrock_jcvd import chain as bedrock_jcvd_chain - -add_routes(app, bedrock_jcvd_chain, path="/bedrock-jcvd") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs). - -We can also access the playground at [http://127.0.0.1:8000/bedrock-jcvd/playground](http://127.0.0.1:8000/bedrock-jcvd/playground) - -![Screenshot of the LangServe Playground interface with an example input and output demonstrating a Jean-Claude Van Damme voice imitation.](jcvd_langserve.png) "JCVD Playground" \ No newline at end of file diff --git a/templates/bedrock-jcvd/bedrock_jcvd/__init__.py b/templates/bedrock-jcvd/bedrock_jcvd/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/bedrock-jcvd/bedrock_jcvd/chain.py b/templates/bedrock-jcvd/bedrock_jcvd/chain.py deleted file mode 100644 index 4d79b37450a..00000000000 --- a/templates/bedrock-jcvd/bedrock_jcvd/chain.py +++ /dev/null @@ -1,47 +0,0 @@ -import os - -from langchain_aws import ChatBedrock -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import ConfigurableField - -# For a description of each inference parameter, see -# https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-claude.html -_model_kwargs = { - "temperature": float(os.getenv("BEDROCK_JCVD_TEMPERATURE", "0.1")), - "top_p": float(os.getenv("BEDROCK_JCVD_TOP_P", "1")), - "top_k": int(os.getenv("BEDROCK_JCVD_TOP_K", "250")), - "max_tokens_to_sample": int(os.getenv("BEDROCK_JCVD_MAX_TOKENS_TO_SAMPLE", "300")), -} - -# Full list of base model IDs is available at -# https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids-arns.html -_model_alts = { - "claude_2_1": ChatBedrock( - model_id="anthropic.claude-v2:1", model_kwargs=_model_kwargs - ), - "claude_1": ChatBedrock(model_id="anthropic.claude-v1", model_kwargs=_model_kwargs), - "claude_instant_1": ChatBedrock( - model_id="anthropic.claude-instant-v1", model_kwargs=_model_kwargs - ), -} - -# For some tips on how to construct effective prompts for Claude, -# check out Anthropic's Claude Prompt Engineering deck (Bedrock edition) -# https://docs.google.com/presentation/d/1tjvAebcEyR8la3EmVwvjC7PHR8gfSrcsGKfTPAaManw -_prompt = ChatPromptTemplate.from_messages( - [ - ("human", "You are JCVD. 
{input}"), - ] -) - -_model = ChatBedrock( - model_id="anthropic.claude-v2", model_kwargs=_model_kwargs -).configurable_alternatives( - which=ConfigurableField( - id="model", name="Model", description="The model that will be used" - ), - default_key="claude_2", - **_model_alts, -) - -chain = _prompt | _model diff --git a/templates/bedrock-jcvd/jcvd_langserve.png b/templates/bedrock-jcvd/jcvd_langserve.png deleted file mode 100644 index f62fe5ead5a..00000000000 Binary files a/templates/bedrock-jcvd/jcvd_langserve.png and /dev/null differ diff --git a/templates/bedrock-jcvd/pyproject.toml b/templates/bedrock-jcvd/pyproject.toml deleted file mode 100644 index 0c5e4cf9cef..00000000000 --- a/templates/bedrock-jcvd/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "bedrock-jcvd" -version = "0.1.0" -description = "LangChain template that behaves like JCVD using Anthropic's Claude on Amazon Bedrock" -authors = ["JGalego "] -readme = "README.md" - -[tool.poetry.dependencies] -python = "^3.11" -uvicorn = "^0.23.2" -langserve = {extras = ["server"], version = ">=0.0.30"} -pydantic = "<2" -boto3 = "^1.33.10" -langchain = "^0.1" - -[tool.langserve] -export_module = "bedrock_jcvd.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "chatbot" -author = "LangChain" -integrations = ["AWS"] -tags = ["conversation"] - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/cassandra-entomology-rag/.env.template b/templates/cassandra-entomology-rag/.env.template deleted file mode 100644 index 251125528ba..00000000000 --- a/templates/cassandra-entomology-rag/.env.template +++ /dev/null @@ -1,13 +0,0 @@ -export OPENAI_API_KEY="..." - -export ASTRA_DB_APPLICATION_TOKEN="AstraCS:..." -export ASTRA_DB_KEYSPACE="..." -export ASTRA_DB_ID="12345678-" - -# UNCOMMENT THE FOLLOWING FOR A CASSANDRA CLUSTER ... -# export USE_CASSANDRA_CLUSTER="1" -# ... then provide these parameters as well: -# export CASSANDRA_KEYSPACE="..." -# export CASSANDRA_CONTACT_POINTS="127.0.0.1" # optional -# export CASSANDRA_USERNAME="cassandra" # optional -# export CASSANDRA_PASSWORD="cassandra" # optional diff --git a/templates/cassandra-entomology-rag/README.md b/templates/cassandra-entomology-rag/README.md deleted file mode 100644 index ebc2c371034..00000000000 --- a/templates/cassandra-entomology-rag/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# Cassandra - Entomology RAG - -This template will perform RAG using `Apache Cassandra®` or `Astra DB` -through `CQL` (`Cassandra` vector store class) - -## Environment Setup - -For the setup, you will require: -- an [Astra](https://astra.datastax.com) Vector Database. You must have a [Database Administrator token](https://awesome-astra.github.io/docs/pages/astra/create-token/#c-procedure), specifically the string starting with `AstraCS:...`. -- [Database ID](https://awesome-astra.github.io/docs/pages/astra/faq/#where-should-i-find-a-database-identifier). -- an **OpenAI API Key**. (More info [here](https://cassio.org/start_here/#llm-access)) - -You may also use a regular Cassandra cluster. In this case, provide the `USE_CASSANDRA_CLUSTER` entry as shown in `.env.template` and the subsequent environment variables to specify how to connect to it. - -The connection parameters and secrets must be provided through environment variables. Refer to `.env.template` for the required variables. 
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package cassandra-entomology-rag -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add cassandra-entomology-rag -``` - -And add the following code to your `server.py` file: -```python -from cassandra_entomology_rag import chain as cassandra_entomology_rag_chain - -add_routes(app, cassandra_entomology_rag_chain, path="/cassandra-entomology-rag") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/cassandra-entomology-rag/playground](http://127.0.0.1:8000/cassandra-entomology-rag/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/cassandra-entomology-rag") -``` - -## Reference - -Stand-alone repo with LangServe chain: [here](https://github.com/hemidactylus/langserve_cassandra_entomology_rag). diff --git a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py b/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py deleted file mode 100644 index 1816e8c7fdd..00000000000 --- a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -import os - -import cassio -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Cassandra -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnablePassthrough - -from .populate_vector_store import populate - -use_cassandra = int(os.environ.get("USE_CASSANDRA_CLUSTER", "0")) -if use_cassandra: - from .cassandra_cluster_init import get_cassandra_connection - - session, keyspace = get_cassandra_connection() - cassio.init( - session=session, - keyspace=keyspace, - ) -else: - cassio.init( - token=os.environ["ASTRA_DB_APPLICATION_TOKEN"], - database_id=os.environ["ASTRA_DB_ID"], - keyspace=os.environ.get("ASTRA_DB_KEYSPACE"), - ) - - -# inits -llm = ChatOpenAI() -embeddings = OpenAIEmbeddings() -vector_store = Cassandra( - session=None, - keyspace=None, - embedding=embeddings, - table_name="langserve_rag_demo", -) -retriever = vector_store.as_retriever(search_kwargs={"k": 3}) - -# For demo reasons, let's ensure there are rows on the vector store. -# Please remove this and/or adapt to your use case! 
-inserted_lines = populate(vector_store) -if inserted_lines: - print(f"Done ({inserted_lines} lines inserted).") - -entomology_template = """ -You are an expert entomologist, tasked with answering enthusiast biologists' questions. -You must answer based only on the provided context, do not make up any fact. -Your answers must be concise and to the point, but strive to provide scientific details -(such as family, order, Latin names, and so on when appropriate). -You MUST refuse to answer questions on other topics than entomology, -as well as questions whose answer is not found in the provided context. - -CONTEXT: -{context} - -QUESTION: {question} - -YOUR ANSWER:""" - -entomology_prompt = ChatPromptTemplate.from_template(entomology_template) - -chain = ( - {"context": retriever, "question": RunnablePassthrough()} - | entomology_prompt - | llm - | StrOutputParser() -) diff --git a/templates/cassandra-entomology-rag/cassandra_entomology_rag/cassandra_cluster_init.py b/templates/cassandra-entomology-rag/cassandra_entomology_rag/cassandra_cluster_init.py deleted file mode 100644 index 9c4ce5b9b66..00000000000 --- a/templates/cassandra-entomology-rag/cassandra_entomology_rag/cassandra_cluster_init.py +++ /dev/null @@ -1,29 +0,0 @@ -import os - -from cassandra.auth import PlainTextAuthProvider -from cassandra.cluster import Cluster - - -def get_cassandra_connection(): - contact_points = [ - cp.strip() - for cp in os.environ.get("CASSANDRA_CONTACT_POINTS", "").split(",") - if cp.strip() - ] - CASSANDRA_KEYSPACE = os.environ["CASSANDRA_KEYSPACE"] - CASSANDRA_USERNAME = os.environ.get("CASSANDRA_USERNAME") - CASSANDRA_PASSWORD = os.environ.get("CASSANDRA_PASSWORD") - # - if CASSANDRA_USERNAME and CASSANDRA_PASSWORD: - auth_provider = PlainTextAuthProvider( - CASSANDRA_USERNAME, - CASSANDRA_PASSWORD, - ) - else: - auth_provider = None - - c_cluster = Cluster( - contact_points if contact_points else None, auth_provider=auth_provider - ) - session = c_cluster.connect() - return (session, CASSANDRA_KEYSPACE) diff --git a/templates/cassandra-entomology-rag/cassandra_entomology_rag/populate_vector_store.py b/templates/cassandra-entomology-rag/cassandra_entomology_rag/populate_vector_store.py deleted file mode 100644 index e1fb9e314a5..00000000000 --- a/templates/cassandra-entomology-rag/cassandra_entomology_rag/populate_vector_store.py +++ /dev/null @@ -1,29 +0,0 @@ -import os - -BASE_DIR = os.path.abspath(os.path.dirname(__file__)) - - -def populate(vector_store): - # is the store empty? 
find out with a probe search - hits = vector_store.similarity_search_by_vector( - embedding=[0.001] * 1536, - k=1, - ) - # - if len(hits) == 0: - # this seems a first run: - # must populate the vector store - src_file_name = os.path.join(BASE_DIR, "..", "sources.txt") - lines = [ - line.strip() - for line in open(src_file_name).readlines() - if line.strip() - if line[0] != "#" - ] - # deterministic IDs to prevent duplicates on multiple runs - ids = ["_".join(line.split(" ")[:2]).lower().replace(":", "") for line in lines] - # - vector_store.add_texts(texts=lines, ids=ids) - return len(lines) - else: - return 0 diff --git a/templates/cassandra-entomology-rag/pyproject.toml b/templates/cassandra-entomology-rag/pyproject.toml deleted file mode 100644 index b6b2984cde3..00000000000 --- a/templates/cassandra-entomology-rag/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "cassandra-entomology-rag" -version = "0.0.1" -description = "RAG using Apache Cassandra® or Astra DB" -authors = [ - "Stefano Lottini ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -cassio = "^0.1.3" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "cassandra_entomology_rag" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "DataStax" -integrations = ["OpenAI", "Cassandra"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/cassandra-entomology-rag/sources.txt b/templates/cassandra-entomology-rag/sources.txt deleted file mode 100644 index c03c144753b..00000000000 --- a/templates/cassandra-entomology-rag/sources.txt +++ /dev/null @@ -1,31 +0,0 @@ -# source: https://www.thoughtco.com/a-guide-to-the-twenty-nine-insect-orders-1968419 - -Order Thysanura: The silverfish and firebrats are found in the order Thysanura. They are wingless insects often found in people's attics, and have a lifespan of several years. There are about 600 species worldwide. -Order Diplura: Diplurans are the most primitive insect species, with no eyes or wings. They have the unusual ability among insects to regenerate body parts. There are over 400 members of the order Diplura in the world. -Order Protura: Another very primitive group, the proturans have no eyes, no antennae, and no wings. They are uncommon, with perhaps less than 100 species known. -Order Collembola: The order Collembola includes the springtails, primitive insects without wings. There are approximately 2,000 species of Collembola worldwide. -Order Ephemeroptera: The mayflies of order Ephemeroptera are short-lived, and undergo incomplete metamorphosis. The larvae are aquatic, feeding on algae and other plant life. Entomologists have described about 2,100 species worldwide. -Order Odonata: The order Odonata includes dragonflies and damselflies, which undergo incomplete metamorphosis. They are predators of other insects, even in their immature stage. There are about 5,000 species in the order Odonata. -Order Plecoptera: The stoneflies of order Plecoptera are aquatic and undergo incomplete metamorphosis. The nymphs live under rocks in well flowing streams. Adults are usually seen on the ground along stream and river banks. There are roughly 3,000 species in this group. 
-Order Grylloblatodea: Sometimes referred to as "living fossils," the insects of the order Grylloblatodea have changed little from their ancient ancestors. This order is the smallest of all the insect orders, with perhaps only 25 known species living today. Grylloblatodea live at elevations above 1500 ft., and are commonly named ice bugs or rock crawlers. -Order Orthoptera: These are familiar insects (grasshoppers, locusts, katydids, and crickets) and one of the largest orders of herbivorous insects. Many species in the order Orthoptera can produce and detect sounds. Approximately 20,000 species exist in this group. -Order Phasmida: The order Phasmida are masters of camouflage, the stick and leaf insects. They undergo incomplete metamorphosis and feed on leaves. There are some 3,000 insects in this group, but only a small fraction of this number is leaf insects. Stick insects are the longest insects in the world. -Order Dermaptera: This order contains the earwigs, an easily recognized insect that often has pincers at the end of the abdomen. Many earwigs are scavengers, eating both plant and animal matter. The order Dermaptera includes less than 2,000 species. -Order Embiidina: The order Embioptera is another ancient order with few species, perhaps only 200 worldwide. The web spinners have silk glands in their front legs and weave nests under leaf litter and in tunnels where they live. Webspinners live in tropical or subtropical climates. -Order Dictyoptera: The order Dictyoptera includes roaches and mantids. Both groups have long, segmented antennae and leathery forewings held tightly against their backs. They undergo incomplete metamorphosis. Worldwide, there approximately 6,000 species in this order, most living in tropical regions. -Order Isoptera: Termites feed on wood and are important decomposers in forest ecosystems. They also feed on wood products and are thought of as pests for the destruction they cause to man-made structures. There are between 2,000 and 3,000 species in this order. -Order Zoraptera: Little is know about the angel insects, which belong to the order Zoraptera. Though they are grouped with winged insects, many are actually wingless. Members of this group are blind, small, and often found in decaying wood. There are only about 30 described species worldwide. -Order Psocoptera: Bark lice forage on algae, lichen, and fungus in moist, dark places. Booklice frequent human dwellings, where they feed on book paste and grains. They undergo incomplete metamorphosis. Entomologists have named about 3,200 species in the order Psocoptera. -Order Mallophaga: Biting lice are ectoparasites that feed on birds and some mammals. There are an estimated 3,000 species in the order Mallophaga, all of which undergo incomplete metamorphosis. -Order Siphunculata: The order Siphunculata are the sucking lice, which feed on the fresh blood of mammals. Their mouthparts are adapted for sucking or siphoning blood. There are only about 500 species of sucking lice. -Order Hemiptera: Most people use the term "bugs" to mean insects; an entomologist uses the term to refer to the order Hemiptera. The Hemiptera are the true bugs, and include cicadas, aphids, and spittlebugs, and others. This is a large group of over 70,000 species worldwide. -Order Thysanoptera: The thrips of order Thysanoptera are small insects that feed on plant tissue. Many are considered agricultural pests for this reason. Some thrips prey on other small insects as well. This order contains about 5,000 species. 
-Order Neuroptera: Commonly called the order of lacewings, this group actually includes a variety of other insects, too: dobsonflies, owlflies, mantidflies, antlions, snakeflies, and alderflies. Insects in the order Neuroptera undergo complete metamorphosis. Worldwide, there are over 5,500 species in this group. -Order Mecoptera: This order includes the scorpionflies, which live in moist, wooded habitats. Scorpionflies are omnivorous in both their larval and adult forms. The larva are caterpillar-like. There are less than 500 described species in the order Mecoptera. -Order Siphonaptera: Pet lovers fear insects in the order Siphonaptera - the fleas. Fleas are blood-sucking ectoparasites that feed on mammals, and rarely, birds. There are well over 2,000 species of fleas in the world. -Order Coleoptera: This group, the beetles and weevils, is the largest order in the insect world, with over 300,000 distinct species known. The order Coleoptera includes well-known families: june beetles, lady beetles, click beetles, and fireflies. All have hardened forewings that fold over the abdomen to protect the delicate hindwings used for flight. -Order Strepsiptera: Insects in this group are parasites of other insects, particularly bees, grasshoppers, and the true bugs. The immature Strepsiptera lies in wait on a flower and quickly burrows into any host insect that comes along. Strepsiptera undergo complete metamorphosis and pupate within the host insect's body. -Order Diptera: Diptera is one of the largest orders, with nearly 100,000 insects named to the order. These are the true flies, mosquitoes, and gnats. Insects in this group have modified hindwings which are used for balance during flight. The forewings function as the propellers for flying. -Order Lepidoptera: The butterflies and moths of the order Lepidoptera comprise the second largest group in the class Insecta. These well-known insects have scaly wings with interesting colors and patterns. You can often identify an insect in this order just by the wing shape and color. -Order Trichoptera: Caddisflies are nocturnal as adults and aquatic when immature. The caddisfly adults have silky hairs on their wings and body, which is key to identifying a Trichoptera member. The larvae spin traps for prey with silk. They also make cases from the silk and other materials that they carry and use for protection. -Order Hymenoptera: The order Hymenoptera includes many of the most common insects - ants, bees, and wasps. The larvae of some wasps cause trees to form galls, which then provides food for the immature wasps. Other wasps are parasitic, living in caterpillars, beetles, or even aphids. This is the third-largest insect order with just over 100,000 species. diff --git a/templates/cassandra-synonym-caching/.env.template b/templates/cassandra-synonym-caching/.env.template deleted file mode 100644 index 251125528ba..00000000000 --- a/templates/cassandra-synonym-caching/.env.template +++ /dev/null @@ -1,13 +0,0 @@ -export OPENAI_API_KEY="..." - -export ASTRA_DB_APPLICATION_TOKEN="AstraCS:..." -export ASTRA_DB_KEYSPACE="..." -export ASTRA_DB_ID="12345678-" - -# UNCOMMENT THE FOLLOWING FOR A CASSANDRA CLUSTER ... -# export USE_CASSANDRA_CLUSTER="1" -# ... then provide these parameters as well: -# export CASSANDRA_KEYSPACE="..." 
-# export CASSANDRA_CONTACT_POINTS="127.0.0.1" # optional -# export CASSANDRA_USERNAME="cassandra" # optional -# export CASSANDRA_PASSWORD="cassandra" # optional diff --git a/templates/cassandra-synonym-caching/README.md b/templates/cassandra-synonym-caching/README.md deleted file mode 100644 index 0251a2a32d1..00000000000 --- a/templates/cassandra-synonym-caching/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Cassandra - synonym caching - -This template provides a simple chain template showcasing the usage -of LLM Caching backed by `Apache Cassandra®` or `Astra DB` through `CQL`. - -## Environment Setup - -To set up your environment, you will need the following: - -- an [Astra](https://astra.datastax.com) Vector Database (free tier is fine!). **You need a [Database Administrator token](https://awesome-astra.github.io/docs/pages/astra/create-token/#c-procedure)**, in particular the string starting with `AstraCS:...`; -- likewise, get your [Database ID](https://awesome-astra.github.io/docs/pages/astra/faq/#where-should-i-find-a-database-identifier) ready, you will have to enter it below; -- an **OpenAI API Key**. (More info [here](https://cassio.org/start_here/#llm-access), note that out-of-the-box this demo supports OpenAI unless you tinker with the code.) - -_Note:_ you can alternatively use a regular Cassandra cluster: to do so, make sure you provide the `USE_CASSANDRA_CLUSTER` entry as shown in `.env.template` and the subsequent environment variables to specify how to connect to it. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package cassandra-synonym-caching -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add cassandra-synonym-caching -``` - -And add the following code to your `server.py` file: -```python -from cassandra_synonym_caching import chain as cassandra_synonym_caching_chain - -add_routes(app, cassandra_synonym_caching_chain, path="/cassandra-synonym-caching") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/cassandra-synonym-caching/playground](http://127.0.0.1:8000/cassandra-synonym-caching/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/cassandra-synonym-caching") -``` - -## Reference - -Stand-alone LangServe template repo: [here](https://github.com/hemidactylus/langserve_cassandra_synonym_caching). 
diff --git a/templates/cassandra-synonym-caching/cassandra_synonym_caching/__init__.py b/templates/cassandra-synonym-caching/cassandra_synonym_caching/__init__.py deleted file mode 100644 index bff2421a988..00000000000 --- a/templates/cassandra-synonym-caching/cassandra_synonym_caching/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -import os - -import cassio -import langchain -from langchain_community.cache import CassandraCache -from langchain_community.chat_models import ChatOpenAI -from langchain_core.messages import BaseMessage -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnableLambda - -use_cassandra = int(os.environ.get("USE_CASSANDRA_CLUSTER", "0")) -if use_cassandra: - from .cassandra_cluster_init import get_cassandra_connection - - session, keyspace = get_cassandra_connection() - cassio.init( - session=session, - keyspace=keyspace, - ) -else: - cassio.init( - token=os.environ["ASTRA_DB_APPLICATION_TOKEN"], - database_id=os.environ["ASTRA_DB_ID"], - keyspace=os.environ.get("ASTRA_DB_KEYSPACE"), - ) - -# inits -langchain.llm_cache = CassandraCache(session=None, keyspace=None) -llm = ChatOpenAI() - - -# custom runnables -def msg_splitter(msg: BaseMessage): - return [w.strip() for w in msg.content.split(",") if w.strip()] - - -# synonym-route preparation -synonym_prompt = ChatPromptTemplate.from_template( - "List up to five comma-separated synonyms of this word: {word}" -) - -chain = synonym_prompt | llm | RunnableLambda(msg_splitter) diff --git a/templates/cassandra-synonym-caching/cassandra_synonym_caching/cassandra_cluster_init.py b/templates/cassandra-synonym-caching/cassandra_synonym_caching/cassandra_cluster_init.py deleted file mode 100644 index 9c4ce5b9b66..00000000000 --- a/templates/cassandra-synonym-caching/cassandra_synonym_caching/cassandra_cluster_init.py +++ /dev/null @@ -1,29 +0,0 @@ -import os - -from cassandra.auth import PlainTextAuthProvider -from cassandra.cluster import Cluster - - -def get_cassandra_connection(): - contact_points = [ - cp.strip() - for cp in os.environ.get("CASSANDRA_CONTACT_POINTS", "").split(",") - if cp.strip() - ] - CASSANDRA_KEYSPACE = os.environ["CASSANDRA_KEYSPACE"] - CASSANDRA_USERNAME = os.environ.get("CASSANDRA_USERNAME") - CASSANDRA_PASSWORD = os.environ.get("CASSANDRA_PASSWORD") - # - if CASSANDRA_USERNAME and CASSANDRA_PASSWORD: - auth_provider = PlainTextAuthProvider( - CASSANDRA_USERNAME, - CASSANDRA_PASSWORD, - ) - else: - auth_provider = None - - c_cluster = Cluster( - contact_points if contact_points else None, auth_provider=auth_provider - ) - session = c_cluster.connect() - return (session, CASSANDRA_KEYSPACE) diff --git a/templates/cassandra-synonym-caching/pyproject.toml b/templates/cassandra-synonym-caching/pyproject.toml deleted file mode 100644 index b70abe39d3f..00000000000 --- a/templates/cassandra-synonym-caching/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "cassandra-synonym-caching" -version = "0.0.1" -description = "LLM caching backed by Apache Cassandra® or Astra DB" -authors = [ - "Stefano Lottini ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -cassio = "^0.1.3" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "cassandra_synonym_caching" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "DataStax" -integrations = ["OpenAI", "Cassandra"] -tags = ["vectordbs"] - 
-[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/chain-of-note-wiki/LICENSE b/templates/chain-of-note-wiki/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/chain-of-note-wiki/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/chain-of-note-wiki/README.md b/templates/chain-of-note-wiki/README.md deleted file mode 100644 index 35eaf12d3be..00000000000 --- a/templates/chain-of-note-wiki/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Chain-of-Note - Wikipedia - -Implements `Chain-of-Note` as described in [CHAIN-OF-NOTE: ENHANCING ROBUSTNESS IN -RETRIEVAL-AUGMENTED LANGUAGE MODELS](https://arxiv.org/pdf/2311.09210.pdf) paper -by Yu, et al. Uses `Wikipedia` for retrieval. - -Check out the prompt being used here https://smith.langchain.com/hub/bagatur/chain-of-note-wiki. - -## Environment Setup - -Uses Anthropic claude-3-sonnet-20240229 chat model. Set Anthropic API key: -```bash -export ANTHROPIC_API_KEY="..." -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U "langchain-cli[serve]" -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package chain-of-note-wiki -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add chain-of-note-wiki -``` - -And add the following code to your `server.py` file: -```python -from chain_of_note_wiki import chain as chain_of_note_wiki_chain - -add_routes(app, chain_of_note_wiki_chain, path="/chain-of-note-wiki") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/chain-of-note-wiki/playground](http://127.0.0.1:8000/chain-of-note-wiki/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/chain-of-note-wiki") -``` diff --git a/templates/chain-of-note-wiki/chain_of_note_wiki/__init__.py b/templates/chain-of-note-wiki/chain_of_note_wiki/__init__.py deleted file mode 100644 index 8d27e2b6823..00000000000 --- a/templates/chain-of-note-wiki/chain_of_note_wiki/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from chain_of_note_wiki.chain import chain - -__all__ = ["chain"] diff --git a/templates/chain-of-note-wiki/chain_of_note_wiki/chain.py b/templates/chain-of-note-wiki/chain_of_note_wiki/chain.py deleted file mode 100644 index 942914390fe..00000000000 --- a/templates/chain-of-note-wiki/chain_of_note_wiki/chain.py +++ /dev/null @@ -1,33 +0,0 @@ -from langchain import hub -from langchain_anthropic import ChatAnthropic -from langchain_community.utilities import WikipediaAPIWrapper -from langchain_core.output_parsers import StrOutputParser -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableLambda, RunnablePassthrough - - -class Question(BaseModel): - __root__: str - - -wiki = WikipediaAPIWrapper(top_k_results=5) -prompt = hub.pull("bagatur/chain-of-note-wiki") - -llm = ChatAnthropic(model="claude-3-sonnet-20240229") - - -def format_docs(docs): - return "\n\n".join( - f"Wikipedia {i+1}:\n{doc.page_content}" for i, doc in enumerate(docs) - ) - - -chain = ( - { - "passages": RunnableLambda(wiki.load) | format_docs, - "question": RunnablePassthrough(), - } - | prompt - | llm - | StrOutputParser() -).with_types(input_type=Question) diff --git a/templates/chain-of-note-wiki/pyproject.toml b/templates/chain-of-note-wiki/pyproject.toml deleted file mode 100644 index 1b5ac8aad96..00000000000 --- a/templates/chain-of-note-wiki/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "chain-of-note-wiki" -version = "0.0.1" -description = "Implementation of Chain of Note prompting for Wikipedia." 
-authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -wikipedia = "^1.4.0" -langchainhub = "^0.1.14" -langchain-anthropic = "^0.1.4" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "chain_of_note_wiki" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["Anthropic", "Wikipedia", "LangChain Hub"] -tags = ["paper", "prompt-hub"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/chain-of-note-wiki/tests/__init__.py b/templates/chain-of-note-wiki/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/chat-bot-feedback/LICENSE b/templates/chat-bot-feedback/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/chat-bot-feedback/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/chat-bot-feedback/README.md b/templates/chat-bot-feedback/README.md deleted file mode 100644 index 27c7bb08199..00000000000 --- a/templates/chat-bot-feedback/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Chatbot feedback - -This template shows how to evaluate your chatbot without explicit user feedback. -It defines a simple chatbot in [chain.py](https://github.com/langchain-ai/langchain/blob/master/templates/chat-bot-feedback/chat_bot_feedback/chain.py) and a custom evaluator that scores bot response effectiveness based on the subsequent user response. You can apply this run evaluator to your own chat bot by calling `with_config` on the chat bot before serving. You can also directly deploy your chat app using this template. - -[Chatbots](https://python.langchain.com/docs/use_cases/chatbots) are one of the most common interfaces for deploying LLMs. The quality of chat bots varies, making continuous development important. But users are reluctant to leave explicit feedback through mechanisms like thumbs-up or thumbs-down buttons. Furthermore, traditional analytics such as "session length" or "conversation length" often lack clarity. However, multi-turn conversations with a chat bot can provide a wealth of information, which we can transform into metrics for fine-tuning, evaluation, and product analytics.
- -Taking [Chat Langchain](https://chat.langchain.com/) as a case study, only about 0.04% of all queries receive explicit feedback. Yet, approximately 70% of the queries are follow-ups to previous questions. A significant portion of these follow-up queries contain useful information we can use to infer the quality of the previous AI response. - - -This template helps solve this "feedback scarcity" problem. Below is an example invocation of this chat bot: - -![Screenshot of a chat bot interaction where the AI responds in a pirate accent to a user asking where their keys are.](./static/chat_interaction.png)["Chat Bot Interaction Example"](https://smith.langchain.com/public/3378daea-133c-4fe8-b4da-0a3044c5dbe8/r?runtab=1) - -When the user responds to this ([link](https://smith.langchain.com/public/a7e2df54-4194-455d-9978-cecd8be0df1e/r)), the response evaluator is invoked, resulting in the following evaluation run: - -![Screenshot of an evaluator run showing the AI's response effectiveness score based on the user's follow-up message expressing frustration.](./static/evaluator.png) ["Chat Bot Evaluator Run"](https://smith.langchain.com/public/534184ee-db8f-4831-a386-3f578145114c/r) - -As shown, the evaluator sees that the user is increasingly frustrated, indicating that the prior response was not effective. - -## LangSmith Feedback - -[LangSmith](https://smith.langchain.com/) is a platform for building production-grade LLM applications. Beyond its debugging and offline evaluation features, LangSmith helps you capture both user and model-assisted feedback to refine your LLM application. This template uses an LLM to generate feedback for your application, which you can use to continuously improve your service. For more examples on collecting feedback using LangSmith, consult the [documentation](https://docs.smith.langchain.com/cookbook/feedback-examples). - -## Evaluator Implementation - -The user feedback is inferred by a custom `RunEvaluator`. This evaluator is called using the `EvaluatorCallbackHandler`, which runs it in a separate thread to avoid interfering with the chat bot's runtime. You can use this custom evaluator on any compatible chat bot by calling the following function on your LangChain object: - -```python -my_chain.with_config( - callbacks=[ - EvaluatorCallbackHandler( - evaluators=[ - ResponseEffectivenessEvaluator(evaluate_response_effectiveness) - ] - ) - ], -) -``` - -The evaluator instructs an LLM, specifically `gpt-3.5-turbo`, to evaluate the AI's most recent chat message based on the user's follow-up response. It generates a score and accompanying reasoning that is converted to feedback in LangSmith, applied to the value provided as the `last_run_id`. - -The prompt used within the LLM [is available on the hub](https://smith.langchain.com/hub/wfh/response-effectiveness). Feel free to customize it with things like additional app context (such as the goal of the app or the types of questions it should respond to) or "symptoms" you'd like the LLM to focus on. This evaluator also utilizes OpenAI's function-calling API to ensure a more consistent, structured output for the grade. - -## Environment Variables - -Ensure that `OPENAI_API_KEY` is set to use OpenAI models. Also, configure LangSmith by setting your `LANGSMITH_API_KEY`. - -```bash -export OPENAI_API_KEY=sk-... -export LANGSMITH_API_KEY=...
-export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_PROJECT=my-project # Set to the project you want to save to -``` - -## Usage - -If deploying via `LangServe`, we recommend configuring the server to return callback events as well. This will ensure the backend traces are included in whatever traces you generate using the `RemoteRunnable`. - -```python -from chat_bot_feedback.chain import chain - -add_routes(app, chain, path="/chat-bot-feedback", include_callback_events=True) -``` - -With the server running, you can use the following code snippet to stream the chat bot responses for a 2 turn conversation. - -```python -from functools import partial -from typing import Dict, Optional, Callable, List -from langserve import RemoteRunnable -from langchain.callbacks.manager import tracing_v2_enabled -from langchain_core.messages import BaseMessage, AIMessage, HumanMessage - -# Update with the URL provided by your LangServe server -chain = RemoteRunnable("http://127.0.0.1:8031/chat-bot-feedback") - -def stream_content( - text: str, - chat_history: Optional[List[BaseMessage]] = None, - last_run_id: Optional[str] = None, - on_chunk: Callable = None, -): - results = [] - with tracing_v2_enabled() as cb: - for chunk in chain.stream( - {"text": text, "chat_history": chat_history, "last_run_id": last_run_id}, - ): - on_chunk(chunk) - results.append(chunk) - last_run_id = cb.latest_run.id if cb.latest_run else None - return last_run_id, "".join(results) - -chat_history = [] -text = "Where are my keys?" -last_run_id, response_message = stream_content(text, on_chunk=partial(print, end="")) -print() -chat_history.extend([HumanMessage(content=text), AIMessage(content=response_message)]) -text = "I CAN'T FIND THEM ANYWHERE" # The previous response will likely receive a low score, -# as the user's frustration appears to be escalating. -last_run_id, response_message = stream_content( - text, - chat_history=chat_history, - last_run_id=str(last_run_id), - on_chunk=partial(print, end=""), -) -print() -chat_history.extend([HumanMessage(content=text), AIMessage(content=response_message)]) -``` - -This uses the `tracing_v2_enabled` callback manager to get the run ID of the call, which we provide in subsequent calls in the same chat thread, so the evaluator can assign feedback to the appropriate trace. - - -## Conclusion - -This template provides a simple chat bot definition you can directly deploy using LangServe. It defines a custom evaluator to log evaluation feedback for the bot without any explicit user ratings. This is an effective way to augment your analytics and to better select data points for fine-tuning and evaluation. 
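As a simpler, non-streaming variant of the client example above, the following sketch exercises the same feedback loop with plain `invoke` calls. It assumes the server runs at the address used in the README, that chat history is passed as `HumanMessage`/`AIMessage` objects as in the streaming snippet, and that the placeholder run ID is replaced with the ID captured for the previous turn (for example via `tracing_v2_enabled`).

```python
from langchain_core.messages import AIMessage, HumanMessage
from langserve import RemoteRunnable

chain = RemoteRunnable("http://127.0.0.1:8031/chat-bot-feedback")

# Turn 1: there is no chat history yet, so the response evaluator has nothing
# to grade and is skipped.
first_reply = chain.invoke(
    {"text": "Where are my keys?", "chat_history": [], "last_run_id": None}
)

# Turn 2: passing the prior turn plus the run ID captured for turn 1 lets the
# ResponseEffectivenessEvaluator score the first reply based on this follow-up.
second_reply = chain.invoke(
    {
        "text": "I CAN'T FIND THEM ANYWHERE",
        "chat_history": [
            HumanMessage(content="Where are my keys?"),
            AIMessage(content=first_reply),
        ],
        "last_run_id": "<run id captured from turn 1>",
    }
)
```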
\ No newline at end of file diff --git a/templates/chat-bot-feedback/chat_bot_feedback/__init__.py b/templates/chat-bot-feedback/chat_bot_feedback/__init__.py deleted file mode 100644 index e51bb07ae8b..00000000000 --- a/templates/chat-bot-feedback/chat_bot_feedback/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from chat_bot_feedback.chain import chain - -__all__ = ["chain"] diff --git a/templates/chat-bot-feedback/chat_bot_feedback/chain.py b/templates/chat-bot-feedback/chat_bot_feedback/chain.py deleted file mode 100644 index c8759717f3d..00000000000 --- a/templates/chat-bot-feedback/chat_bot_feedback/chain.py +++ /dev/null @@ -1,183 +0,0 @@ -from __future__ import annotations - -from typing import List, Optional - -from langchain import hub -from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler -from langchain.callbacks.tracers.schemas import Run -from langchain.schema import ( - AIMessage, - BaseMessage, - HumanMessage, - StrOutputParser, - get_buffer_string, -) -from langchain_community.chat_models import ChatOpenAI -from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import Runnable -from langsmith.evaluation import EvaluationResult, RunEvaluator -from langsmith.schemas import Example - -############################################################################### -# | Chat Bot Evaluator Definition -# | This section defines an evaluator that evaluates any chat bot -# | without explicit user feedback. It formats the dialog up to -# | the current message and then instructs an LLM to grade the last AI response -# | based on the subsequent user response. If no chat history is present, -# V the evaluator is not called. -############################################################################### - - -class ResponseEffectiveness(BaseModel): - """Score the effectiveness of the AI chat bot response.""" - - reasoning: str = Field( - ..., - description="Explanation for the score.", - ) - score: int = Field( - ..., - min=0, - max=5, - description="Effectiveness of AI's final response.", - ) - - -def format_messages(input: dict) -> List[BaseMessage]: - """Format the messages for the evaluator.""" - chat_history = input.get("chat_history") or [] - results = [] - for message in chat_history: - if message["type"] == "human": - results.append(HumanMessage.parse_obj(message)) - else: - results.append(AIMessage.parse_obj(message)) - return results - - -def format_dialog(input: dict) -> dict: - """Format messages and convert to a single string.""" - chat_history = format_messages(input) - formatted_dialog = get_buffer_string(chat_history) + f"\nhuman: {input['text']}" - return {"dialog": formatted_dialog} - - -def normalize_score(response: dict) -> dict: - """Normalize the score to be between 0 and 1.""" - response["score"] = int(response["score"]) / 5 - return response - - -# To view the prompt in the playground: https://smith.langchain.com/hub/wfh/response-effectiveness -evaluation_prompt = hub.pull("wfh/response-effectiveness") -evaluate_response_effectiveness = ( - format_dialog - | evaluation_prompt - # bind_functions formats the schema for the OpenAI function - # calling endpoint, which returns more reliable structured data. 
- | ChatOpenAI(model="gpt-3.5-turbo").bind_functions( - functions=[ResponseEffectiveness], - function_call="ResponseEffectiveness", - ) - # Convert the model's output to a dict - | JsonOutputFunctionsParser(args_only=True) - | normalize_score -) - - -class ResponseEffectivenessEvaluator(RunEvaluator): - """Evaluate the chat bot based the subsequent user responses.""" - - def __init__(self, evaluator_runnable: Runnable) -> None: - super().__init__() - self.runnable = evaluator_runnable - - def evaluate_run( - self, run: Run, example: Optional[Example] = None - ) -> EvaluationResult: - # This evaluator grades the AI's PREVIOUS response. - # If no chat history is present, there isn't anything to evaluate - # (it's the user's first message) - if not run.inputs.get("chat_history"): - return EvaluationResult( - key="response_effectiveness", comment="No chat history present." - ) - # This only occurs if the client isn't correctly sending the run IDs - # of the previous calls. - elif "last_run_id" not in run.inputs: - return EvaluationResult( - key="response_effectiveness", comment="No last run ID present." - ) - # Call the LLM to evaluate the response - eval_grade: Optional[dict] = self.runnable.invoke(run.inputs) - target_run_id = run.inputs["last_run_id"] - return EvaluationResult( - **eval_grade, - key="response_effectiveness", - target_run_id=target_run_id, # Requires langsmith >= 0.0.54 - ) - - -############################################################################### -# | The chat bot definition -# | This is what is actually exposed by LangServe in the API -# | It can be any chain that accepts the ChainInput schema and returns a str -# | all that is required is the with_config() call at the end to add the -# V evaluators as "listeners" to the chain. -# ############################################################################ - - -class ChainInput(BaseModel): - """Input for the chat bot.""" - - chat_history: Optional[List[BaseMessage]] = Field( - description="Previous chat messages." - ) - text: str = Field(..., description="User's latest query.") - last_run_id: Optional[str] = Field("", description="Run ID of the last run.") - - -_prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are a helpful assistant who speaks like a pirate", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{text}"), - ] -) -_model = ChatOpenAI() - - -def format_chat_history(chain_input: dict) -> dict: - messages = format_messages(chain_input) - - return { - "chat_history": messages, - "text": chain_input.get("text"), - } - - -# if you update the name of this, you MUST also update ../pyproject.toml -# with the new `tool.langserve.export_attr` -chain = ( - (format_chat_history | _prompt | _model | StrOutputParser()) - # This is to add the evaluators as "listeners" - # and to customize the name of the chain. - # Any chain that accepts a compatible input type works here. 
- .with_config( - run_name="ChatBot", - callbacks=[ - EvaluatorCallbackHandler( - evaluators=[ - ResponseEffectivenessEvaluator(evaluate_response_effectiveness) - ] - ) - ], - ) -) - -chain = chain.with_types(input_type=ChainInput) diff --git a/templates/chat-bot-feedback/pyproject.toml b/templates/chat-bot-feedback/pyproject.toml deleted file mode 100644 index fbc7eec2af4..00000000000 --- a/templates/chat-bot-feedback/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "chat-bot-feedback" -version = "0.0.1" -description = "Evaluate your chatbot without human feedback" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -langsmith = ">=0.0.54" -langchainhub = ">=0.1.13" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "chat_bot_feedback.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "evaluation" -author = "LangChain" -integrations = ["OpenAI", "LangSmith"] -tags = ["langsmith"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/chat-bot-feedback/static/chat_interaction.png b/templates/chat-bot-feedback/static/chat_interaction.png deleted file mode 100644 index ddebd59b8d9..00000000000 Binary files a/templates/chat-bot-feedback/static/chat_interaction.png and /dev/null differ diff --git a/templates/chat-bot-feedback/static/evaluator.png b/templates/chat-bot-feedback/static/evaluator.png deleted file mode 100644 index c516f59d181..00000000000 Binary files a/templates/chat-bot-feedback/static/evaluator.png and /dev/null differ diff --git a/templates/chat-bot-feedback/tests/__init__.py b/templates/chat-bot-feedback/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/cohere-librarian/README.md b/templates/cohere-librarian/README.md deleted file mode 100644 index 229eebdf6b3..00000000000 --- a/templates/cohere-librarian/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Cohere - Librarian - -This template turns `Cohere` into a librarian. - -It demonstrates the use of: -- a router to switch between chains that handle different things -- a vector database with Cohere embeddings -- a chat bot that has a prompt with some information about the library -- a RAG chatbot that has access to the internet. - -For a fuller demo of the book recommendation, consider replacing `books_with_blurbs.csv` with a larger sample from the following dataset: https://www.kaggle.com/datasets/jdobrow/57000-books-with-metadata-and-blurbs/ . - -## Environment Setup - -Set the `COHERE_API_KEY` environment variable to access the Cohere models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package cohere-librarian -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add cohere-librarian -``` - -And add the following code to your `server.py` file: -```python -from cohere_librarian.chain import chain as cohere_librarian_chain - -add_routes(app, cohere_librarian_chain, path="/cohere-librarian") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. 
-You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://localhost:8000/docs](http://localhost:8000/docs) -We can access the playground at [http://localhost:8000/cohere-librarian/playground](http://localhost:8000/cohere-librarian/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/cohere-librarian") -``` diff --git a/templates/cohere-librarian/cohere_librarian/__init__.py b/templates/cohere-librarian/cohere_librarian/__init__.py deleted file mode 100644 index b91b515e409..00000000000 --- a/templates/cohere-librarian/cohere_librarian/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .chain import chain - -__all__ = ["chain"] diff --git a/templates/cohere-librarian/cohere_librarian/blurb_matcher.py b/templates/cohere-librarian/cohere_librarian/blurb_matcher.py deleted file mode 100644 index 9a446801c86..00000000000 --- a/templates/cohere-librarian/cohere_librarian/blurb_matcher.py +++ /dev/null @@ -1,49 +0,0 @@ -import csv - -from langchain.chains.question_answering import load_qa_chain -from langchain_chroma import Chroma -from langchain_community.embeddings import CohereEmbeddings -from langchain_core.prompts import PromptTemplate - -from .chat import chat - -csv_file = open("data/books_with_blurbs.csv", "r") -csv_reader = csv.reader(csv_file) -csv_data = list(csv_reader) -parsed_data = [ - { - "id": x[0], - "title": x[1], - "author": x[2], - "year": x[3], - "publisher": x[4], - "blurb": x[5], - } - for x in csv_data -] -parsed_data[1] - -embeddings = CohereEmbeddings(model="embed-english-v3.0") - -docsearch = Chroma.from_texts( - [x["title"] for x in parsed_data], embeddings, metadatas=parsed_data -).as_retriever() - - -prompt_template = """ -{context} - -Use the book reccommendations to suggest books for the user to read. -Only use the titles of the books, do not make up titles. Format the response as -a bulleted list prefixed by a relevant message. 
- -User: {message}""" - -PROMPT = PromptTemplate( - template=prompt_template, input_variables=["context", "message"] -) - -book_rec_chain = { - "input_documents": lambda x: docsearch.invoke(x["message"]), - "message": lambda x: x["message"], -} | load_qa_chain(chat, chain_type="stuff", prompt=PROMPT) diff --git a/templates/cohere-librarian/cohere_librarian/chain.py b/templates/cohere-librarian/cohere_librarian/chain.py deleted file mode 100644 index 90aeda71329..00000000000 --- a/templates/cohere-librarian/cohere_librarian/chain.py +++ /dev/null @@ -1,10 +0,0 @@ -from langchain.pydantic_v1 import BaseModel - -from .router import branched_chain - - -class ChainInput(BaseModel): - message: str - - -chain = branched_chain.with_types(input_type=ChainInput) diff --git a/templates/cohere-librarian/cohere_librarian/chat.py b/templates/cohere-librarian/cohere_librarian/chat.py deleted file mode 100644 index cbd37e2816a..00000000000 --- a/templates/cohere-librarian/cohere_librarian/chat.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain_community.llms import Cohere - -chat = Cohere() diff --git a/templates/cohere-librarian/cohere_librarian/library_info.py b/templates/cohere-librarian/cohere_librarian/library_info.py deleted file mode 100644 index 07ad4020e5e..00000000000 --- a/templates/cohere-librarian/cohere_librarian/library_info.py +++ /dev/null @@ -1,27 +0,0 @@ -from langchain_core.prompts import ( - ChatPromptTemplate, - HumanMessagePromptTemplate, - SystemMessagePromptTemplate, -) - -from .chat import chat - -librarian_prompt = ChatPromptTemplate.from_messages( - [ - SystemMessagePromptTemplate.from_template( - """ - You are a librarian at cohere community library. Your job is to - help recommend people books to read based on their interests and - preferences. You also give information about the library. - - The library opens at 8am and closes at 9pm daily. It is closed on - Sundays. - - Please answer the following message: - """ - ), - HumanMessagePromptTemplate.from_template("{message}"), - ] -) - -library_info = librarian_prompt | chat diff --git a/templates/cohere-librarian/cohere_librarian/rag.py b/templates/cohere-librarian/cohere_librarian/rag.py deleted file mode 100644 index da15f2cda14..00000000000 --- a/templates/cohere-librarian/cohere_librarian/rag.py +++ /dev/null @@ -1,16 +0,0 @@ -from langchain.retrievers import CohereRagRetriever -from langchain_community.chat_models import ChatCohere - -rag = CohereRagRetriever(llm=ChatCohere()) - - -def get_docs_message(message): - docs = rag.invoke(message) - message_doc = next( - (x for x in docs if x.metadata.get("type") == "model_response"), None - ) - return message_doc.page_content - - -def librarian_rag(x): - return get_docs_message(x["message"]) diff --git a/templates/cohere-librarian/cohere_librarian/router.py b/templates/cohere-librarian/cohere_librarian/router.py deleted file mode 100644 index b4f0db2bb2d..00000000000 --- a/templates/cohere-librarian/cohere_librarian/router.py +++ /dev/null @@ -1,43 +0,0 @@ -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnableBranch - -from .blurb_matcher import book_rec_chain -from .chat import chat -from .library_info import library_info -from .rag import librarian_rag - -chain = ( - ChatPromptTemplate.from_template( - """Given the user message below, -classify it as either being about `recommendation`, `library` or `other`. - -'{message}' - -Respond with just one word. 
-For example, if the message is about a book recommendation,respond with -`recommendation`. -""" - ) - | chat - | StrOutputParser() -) - - -def extract_op_field(x): - return x["output_text"] - - -branch = RunnableBranch( - ( - lambda x: "recommendation" in x["topic"].lower(), - book_rec_chain | extract_op_field, - ), - ( - lambda x: "library" in x["topic"].lower(), - {"message": lambda x: x["message"]} | library_info, - ), - librarian_rag, -) - -branched_chain = {"topic": chain, "message": lambda x: x["message"]} | branch diff --git a/templates/cohere-librarian/data/__init__.py b/templates/cohere-librarian/data/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/cohere-librarian/data/books_with_blurbs.csv b/templates/cohere-librarian/data/books_with_blurbs.csv deleted file mode 100644 index b757130a97e..00000000000 --- a/templates/cohere-librarian/data/books_with_blurbs.csv +++ /dev/null @@ -1,21 +0,0 @@ -ISBN,Title,Author,Year,Publisher,Blurb, ** grab this file from https://www.kaggle.com/datasets/jdobrow/57000-books-with-metadata-and-blurbs/ ** -0060973129,Decision in Normandy,Carlo D'Este,1991,HarperPerennial,"Here, for the first time in paperback, is an outstanding military history that offers a dramatic new perspective on the Allied campaign that began with the invasion of the D-Day beaches of Normandy. Nationa advertising in Military History." -0374157065,Flu: The Story of the Great Influenza Pandemic of 1918 and the Search for the Virus That Caused It,Gina Bari Kolata,1999,Farrar Straus Giroux,"The fascinating, true story of the world's deadliest disease. ,In 1918, the Great Flu Epidemic felled the young and healthy virtually overnight. An estimated forty million people died as the epidemic raged. Children were left orphaned and families were devastated. As many American soldiers were killed by the 1918 flu as were killed in battle during World War I. And no area of the globe was safe. Eskimos living in remote outposts in the frozen tundra were sickened and killed by the flu in such numbers that entire villages were wiped out. ,Scientists have recently rediscovered shards of the flu virus frozen in Alaska and preserved in scraps of tissue in a government warehouse. Gina Kolata, an acclaimed reporter for ""The New York Times,"" unravels the mystery of this lethal virus with the high drama of a great adventure story. Delving into the history of the flu and previous epidemics, detailing the science and the latest understanding of this mortal disease, Kolata addresses the prospects for a great epidemic recurring, and, most important, what can be done to prevent it." -0399135782,The Kitchen God's Wife,Amy Tan,1991,Putnam Pub Group,"Winnie and Helen have kept each others worst secrets for more than fifty years. Now, because she believes she is dying, Helen wants to expose everything. And Winnie angrily determines that she must be the one to tell her daughter, Pearl, about the past—including the terrible truth even Helen does not know. And so begins Winnie's story of her life on a small island outside Shanghai in the 1920s, and other places in China during World War II, and traces the happy and desperate events that led to Winnie's coming to America in 1949." -0425176428,What If?: The World's Foremost Military Historians Imagine What Might Have Been,Robert Cowley,2000,Berkley Publishing Group,"Historians and inquisitive laymen alike love to ponder the dramatic what-its of history. 
In these twenty never-before-published essays, some of the keenest minds of our time ask the big, tantalizing questions:, Where might we be if history had not unfolded the way it did? , Why, how, and when was our fortune made real? ,The answers are surprising, sometimes frightening, and always entertaining.." -1881320189,Goodbye to the Buttermilk Sky,Julia Oliver,1994,River City Pub,"This highly praised first novel by fiction writer Julia Oliver is the story of one young woman's struggle with fidelity and identity in depression-era rural Alabama. A beautifully narrated novel of time and place, Goodbye to the Buttermilk Sky re-creates a southern summer when the depression and the boll weevil turned hopes to dust. With the extraordinary talent to make the reader see the Ball canning jars on the kitchen table, hear the clicks on the party line, and feel the bittersweet moments of 20-year-old Callie Tatum's first experiences with adult desire, Oliver portrays a young wife’s increasingly dangerous infidelity with cinematic precision and palpable suspense. Soon, with only her housekeeper as a confidant, Callie breaks society’s rules about race and class as well as her marriage vows. The result is a chain of events that will lead to tragedy and a woman’s stunning decision about love, passion, and the future of her life.Originally published in cloth in 1994, Goodbye to the Buttermilk Sky received considerable attention nationally and became a featured selection of the Quality Paperback Book Club. Its inclusion in the Deep South Books series from The University of Alabama Press will extend the book’s reach and its life, while offering new readers access to the enthralling story.The richly drawn, fully developed characters of Buttermilk Sky live on in the reader’s mind long after the book has been finished. Against the emotional and physical isolation of rural Alabama in 1938, the threads of family ties, whispered gossip, old secrets, and unfulfilled dreams weave a powerful, evocative story that captivates its reader until the very last word." -0440234743,The Testament,John Grisham,1999,Dell,"In a plush Virginia office, a rich, angry old man is furiously rewriting his will. With his death just hours away, Troy Phelan wants to send a message to his children, his ex-wives, and his minions, a message that will touch off a vicious legal battle and transform dozens of lives.,Because Troy Phelan's new will names a sole surprise heir to his eleven-billion-dollar fortune: a mysterious woman named Rachel Lane, a missionary living deep in the jungles of Brazil.,Enter the lawyers. Nate O'Riley is fresh out of rehab, a disgraced corporate attorney handpicked for his last job: to find Rachel Lane at any cost. As Phelan's family circles like vultures in D.C., Nate is crashing through the Brazilian jungle, entering a world where money means nothing, where death is just one misstep away, and where a woman - pursued by enemies and friends alike - holds a stunning surprise of her own." -0452264464,Beloved (Plume Contemporary Fiction),Toni Morrison,1994,Plume,"In the troubled years following the Civil War, the spirit of a murdered child haunts the Ohio home of a former slave. This angry, destructive ghost breaks mirrors, leaves its fingerprints in cake icing, and generally makes life difficult for Sethe and her family; nevertheless, the woman finds the haunting oddly comforting for the spirit is that of her own dead baby, never named, thought of only as Beloved. 
A dead child, a runaway slave, a terrible secret--these are the central concerns of Toni Morrison's Pulitzer Prize-winning Beloved. Morrison, a Nobel laureate, has written many fine novels, including Song of Solomon, The Bluest Eye, and Paradise--but Beloved is arguably her best. To modern readers, antebellum slavery is a subject so familiar that it is almost impossible to render its horrors in a way that seems neither clichéd nor melodramatic. Rapes, beatings, murders, and mutilations are recounted here, but they belong to characters so precisely drawn that the tragedy remains individual, terrifying to us because it is terrifying to the sufferer. And Morrison is master of the telling detail: in the bit, for example, a punishing piece of headgear used to discipline recalcitrant slaves, she manages to encapsulate all of slavery's many cruelties into one apt symbol--a device that deprives its wearer of speech. ""Days after it was taken out, goose fat was rubbed on the corners of the mouth but nothing to soothe the tongue or take the wildness out of the eye."" Most importantly, the language here, while often lyrical, is never overheated. Even as she recalls the cruelties visited upon her while a slave, Sethe is evocative without being overemotional: ""Add my husband to it, watching, above me in the loft--hiding close by--the one place he thought no one would look for him, looking down on what I couldn't look at at all. And not stopping them--looking and letting it happen.... And if he was that broken then, then he is also and certainly dead now."" Even the supernatural is treated as an ordinary fact of life: ""Not a house in the country ain't packed to its rafters with some dead Negro's grief. We lucky this ghost is a baby,"" comments Sethe's mother-in-law. Beloved is a dense, complex novel that yields up its secrets one by one. As Morrison takes us deeper into Sethe's history and her memories, the horrifying circumstances of her baby's death start to make terrible sense. And as past meets present in the shape of a mysterious young woman about the same age as Sethe's daughter would have been, the narrative builds inexorably to its powerful, painful conclusion. Beloved may well be the defining novel of slavery in America, the one that all others will be measured by. --Alix Wilber" -0609804618,Our Dumb Century: The Onion Presents 100 Years of Headlines from America's Finest News Source,The Onion,1999,Three Rivers Press," has quickly become the world's most popular humor publication, misinforming half a million readers a week with one-of-a-kind social satire both in print (on newsstands nationwide) and online from its remote office in Madison, Wisconsin.,Witness the march of history as Editor-in-Chief Scott Dikkers and , award-winning writing staff present the twentieth century like you've never seen it before." -1841721522,New Vegetarian: Bold and Beautiful Recipes for Every Occasion,Celia Brooks Brown,2001,Ryland Peters & Small Ltd,"Filled with fresh and eclectic recipes by Celia Brooks Brown -- one of the talented team of chefs at Books for Cooks, the world-famous bookshop-restaurant in London's Notting Hill -- New Vegetarian presents an innovative approach to vegetarian cooking. No longer the exclusive domain of vegetarians, meat-free food is now appreciated by all for its bright and assertive flavors, its marvelous colors, and its easy-to-make convenience. 
Celia gives sensible advice on choosing and preparing the major vegetarian ingredients, then presents 50 original and stylish recipes -- ranging from quick breakfasts to party foods, from salads to sweet treats -- all photographed by Philip Webb. Whether it is burritos bursting with flavor or Thai Glazed Vegetable Skewers fresh from the barbecue, Celia's enthusiasm and imagination will tempt even the most confirmed carnivore." -0439095026,Tell Me This Isn't Happening,Robynn Clairday,1999,Scholastic,"Robynn Clairday interviewed kids throughout America to collect these real tales of awkward situations. From hilarious to poignant to painful, these stories are accompanied by advice about dealing with embarrassment and finding grace under pressure." -0971880107,Wild Animus,Rich Shapero,2004,Too Far,"Newly graduated from college, Sam Altman is gripped by an inexplicable urge to lose himself in the wilderness and teams up with an enigmatic young woman who seems bent on helping him realize his dreams." -0345402871,Airframe,Michael Crichton,1997,Ballantine Books,"Three passengers are dead. Fifty-six are injured. The interior cabin virtually destroyed. But the pilot manages to land the plane. . . .,At a moment when the issue of safety and death in the skies is paramount in the public mind, a lethal midair disaster aboard a commercial twin-jet airliner bound from Hong Kong to Denver triggers a pressured and frantic investigation.,AIRFRAME is nonstop reading: the extraordinary mixture of super suspense and authentic information on a subject of compelling interest that has been a Crichton landmark since The Andromeda Strain.,(back cover)" -0345417623,Timeline,MICHAEL CRICHTON,2000,Ballantine Books,"In an Arizona desert, a man wanders in a daze, speaking words that make no sense. Within twenty-four hours he is dead, his body swiftly cremated by his only known associates. Halfway around the world, archaeologists make a shocking discovery at a medieval site. Suddenly they are swept off to the headquarters of a secretive multinational corporation that has developed an astounding technology. Now this group is about to get a chance not to study the past but to enter it. And with history opened up to the present, the dead awakened to the living, these men and women will soon find themselves fighting for their very survival – six hundred years ago." -0684823802,OUT OF THE SILENT PLANET,C.S. Lewis,1996,Scribner,"Dr. Ransom is abducted by a megalomaniacal physicist and taken via space ship to the planet Malacandra (Mars). There, Dr. Ransom finds Malacandra similar to, and yet distinct from, Earth." -0375759778,Prague : A Novel,ARTHUR PHILLIPS,2003,Random House Trade Paperbacks,"A novel of startling scope and ambition, , depicts an intentionally lost Lost Generation as it follows five American expats who come to Budapest in the early 1990s to seek their fortune. They harbor the vague suspicion that their counterparts in Prague have it better, but still they hope to find adventure, inspiration, a gold rush, or history in the making." -0425163091,Chocolate Jesus,Stephan Jaramillo,1998,Berkley Publishing Group,"The deliciously funny new novel from the acclaimed author of Going Postal.Now, in Chocolate Jesus, Jaramillo introduces Sydney Corbet, a self-proclaimed JFK assassination scholar who has just come up with the idea of a lifetime -- Chocolate Jesus. 
This semisweet chocolate Messiah offers salvation for many, especially the nearly extinct Bea's Candies, whose Easter promotion just might turn things around for the company. Everyone knows that the Easter Bunny can't compete with the King of Kings. But no one counted on the Reverend Willy Domingo and his vegetarian fitness zealots, who gather on a crusade against a graven image of Christ that consists of nothing more than empty calories...,""Capture(s) the mood and voice of a certain distinctive type of apprentice grown-up"". -- New York Times Book Review,""Enormously entertaining moments, reverberating with a cynical wit underscored...with heartbreaking poignancy and disillusion...haunting, honest"". -- St. Petersburg Times,""A very funny book... Jaramillo deserves credit for a crisp and funny style, a dead-on ear for dialogue, but more impressive, an emotional honesty that's rare and wonderful"". -- Rocky Mountain News" -0375406328,Lying Awake,Mark Salzman,2000,Alfred A. Knopf,"In a Carmelite monastery outside present-day Los Angeles, life goes on in a manner virtually un-changed for centuries. Sister John of the Cross has spent years there in the service of God. And there, she alone experiences visions of such dazzling power and insight that she is looked upon as a spiritual master. ,But Sister John's visions are accompanied by powerful headaches, and when a doctor reveals that they may be dangerous, she faces a devastating choice. For if her spiritual gifts are symptoms of illness rather than grace, will a ""cure"" mean the end of her visions and a soul once again dry and searching?,This is the dilemma at the heart of Mark Salzman's spare, astonishing new novel. With extraordinary dexterity, the author of the best-selling , and , brings to life the mysterious world of the cloister, giving us a brilliantly realized portrait of women today drawn to the rigors of an ancient religious life, and of one woman's trial at the perilous intersection of faith and reason. , is a novel of remarkable empathy and imagination, and Mark Salzman's most provocative work to date." -0446310786,To Kill a Mockingbird,Harper Lee,1988,Little Brown & Company,"The unforgettable novel of a childhood in a sleepy Southern town and the crisis of conscience that rocked it, To Kill A Mockingbird became both an instant bestseller and a critical success when it was first published in 1960. It went on to win the Pulitzer Prize in 1961 and was later made into an Academy Award-winning film, also a classic.,Compassionate, dramatic, and deeply moving, To Kill A Mockingbird takes readers to the roots of human behavior - to innocence and experience, kindness and cruelty, love and hatred, humor and pathos. Now with over 18 million copies in print and translated into forty languages, this regional story by a young Alabama woman claims universal appeal. Harper Lee always considered her book to be a simple love story. Today it is regarded as a masterpiece of American literature." -0449005615,Seabiscuit: An American Legend,LAURA HILLENBRAND,2002,Ballantine Books,"Seabiscuit was one of the most electrifying and popular attractions in sports history and the single biggest newsmaker in the world in 1938, receiving more coverage than FDR, Hitler, or Mussolini. But his success was a surprise to the racing establishment, which had written off the crooked-legged racehorse with the sad tail. 
Three men changed Seabiscuit’s fortunes:,Charles Howard was a onetime bicycle repairman who introduced the automobile to the western United States and became an overnight millionaire. When he needed a trainer for his new racehorses, he hired Tom Smith, a mysterious mustang breaker from the Colorado plains. Smith urged Howard to buy Seabiscuit for a bargain-basement price, then hired as his jockey Red Pollard, a failed boxer who was blind in one eye, half-crippled, and prone to quoting passages from Ralph Waldo Emerson. Over four years, these unlikely partners survived a phenomenal run of bad fortune, conspiracy, and severe injury to transform Seabiscuit from a neurotic, pathologically indolent also-ran into an American sports icon. ,Author Laura Hillenbrand brilliantly re-creates a universal underdog story, one that proves life is a horse race." -0060168013,Pigs in Heaven,Barbara Kingsolver,1993,Harpercollins,"Brings together Taylor, Turtle and Alice from The Bean Trees together with a new cast - Jax, Barbie Sugar Boss, Oklahoma and Annawake Fourkiller. When six-year-old Turtle witnesses a freak accident at the Hoover Dam, her insistence, and her mother's belief in her, leads to a man's rescue." \ No newline at end of file diff --git a/templates/cohere-librarian/pyproject.toml b/templates/cohere-librarian/pyproject.toml deleted file mode 100644 index 5b8485f1ca3..00000000000 --- a/templates/cohere-librarian/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "cohere-librarian" -version = "0.0.1" -description = "Get started with a simple template that acts as a librarian" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -cohere = "^4.37" -langchain-chroma = "^0.1.2" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "cohere_librarian" -export_attr = "chain" - -[tool.templates-hub] -use-case = "chatbot" -author = "Cohere" -integrations = ["Cohere"] -tags = ["getting-started"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/csv-agent/README.md b/templates/csv-agent/README.md deleted file mode 100644 index ae869ea8095..00000000000 --- a/templates/csv-agent/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# CSV agent - -This template uses a [CSV agent](https://python.langchain.com/docs/integrations/toolkits/csv) with tools (Python REPL) and memory (vectorstore) for interaction (question-answering) with text data. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -To set up the environment, the `ingest.py` script should be run to handle the ingestion into a vectorstore. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package csv-agent -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add csv-agent -``` - -And add the following code to your `server.py` file: -```python -from csv_agent.agent import agent_executor as csv_agent_chain - -add_routes(app, csv_agent_chain, path="/csv-agent") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. 
-You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/csv-agent/playground](http://127.0.0.1:8000/csv-agent/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/csv-agent") -``` diff --git a/templates/csv-agent/csv_agent/__init__.py b/templates/csv-agent/csv_agent/__init__.py deleted file mode 100644 index 11d9cca133e..00000000000 --- a/templates/csv-agent/csv_agent/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from csv_agent.agent import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/csv-agent/csv_agent/agent.py b/templates/csv-agent/csv_agent/agent.py deleted file mode 100644 index 72b73c683b1..00000000000 --- a/templates/csv-agent/csv_agent/agent.py +++ /dev/null @@ -1,84 +0,0 @@ -from pathlib import Path - -import pandas as pd -from langchain.agents import AgentExecutor, OpenAIFunctionsAgent -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import FAISS -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.tools.retriever import create_retriever_tool -from langchain_experimental.tools import PythonAstREPLTool - -MAIN_DIR = Path(__file__).parents[1] - -pd.set_option("display.max_rows", 20) -pd.set_option("display.max_columns", 20) - -embedding_model = OpenAIEmbeddings() -vectorstore = FAISS.load_local(MAIN_DIR / "titanic_data", embedding_model) -retriever_tool = create_retriever_tool( - vectorstore.as_retriever(), "person_name_search", "Search for a person by name" -) - - -TEMPLATE = """You are working with a pandas dataframe in Python. The name of the dataframe is `df`. -It is important to understand the attributes of the dataframe before working with it. This is the result of running `df.head().to_markdown()` - - -{dhead} - - -You are not meant to use only these rows to answer questions - they are meant as a way of telling you about the shape and schema of the dataframe. -You also do not have use only the information here to answer questions - you can run intermediate queries to do exporatory data analysis to give you more information as needed. - -You have a tool called `person_name_search` through which you can lookup a person by name and find the records corresponding to people with similar name as the query. -You should only really use this if your search term contains a persons name. Otherwise, try to solve it with code. - -For example: - -How old is Jane? -Use `person_name_search` since you can use the query `Jane` - -Who has id 320 -Use `python_repl` since even though the question is about a person, you don't know their name so you can't include it. 
-""" # noqa: E501 - - -class PythonInputs(BaseModel): - query: str = Field(description="code snippet to run") - - -df = pd.read_csv(MAIN_DIR / "titanic.csv") -template = TEMPLATE.format(dhead=df.head().to_markdown()) - -prompt = ChatPromptTemplate.from_messages( - [ - ("system", template), - MessagesPlaceholder(variable_name="agent_scratchpad"), - ("human", "{input}"), - ] -) - -repl = PythonAstREPLTool( - locals={"df": df}, - name="python_repl", - description="Runs code and returns the output of the final line", - args_schema=PythonInputs, -) -tools = [repl, retriever_tool] -agent = OpenAIFunctionsAgent( - llm=ChatOpenAI(temperature=0, model="gpt-4"), prompt=prompt, tools=tools -) -agent_executor = AgentExecutor( - agent=agent, tools=tools, max_iterations=5, early_stopping_method="generate" -) | (lambda x: x["output"]) - -# Typing for playground inputs - - -class AgentInputs(BaseModel): - input: str - - -agent_executor = agent_executor.with_types(input_type=AgentInputs) diff --git a/templates/csv-agent/ingest.py b/templates/csv-agent/ingest.py deleted file mode 100644 index 44a4bc8adeb..00000000000 --- a/templates/csv-agent/ingest.py +++ /dev/null @@ -1,12 +0,0 @@ -from langchain.indexes import VectorstoreIndexCreator -from langchain_community.document_loaders import CSVLoader -from langchain_community.vectorstores import FAISS - -loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv") - -docs = loader.load() -index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS) - -index = index_creator.from_documents(docs) - -index.vectorstore.save_local("titanic_data") diff --git a/templates/csv-agent/main.py b/templates/csv-agent/main.py deleted file mode 100644 index 8814b924787..00000000000 --- a/templates/csv-agent/main.py +++ /dev/null @@ -1,5 +0,0 @@ -from csv_agent.agent import agent_executor - -if __name__ == "__main__": - question = "who was in cabin c28?" - print(agent_executor.invoke({"input": question})) diff --git a/templates/csv-agent/pyproject.toml b/templates/csv-agent/pyproject.toml deleted file mode 100644 index a8401460385..00000000000 --- a/templates/csv-agent/pyproject.toml +++ /dev/null @@ -1,37 +0,0 @@ -[tool.poetry] -name = "csv-agent" -version = "0.0.1" -description = "Analyze csv data with Pandas and OpenAI" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.9,<3.13" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -faiss-cpu = "^1.7.4" -pandas = "^2.1.1" -setuptools = "^68.2.2" -tabulate = "^0.9.0" -pydantic = "<2" -langchain-experimental = ">=0.0.54" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "csv_agent" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "question-answering" -author = "LangChain" -integrations = ["OpenAI", "Pandas"] -tags = ["data", "agents"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/csv-agent/tests/__init__.py b/templates/csv-agent/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/csv-agent/titanic.csv b/templates/csv-agent/titanic.csv deleted file mode 100644 index 5cc466e97cf..00000000000 --- a/templates/csv-agent/titanic.csv +++ /dev/null @@ -1,892 +0,0 @@ -PassengerId,Survived,Pclass,Name,Sex,Age,SibSp,Parch,Ticket,Fare,Cabin,Embarked -1,0,3,"Braund, Mr. Owen Harris",male,22,1,0,A/5 21171,7.25,,S -2,1,1,"Cumings, Mrs. 
John Bradley (Florence Briggs Thayer)",female,38,1,0,PC 17599,71.2833,C85,C -3,1,3,"Heikkinen, Miss. Laina",female,26,0,0,STON/O2. 3101282,7.925,,S -4,1,1,"Futrelle, Mrs. Jacques Heath (Lily May Peel)",female,35,1,0,113803,53.1,C123,S -5,0,3,"Allen, Mr. William Henry",male,35,0,0,373450,8.05,,S -6,0,3,"Moran, Mr. James",male,,0,0,330877,8.4583,,Q -7,0,1,"McCarthy, Mr. Timothy J",male,54,0,0,17463,51.8625,E46,S -8,0,3,"Palsson, Master. Gosta Leonard",male,2,3,1,349909,21.075,,S -9,1,3,"Johnson, Mrs. Oscar W (Elisabeth Vilhelmina Berg)",female,27,0,2,347742,11.1333,,S -10,1,2,"Nasser, Mrs. Nicholas (Adele Achem)",female,14,1,0,237736,30.0708,,C -11,1,3,"Sandstrom, Miss. Marguerite Rut",female,4,1,1,PP 9549,16.7,G6,S -12,1,1,"Bonnell, Miss. Elizabeth",female,58,0,0,113783,26.55,C103,S -13,0,3,"Saundercock, Mr. William Henry",male,20,0,0,A/5. 2151,8.05,,S -14,0,3,"Andersson, Mr. Anders Johan",male,39,1,5,347082,31.275,,S -15,0,3,"Vestrom, Miss. Hulda Amanda Adolfina",female,14,0,0,350406,7.8542,,S -16,1,2,"Hewlett, Mrs. (Mary D Kingcome) ",female,55,0,0,248706,16,,S -17,0,3,"Rice, Master. Eugene",male,2,4,1,382652,29.125,,Q -18,1,2,"Williams, Mr. Charles Eugene",male,,0,0,244373,13,,S -19,0,3,"Vander Planke, Mrs. Julius (Emelia Maria Vandemoortele)",female,31,1,0,345763,18,,S -20,1,3,"Masselmani, Mrs. Fatima",female,,0,0,2649,7.225,,C -21,0,2,"Fynney, Mr. Joseph J",male,35,0,0,239865,26,,S -22,1,2,"Beesley, Mr. Lawrence",male,34,0,0,248698,13,D56,S -23,1,3,"McGowan, Miss. Anna ""Annie""",female,15,0,0,330923,8.0292,,Q -24,1,1,"Sloper, Mr. William Thompson",male,28,0,0,113788,35.5,A6,S -25,0,3,"Palsson, Miss. Torborg Danira",female,8,3,1,349909,21.075,,S -26,1,3,"Asplund, Mrs. Carl Oscar (Selma Augusta Emilia Johansson)",female,38,1,5,347077,31.3875,,S -27,0,3,"Emir, Mr. Farred Chehab",male,,0,0,2631,7.225,,C -28,0,1,"Fortune, Mr. Charles Alexander",male,19,3,2,19950,263,C23 C25 C27,S -29,1,3,"O'Dwyer, Miss. Ellen ""Nellie""",female,,0,0,330959,7.8792,,Q -30,0,3,"Todoroff, Mr. Lalio",male,,0,0,349216,7.8958,,S -31,0,1,"Uruchurtu, Don. Manuel E",male,40,0,0,PC 17601,27.7208,,C -32,1,1,"Spencer, Mrs. William Augustus (Marie Eugenie)",female,,1,0,PC 17569,146.5208,B78,C -33,1,3,"Glynn, Miss. Mary Agatha",female,,0,0,335677,7.75,,Q -34,0,2,"Wheadon, Mr. Edward H",male,66,0,0,C.A. 24579,10.5,,S -35,0,1,"Meyer, Mr. Edgar Joseph",male,28,1,0,PC 17604,82.1708,,C -36,0,1,"Holverson, Mr. Alexander Oskar",male,42,1,0,113789,52,,S -37,1,3,"Mamee, Mr. Hanna",male,,0,0,2677,7.2292,,C -38,0,3,"Cann, Mr. Ernest Charles",male,21,0,0,A./5. 2152,8.05,,S -39,0,3,"Vander Planke, Miss. Augusta Maria",female,18,2,0,345764,18,,S -40,1,3,"Nicola-Yarred, Miss. Jamila",female,14,1,0,2651,11.2417,,C -41,0,3,"Ahlin, Mrs. Johan (Johanna Persdotter Larsson)",female,40,1,0,7546,9.475,,S -42,0,2,"Turpin, Mrs. William John Robert (Dorothy Ann Wonnacott)",female,27,1,0,11668,21,,S -43,0,3,"Kraeff, Mr. Theodor",male,,0,0,349253,7.8958,,C -44,1,2,"Laroche, Miss. Simonne Marie Anne Andree",female,3,1,2,SC/Paris 2123,41.5792,,C -45,1,3,"Devaney, Miss. Margaret Delia",female,19,0,0,330958,7.8792,,Q -46,0,3,"Rogers, Mr. William John",male,,0,0,S.C./A.4. 23567,8.05,,S -47,0,3,"Lennon, Mr. Denis",male,,1,0,370371,15.5,,Q -48,1,3,"O'Driscoll, Miss. Bridget",female,,0,0,14311,7.75,,Q -49,0,3,"Samaan, Mr. Youssef",male,,2,0,2662,21.6792,,C -50,0,3,"Arnold-Franchi, Mrs. Josef (Josefine Franchi)",female,18,1,0,349237,17.8,,S -51,0,3,"Panula, Master. Juha Niilo",male,7,4,1,3101295,39.6875,,S -52,0,3,"Nosworthy, Mr. 
Richard Cater",male,21,0,0,A/4. 39886,7.8,,S -53,1,1,"Harper, Mrs. Henry Sleeper (Myna Haxtun)",female,49,1,0,PC 17572,76.7292,D33,C -54,1,2,"Faunthorpe, Mrs. Lizzie (Elizabeth Anne Wilkinson)",female,29,1,0,2926,26,,S -55,0,1,"Ostby, Mr. Engelhart Cornelius",male,65,0,1,113509,61.9792,B30,C -56,1,1,"Woolner, Mr. Hugh",male,,0,0,19947,35.5,C52,S -57,1,2,"Rugg, Miss. Emily",female,21,0,0,C.A. 31026,10.5,,S -58,0,3,"Novel, Mr. Mansouer",male,28.5,0,0,2697,7.2292,,C -59,1,2,"West, Miss. Constance Mirium",female,5,1,2,C.A. 34651,27.75,,S -60,0,3,"Goodwin, Master. William Frederick",male,11,5,2,CA 2144,46.9,,S -61,0,3,"Sirayanian, Mr. Orsen",male,22,0,0,2669,7.2292,,C -62,1,1,"Icard, Miss. Amelie",female,38,0,0,113572,80,B28, -63,0,1,"Harris, Mr. Henry Birkhardt",male,45,1,0,36973,83.475,C83,S -64,0,3,"Skoog, Master. Harald",male,4,3,2,347088,27.9,,S -65,0,1,"Stewart, Mr. Albert A",male,,0,0,PC 17605,27.7208,,C -66,1,3,"Moubarek, Master. Gerios",male,,1,1,2661,15.2458,,C -67,1,2,"Nye, Mrs. (Elizabeth Ramell)",female,29,0,0,C.A. 29395,10.5,F33,S -68,0,3,"Crease, Mr. Ernest James",male,19,0,0,S.P. 3464,8.1583,,S -69,1,3,"Andersson, Miss. Erna Alexandra",female,17,4,2,3101281,7.925,,S -70,0,3,"Kink, Mr. Vincenz",male,26,2,0,315151,8.6625,,S -71,0,2,"Jenkin, Mr. Stephen Curnow",male,32,0,0,C.A. 33111,10.5,,S -72,0,3,"Goodwin, Miss. Lillian Amy",female,16,5,2,CA 2144,46.9,,S -73,0,2,"Hood, Mr. Ambrose Jr",male,21,0,0,S.O.C. 14879,73.5,,S -74,0,3,"Chronopoulos, Mr. Apostolos",male,26,1,0,2680,14.4542,,C -75,1,3,"Bing, Mr. Lee",male,32,0,0,1601,56.4958,,S -76,0,3,"Moen, Mr. Sigurd Hansen",male,25,0,0,348123,7.65,F G73,S -77,0,3,"Staneff, Mr. Ivan",male,,0,0,349208,7.8958,,S -78,0,3,"Moutal, Mr. Rahamin Haim",male,,0,0,374746,8.05,,S -79,1,2,"Caldwell, Master. Alden Gates",male,0.83,0,2,248738,29,,S -80,1,3,"Dowdell, Miss. Elizabeth",female,30,0,0,364516,12.475,,S -81,0,3,"Waelens, Mr. Achille",male,22,0,0,345767,9,,S -82,1,3,"Sheerlinck, Mr. Jan Baptist",male,29,0,0,345779,9.5,,S -83,1,3,"McDermott, Miss. Brigdet Delia",female,,0,0,330932,7.7875,,Q -84,0,1,"Carrau, Mr. Francisco M",male,28,0,0,113059,47.1,,S -85,1,2,"Ilett, Miss. Bertha",female,17,0,0,SO/C 14885,10.5,,S -86,1,3,"Backstrom, Mrs. Karl Alfred (Maria Mathilda Gustafsson)",female,33,3,0,3101278,15.85,,S -87,0,3,"Ford, Mr. William Neal",male,16,1,3,W./C. 6608,34.375,,S -88,0,3,"Slocovski, Mr. Selman Francis",male,,0,0,SOTON/OQ 392086,8.05,,S -89,1,1,"Fortune, Miss. Mabel Helen",female,23,3,2,19950,263,C23 C25 C27,S -90,0,3,"Celotti, Mr. Francesco",male,24,0,0,343275,8.05,,S -91,0,3,"Christmann, Mr. Emil",male,29,0,0,343276,8.05,,S -92,0,3,"Andreasson, Mr. Paul Edvin",male,20,0,0,347466,7.8542,,S -93,0,1,"Chaffee, Mr. Herbert Fuller",male,46,1,0,W.E.P. 5734,61.175,E31,S -94,0,3,"Dean, Mr. Bertram Frank",male,26,1,2,C.A. 2315,20.575,,S -95,0,3,"Coxon, Mr. Daniel",male,59,0,0,364500,7.25,,S -96,0,3,"Shorney, Mr. Charles Joseph",male,,0,0,374910,8.05,,S -97,0,1,"Goldschmidt, Mr. George B",male,71,0,0,PC 17754,34.6542,A5,C -98,1,1,"Greenfield, Mr. William Bertram",male,23,0,1,PC 17759,63.3583,D10 D12,C -99,1,2,"Doling, Mrs. John T (Ada Julia Bone)",female,34,0,1,231919,23,,S -100,0,2,"Kantor, Mr. Sinai",male,34,1,0,244367,26,,S -101,0,3,"Petranec, Miss. Matilda",female,28,0,0,349245,7.8958,,S -102,0,3,"Petroff, Mr. Pastcho (""Pentcho"")",male,,0,0,349215,7.8958,,S -103,0,1,"White, Mr. Richard Frasar",male,21,0,1,35281,77.2875,D26,S -104,0,3,"Johansson, Mr. Gustaf Joel",male,33,0,0,7540,8.6542,,S -105,0,3,"Gustafsson, Mr. 
Anders Vilhelm",male,37,2,0,3101276,7.925,,S -106,0,3,"Mionoff, Mr. Stoytcho",male,28,0,0,349207,7.8958,,S -107,1,3,"Salkjelsvik, Miss. Anna Kristine",female,21,0,0,343120,7.65,,S -108,1,3,"Moss, Mr. Albert Johan",male,,0,0,312991,7.775,,S -109,0,3,"Rekic, Mr. Tido",male,38,0,0,349249,7.8958,,S -110,1,3,"Moran, Miss. Bertha",female,,1,0,371110,24.15,,Q -111,0,1,"Porter, Mr. Walter Chamberlain",male,47,0,0,110465,52,C110,S -112,0,3,"Zabour, Miss. Hileni",female,14.5,1,0,2665,14.4542,,C -113,0,3,"Barton, Mr. David John",male,22,0,0,324669,8.05,,S -114,0,3,"Jussila, Miss. Katriina",female,20,1,0,4136,9.825,,S -115,0,3,"Attalah, Miss. Malake",female,17,0,0,2627,14.4583,,C -116,0,3,"Pekoniemi, Mr. Edvard",male,21,0,0,STON/O 2. 3101294,7.925,,S -117,0,3,"Connors, Mr. Patrick",male,70.5,0,0,370369,7.75,,Q -118,0,2,"Turpin, Mr. William John Robert",male,29,1,0,11668,21,,S -119,0,1,"Baxter, Mr. Quigg Edmond",male,24,0,1,PC 17558,247.5208,B58 B60,C -120,0,3,"Andersson, Miss. Ellis Anna Maria",female,2,4,2,347082,31.275,,S -121,0,2,"Hickman, Mr. Stanley George",male,21,2,0,S.O.C. 14879,73.5,,S -122,0,3,"Moore, Mr. Leonard Charles",male,,0,0,A4. 54510,8.05,,S -123,0,2,"Nasser, Mr. Nicholas",male,32.5,1,0,237736,30.0708,,C -124,1,2,"Webber, Miss. Susan",female,32.5,0,0,27267,13,E101,S -125,0,1,"White, Mr. Percival Wayland",male,54,0,1,35281,77.2875,D26,S -126,1,3,"Nicola-Yarred, Master. Elias",male,12,1,0,2651,11.2417,,C -127,0,3,"McMahon, Mr. Martin",male,,0,0,370372,7.75,,Q -128,1,3,"Madsen, Mr. Fridtjof Arne",male,24,0,0,C 17369,7.1417,,S -129,1,3,"Peter, Miss. Anna",female,,1,1,2668,22.3583,F E69,C -130,0,3,"Ekstrom, Mr. Johan",male,45,0,0,347061,6.975,,S -131,0,3,"Drazenoic, Mr. Jozef",male,33,0,0,349241,7.8958,,C -132,0,3,"Coelho, Mr. Domingos Fernandeo",male,20,0,0,SOTON/O.Q. 3101307,7.05,,S -133,0,3,"Robins, Mrs. Alexander A (Grace Charity Laury)",female,47,1,0,A/5. 3337,14.5,,S -134,1,2,"Weisz, Mrs. Leopold (Mathilde Francoise Pede)",female,29,1,0,228414,26,,S -135,0,2,"Sobey, Mr. Samuel James Hayden",male,25,0,0,C.A. 29178,13,,S -136,0,2,"Richard, Mr. Emile",male,23,0,0,SC/PARIS 2133,15.0458,,C -137,1,1,"Newsom, Miss. Helen Monypeny",female,19,0,2,11752,26.2833,D47,S -138,0,1,"Futrelle, Mr. Jacques Heath",male,37,1,0,113803,53.1,C123,S -139,0,3,"Osen, Mr. Olaf Elon",male,16,0,0,7534,9.2167,,S -140,0,1,"Giglio, Mr. Victor",male,24,0,0,PC 17593,79.2,B86,C -141,0,3,"Boulos, Mrs. Joseph (Sultana)",female,,0,2,2678,15.2458,,C -142,1,3,"Nysten, Miss. Anna Sofia",female,22,0,0,347081,7.75,,S -143,1,3,"Hakkarainen, Mrs. Pekka Pietari (Elin Matilda Dolck)",female,24,1,0,STON/O2. 3101279,15.85,,S -144,0,3,"Burke, Mr. Jeremiah",male,19,0,0,365222,6.75,,Q -145,0,2,"Andrew, Mr. Edgardo Samuel",male,18,0,0,231945,11.5,,S -146,0,2,"Nicholls, Mr. Joseph Charles",male,19,1,1,C.A. 33112,36.75,,S -147,1,3,"Andersson, Mr. August Edvard (""Wennerstrom"")",male,27,0,0,350043,7.7958,,S -148,0,3,"Ford, Miss. Robina Maggie ""Ruby""",female,9,2,2,W./C. 6608,34.375,,S -149,0,2,"Navratil, Mr. Michel (""Louis M Hoffman"")",male,36.5,0,2,230080,26,F2,S -150,0,2,"Byles, Rev. Thomas Roussel Davids",male,42,0,0,244310,13,,S -151,0,2,"Bateman, Rev. Robert James",male,51,0,0,S.O.P. 1166,12.525,,S -152,1,1,"Pears, Mrs. Thomas (Edith Wearne)",female,22,1,0,113776,66.6,C2,S -153,0,3,"Meo, Mr. Alfonzo",male,55.5,0,0,A.5. 11206,8.05,,S -154,0,3,"van Billiard, Mr. Austin Blyler",male,40.5,0,2,A/5. 851,14.5,,S -155,0,3,"Olsen, Mr. Ole Martin",male,,0,0,Fa 265302,7.3125,,S -156,0,1,"Williams, Mr. 
Charles Duane",male,51,0,1,PC 17597,61.3792,,C -157,1,3,"Gilnagh, Miss. Katherine ""Katie""",female,16,0,0,35851,7.7333,,Q -158,0,3,"Corn, Mr. Harry",male,30,0,0,SOTON/OQ 392090,8.05,,S -159,0,3,"Smiljanic, Mr. Mile",male,,0,0,315037,8.6625,,S -160,0,3,"Sage, Master. Thomas Henry",male,,8,2,CA. 2343,69.55,,S -161,0,3,"Cribb, Mr. John Hatfield",male,44,0,1,371362,16.1,,S -162,1,2,"Watt, Mrs. James (Elizabeth ""Bessie"" Inglis Milne)",female,40,0,0,C.A. 33595,15.75,,S -163,0,3,"Bengtsson, Mr. John Viktor",male,26,0,0,347068,7.775,,S -164,0,3,"Calic, Mr. Jovo",male,17,0,0,315093,8.6625,,S -165,0,3,"Panula, Master. Eino Viljami",male,1,4,1,3101295,39.6875,,S -166,1,3,"Goldsmith, Master. Frank John William ""Frankie""",male,9,0,2,363291,20.525,,S -167,1,1,"Chibnall, Mrs. (Edith Martha Bowerman)",female,,0,1,113505,55,E33,S -168,0,3,"Skoog, Mrs. William (Anna Bernhardina Karlsson)",female,45,1,4,347088,27.9,,S -169,0,1,"Baumann, Mr. John D",male,,0,0,PC 17318,25.925,,S -170,0,3,"Ling, Mr. Lee",male,28,0,0,1601,56.4958,,S -171,0,1,"Van der hoef, Mr. Wyckoff",male,61,0,0,111240,33.5,B19,S -172,0,3,"Rice, Master. Arthur",male,4,4,1,382652,29.125,,Q -173,1,3,"Johnson, Miss. Eleanor Ileen",female,1,1,1,347742,11.1333,,S -174,0,3,"Sivola, Mr. Antti Wilhelm",male,21,0,0,STON/O 2. 3101280,7.925,,S -175,0,1,"Smith, Mr. James Clinch",male,56,0,0,17764,30.6958,A7,C -176,0,3,"Klasen, Mr. Klas Albin",male,18,1,1,350404,7.8542,,S -177,0,3,"Lefebre, Master. Henry Forbes",male,,3,1,4133,25.4667,,S -178,0,1,"Isham, Miss. Ann Elizabeth",female,50,0,0,PC 17595,28.7125,C49,C -179,0,2,"Hale, Mr. Reginald",male,30,0,0,250653,13,,S -180,0,3,"Leonard, Mr. Lionel",male,36,0,0,LINE,0,,S -181,0,3,"Sage, Miss. Constance Gladys",female,,8,2,CA. 2343,69.55,,S -182,0,2,"Pernot, Mr. Rene",male,,0,0,SC/PARIS 2131,15.05,,C -183,0,3,"Asplund, Master. Clarence Gustaf Hugo",male,9,4,2,347077,31.3875,,S -184,1,2,"Becker, Master. Richard F",male,1,2,1,230136,39,F4,S -185,1,3,"Kink-Heilmann, Miss. Luise Gretchen",female,4,0,2,315153,22.025,,S -186,0,1,"Rood, Mr. Hugh Roscoe",male,,0,0,113767,50,A32,S -187,1,3,"O'Brien, Mrs. Thomas (Johanna ""Hannah"" Godfrey)",female,,1,0,370365,15.5,,Q -188,1,1,"Romaine, Mr. Charles Hallace (""Mr C Rolmane"")",male,45,0,0,111428,26.55,,S -189,0,3,"Bourke, Mr. John",male,40,1,1,364849,15.5,,Q -190,0,3,"Turcin, Mr. Stjepan",male,36,0,0,349247,7.8958,,S -191,1,2,"Pinsky, Mrs. (Rosa)",female,32,0,0,234604,13,,S -192,0,2,"Carbines, Mr. William",male,19,0,0,28424,13,,S -193,1,3,"Andersen-Jensen, Miss. Carla Christine Nielsine",female,19,1,0,350046,7.8542,,S -194,1,2,"Navratil, Master. Michel M",male,3,1,1,230080,26,F2,S -195,1,1,"Brown, Mrs. James Joseph (Margaret Tobin)",female,44,0,0,PC 17610,27.7208,B4,C -196,1,1,"Lurette, Miss. Elise",female,58,0,0,PC 17569,146.5208,B80,C -197,0,3,"Mernagh, Mr. Robert",male,,0,0,368703,7.75,,Q -198,0,3,"Olsen, Mr. Karl Siegwart Andreas",male,42,0,1,4579,8.4042,,S -199,1,3,"Madigan, Miss. Margaret ""Maggie""",female,,0,0,370370,7.75,,Q -200,0,2,"Yrois, Miss. Henriette (""Mrs Harbeck"")",female,24,0,0,248747,13,,S -201,0,3,"Vande Walle, Mr. Nestor Cyriel",male,28,0,0,345770,9.5,,S -202,0,3,"Sage, Mr. Frederick",male,,8,2,CA. 2343,69.55,,S -203,0,3,"Johanson, Mr. Jakob Alfred",male,34,0,0,3101264,6.4958,,S -204,0,3,"Youseff, Mr. Gerious",male,45.5,0,0,2628,7.225,,C -205,1,3,"Cohen, Mr. Gurshon ""Gus""",male,18,0,0,A/5 3540,8.05,,S -206,0,3,"Strom, Miss. Telma Matilda",female,2,0,1,347054,10.4625,G6,S -207,0,3,"Backstrom, Mr. 
Karl Alfred",male,32,1,0,3101278,15.85,,S -208,1,3,"Albimona, Mr. Nassef Cassem",male,26,0,0,2699,18.7875,,C -209,1,3,"Carr, Miss. Helen ""Ellen""",female,16,0,0,367231,7.75,,Q -210,1,1,"Blank, Mr. Henry",male,40,0,0,112277,31,A31,C -211,0,3,"Ali, Mr. Ahmed",male,24,0,0,SOTON/O.Q. 3101311,7.05,,S -212,1,2,"Cameron, Miss. Clear Annie",female,35,0,0,F.C.C. 13528,21,,S -213,0,3,"Perkin, Mr. John Henry",male,22,0,0,A/5 21174,7.25,,S -214,0,2,"Givard, Mr. Hans Kristensen",male,30,0,0,250646,13,,S -215,0,3,"Kiernan, Mr. Philip",male,,1,0,367229,7.75,,Q -216,1,1,"Newell, Miss. Madeleine",female,31,1,0,35273,113.275,D36,C -217,1,3,"Honkanen, Miss. Eliina",female,27,0,0,STON/O2. 3101283,7.925,,S -218,0,2,"Jacobsohn, Mr. Sidney Samuel",male,42,1,0,243847,27,,S -219,1,1,"Bazzani, Miss. Albina",female,32,0,0,11813,76.2917,D15,C -220,0,2,"Harris, Mr. Walter",male,30,0,0,W/C 14208,10.5,,S -221,1,3,"Sunderland, Mr. Victor Francis",male,16,0,0,SOTON/OQ 392089,8.05,,S -222,0,2,"Bracken, Mr. James H",male,27,0,0,220367,13,,S -223,0,3,"Green, Mr. George Henry",male,51,0,0,21440,8.05,,S -224,0,3,"Nenkoff, Mr. Christo",male,,0,0,349234,7.8958,,S -225,1,1,"Hoyt, Mr. Frederick Maxfield",male,38,1,0,19943,90,C93,S -226,0,3,"Berglund, Mr. Karl Ivar Sven",male,22,0,0,PP 4348,9.35,,S -227,1,2,"Mellors, Mr. William John",male,19,0,0,SW/PP 751,10.5,,S -228,0,3,"Lovell, Mr. John Hall (""Henry"")",male,20.5,0,0,A/5 21173,7.25,,S -229,0,2,"Fahlstrom, Mr. Arne Jonas",male,18,0,0,236171,13,,S -230,0,3,"Lefebre, Miss. Mathilde",female,,3,1,4133,25.4667,,S -231,1,1,"Harris, Mrs. Henry Birkhardt (Irene Wallach)",female,35,1,0,36973,83.475,C83,S -232,0,3,"Larsson, Mr. Bengt Edvin",male,29,0,0,347067,7.775,,S -233,0,2,"Sjostedt, Mr. Ernst Adolf",male,59,0,0,237442,13.5,,S -234,1,3,"Asplund, Miss. Lillian Gertrud",female,5,4,2,347077,31.3875,,S -235,0,2,"Leyson, Mr. Robert William Norman",male,24,0,0,C.A. 29566,10.5,,S -236,0,3,"Harknett, Miss. Alice Phoebe",female,,0,0,W./C. 6609,7.55,,S -237,0,2,"Hold, Mr. Stephen",male,44,1,0,26707,26,,S -238,1,2,"Collyer, Miss. Marjorie ""Lottie""",female,8,0,2,C.A. 31921,26.25,,S -239,0,2,"Pengelly, Mr. Frederick William",male,19,0,0,28665,10.5,,S -240,0,2,"Hunt, Mr. George Henry",male,33,0,0,SCO/W 1585,12.275,,S -241,0,3,"Zabour, Miss. Thamine",female,,1,0,2665,14.4542,,C -242,1,3,"Murphy, Miss. Katherine ""Kate""",female,,1,0,367230,15.5,,Q -243,0,2,"Coleridge, Mr. Reginald Charles",male,29,0,0,W./C. 14263,10.5,,S -244,0,3,"Maenpaa, Mr. Matti Alexanteri",male,22,0,0,STON/O 2. 3101275,7.125,,S -245,0,3,"Attalah, Mr. Sleiman",male,30,0,0,2694,7.225,,C -246,0,1,"Minahan, Dr. William Edward",male,44,2,0,19928,90,C78,Q -247,0,3,"Lindahl, Miss. Agda Thorilda Viktoria",female,25,0,0,347071,7.775,,S -248,1,2,"Hamalainen, Mrs. William (Anna)",female,24,0,2,250649,14.5,,S -249,1,1,"Beckwith, Mr. Richard Leonard",male,37,1,1,11751,52.5542,D35,S -250,0,2,"Carter, Rev. Ernest Courtenay",male,54,1,0,244252,26,,S -251,0,3,"Reed, Mr. James George",male,,0,0,362316,7.25,,S -252,0,3,"Strom, Mrs. Wilhelm (Elna Matilda Persson)",female,29,1,1,347054,10.4625,G6,S -253,0,1,"Stead, Mr. William Thomas",male,62,0,0,113514,26.55,C87,S -254,0,3,"Lobb, Mr. William Arthur",male,30,1,0,A/5. 3336,16.1,,S -255,0,3,"Rosblom, Mrs. Viktor (Helena Wilhelmina)",female,41,0,2,370129,20.2125,,S -256,1,3,"Touma, Mrs. Darwis (Hanne Youssef Razi)",female,29,0,2,2650,15.2458,,C -257,1,1,"Thorne, Mrs. Gertrude Maybelle",female,,0,0,PC 17585,79.2,,C -258,1,1,"Cherry, Miss. 
Gladys",female,30,0,0,110152,86.5,B77,S -259,1,1,"Ward, Miss. Anna",female,35,0,0,PC 17755,512.3292,,C -260,1,2,"Parrish, Mrs. (Lutie Davis)",female,50,0,1,230433,26,,S -261,0,3,"Smith, Mr. Thomas",male,,0,0,384461,7.75,,Q -262,1,3,"Asplund, Master. Edvin Rojj Felix",male,3,4,2,347077,31.3875,,S -263,0,1,"Taussig, Mr. Emil",male,52,1,1,110413,79.65,E67,S -264,0,1,"Harrison, Mr. William",male,40,0,0,112059,0,B94,S -265,0,3,"Henry, Miss. Delia",female,,0,0,382649,7.75,,Q -266,0,2,"Reeves, Mr. David",male,36,0,0,C.A. 17248,10.5,,S -267,0,3,"Panula, Mr. Ernesti Arvid",male,16,4,1,3101295,39.6875,,S -268,1,3,"Persson, Mr. Ernst Ulrik",male,25,1,0,347083,7.775,,S -269,1,1,"Graham, Mrs. William Thompson (Edith Junkins)",female,58,0,1,PC 17582,153.4625,C125,S -270,1,1,"Bissette, Miss. Amelia",female,35,0,0,PC 17760,135.6333,C99,S -271,0,1,"Cairns, Mr. Alexander",male,,0,0,113798,31,,S -272,1,3,"Tornquist, Mr. William Henry",male,25,0,0,LINE,0,,S -273,1,2,"Mellinger, Mrs. (Elizabeth Anne Maidment)",female,41,0,1,250644,19.5,,S -274,0,1,"Natsch, Mr. Charles H",male,37,0,1,PC 17596,29.7,C118,C -275,1,3,"Healy, Miss. Hanora ""Nora""",female,,0,0,370375,7.75,,Q -276,1,1,"Andrews, Miss. Kornelia Theodosia",female,63,1,0,13502,77.9583,D7,S -277,0,3,"Lindblom, Miss. Augusta Charlotta",female,45,0,0,347073,7.75,,S -278,0,2,"Parkes, Mr. Francis ""Frank""",male,,0,0,239853,0,,S -279,0,3,"Rice, Master. Eric",male,7,4,1,382652,29.125,,Q -280,1,3,"Abbott, Mrs. Stanton (Rosa Hunt)",female,35,1,1,C.A. 2673,20.25,,S -281,0,3,"Duane, Mr. Frank",male,65,0,0,336439,7.75,,Q -282,0,3,"Olsson, Mr. Nils Johan Goransson",male,28,0,0,347464,7.8542,,S -283,0,3,"de Pelsmaeker, Mr. Alfons",male,16,0,0,345778,9.5,,S -284,1,3,"Dorking, Mr. Edward Arthur",male,19,0,0,A/5. 10482,8.05,,S -285,0,1,"Smith, Mr. Richard William",male,,0,0,113056,26,A19,S -286,0,3,"Stankovic, Mr. Ivan",male,33,0,0,349239,8.6625,,C -287,1,3,"de Mulder, Mr. Theodore",male,30,0,0,345774,9.5,,S -288,0,3,"Naidenoff, Mr. Penko",male,22,0,0,349206,7.8958,,S -289,1,2,"Hosono, Mr. Masabumi",male,42,0,0,237798,13,,S -290,1,3,"Connolly, Miss. Kate",female,22,0,0,370373,7.75,,Q -291,1,1,"Barber, Miss. Ellen ""Nellie""",female,26,0,0,19877,78.85,,S -292,1,1,"Bishop, Mrs. Dickinson H (Helen Walton)",female,19,1,0,11967,91.0792,B49,C -293,0,2,"Levy, Mr. Rene Jacques",male,36,0,0,SC/Paris 2163,12.875,D,C -294,0,3,"Haas, Miss. Aloisia",female,24,0,0,349236,8.85,,S -295,0,3,"Mineff, Mr. Ivan",male,24,0,0,349233,7.8958,,S -296,0,1,"Lewy, Mr. Ervin G",male,,0,0,PC 17612,27.7208,,C -297,0,3,"Hanna, Mr. Mansour",male,23.5,0,0,2693,7.2292,,C -298,0,1,"Allison, Miss. Helen Loraine",female,2,1,2,113781,151.55,C22 C26,S -299,1,1,"Saalfeld, Mr. Adolphe",male,,0,0,19988,30.5,C106,S -300,1,1,"Baxter, Mrs. James (Helene DeLaudeniere Chaput)",female,50,0,1,PC 17558,247.5208,B58 B60,C -301,1,3,"Kelly, Miss. Anna Katherine ""Annie Kate""",female,,0,0,9234,7.75,,Q -302,1,3,"McCoy, Mr. Bernard",male,,2,0,367226,23.25,,Q -303,0,3,"Johnson, Mr. William Cahoone Jr",male,19,0,0,LINE,0,,S -304,1,2,"Keane, Miss. Nora A",female,,0,0,226593,12.35,E101,Q -305,0,3,"Williams, Mr. Howard Hugh ""Harry""",male,,0,0,A/5 2466,8.05,,S -306,1,1,"Allison, Master. Hudson Trevor",male,0.92,1,2,113781,151.55,C22 C26,S -307,1,1,"Fleming, Miss. Margaret",female,,0,0,17421,110.8833,,C -308,1,1,"Penasco y Castellana, Mrs. Victor de Satode (Maria Josefa Perez de Soto y Vallejo)",female,17,1,0,PC 17758,108.9,C65,C -309,0,2,"Abelson, Mr. Samuel",male,30,1,0,P/PP 3381,24,,C -310,1,1,"Francatelli, Miss. 
Laura Mabel",female,30,0,0,PC 17485,56.9292,E36,C -311,1,1,"Hays, Miss. Margaret Bechstein",female,24,0,0,11767,83.1583,C54,C -312,1,1,"Ryerson, Miss. Emily Borie",female,18,2,2,PC 17608,262.375,B57 B59 B63 B66,C -313,0,2,"Lahtinen, Mrs. William (Anna Sylfven)",female,26,1,1,250651,26,,S -314,0,3,"Hendekovic, Mr. Ignjac",male,28,0,0,349243,7.8958,,S -315,0,2,"Hart, Mr. Benjamin",male,43,1,1,F.C.C. 13529,26.25,,S -316,1,3,"Nilsson, Miss. Helmina Josefina",female,26,0,0,347470,7.8542,,S -317,1,2,"Kantor, Mrs. Sinai (Miriam Sternin)",female,24,1,0,244367,26,,S -318,0,2,"Moraweck, Dr. Ernest",male,54,0,0,29011,14,,S -319,1,1,"Wick, Miss. Mary Natalie",female,31,0,2,36928,164.8667,C7,S -320,1,1,"Spedden, Mrs. Frederic Oakley (Margaretta Corning Stone)",female,40,1,1,16966,134.5,E34,C -321,0,3,"Dennis, Mr. Samuel",male,22,0,0,A/5 21172,7.25,,S -322,0,3,"Danoff, Mr. Yoto",male,27,0,0,349219,7.8958,,S -323,1,2,"Slayter, Miss. Hilda Mary",female,30,0,0,234818,12.35,,Q -324,1,2,"Caldwell, Mrs. Albert Francis (Sylvia Mae Harbaugh)",female,22,1,1,248738,29,,S -325,0,3,"Sage, Mr. George John Jr",male,,8,2,CA. 2343,69.55,,S -326,1,1,"Young, Miss. Marie Grice",female,36,0,0,PC 17760,135.6333,C32,C -327,0,3,"Nysveen, Mr. Johan Hansen",male,61,0,0,345364,6.2375,,S -328,1,2,"Ball, Mrs. (Ada E Hall)",female,36,0,0,28551,13,D,S -329,1,3,"Goldsmith, Mrs. Frank John (Emily Alice Brown)",female,31,1,1,363291,20.525,,S -330,1,1,"Hippach, Miss. Jean Gertrude",female,16,0,1,111361,57.9792,B18,C -331,1,3,"McCoy, Miss. Agnes",female,,2,0,367226,23.25,,Q -332,0,1,"Partner, Mr. Austen",male,45.5,0,0,113043,28.5,C124,S -333,0,1,"Graham, Mr. George Edward",male,38,0,1,PC 17582,153.4625,C91,S -334,0,3,"Vander Planke, Mr. Leo Edmondus",male,16,2,0,345764,18,,S -335,1,1,"Frauenthal, Mrs. Henry William (Clara Heinsheimer)",female,,1,0,PC 17611,133.65,,S -336,0,3,"Denkoff, Mr. Mitto",male,,0,0,349225,7.8958,,S -337,0,1,"Pears, Mr. Thomas Clinton",male,29,1,0,113776,66.6,C2,S -338,1,1,"Burns, Miss. Elizabeth Margaret",female,41,0,0,16966,134.5,E40,C -339,1,3,"Dahl, Mr. Karl Edwart",male,45,0,0,7598,8.05,,S -340,0,1,"Blackwell, Mr. Stephen Weart",male,45,0,0,113784,35.5,T,S -341,1,2,"Navratil, Master. Edmond Roger",male,2,1,1,230080,26,F2,S -342,1,1,"Fortune, Miss. Alice Elizabeth",female,24,3,2,19950,263,C23 C25 C27,S -343,0,2,"Collander, Mr. Erik Gustaf",male,28,0,0,248740,13,,S -344,0,2,"Sedgwick, Mr. Charles Frederick Waddington",male,25,0,0,244361,13,,S -345,0,2,"Fox, Mr. Stanley Hubert",male,36,0,0,229236,13,,S -346,1,2,"Brown, Miss. Amelia ""Mildred""",female,24,0,0,248733,13,F33,S -347,1,2,"Smith, Miss. Marion Elsie",female,40,0,0,31418,13,,S -348,1,3,"Davison, Mrs. Thomas Henry (Mary E Finck)",female,,1,0,386525,16.1,,S -349,1,3,"Coutts, Master. William Loch ""William""",male,3,1,1,C.A. 37671,15.9,,S -350,0,3,"Dimic, Mr. Jovan",male,42,0,0,315088,8.6625,,S -351,0,3,"Odahl, Mr. Nils Martin",male,23,0,0,7267,9.225,,S -352,0,1,"Williams-Lambert, Mr. Fletcher Fellows",male,,0,0,113510,35,C128,S -353,0,3,"Elias, Mr. Tannous",male,15,1,1,2695,7.2292,,C -354,0,3,"Arnold-Franchi, Mr. Josef",male,25,1,0,349237,17.8,,S -355,0,3,"Yousif, Mr. Wazli",male,,0,0,2647,7.225,,C -356,0,3,"Vanden Steen, Mr. Leo Peter",male,28,0,0,345783,9.5,,S -357,1,1,"Bowerman, Miss. Elsie Edith",female,22,0,1,113505,55,E33,S -358,0,2,"Funk, Miss. Annie Clemmer",female,38,0,0,237671,13,,S -359,1,3,"McGovern, Miss. Mary",female,,0,0,330931,7.8792,,Q -360,1,3,"Mockler, Miss. Helen Mary ""Ellie""",female,,0,0,330980,7.8792,,Q -361,0,3,"Skoog, Mr. 
Wilhelm",male,40,1,4,347088,27.9,,S -362,0,2,"del Carlo, Mr. Sebastiano",male,29,1,0,SC/PARIS 2167,27.7208,,C -363,0,3,"Barbara, Mrs. (Catherine David)",female,45,0,1,2691,14.4542,,C -364,0,3,"Asim, Mr. Adola",male,35,0,0,SOTON/O.Q. 3101310,7.05,,S -365,0,3,"O'Brien, Mr. Thomas",male,,1,0,370365,15.5,,Q -366,0,3,"Adahl, Mr. Mauritz Nils Martin",male,30,0,0,C 7076,7.25,,S -367,1,1,"Warren, Mrs. Frank Manley (Anna Sophia Atkinson)",female,60,1,0,110813,75.25,D37,C -368,1,3,"Moussa, Mrs. (Mantoura Boulos)",female,,0,0,2626,7.2292,,C -369,1,3,"Jermyn, Miss. Annie",female,,0,0,14313,7.75,,Q -370,1,1,"Aubart, Mme. Leontine Pauline",female,24,0,0,PC 17477,69.3,B35,C -371,1,1,"Harder, Mr. George Achilles",male,25,1,0,11765,55.4417,E50,C -372,0,3,"Wiklund, Mr. Jakob Alfred",male,18,1,0,3101267,6.4958,,S -373,0,3,"Beavan, Mr. William Thomas",male,19,0,0,323951,8.05,,S -374,0,1,"Ringhini, Mr. Sante",male,22,0,0,PC 17760,135.6333,,C -375,0,3,"Palsson, Miss. Stina Viola",female,3,3,1,349909,21.075,,S -376,1,1,"Meyer, Mrs. Edgar Joseph (Leila Saks)",female,,1,0,PC 17604,82.1708,,C -377,1,3,"Landergren, Miss. Aurora Adelia",female,22,0,0,C 7077,7.25,,S -378,0,1,"Widener, Mr. Harry Elkins",male,27,0,2,113503,211.5,C82,C -379,0,3,"Betros, Mr. Tannous",male,20,0,0,2648,4.0125,,C -380,0,3,"Gustafsson, Mr. Karl Gideon",male,19,0,0,347069,7.775,,S -381,1,1,"Bidois, Miss. Rosalie",female,42,0,0,PC 17757,227.525,,C -382,1,3,"Nakid, Miss. Maria (""Mary"")",female,1,0,2,2653,15.7417,,C -383,0,3,"Tikkanen, Mr. Juho",male,32,0,0,STON/O 2. 3101293,7.925,,S -384,1,1,"Holverson, Mrs. Alexander Oskar (Mary Aline Towner)",female,35,1,0,113789,52,,S -385,0,3,"Plotcharsky, Mr. Vasil",male,,0,0,349227,7.8958,,S -386,0,2,"Davies, Mr. Charles Henry",male,18,0,0,S.O.C. 14879,73.5,,S -387,0,3,"Goodwin, Master. Sidney Leonard",male,1,5,2,CA 2144,46.9,,S -388,1,2,"Buss, Miss. Kate",female,36,0,0,27849,13,,S -389,0,3,"Sadlier, Mr. Matthew",male,,0,0,367655,7.7292,,Q -390,1,2,"Lehmann, Miss. Bertha",female,17,0,0,SC 1748,12,,C -391,1,1,"Carter, Mr. William Ernest",male,36,1,2,113760,120,B96 B98,S -392,1,3,"Jansson, Mr. Carl Olof",male,21,0,0,350034,7.7958,,S -393,0,3,"Gustafsson, Mr. Johan Birger",male,28,2,0,3101277,7.925,,S -394,1,1,"Newell, Miss. Marjorie",female,23,1,0,35273,113.275,D36,C -395,1,3,"Sandstrom, Mrs. Hjalmar (Agnes Charlotta Bengtsson)",female,24,0,2,PP 9549,16.7,G6,S -396,0,3,"Johansson, Mr. Erik",male,22,0,0,350052,7.7958,,S -397,0,3,"Olsson, Miss. Elina",female,31,0,0,350407,7.8542,,S -398,0,2,"McKane, Mr. Peter David",male,46,0,0,28403,26,,S -399,0,2,"Pain, Dr. Alfred",male,23,0,0,244278,10.5,,S -400,1,2,"Trout, Mrs. William H (Jessie L)",female,28,0,0,240929,12.65,,S -401,1,3,"Niskanen, Mr. Juha",male,39,0,0,STON/O 2. 3101289,7.925,,S -402,0,3,"Adams, Mr. John",male,26,0,0,341826,8.05,,S -403,0,3,"Jussila, Miss. Mari Aina",female,21,1,0,4137,9.825,,S -404,0,3,"Hakkarainen, Mr. Pekka Pietari",male,28,1,0,STON/O2. 3101279,15.85,,S -405,0,3,"Oreskovic, Miss. Marija",female,20,0,0,315096,8.6625,,S -406,0,2,"Gale, Mr. Shadrach",male,34,1,0,28664,21,,S -407,0,3,"Widegren, Mr. Carl/Charles Peter",male,51,0,0,347064,7.75,,S -408,1,2,"Richards, Master. William Rowe",male,3,1,1,29106,18.75,,S -409,0,3,"Birkeland, Mr. Hans Martin Monsen",male,21,0,0,312992,7.775,,S -410,0,3,"Lefebre, Miss. Ida",female,,3,1,4133,25.4667,,S -411,0,3,"Sdycoff, Mr. Todor",male,,0,0,349222,7.8958,,S -412,0,3,"Hart, Mr. Henry",male,,0,0,394140,6.8583,,Q -413,1,1,"Minahan, Miss. Daisy E",female,33,1,0,19928,90,C78,Q -414,0,2,"Cunningham, Mr. 
Alfred Fleming",male,,0,0,239853,0,,S -415,1,3,"Sundman, Mr. Johan Julian",male,44,0,0,STON/O 2. 3101269,7.925,,S -416,0,3,"Meek, Mrs. Thomas (Annie Louise Rowley)",female,,0,0,343095,8.05,,S -417,1,2,"Drew, Mrs. James Vivian (Lulu Thorne Christian)",female,34,1,1,28220,32.5,,S -418,1,2,"Silven, Miss. Lyyli Karoliina",female,18,0,2,250652,13,,S -419,0,2,"Matthews, Mr. William John",male,30,0,0,28228,13,,S -420,0,3,"Van Impe, Miss. Catharina",female,10,0,2,345773,24.15,,S -421,0,3,"Gheorgheff, Mr. Stanio",male,,0,0,349254,7.8958,,C -422,0,3,"Charters, Mr. David",male,21,0,0,A/5. 13032,7.7333,,Q -423,0,3,"Zimmerman, Mr. Leo",male,29,0,0,315082,7.875,,S -424,0,3,"Danbom, Mrs. Ernst Gilbert (Anna Sigrid Maria Brogren)",female,28,1,1,347080,14.4,,S -425,0,3,"Rosblom, Mr. Viktor Richard",male,18,1,1,370129,20.2125,,S -426,0,3,"Wiseman, Mr. Phillippe",male,,0,0,A/4. 34244,7.25,,S -427,1,2,"Clarke, Mrs. Charles V (Ada Maria Winfield)",female,28,1,0,2003,26,,S -428,1,2,"Phillips, Miss. Kate Florence (""Mrs Kate Louise Phillips Marshall"")",female,19,0,0,250655,26,,S -429,0,3,"Flynn, Mr. James",male,,0,0,364851,7.75,,Q -430,1,3,"Pickard, Mr. Berk (Berk Trembisky)",male,32,0,0,SOTON/O.Q. 392078,8.05,E10,S -431,1,1,"Bjornstrom-Steffansson, Mr. Mauritz Hakan",male,28,0,0,110564,26.55,C52,S -432,1,3,"Thorneycroft, Mrs. Percival (Florence Kate White)",female,,1,0,376564,16.1,,S -433,1,2,"Louch, Mrs. Charles Alexander (Alice Adelaide Slow)",female,42,1,0,SC/AH 3085,26,,S -434,0,3,"Kallio, Mr. Nikolai Erland",male,17,0,0,STON/O 2. 3101274,7.125,,S -435,0,1,"Silvey, Mr. William Baird",male,50,1,0,13507,55.9,E44,S -436,1,1,"Carter, Miss. Lucile Polk",female,14,1,2,113760,120,B96 B98,S -437,0,3,"Ford, Miss. Doolina Margaret ""Daisy""",female,21,2,2,W./C. 6608,34.375,,S -438,1,2,"Richards, Mrs. Sidney (Emily Hocking)",female,24,2,3,29106,18.75,,S -439,0,1,"Fortune, Mr. Mark",male,64,1,4,19950,263,C23 C25 C27,S -440,0,2,"Kvillner, Mr. Johan Henrik Johannesson",male,31,0,0,C.A. 18723,10.5,,S -441,1,2,"Hart, Mrs. Benjamin (Esther Ada Bloomfield)",female,45,1,1,F.C.C. 13529,26.25,,S -442,0,3,"Hampe, Mr. Leon",male,20,0,0,345769,9.5,,S -443,0,3,"Petterson, Mr. Johan Emil",male,25,1,0,347076,7.775,,S -444,1,2,"Reynaldo, Ms. Encarnacion",female,28,0,0,230434,13,,S -445,1,3,"Johannesen-Bratthammer, Mr. Bernt",male,,0,0,65306,8.1125,,S -446,1,1,"Dodge, Master. Washington",male,4,0,2,33638,81.8583,A34,S -447,1,2,"Mellinger, Miss. Madeleine Violet",female,13,0,1,250644,19.5,,S -448,1,1,"Seward, Mr. Frederic Kimber",male,34,0,0,113794,26.55,,S -449,1,3,"Baclini, Miss. Marie Catherine",female,5,2,1,2666,19.2583,,C -450,1,1,"Peuchen, Major. Arthur Godfrey",male,52,0,0,113786,30.5,C104,S -451,0,2,"West, Mr. Edwy Arthur",male,36,1,2,C.A. 34651,27.75,,S -452,0,3,"Hagland, Mr. Ingvald Olai Olsen",male,,1,0,65303,19.9667,,S -453,0,1,"Foreman, Mr. Benjamin Laventall",male,30,0,0,113051,27.75,C111,C -454,1,1,"Goldenberg, Mr. Samuel L",male,49,1,0,17453,89.1042,C92,C -455,0,3,"Peduzzi, Mr. Joseph",male,,0,0,A/5 2817,8.05,,S -456,1,3,"Jalsevac, Mr. Ivan",male,29,0,0,349240,7.8958,,C -457,0,1,"Millet, Mr. Francis Davis",male,65,0,0,13509,26.55,E38,S -458,1,1,"Kenyon, Mrs. Frederick R (Marion)",female,,1,0,17464,51.8625,D21,S -459,1,2,"Toomey, Miss. Ellen",female,50,0,0,F.C.C. 13531,10.5,,S -460,0,3,"O'Connor, Mr. Maurice",male,,0,0,371060,7.75,,Q -461,1,1,"Anderson, Mr. Harry",male,48,0,0,19952,26.55,E12,S -462,0,3,"Morley, Mr. William",male,34,0,0,364506,8.05,,S -463,0,1,"Gee, Mr. 
Arthur H",male,47,0,0,111320,38.5,E63,S -464,0,2,"Milling, Mr. Jacob Christian",male,48,0,0,234360,13,,S -465,0,3,"Maisner, Mr. Simon",male,,0,0,A/S 2816,8.05,,S -466,0,3,"Goncalves, Mr. Manuel Estanslas",male,38,0,0,SOTON/O.Q. 3101306,7.05,,S -467,0,2,"Campbell, Mr. William",male,,0,0,239853,0,,S -468,0,1,"Smart, Mr. John Montgomery",male,56,0,0,113792,26.55,,S -469,0,3,"Scanlan, Mr. James",male,,0,0,36209,7.725,,Q -470,1,3,"Baclini, Miss. Helene Barbara",female,0.75,2,1,2666,19.2583,,C -471,0,3,"Keefe, Mr. Arthur",male,,0,0,323592,7.25,,S -472,0,3,"Cacic, Mr. Luka",male,38,0,0,315089,8.6625,,S -473,1,2,"West, Mrs. Edwy Arthur (Ada Mary Worth)",female,33,1,2,C.A. 34651,27.75,,S -474,1,2,"Jerwan, Mrs. Amin S (Marie Marthe Thuillard)",female,23,0,0,SC/AH Basle 541,13.7917,D,C -475,0,3,"Strandberg, Miss. Ida Sofia",female,22,0,0,7553,9.8375,,S -476,0,1,"Clifford, Mr. George Quincy",male,,0,0,110465,52,A14,S -477,0,2,"Renouf, Mr. Peter Henry",male,34,1,0,31027,21,,S -478,0,3,"Braund, Mr. Lewis Richard",male,29,1,0,3460,7.0458,,S -479,0,3,"Karlsson, Mr. Nils August",male,22,0,0,350060,7.5208,,S -480,1,3,"Hirvonen, Miss. Hildur E",female,2,0,1,3101298,12.2875,,S -481,0,3,"Goodwin, Master. Harold Victor",male,9,5,2,CA 2144,46.9,,S -482,0,2,"Frost, Mr. Anthony Wood ""Archie""",male,,0,0,239854,0,,S -483,0,3,"Rouse, Mr. Richard Henry",male,50,0,0,A/5 3594,8.05,,S -484,1,3,"Turkula, Mrs. (Hedwig)",female,63,0,0,4134,9.5875,,S -485,1,1,"Bishop, Mr. Dickinson H",male,25,1,0,11967,91.0792,B49,C -486,0,3,"Lefebre, Miss. Jeannie",female,,3,1,4133,25.4667,,S -487,1,1,"Hoyt, Mrs. Frederick Maxfield (Jane Anne Forby)",female,35,1,0,19943,90,C93,S -488,0,1,"Kent, Mr. Edward Austin",male,58,0,0,11771,29.7,B37,C -489,0,3,"Somerton, Mr. Francis William",male,30,0,0,A.5. 18509,8.05,,S -490,1,3,"Coutts, Master. Eden Leslie ""Neville""",male,9,1,1,C.A. 37671,15.9,,S -491,0,3,"Hagland, Mr. Konrad Mathias Reiersen",male,,1,0,65304,19.9667,,S -492,0,3,"Windelov, Mr. Einar",male,21,0,0,SOTON/OQ 3101317,7.25,,S -493,0,1,"Molson, Mr. Harry Markland",male,55,0,0,113787,30.5,C30,S -494,0,1,"Artagaveytia, Mr. Ramon",male,71,0,0,PC 17609,49.5042,,C -495,0,3,"Stanley, Mr. Edward Roland",male,21,0,0,A/4 45380,8.05,,S -496,0,3,"Yousseff, Mr. Gerious",male,,0,0,2627,14.4583,,C -497,1,1,"Eustis, Miss. Elizabeth Mussey",female,54,1,0,36947,78.2667,D20,C -498,0,3,"Shellard, Mr. Frederick William",male,,0,0,C.A. 6212,15.1,,S -499,0,1,"Allison, Mrs. Hudson J C (Bessie Waldo Daniels)",female,25,1,2,113781,151.55,C22 C26,S -500,0,3,"Svensson, Mr. Olof",male,24,0,0,350035,7.7958,,S -501,0,3,"Calic, Mr. Petar",male,17,0,0,315086,8.6625,,S -502,0,3,"Canavan, Miss. Mary",female,21,0,0,364846,7.75,,Q -503,0,3,"O'Sullivan, Miss. Bridget Mary",female,,0,0,330909,7.6292,,Q -504,0,3,"Laitinen, Miss. Kristina Sofia",female,37,0,0,4135,9.5875,,S -505,1,1,"Maioni, Miss. Roberta",female,16,0,0,110152,86.5,B79,S -506,0,1,"Penasco y Castellana, Mr. Victor de Satode",male,18,1,0,PC 17758,108.9,C65,C -507,1,2,"Quick, Mrs. Frederick Charles (Jane Richards)",female,33,0,2,26360,26,,S -508,1,1,"Bradley, Mr. George (""George Arthur Brayton"")",male,,0,0,111427,26.55,,S -509,0,3,"Olsen, Mr. Henry Margido",male,28,0,0,C 4001,22.525,,S -510,1,3,"Lang, Mr. Fang",male,26,0,0,1601,56.4958,,S -511,1,3,"Daly, Mr. Eugene Patrick",male,29,0,0,382651,7.75,,Q -512,0,3,"Webber, Mr. James",male,,0,0,SOTON/OQ 3101316,8.05,,S -513,1,1,"McGough, Mr. James Robert",male,36,0,0,PC 17473,26.2875,E25,S -514,1,1,"Rothschild, Mrs. Martin (Elizabeth L. 
Barrett)",female,54,1,0,PC 17603,59.4,,C -515,0,3,"Coleff, Mr. Satio",male,24,0,0,349209,7.4958,,S -516,0,1,"Walker, Mr. William Anderson",male,47,0,0,36967,34.0208,D46,S -517,1,2,"Lemore, Mrs. (Amelia Milley)",female,34,0,0,C.A. 34260,10.5,F33,S -518,0,3,"Ryan, Mr. Patrick",male,,0,0,371110,24.15,,Q -519,1,2,"Angle, Mrs. William A (Florence ""Mary"" Agnes Hughes)",female,36,1,0,226875,26,,S -520,0,3,"Pavlovic, Mr. Stefo",male,32,0,0,349242,7.8958,,S -521,1,1,"Perreault, Miss. Anne",female,30,0,0,12749,93.5,B73,S -522,0,3,"Vovk, Mr. Janko",male,22,0,0,349252,7.8958,,S -523,0,3,"Lahoud, Mr. Sarkis",male,,0,0,2624,7.225,,C -524,1,1,"Hippach, Mrs. Louis Albert (Ida Sophia Fischer)",female,44,0,1,111361,57.9792,B18,C -525,0,3,"Kassem, Mr. Fared",male,,0,0,2700,7.2292,,C -526,0,3,"Farrell, Mr. James",male,40.5,0,0,367232,7.75,,Q -527,1,2,"Ridsdale, Miss. Lucy",female,50,0,0,W./C. 14258,10.5,,S -528,0,1,"Farthing, Mr. John",male,,0,0,PC 17483,221.7792,C95,S -529,0,3,"Salonen, Mr. Johan Werner",male,39,0,0,3101296,7.925,,S -530,0,2,"Hocking, Mr. Richard George",male,23,2,1,29104,11.5,,S -531,1,2,"Quick, Miss. Phyllis May",female,2,1,1,26360,26,,S -532,0,3,"Toufik, Mr. Nakli",male,,0,0,2641,7.2292,,C -533,0,3,"Elias, Mr. Joseph Jr",male,17,1,1,2690,7.2292,,C -534,1,3,"Peter, Mrs. Catherine (Catherine Rizk)",female,,0,2,2668,22.3583,,C -535,0,3,"Cacic, Miss. Marija",female,30,0,0,315084,8.6625,,S -536,1,2,"Hart, Miss. Eva Miriam",female,7,0,2,F.C.C. 13529,26.25,,S -537,0,1,"Butt, Major. Archibald Willingham",male,45,0,0,113050,26.55,B38,S -538,1,1,"LeRoy, Miss. Bertha",female,30,0,0,PC 17761,106.425,,C -539,0,3,"Risien, Mr. Samuel Beard",male,,0,0,364498,14.5,,S -540,1,1,"Frolicher, Miss. Hedwig Margaritha",female,22,0,2,13568,49.5,B39,C -541,1,1,"Crosby, Miss. Harriet R",female,36,0,2,WE/P 5735,71,B22,S -542,0,3,"Andersson, Miss. Ingeborg Constanzia",female,9,4,2,347082,31.275,,S -543,0,3,"Andersson, Miss. Sigrid Elisabeth",female,11,4,2,347082,31.275,,S -544,1,2,"Beane, Mr. Edward",male,32,1,0,2908,26,,S -545,0,1,"Douglas, Mr. Walter Donald",male,50,1,0,PC 17761,106.425,C86,C -546,0,1,"Nicholson, Mr. Arthur Ernest",male,64,0,0,693,26,,S -547,1,2,"Beane, Mrs. Edward (Ethel Clarke)",female,19,1,0,2908,26,,S -548,1,2,"Padro y Manent, Mr. Julian",male,,0,0,SC/PARIS 2146,13.8625,,C -549,0,3,"Goldsmith, Mr. Frank John",male,33,1,1,363291,20.525,,S -550,1,2,"Davies, Master. John Morgan Jr",male,8,1,1,C.A. 33112,36.75,,S -551,1,1,"Thayer, Mr. John Borland Jr",male,17,0,2,17421,110.8833,C70,C -552,0,2,"Sharp, Mr. Percival James R",male,27,0,0,244358,26,,S -553,0,3,"O'Brien, Mr. Timothy",male,,0,0,330979,7.8292,,Q -554,1,3,"Leeni, Mr. Fahim (""Philip Zenni"")",male,22,0,0,2620,7.225,,C -555,1,3,"Ohman, Miss. Velin",female,22,0,0,347085,7.775,,S -556,0,1,"Wright, Mr. George",male,62,0,0,113807,26.55,,S -557,1,1,"Duff Gordon, Lady. (Lucille Christiana Sutherland) (""Mrs Morgan"")",female,48,1,0,11755,39.6,A16,C -558,0,1,"Robbins, Mr. Victor",male,,0,0,PC 17757,227.525,,C -559,1,1,"Taussig, Mrs. Emil (Tillie Mandelbaum)",female,39,1,1,110413,79.65,E67,S -560,1,3,"de Messemaeker, Mrs. Guillaume Joseph (Emma)",female,36,1,0,345572,17.4,,S -561,0,3,"Morrow, Mr. Thomas Rowan",male,,0,0,372622,7.75,,Q -562,0,3,"Sivic, Mr. Husein",male,40,0,0,349251,7.8958,,S -563,0,2,"Norman, Mr. Robert Douglas",male,28,0,0,218629,13.5,,S -564,0,3,"Simmons, Mr. John",male,,0,0,SOTON/OQ 392082,8.05,,S -565,0,3,"Meanwell, Miss. (Marion Ogden)",female,,0,0,SOTON/O.Q. 392087,8.05,,S -566,0,3,"Davies, Mr. 
Alfred J",male,24,2,0,A/4 48871,24.15,,S -567,0,3,"Stoytcheff, Mr. Ilia",male,19,0,0,349205,7.8958,,S -568,0,3,"Palsson, Mrs. Nils (Alma Cornelia Berglund)",female,29,0,4,349909,21.075,,S -569,0,3,"Doharr, Mr. Tannous",male,,0,0,2686,7.2292,,C -570,1,3,"Jonsson, Mr. Carl",male,32,0,0,350417,7.8542,,S -571,1,2,"Harris, Mr. George",male,62,0,0,S.W./PP 752,10.5,,S -572,1,1,"Appleton, Mrs. Edward Dale (Charlotte Lamson)",female,53,2,0,11769,51.4792,C101,S -573,1,1,"Flynn, Mr. John Irwin (""Irving"")",male,36,0,0,PC 17474,26.3875,E25,S -574,1,3,"Kelly, Miss. Mary",female,,0,0,14312,7.75,,Q -575,0,3,"Rush, Mr. Alfred George John",male,16,0,0,A/4. 20589,8.05,,S -576,0,3,"Patchett, Mr. George",male,19,0,0,358585,14.5,,S -577,1,2,"Garside, Miss. Ethel",female,34,0,0,243880,13,,S -578,1,1,"Silvey, Mrs. William Baird (Alice Munger)",female,39,1,0,13507,55.9,E44,S -579,0,3,"Caram, Mrs. Joseph (Maria Elias)",female,,1,0,2689,14.4583,,C -580,1,3,"Jussila, Mr. Eiriik",male,32,0,0,STON/O 2. 3101286,7.925,,S -581,1,2,"Christy, Miss. Julie Rachel",female,25,1,1,237789,30,,S -582,1,1,"Thayer, Mrs. John Borland (Marian Longstreth Morris)",female,39,1,1,17421,110.8833,C68,C -583,0,2,"Downton, Mr. William James",male,54,0,0,28403,26,,S -584,0,1,"Ross, Mr. John Hugo",male,36,0,0,13049,40.125,A10,C -585,0,3,"Paulner, Mr. Uscher",male,,0,0,3411,8.7125,,C -586,1,1,"Taussig, Miss. Ruth",female,18,0,2,110413,79.65,E68,S -587,0,2,"Jarvis, Mr. John Denzil",male,47,0,0,237565,15,,S -588,1,1,"Frolicher-Stehli, Mr. Maxmillian",male,60,1,1,13567,79.2,B41,C -589,0,3,"Gilinski, Mr. Eliezer",male,22,0,0,14973,8.05,,S -590,0,3,"Murdlin, Mr. Joseph",male,,0,0,A./5. 3235,8.05,,S -591,0,3,"Rintamaki, Mr. Matti",male,35,0,0,STON/O 2. 3101273,7.125,,S -592,1,1,"Stephenson, Mrs. Walter Bertram (Martha Eustis)",female,52,1,0,36947,78.2667,D20,C -593,0,3,"Elsbury, Mr. William James",male,47,0,0,A/5 3902,7.25,,S -594,0,3,"Bourke, Miss. Mary",female,,0,2,364848,7.75,,Q -595,0,2,"Chapman, Mr. John Henry",male,37,1,0,SC/AH 29037,26,,S -596,0,3,"Van Impe, Mr. Jean Baptiste",male,36,1,1,345773,24.15,,S -597,1,2,"Leitch, Miss. Jessie Wills",female,,0,0,248727,33,,S -598,0,3,"Johnson, Mr. Alfred",male,49,0,0,LINE,0,,S -599,0,3,"Boulos, Mr. Hanna",male,,0,0,2664,7.225,,C -600,1,1,"Duff Gordon, Sir. Cosmo Edmund (""Mr Morgan"")",male,49,1,0,PC 17485,56.9292,A20,C -601,1,2,"Jacobsohn, Mrs. Sidney Samuel (Amy Frances Christy)",female,24,2,1,243847,27,,S -602,0,3,"Slabenoff, Mr. Petco",male,,0,0,349214,7.8958,,S -603,0,1,"Harrington, Mr. Charles H",male,,0,0,113796,42.4,,S -604,0,3,"Torber, Mr. Ernst William",male,44,0,0,364511,8.05,,S -605,1,1,"Homer, Mr. Harry (""Mr E Haven"")",male,35,0,0,111426,26.55,,C -606,0,3,"Lindell, Mr. Edvard Bengtsson",male,36,1,0,349910,15.55,,S -607,0,3,"Karaic, Mr. Milan",male,30,0,0,349246,7.8958,,S -608,1,1,"Daniel, Mr. Robert Williams",male,27,0,0,113804,30.5,,S -609,1,2,"Laroche, Mrs. Joseph (Juliette Marie Louise Lafargue)",female,22,1,2,SC/Paris 2123,41.5792,,C -610,1,1,"Shutes, Miss. Elizabeth W",female,40,0,0,PC 17582,153.4625,C125,S -611,0,3,"Andersson, Mrs. Anders Johan (Alfrida Konstantia Brogren)",female,39,1,5,347082,31.275,,S -612,0,3,"Jardin, Mr. Jose Neto",male,,0,0,SOTON/O.Q. 3101305,7.05,,S -613,1,3,"Murphy, Miss. Margaret Jane",female,,1,0,367230,15.5,,Q -614,0,3,"Horgan, Mr. John",male,,0,0,370377,7.75,,Q -615,0,3,"Brocklebank, Mr. William Alfred",male,35,0,0,364512,8.05,,S -616,1,2,"Herman, Miss. Alice",female,24,1,2,220845,65,,S -617,0,3,"Danbom, Mr. 
Ernst Gilbert",male,34,1,1,347080,14.4,,S -618,0,3,"Lobb, Mrs. William Arthur (Cordelia K Stanlick)",female,26,1,0,A/5. 3336,16.1,,S -619,1,2,"Becker, Miss. Marion Louise",female,4,2,1,230136,39,F4,S -620,0,2,"Gavey, Mr. Lawrence",male,26,0,0,31028,10.5,,S -621,0,3,"Yasbeck, Mr. Antoni",male,27,1,0,2659,14.4542,,C -622,1,1,"Kimball, Mr. Edwin Nelson Jr",male,42,1,0,11753,52.5542,D19,S -623,1,3,"Nakid, Mr. Sahid",male,20,1,1,2653,15.7417,,C -624,0,3,"Hansen, Mr. Henry Damsgaard",male,21,0,0,350029,7.8542,,S -625,0,3,"Bowen, Mr. David John ""Dai""",male,21,0,0,54636,16.1,,S -626,0,1,"Sutton, Mr. Frederick",male,61,0,0,36963,32.3208,D50,S -627,0,2,"Kirkland, Rev. Charles Leonard",male,57,0,0,219533,12.35,,Q -628,1,1,"Longley, Miss. Gretchen Fiske",female,21,0,0,13502,77.9583,D9,S -629,0,3,"Bostandyeff, Mr. Guentcho",male,26,0,0,349224,7.8958,,S -630,0,3,"O'Connell, Mr. Patrick D",male,,0,0,334912,7.7333,,Q -631,1,1,"Barkworth, Mr. Algernon Henry Wilson",male,80,0,0,27042,30,A23,S -632,0,3,"Lundahl, Mr. Johan Svensson",male,51,0,0,347743,7.0542,,S -633,1,1,"Stahelin-Maeglin, Dr. Max",male,32,0,0,13214,30.5,B50,C -634,0,1,"Parr, Mr. William Henry Marsh",male,,0,0,112052,0,,S -635,0,3,"Skoog, Miss. Mabel",female,9,3,2,347088,27.9,,S -636,1,2,"Davis, Miss. Mary",female,28,0,0,237668,13,,S -637,0,3,"Leinonen, Mr. Antti Gustaf",male,32,0,0,STON/O 2. 3101292,7.925,,S -638,0,2,"Collyer, Mr. Harvey",male,31,1,1,C.A. 31921,26.25,,S -639,0,3,"Panula, Mrs. Juha (Maria Emilia Ojala)",female,41,0,5,3101295,39.6875,,S -640,0,3,"Thorneycroft, Mr. Percival",male,,1,0,376564,16.1,,S -641,0,3,"Jensen, Mr. Hans Peder",male,20,0,0,350050,7.8542,,S -642,1,1,"Sagesser, Mlle. Emma",female,24,0,0,PC 17477,69.3,B35,C -643,0,3,"Skoog, Miss. Margit Elizabeth",female,2,3,2,347088,27.9,,S -644,1,3,"Foo, Mr. Choong",male,,0,0,1601,56.4958,,S -645,1,3,"Baclini, Miss. Eugenie",female,0.75,2,1,2666,19.2583,,C -646,1,1,"Harper, Mr. Henry Sleeper",male,48,1,0,PC 17572,76.7292,D33,C -647,0,3,"Cor, Mr. Liudevit",male,19,0,0,349231,7.8958,,S -648,1,1,"Simonius-Blumer, Col. Oberst Alfons",male,56,0,0,13213,35.5,A26,C -649,0,3,"Willey, Mr. Edward",male,,0,0,S.O./P.P. 751,7.55,,S -650,1,3,"Stanley, Miss. Amy Zillah Elsie",female,23,0,0,CA. 2314,7.55,,S -651,0,3,"Mitkoff, Mr. Mito",male,,0,0,349221,7.8958,,S -652,1,2,"Doling, Miss. Elsie",female,18,0,1,231919,23,,S -653,0,3,"Kalvik, Mr. Johannes Halvorsen",male,21,0,0,8475,8.4333,,S -654,1,3,"O'Leary, Miss. Hanora ""Norah""",female,,0,0,330919,7.8292,,Q -655,0,3,"Hegarty, Miss. Hanora ""Nora""",female,18,0,0,365226,6.75,,Q -656,0,2,"Hickman, Mr. Leonard Mark",male,24,2,0,S.O.C. 14879,73.5,,S -657,0,3,"Radeff, Mr. Alexander",male,,0,0,349223,7.8958,,S -658,0,3,"Bourke, Mrs. John (Catherine)",female,32,1,1,364849,15.5,,Q -659,0,2,"Eitemiller, Mr. George Floyd",male,23,0,0,29751,13,,S -660,0,1,"Newell, Mr. Arthur Webster",male,58,0,2,35273,113.275,D48,C -661,1,1,"Frauenthal, Dr. Henry William",male,50,2,0,PC 17611,133.65,,S -662,0,3,"Badt, Mr. Mohamed",male,40,0,0,2623,7.225,,C -663,0,1,"Colley, Mr. Edward Pomeroy",male,47,0,0,5727,25.5875,E58,S -664,0,3,"Coleff, Mr. Peju",male,36,0,0,349210,7.4958,,S -665,1,3,"Lindqvist, Mr. Eino William",male,20,1,0,STON/O 2. 3101285,7.925,,S -666,0,2,"Hickman, Mr. Lewis",male,32,2,0,S.O.C. 14879,73.5,,S -667,0,2,"Butler, Mr. Reginald Fenton",male,25,0,0,234686,13,,S -668,0,3,"Rommetvedt, Mr. Knud Paust",male,,0,0,312993,7.775,,S -669,0,3,"Cook, Mr. Jacob",male,43,0,0,A/5 3536,8.05,,S -670,1,1,"Taylor, Mrs. 
Elmer Zebley (Juliet Cummins Wright)",female,,1,0,19996,52,C126,S -671,1,2,"Brown, Mrs. Thomas William Solomon (Elizabeth Catherine Ford)",female,40,1,1,29750,39,,S -672,0,1,"Davidson, Mr. Thornton",male,31,1,0,F.C. 12750,52,B71,S -673,0,2,"Mitchell, Mr. Henry Michael",male,70,0,0,C.A. 24580,10.5,,S -674,1,2,"Wilhelms, Mr. Charles",male,31,0,0,244270,13,,S -675,0,2,"Watson, Mr. Ennis Hastings",male,,0,0,239856,0,,S -676,0,3,"Edvardsson, Mr. Gustaf Hjalmar",male,18,0,0,349912,7.775,,S -677,0,3,"Sawyer, Mr. Frederick Charles",male,24.5,0,0,342826,8.05,,S -678,1,3,"Turja, Miss. Anna Sofia",female,18,0,0,4138,9.8417,,S -679,0,3,"Goodwin, Mrs. Frederick (Augusta Tyler)",female,43,1,6,CA 2144,46.9,,S -680,1,1,"Cardeza, Mr. Thomas Drake Martinez",male,36,0,1,PC 17755,512.3292,B51 B53 B55,C -681,0,3,"Peters, Miss. Katie",female,,0,0,330935,8.1375,,Q -682,1,1,"Hassab, Mr. Hammad",male,27,0,0,PC 17572,76.7292,D49,C -683,0,3,"Olsvigen, Mr. Thor Anderson",male,20,0,0,6563,9.225,,S -684,0,3,"Goodwin, Mr. Charles Edward",male,14,5,2,CA 2144,46.9,,S -685,0,2,"Brown, Mr. Thomas William Solomon",male,60,1,1,29750,39,,S -686,0,2,"Laroche, Mr. Joseph Philippe Lemercier",male,25,1,2,SC/Paris 2123,41.5792,,C -687,0,3,"Panula, Mr. Jaako Arnold",male,14,4,1,3101295,39.6875,,S -688,0,3,"Dakic, Mr. Branko",male,19,0,0,349228,10.1708,,S -689,0,3,"Fischer, Mr. Eberhard Thelander",male,18,0,0,350036,7.7958,,S -690,1,1,"Madill, Miss. Georgette Alexandra",female,15,0,1,24160,211.3375,B5,S -691,1,1,"Dick, Mr. Albert Adrian",male,31,1,0,17474,57,B20,S -692,1,3,"Karun, Miss. Manca",female,4,0,1,349256,13.4167,,C -693,1,3,"Lam, Mr. Ali",male,,0,0,1601,56.4958,,S -694,0,3,"Saad, Mr. Khalil",male,25,0,0,2672,7.225,,C -695,0,1,"Weir, Col. John",male,60,0,0,113800,26.55,,S -696,0,2,"Chapman, Mr. Charles Henry",male,52,0,0,248731,13.5,,S -697,0,3,"Kelly, Mr. James",male,44,0,0,363592,8.05,,S -698,1,3,"Mullens, Miss. Katherine ""Katie""",female,,0,0,35852,7.7333,,Q -699,0,1,"Thayer, Mr. John Borland",male,49,1,1,17421,110.8833,C68,C -700,0,3,"Humblen, Mr. Adolf Mathias Nicolai Olsen",male,42,0,0,348121,7.65,F G63,S -701,1,1,"Astor, Mrs. John Jacob (Madeleine Talmadge Force)",female,18,1,0,PC 17757,227.525,C62 C64,C -702,1,1,"Silverthorne, Mr. Spencer Victor",male,35,0,0,PC 17475,26.2875,E24,S -703,0,3,"Barbara, Miss. Saiide",female,18,0,1,2691,14.4542,,C -704,0,3,"Gallagher, Mr. Martin",male,25,0,0,36864,7.7417,,Q -705,0,3,"Hansen, Mr. Henrik Juul",male,26,1,0,350025,7.8542,,S -706,0,2,"Morley, Mr. Henry Samuel (""Mr Henry Marshall"")",male,39,0,0,250655,26,,S -707,1,2,"Kelly, Mrs. Florence ""Fannie""",female,45,0,0,223596,13.5,,S -708,1,1,"Calderhead, Mr. Edward Pennington",male,42,0,0,PC 17476,26.2875,E24,S -709,1,1,"Cleaver, Miss. Alice",female,22,0,0,113781,151.55,,S -710,1,3,"Moubarek, Master. Halim Gonios (""William George"")",male,,1,1,2661,15.2458,,C -711,1,1,"Mayne, Mlle. Berthe Antonine (""Mrs de Villiers"")",female,24,0,0,PC 17482,49.5042,C90,C -712,0,1,"Klaber, Mr. Herman",male,,0,0,113028,26.55,C124,S -713,1,1,"Taylor, Mr. Elmer Zebley",male,48,1,0,19996,52,C126,S -714,0,3,"Larsson, Mr. August Viktor",male,29,0,0,7545,9.4833,,S -715,0,2,"Greenberg, Mr. Samuel",male,52,0,0,250647,13,,S -716,0,3,"Soholt, Mr. Peter Andreas Lauritz Andersen",male,19,0,0,348124,7.65,F G73,S -717,1,1,"Endres, Miss. Caroline Louise",female,38,0,0,PC 17757,227.525,C45,C -718,1,2,"Troutt, Miss. Edwina Celia ""Winnie""",female,27,0,0,34218,10.5,E101,S -719,0,3,"McEvoy, Mr. Michael",male,,0,0,36568,15.5,,Q -720,0,3,"Johnson, Mr. 
Malkolm Joackim",male,33,0,0,347062,7.775,,S -721,1,2,"Harper, Miss. Annie Jessie ""Nina""",female,6,0,1,248727,33,,S -722,0,3,"Jensen, Mr. Svend Lauritz",male,17,1,0,350048,7.0542,,S -723,0,2,"Gillespie, Mr. William Henry",male,34,0,0,12233,13,,S -724,0,2,"Hodges, Mr. Henry Price",male,50,0,0,250643,13,,S -725,1,1,"Chambers, Mr. Norman Campbell",male,27,1,0,113806,53.1,E8,S -726,0,3,"Oreskovic, Mr. Luka",male,20,0,0,315094,8.6625,,S -727,1,2,"Renouf, Mrs. Peter Henry (Lillian Jefferys)",female,30,3,0,31027,21,,S -728,1,3,"Mannion, Miss. Margareth",female,,0,0,36866,7.7375,,Q -729,0,2,"Bryhl, Mr. Kurt Arnold Gottfrid",male,25,1,0,236853,26,,S -730,0,3,"Ilmakangas, Miss. Pieta Sofia",female,25,1,0,STON/O2. 3101271,7.925,,S -731,1,1,"Allen, Miss. Elisabeth Walton",female,29,0,0,24160,211.3375,B5,S -732,0,3,"Hassan, Mr. Houssein G N",male,11,0,0,2699,18.7875,,C -733,0,2,"Knight, Mr. Robert J",male,,0,0,239855,0,,S -734,0,2,"Berriman, Mr. William John",male,23,0,0,28425,13,,S -735,0,2,"Troupiansky, Mr. Moses Aaron",male,23,0,0,233639,13,,S -736,0,3,"Williams, Mr. Leslie",male,28.5,0,0,54636,16.1,,S -737,0,3,"Ford, Mrs. Edward (Margaret Ann Watson)",female,48,1,3,W./C. 6608,34.375,,S -738,1,1,"Lesurer, Mr. Gustave J",male,35,0,0,PC 17755,512.3292,B101,C -739,0,3,"Ivanoff, Mr. Kanio",male,,0,0,349201,7.8958,,S -740,0,3,"Nankoff, Mr. Minko",male,,0,0,349218,7.8958,,S -741,1,1,"Hawksford, Mr. Walter James",male,,0,0,16988,30,D45,S -742,0,1,"Cavendish, Mr. Tyrell William",male,36,1,0,19877,78.85,C46,S -743,1,1,"Ryerson, Miss. Susan Parker ""Suzette""",female,21,2,2,PC 17608,262.375,B57 B59 B63 B66,C -744,0,3,"McNamee, Mr. Neal",male,24,1,0,376566,16.1,,S -745,1,3,"Stranden, Mr. Juho",male,31,0,0,STON/O 2. 3101288,7.925,,S -746,0,1,"Crosby, Capt. Edward Gifford",male,70,1,1,WE/P 5735,71,B22,S -747,0,3,"Abbott, Mr. Rossmore Edward",male,16,1,1,C.A. 2673,20.25,,S -748,1,2,"Sinkkonen, Miss. Anna",female,30,0,0,250648,13,,S -749,0,1,"Marvin, Mr. Daniel Warner",male,19,1,0,113773,53.1,D30,S -750,0,3,"Connaghton, Mr. Michael",male,31,0,0,335097,7.75,,Q -751,1,2,"Wells, Miss. Joan",female,4,1,1,29103,23,,S -752,1,3,"Moor, Master. Meier",male,6,0,1,392096,12.475,E121,S -753,0,3,"Vande Velde, Mr. Johannes Joseph",male,33,0,0,345780,9.5,,S -754,0,3,"Jonkoff, Mr. Lalio",male,23,0,0,349204,7.8958,,S -755,1,2,"Herman, Mrs. Samuel (Jane Laver)",female,48,1,2,220845,65,,S -756,1,2,"Hamalainen, Master. Viljo",male,0.67,1,1,250649,14.5,,S -757,0,3,"Carlsson, Mr. August Sigfrid",male,28,0,0,350042,7.7958,,S -758,0,2,"Bailey, Mr. Percy Andrew",male,18,0,0,29108,11.5,,S -759,0,3,"Theobald, Mr. Thomas Leonard",male,34,0,0,363294,8.05,,S -760,1,1,"Rothes, the Countess. of (Lucy Noel Martha Dyer-Edwards)",female,33,0,0,110152,86.5,B77,S -761,0,3,"Garfirth, Mr. John",male,,0,0,358585,14.5,,S -762,0,3,"Nirva, Mr. Iisakki Antino Aijo",male,41,0,0,SOTON/O2 3101272,7.125,,S -763,1,3,"Barah, Mr. Hanna Assi",male,20,0,0,2663,7.2292,,C -764,1,1,"Carter, Mrs. William Ernest (Lucile Polk)",female,36,1,2,113760,120,B96 B98,S -765,0,3,"Eklund, Mr. Hans Linus",male,16,0,0,347074,7.775,,S -766,1,1,"Hogeboom, Mrs. John C (Anna Andrews)",female,51,1,0,13502,77.9583,D11,S -767,0,1,"Brewe, Dr. Arthur Jackson",male,,0,0,112379,39.6,,C -768,0,3,"Mangan, Miss. Mary",female,30.5,0,0,364850,7.75,,Q -769,0,3,"Moran, Mr. Daniel J",male,,1,0,371110,24.15,,Q -770,0,3,"Gronnestad, Mr. Daniel Danielsen",male,32,0,0,8471,8.3625,,S -771,0,3,"Lievens, Mr. Rene Aime",male,24,0,0,345781,9.5,,S -772,0,3,"Jensen, Mr. 
Niels Peder",male,48,0,0,350047,7.8542,,S -773,0,2,"Mack, Mrs. (Mary)",female,57,0,0,S.O./P.P. 3,10.5,E77,S -774,0,3,"Elias, Mr. Dibo",male,,0,0,2674,7.225,,C -775,1,2,"Hocking, Mrs. Elizabeth (Eliza Needs)",female,54,1,3,29105,23,,S -776,0,3,"Myhrman, Mr. Pehr Fabian Oliver Malkolm",male,18,0,0,347078,7.75,,S -777,0,3,"Tobin, Mr. Roger",male,,0,0,383121,7.75,F38,Q -778,1,3,"Emanuel, Miss. Virginia Ethel",female,5,0,0,364516,12.475,,S -779,0,3,"Kilgannon, Mr. Thomas J",male,,0,0,36865,7.7375,,Q -780,1,1,"Robert, Mrs. Edward Scott (Elisabeth Walton McMillan)",female,43,0,1,24160,211.3375,B3,S -781,1,3,"Ayoub, Miss. Banoura",female,13,0,0,2687,7.2292,,C -782,1,1,"Dick, Mrs. Albert Adrian (Vera Gillespie)",female,17,1,0,17474,57,B20,S -783,0,1,"Long, Mr. Milton Clyde",male,29,0,0,113501,30,D6,S -784,0,3,"Johnston, Mr. Andrew G",male,,1,2,W./C. 6607,23.45,,S -785,0,3,"Ali, Mr. William",male,25,0,0,SOTON/O.Q. 3101312,7.05,,S -786,0,3,"Harmer, Mr. Abraham (David Lishin)",male,25,0,0,374887,7.25,,S -787,1,3,"Sjoblom, Miss. Anna Sofia",female,18,0,0,3101265,7.4958,,S -788,0,3,"Rice, Master. George Hugh",male,8,4,1,382652,29.125,,Q -789,1,3,"Dean, Master. Bertram Vere",male,1,1,2,C.A. 2315,20.575,,S -790,0,1,"Guggenheim, Mr. Benjamin",male,46,0,0,PC 17593,79.2,B82 B84,C -791,0,3,"Keane, Mr. Andrew ""Andy""",male,,0,0,12460,7.75,,Q -792,0,2,"Gaskell, Mr. Alfred",male,16,0,0,239865,26,,S -793,0,3,"Sage, Miss. Stella Anna",female,,8,2,CA. 2343,69.55,,S -794,0,1,"Hoyt, Mr. William Fisher",male,,0,0,PC 17600,30.6958,,C -795,0,3,"Dantcheff, Mr. Ristiu",male,25,0,0,349203,7.8958,,S -796,0,2,"Otter, Mr. Richard",male,39,0,0,28213,13,,S -797,1,1,"Leader, Dr. Alice (Farnham)",female,49,0,0,17465,25.9292,D17,S -798,1,3,"Osman, Mrs. Mara",female,31,0,0,349244,8.6833,,S -799,0,3,"Ibrahim Shawah, Mr. Yousseff",male,30,0,0,2685,7.2292,,C -800,0,3,"Van Impe, Mrs. Jean Baptiste (Rosalie Paula Govaert)",female,30,1,1,345773,24.15,,S -801,0,2,"Ponesell, Mr. Martin",male,34,0,0,250647,13,,S -802,1,2,"Collyer, Mrs. Harvey (Charlotte Annie Tate)",female,31,1,1,C.A. 31921,26.25,,S -803,1,1,"Carter, Master. William Thornton II",male,11,1,2,113760,120,B96 B98,S -804,1,3,"Thomas, Master. Assad Alexander",male,0.42,0,1,2625,8.5167,,C -805,1,3,"Hedman, Mr. Oskar Arvid",male,27,0,0,347089,6.975,,S -806,0,3,"Johansson, Mr. Karl Johan",male,31,0,0,347063,7.775,,S -807,0,1,"Andrews, Mr. Thomas Jr",male,39,0,0,112050,0,A36,S -808,0,3,"Pettersson, Miss. Ellen Natalia",female,18,0,0,347087,7.775,,S -809,0,2,"Meyer, Mr. August",male,39,0,0,248723,13,,S -810,1,1,"Chambers, Mrs. Norman Campbell (Bertha Griggs)",female,33,1,0,113806,53.1,E8,S -811,0,3,"Alexander, Mr. William",male,26,0,0,3474,7.8875,,S -812,0,3,"Lester, Mr. James",male,39,0,0,A/4 48871,24.15,,S -813,0,2,"Slemen, Mr. Richard James",male,35,0,0,28206,10.5,,S -814,0,3,"Andersson, Miss. Ebba Iris Alfrida",female,6,4,2,347082,31.275,,S -815,0,3,"Tomlin, Mr. Ernest Portage",male,30.5,0,0,364499,8.05,,S -816,0,1,"Fry, Mr. Richard",male,,0,0,112058,0,B102,S -817,0,3,"Heininen, Miss. Wendla Maria",female,23,0,0,STON/O2. 3101290,7.925,,S -818,0,2,"Mallet, Mr. Albert",male,31,1,1,S.C./PARIS 2079,37.0042,,C -819,0,3,"Holm, Mr. John Fredrik Alexander",male,43,0,0,C 7075,6.45,,S -820,0,3,"Skoog, Master. Karl Thorsten",male,10,3,2,347088,27.9,,S -821,1,1,"Hays, Mrs. Charles Melville (Clara Jennings Gregg)",female,52,1,1,12749,93.5,B69,S -822,1,3,"Lulic, Mr. Nikola",male,27,0,0,315098,8.6625,,S -823,0,1,"Reuchlin, Jonkheer. John George",male,38,0,0,19972,0,,S -824,1,3,"Moor, Mrs. 
(Beila)",female,27,0,1,392096,12.475,E121,S -825,0,3,"Panula, Master. Urho Abraham",male,2,4,1,3101295,39.6875,,S -826,0,3,"Flynn, Mr. John",male,,0,0,368323,6.95,,Q -827,0,3,"Lam, Mr. Len",male,,0,0,1601,56.4958,,S -828,1,2,"Mallet, Master. Andre",male,1,0,2,S.C./PARIS 2079,37.0042,,C -829,1,3,"McCormack, Mr. Thomas Joseph",male,,0,0,367228,7.75,,Q -830,1,1,"Stone, Mrs. George Nelson (Martha Evelyn)",female,62,0,0,113572,80,B28, -831,1,3,"Yasbeck, Mrs. Antoni (Selini Alexander)",female,15,1,0,2659,14.4542,,C -832,1,2,"Richards, Master. George Sibley",male,0.83,1,1,29106,18.75,,S -833,0,3,"Saad, Mr. Amin",male,,0,0,2671,7.2292,,C -834,0,3,"Augustsson, Mr. Albert",male,23,0,0,347468,7.8542,,S -835,0,3,"Allum, Mr. Owen George",male,18,0,0,2223,8.3,,S -836,1,1,"Compton, Miss. Sara Rebecca",female,39,1,1,PC 17756,83.1583,E49,C -837,0,3,"Pasic, Mr. Jakob",male,21,0,0,315097,8.6625,,S -838,0,3,"Sirota, Mr. Maurice",male,,0,0,392092,8.05,,S -839,1,3,"Chip, Mr. Chang",male,32,0,0,1601,56.4958,,S -840,1,1,"Marechal, Mr. Pierre",male,,0,0,11774,29.7,C47,C -841,0,3,"Alhomaki, Mr. Ilmari Rudolf",male,20,0,0,SOTON/O2 3101287,7.925,,S -842,0,2,"Mudd, Mr. Thomas Charles",male,16,0,0,S.O./P.P. 3,10.5,,S -843,1,1,"Serepeca, Miss. Augusta",female,30,0,0,113798,31,,C -844,0,3,"Lemberopolous, Mr. Peter L",male,34.5,0,0,2683,6.4375,,C -845,0,3,"Culumovic, Mr. Jeso",male,17,0,0,315090,8.6625,,S -846,0,3,"Abbing, Mr. Anthony",male,42,0,0,C.A. 5547,7.55,,S -847,0,3,"Sage, Mr. Douglas Bullen",male,,8,2,CA. 2343,69.55,,S -848,0,3,"Markoff, Mr. Marin",male,35,0,0,349213,7.8958,,C -849,0,2,"Harper, Rev. John",male,28,0,1,248727,33,,S -850,1,1,"Goldenberg, Mrs. Samuel L (Edwiga Grabowska)",female,,1,0,17453,89.1042,C92,C -851,0,3,"Andersson, Master. Sigvard Harald Elias",male,4,4,2,347082,31.275,,S -852,0,3,"Svensson, Mr. Johan",male,74,0,0,347060,7.775,,S -853,0,3,"Boulos, Miss. Nourelain",female,9,1,1,2678,15.2458,,C -854,1,1,"Lines, Miss. Mary Conover",female,16,0,1,PC 17592,39.4,D28,S -855,0,2,"Carter, Mrs. Ernest Courtenay (Lilian Hughes)",female,44,1,0,244252,26,,S -856,1,3,"Aks, Mrs. Sam (Leah Rosen)",female,18,0,1,392091,9.35,,S -857,1,1,"Wick, Mrs. George Dennick (Mary Hitchcock)",female,45,1,1,36928,164.8667,,S -858,1,1,"Daly, Mr. Peter Denis ",male,51,0,0,113055,26.55,E17,S -859,1,3,"Baclini, Mrs. Solomon (Latifa Qurban)",female,24,0,3,2666,19.2583,,C -860,0,3,"Razi, Mr. Raihed",male,,0,0,2629,7.2292,,C -861,0,3,"Hansen, Mr. Claus Peter",male,41,2,0,350026,14.1083,,S -862,0,2,"Giles, Mr. Frederick Edward",male,21,1,0,28134,11.5,,S -863,1,1,"Swift, Mrs. Frederick Joel (Margaret Welles Barron)",female,48,0,0,17466,25.9292,D17,S -864,0,3,"Sage, Miss. Dorothy Edith ""Dolly""",female,,8,2,CA. 2343,69.55,,S -865,0,2,"Gill, Mr. John William",male,24,0,0,233866,13,,S -866,1,2,"Bystrom, Mrs. (Karolina)",female,42,0,0,236852,13,,S -867,1,2,"Duran y More, Miss. Asuncion",female,27,1,0,SC/PARIS 2149,13.8583,,C -868,0,1,"Roebling, Mr. Washington Augustus II",male,31,0,0,PC 17590,50.4958,A24,S -869,0,3,"van Melkebeke, Mr. Philemon",male,,0,0,345777,9.5,,S -870,1,3,"Johnson, Master. Harold Theodor",male,4,1,1,347742,11.1333,,S -871,0,3,"Balkic, Mr. Cerin",male,26,0,0,349248,7.8958,,S -872,1,1,"Beckwith, Mrs. Richard Leonard (Sallie Monypeny)",female,47,1,1,11751,52.5542,D35,S -873,0,1,"Carlsson, Mr. Frans Olof",male,33,0,0,695,5,B51 B53 B55,S -874,0,3,"Vander Cruyssen, Mr. Victor",male,47,0,0,345765,9,,S -875,1,2,"Abelson, Mrs. Samuel (Hannah Wizosky)",female,28,1,0,P/PP 3381,24,,C -876,1,3,"Najib, Miss. 
Adele Kiamie ""Jane""",female,15,0,0,2667,7.225,,C -877,0,3,"Gustafsson, Mr. Alfred Ossian",male,20,0,0,7534,9.8458,,S -878,0,3,"Petroff, Mr. Nedelio",male,19,0,0,349212,7.8958,,S -879,0,3,"Laleff, Mr. Kristo",male,,0,0,349217,7.8958,,S -880,1,1,"Potter, Mrs. Thomas Jr (Lily Alexenia Wilson)",female,56,0,1,11767,83.1583,C50,C -881,1,2,"Shelley, Mrs. William (Imanita Parrish Hall)",female,25,0,1,230433,26,,S -882,0,3,"Markun, Mr. Johann",male,33,0,0,349257,7.8958,,S -883,0,3,"Dahlberg, Miss. Gerda Ulrika",female,22,0,0,7552,10.5167,,S -884,0,2,"Banfield, Mr. Frederick James",male,28,0,0,C.A./SOTON 34068,10.5,,S -885,0,3,"Sutehall, Mr. Henry Jr",male,25,0,0,SOTON/OQ 392076,7.05,,S -886,0,3,"Rice, Mrs. William (Margaret Norton)",female,39,0,5,382652,29.125,,Q -887,0,2,"Montvila, Rev. Juozas",male,27,0,0,211536,13,,S -888,1,1,"Graham, Miss. Margaret Edith",female,19,0,0,112053,30,B42,S -889,0,3,"Johnston, Miss. Catherine Helen ""Carrie""",female,,1,2,W./C. 6607,23.45,,S -890,1,1,"Behr, Mr. Karl Howell",male,26,0,0,111369,30,C148,C -891,0,3,"Dooley, Mr. Patrick",male,32,0,0,370376,7.75,,Q diff --git a/templates/csv-agent/titanic_data/index.faiss b/templates/csv-agent/titanic_data/index.faiss deleted file mode 100644 index b7bfe99b6c1..00000000000 Binary files a/templates/csv-agent/titanic_data/index.faiss and /dev/null differ diff --git a/templates/csv-agent/titanic_data/index.pkl b/templates/csv-agent/titanic_data/index.pkl deleted file mode 100644 index 4b8073486a2..00000000000 Binary files a/templates/csv-agent/titanic_data/index.pkl and /dev/null differ diff --git a/templates/docs/CONTRIBUTING.md b/templates/docs/CONTRIBUTING.md deleted file mode 100644 index 3888df1d44e..00000000000 --- a/templates/docs/CONTRIBUTING.md +++ /dev/null @@ -1,43 +0,0 @@ -# Contributing - -Thanks for taking the time to contribute a new template! -We've tried to make this process as simple and painless as possible. -If you need any help at all, please reach out! - -To contribute a new template, first fork this repository. -Then clone that fork and pull it down locally. -Set up an appropriate dev environment, and make sure you are in this `templates` directory. - -Make sure you have `langchain-cli` installed. - -```shell -pip install -U langchain-cli -``` - -You can then run the following command to create a new skeleton of a package. -By convention, package names should use `-` delimiters (not `_`). - -```shell -langchain template new $PROJECT_NAME -``` - -You can then edit the contents of the package as you desire. -Note that by default we expect the main chain to be exposed as `chain` in the `__init__.py` file of the package. -You can change this (either the name or the location), but if you do so it is important to update the `tool.langchain` -part of `pyproject.toml`. -For example, if you update the main chain exposed to be called `agent_executor`, then that section should look like: - -```text -[tool.langserve] -export_module = "..." -export_attr = "agent_executor" -``` - -Make sure to add any requirements of the package to `pyproject.toml` (and to remove any that are not used). - -Please update the `README.md` file to give some background on your package and how to set it up. - -If you want to change the license of your template for whatever, you may! Note that by default it is MIT licensed. - -If you want to test out your package at any point in time, you can spin up a LangServe instance directly from the package. -See instructions [here](LAUNCHING_PACKAGE.md) on how to best do that. 
diff --git a/templates/docs/INDEX.md b/templates/docs/INDEX.md deleted file mode 100644 index 2a5294d74cc..00000000000 --- a/templates/docs/INDEX.md +++ /dev/null @@ -1,80 +0,0 @@ -# Templates - -Highlighting a few different categories of templates - -## ⭐ Popular - -These are some of the more popular templates to get started with. - -- [Retrieval Augmented Generation Chatbot](../rag-conversation): Build a chatbot over your data. Defaults to OpenAI and PineconeVectorStore. -- [Extraction with OpenAI Functions](../extraction-openai-functions): Do extraction of structured data from unstructured data. Uses OpenAI function calling. -- [Local Retrieval Augmented Generation](../rag-chroma-private): Build a chatbot over your data. Uses only local tooling: Ollama, GPT4all, Chroma. -- [OpenAI Functions Agent](../openai-functions-agent): Build a chatbot that can take actions. Uses OpenAI function calling and Tavily. -- [XML Agent](../xml-agent): Build a chatbot that can take actions. Uses Anthropic and You.com. - - -## 📥 Advanced Retrieval - -These templates cover advanced retrieval techniques, which can be used for chat and QA over databases or documents. - -- [Reranking](../rag-pinecone-rerank): This retrieval technique uses Cohere's reranking endpoint to rerank documents from an initial retrieval step. -- [Anthropic Iterative Search](../anthropic-iterative-search): This retrieval technique uses iterative prompting to determine what to retrieve and whether the retriever documents are good enough. -- **Parent Document Retrieval** using [Neo4j](../neo4j-parent) or [MongoDB](../mongo-parent-document-retrieval): This retrieval technique stores embeddings for smaller chunks, but then returns larger chunks to pass to the model for generation. -- [Semi-Structured RAG](../rag-semi-structured): The template shows how to do retrieval over semi-structured data (e.g. data that involves both text and tables). -- [Temporal RAG](../rag-timescale-hybrid-search-time): The template shows how to do hybrid search over data with a time-based component using [Timescale Vector](https://www.timescale.com/ai?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral). - -## 🔍Advanced Retrieval - Query Transformation - -A selection of advanced retrieval methods that involve transforming the original user query, which can improve retrieval quality. - -- [Hypothetical Document Embeddings](../hyde): A retrieval technique that generates a hypothetical document for a given query, and then uses the embedding of that document to do semantic search. [Paper](https://arxiv.org/abs/2212.10496). -- [Rewrite-Retrieve-Read](../rewrite-retrieve-read): A retrieval technique that rewrites a given query before passing it to a search engine. [Paper](https://arxiv.org/abs/2305.14283). -- [Step-back QA Prompting](../stepback-qa-prompting): A retrieval technique that generates a "step-back" question and then retrieves documents relevant to both that question and the original question. [Paper](https://arxiv.org/abs//2310.06117). -- [RAG-Fusion](../rag-fusion): A retrieval technique that generates multiple queries and then reranks the retrieved documents using reciprocal rank fusion. [Article](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1). -- [Multi-Query Retriever](../rag-pinecone-multi-query): This retrieval technique uses an LLM to generate multiple queries and then fetches documents for all queries. 
- - -## 🧠Advanced Retrieval - Query Construction - -A selection of advanced retrieval methods that involve constructing a query in a separate DSL from natural language, which enable natural language chat over various structured databases. - -- [Elastic Query Generator](../elastic-query-generator): Generate elastic search queries from natural language. -- [Neo4j Cypher Generation](../neo4j-cypher): Generate cypher statements from natural language. Available with a ["full text" option](../neo4j-cypher-ft) as well. -- [Supabase Self Query](../self-query-supabase): Parse a natural language query into a semantic query as well as a metadata filter for Supabase. - -## 🦙 OSS Models - -These templates use OSS models, which enable privacy for sensitive data. - -- [Local Retrieval Augmented Generation](../rag-chroma-private): Build a chatbot over your data. Uses only local tooling: Ollama, GPT4all, Chroma. -- [SQL Question Answering (Replicate)](../sql-llama2): Question answering over a SQL database, using Llama2 hosted on [Replicate](https://replicate.com/). -- [SQL Question Answering (LlamaCpp)](../sql-llamacpp): Question answering over a SQL database, using Llama2 through [LlamaCpp](https://github.com/ggerganov/llama.cpp). -- [SQL Question Answering (Ollama)](../sql-ollama): Question answering over a SQL database, using Llama2 through [Ollama](https://github.com/jmorganca/ollama). - -## ⛏️ Extraction - -These templates extract data in a structured format based upon a user-specified schema. - -- [Extraction Using OpenAI Functions](../extraction-openai-functions): Extract information from text using OpenAI Function Calling. -- [Extraction Using Anthropic Functions](../extraction-anthropic-functions): Extract information from text using a LangChain wrapper around the Anthropic endpoints intended to simulate function calling. -- [Extract BioTech Plate Data](../plate-chain): Extract microplate data from messy Excel spreadsheets into a more normalized format. - -## ⛏️Summarization and tagging - -These templates summarize or categorize documents and text. - -- [Summarization using Anthropic](../summarize-anthropic): Uses Anthropic's Claude2 to summarize long documents. - -## 🤖 Agents - -These templates build chatbots that can take actions, helping to automate tasks. - -- [OpenAI Functions Agent](../openai-functions-agent): Build a chatbot that can take actions. Uses OpenAI function calling and Tavily. -- [XML Agent](../xml-agent): Build a chatbot that can take actions. Uses Anthropic and You.com. - -## :rotating_light: Safety and evaluation - -These templates enable moderation or evaluation of LLM outputs. - -- [Guardrails Output Parser](../guardrails-output-parser): Use guardrails-ai to validate LLM output. -- [Chatbot Feedback](../chat-bot-feedback): Use LangSmith to evaluate chatbot responses. diff --git a/templates/docs/LAUNCHING_PACKAGE.md b/templates/docs/LAUNCHING_PACKAGE.md deleted file mode 100644 index ea97385f7a6..00000000000 --- a/templates/docs/LAUNCHING_PACKAGE.md +++ /dev/null @@ -1,41 +0,0 @@ -# Launching LangServe from a Package - -You can also launch LangServe directly from a package, without having to pull it into a project. -This can be useful when you are developing a package and want to test it quickly. -The downside of this is that it gives you a little less control over how the LangServe APIs are configured, -which is why for proper projects we recommend creating a full project. - -In order to do this, first change your working directory to the package itself. 
-For example, if you are currently in this `templates` module, you can go into the `pirate-speak` package with: - -```shell -cd pirate-speak -``` - -Inside this package there is a `pyproject.toml` file. -This file contains a `tool.langchain` section that contains information on how this package should be used. -For example, in `pirate-speak` we see: - -```text -[tool.langserve] -export_module = "pirate_speak.chain" -export_attr = "chain" -``` - -This information can be used to launch a LangServe instance automatically. -In order to do this, first make sure the CLI is installed: - -```shell -pip install -U langchain-cli -``` - -You can then run: - -```shell -langchain template serve -``` - -This will spin up endpoints, documentation, and playground for this chain. -For example, you can access the playground at [http://127.0.0.1:8000/playground/](http://127.0.0.1:8000/playground/) - -![Screenshot of the LangServe Playground web interface with input and output fields.](playground.png) "LangServe Playground Interface" diff --git a/templates/docs/docs.png b/templates/docs/docs.png deleted file mode 100644 index 3ad2fc8a6d1..00000000000 Binary files a/templates/docs/docs.png and /dev/null differ diff --git a/templates/docs/playground.png b/templates/docs/playground.png deleted file mode 100644 index 6ecc38a40b8..00000000000 Binary files a/templates/docs/playground.png and /dev/null differ diff --git a/templates/elastic-query-generator/README.md b/templates/elastic-query-generator/README.md deleted file mode 100644 index 945e7fbeb49..00000000000 --- a/templates/elastic-query-generator/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# Elasticsearch - query generator - -This template allows interacting with `Elasticsearch` analytics databases -in natural language using LLMs. - -It builds search queries via the `Elasticsearch DSL API` (filters and aggregations). - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -### Installing Elasticsearch - -There are a number of ways to run Elasticsearch. However, one recommended way is through Elastic Cloud. - -Create a free trial account on [Elastic Cloud](https://cloud.elastic.co/registration?utm_source=langchain&utm_content=langserve). - -With a deployment, update the connection string. - -Password and connection (elasticsearch url) can be found on the deployment console. - -Note that the Elasticsearch client must have permissions for index listing, mapping description, and search queries. - -### Populating with data - -If you want to populate the DB with some example info, you can run `python ingest.py`. - -This will create a `customers` index. In this package, we specify indexes to generate queries against, and we specify `["customers"]`. This is specific to setting up your Elastic index. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package elastic-query-generator -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add elastic-query-generator -``` - -And add the following code to your `server.py` file: -```python -from elastic_query_generator.chain import chain as elastic_query_generator_chain - -add_routes(app, elastic_query_generator_chain, path="/elastic-query-generator") -``` - -(Optional) Let's now configure LangSmith. 
-LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/elastic-query-generator/playground](http://127.0.0.1:8000/elastic-query-generator/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/elastic-query-generator") -``` diff --git a/templates/elastic-query-generator/elastic_query_generator/__init__.py b/templates/elastic-query-generator/elastic_query_generator/__init__.py deleted file mode 100644 index 24cb3fc2573..00000000000 --- a/templates/elastic-query-generator/elastic_query_generator/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from elastic_query_generator.chain import chain - -__all__ = [ - "chain", -] diff --git a/templates/elastic-query-generator/elastic_query_generator/chain.py b/templates/elastic-query-generator/elastic_query_generator/chain.py deleted file mode 100644 index 701650bef68..00000000000 --- a/templates/elastic-query-generator/elastic_query_generator/chain.py +++ /dev/null @@ -1,51 +0,0 @@ -from elasticsearch import Elasticsearch -from langchain.output_parsers.json import SimpleJsonOutputParser -from langchain_community.chat_models import ChatOpenAI -from langchain_core.pydantic_v1 import BaseModel - -from .elastic_index_info import get_indices_infos -from .prompts import DSL_PROMPT - -# Setup Elasticsearch -# This shows how to set it up for a cloud hosted version - -# Password for the 'elastic' user generated by Elasticsearch -ELASTIC_PASSWORD = "..." - -# Found in the 'Manage Deployment' page -CLOUD_ID = "..." - -# Create the client instance -db = Elasticsearch(cloud_id=CLOUD_ID, basic_auth=("elastic", ELASTIC_PASSWORD)) - -# Specify indices to include -# If you want to use on your own indices, you will need to change this. -INCLUDE_INDICES = ["customers"] - -# With the Elasticsearch connection created, we can now move on to the chain - -_model = ChatOpenAI(temperature=0, model="gpt-4") - -chain = ( - { - "input": lambda x: x["input"], - # This line only get index info for "customers" index. - # If you are running this on your own data, you will want to change. 
- "indices_info": lambda _: get_indices_infos( - db, include_indices=INCLUDE_INDICES - ), - "top_k": lambda x: x.get("top_k", 5), - } - | DSL_PROMPT - | _model - | SimpleJsonOutputParser() -) - - -# Nicely typed inputs for playground -class ChainInputs(BaseModel): - input: str - top_k: int = 5 - - -chain = chain.with_types(input_type=ChainInputs) diff --git a/templates/elastic-query-generator/elastic_query_generator/elastic_index_info.py b/templates/elastic-query-generator/elastic_query_generator/elastic_index_info.py deleted file mode 100644 index 15884f476eb..00000000000 --- a/templates/elastic-query-generator/elastic_query_generator/elastic_index_info.py +++ /dev/null @@ -1,39 +0,0 @@ -from typing import List - - -def _list_indices(database, include_indices=None, ignore_indices=None) -> List[str]: - all_indices = [index["index"] for index in database.cat.indices(format="json")] - - if include_indices: - all_indices = [i for i in all_indices if i in include_indices] - if ignore_indices: - all_indices = [i for i in all_indices if i not in ignore_indices] - - return all_indices - - -def get_indices_infos( - database, - sample_documents_in_index_info=5, - include_indices=None, - ignore_indices=None, -) -> str: - indices = _list_indices( - database, include_indices=include_indices, ignore_indices=ignore_indices - ) - mappings = database.indices.get_mapping(index=",".join(indices)) - if sample_documents_in_index_info > 0: - for k, v in mappings.items(): - hits = database.search( - index=k, - query={"match_all": {}}, - size=sample_documents_in_index_info, - )["hits"]["hits"] - hits = [str(hit["_source"]) for hit in hits] - mappings[k]["mappings"] = str(v) + "\n\n/*\n" + "\n".join(hits) + "\n*/" - return "\n\n".join( - [ - "Mapping for index {}:\n{}".format(index, mappings[index]["mappings"]) - for index in mappings - ] - ) diff --git a/templates/elastic-query-generator/elastic_query_generator/prompts.py b/templates/elastic-query-generator/elastic_query_generator/prompts.py deleted file mode 100644 index de6b26e4b63..00000000000 --- a/templates/elastic-query-generator/elastic_query_generator/prompts.py +++ /dev/null @@ -1,21 +0,0 @@ -from langchain_core.prompts.prompt import PromptTemplate - -PROMPT_SUFFIX = """Only use the following Elasticsearch indices: -{indices_info} - -Question: {input} -ESQuery:""" - -DEFAULT_DSL_TEMPLATE = """Given an input question, create a syntactically correct Elasticsearch query to run. Always limit your query to at most {top_k} results, unless the user specifies in their question a specific number of examples they wish to obtain, or unless its implied that they want to see all. You can order the results by a relevant column to return the most interesting examples in the database. - -Unless told to do not query for all the columns from a specific index, only ask for a the few relevant columns given the question. - -Pay attention to use only the column names that you can see in the mapping description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which index. Return the query as valid json. 
- -Use the following format: - -Question: Question here -ESQuery: Elasticsearch Query formatted as json -""" # noqa: E501 - -DSL_PROMPT = PromptTemplate.from_template(DEFAULT_DSL_TEMPLATE + PROMPT_SUFFIX) diff --git a/templates/elastic-query-generator/ingest.py b/templates/elastic-query-generator/ingest.py deleted file mode 100644 index 7f8dabd3cd5..00000000000 --- a/templates/elastic-query-generator/ingest.py +++ /dev/null @@ -1,23 +0,0 @@ -from elasticsearch import Elasticsearch - -# Setup Elasticsearch -# This shows how to set it up for a cloud hosted version - -# Password for the 'elastic' user generated by Elasticsearch -ELASTIC_PASSWORD = "..." - -# Found in the 'Manage Deployment' page -CLOUD_ID = "..." - -# Create the client instance -db = Elasticsearch(cloud_id=CLOUD_ID, basic_auth=("elastic", ELASTIC_PASSWORD)) - -customers = [ - {"firstname": "Jennifer", "lastname": "Walters"}, - {"firstname": "Monica", "lastname": "Rambeau"}, - {"firstname": "Carol", "lastname": "Danvers"}, - {"firstname": "Wanda", "lastname": "Maximoff"}, - {"firstname": "Jennifer", "lastname": "Takeda"}, -] -for i, customer in enumerate(customers): - db.create(index="customers", document=customer, id=i) diff --git a/templates/elastic-query-generator/main.py b/templates/elastic-query-generator/main.py deleted file mode 100644 index 4f848b6e88a..00000000000 --- a/templates/elastic-query-generator/main.py +++ /dev/null @@ -1,4 +0,0 @@ -from elastic_query_generator.chain import chain - -if __name__ == "__main__": - print(chain.invoke({"input": "how many customers named Carol"})) diff --git a/templates/elastic-query-generator/pyproject.toml b/templates/elastic-query-generator/pyproject.toml deleted file mode 100644 index 3067def0d78..00000000000 --- a/templates/elastic-query-generator/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "elastic-query-generator" -version = "0.0.1" -description = "Interact with Elasticsearch analytics databases using natural language" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -elasticsearch = "^8.10.1" -openai = "<2" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "elastic_query_generator" -export_attr = "chain" - -[tool.templates-hub] -use-case = "query" -author = "LangChain" -integrations = ["Elasticsearch", "OpenAI"] -tags = ["query-generation"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/elastic-query-generator/tests/__init__.py b/templates/elastic-query-generator/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/extraction-anthropic-functions/LICENSE b/templates/extraction-anthropic-functions/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/extraction-anthropic-functions/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/extraction-anthropic-functions/README.md b/templates/extraction-anthropic-functions/README.md deleted file mode 100644 index 76e3b2ff781..00000000000 --- a/templates/extraction-anthropic-functions/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Extraction - Anthropic functions - -This template enables [Anthropic function calling](https://python.langchain.com/docs/integrations/chat/anthropic_functions). - -This can be used for various tasks, such as extraction or tagging. - -The function output schema can be set in `chain.py`. - -## Environment Setup - -Set the `ANTHROPIC_API_KEY` environment variable to access the Anthropic models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package extraction-anthropic-functions -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add extraction-anthropic-functions -``` - -And add the following code to your `server.py` file: -```python -from extraction_anthropic_functions import chain as extraction_anthropic_functions_chain - -add_routes(app, extraction_anthropic_functions_chain, path="/extraction-anthropic-functions") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/extraction-anthropic-functions/playground](http://127.0.0.1:8000/extraction-anthropic-functions/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/extraction-anthropic-functions") -``` - -By default, the package will extract the title and author of papers from the information you specify in `chain.py`. This template will use `Claude2` by default. - ---- diff --git a/templates/extraction-anthropic-functions/extraction_anthropic_functions.ipynb b/templates/extraction-anthropic-functions/extraction_anthropic_functions.ipynb deleted file mode 100644 index a5a54004718..00000000000 --- a/templates/extraction-anthropic-functions/extraction_anthropic_functions.ipynb +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "4ae4b789", - "metadata": {}, - "source": [ - "## Document Loading\n", - "\n", - "Load a blog post on agents." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "5d6bd62e", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.document_loaders import WebBaseLoader\n", - "\n", - "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", - "text = loader.load()" - ] - }, - { - "cell_type": "markdown", - "id": "8e21575d", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/extraction-anthropic-functions\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5fd794ec-a002-490e-8eb9-06ce3e6c2f14", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "anthropic_function_model = RemoteRunnable(\n", - " \"http://localhost:8001/extraction-anthropic-functions\"\n", - ")\n", - "anthropic_function_model.invoke(text[0].page_content[0:1500])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "langserve", - "language": "python", - "name": "langserve" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/extraction-anthropic-functions/extraction_anthropic_functions/__init__.py b/templates/extraction-anthropic-functions/extraction_anthropic_functions/__init__.py deleted file mode 100644 index 558da85cce7..00000000000 --- a/templates/extraction-anthropic-functions/extraction_anthropic_functions/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from extraction_anthropic_functions.chain import chain - -__all__ = ["chain"] diff --git a/templates/extraction-anthropic-functions/extraction_anthropic_functions/chain.py 
b/templates/extraction-anthropic-functions/extraction_anthropic_functions/chain.py deleted file mode 100644 index a77ceac540f..00000000000 --- a/templates/extraction-anthropic-functions/extraction_anthropic_functions/chain.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import List, Optional - -from langchain.utils.openai_functions import convert_pydantic_to_openai_function -from langchain_core.output_parsers.openai_functions import JsonKeyOutputFunctionsParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_experimental.llms.anthropic_functions import AnthropicFunctions - -template = """A article will be passed to you. Extract from it all papers that are mentioned by this article. - -Do not extract the name of the article itself. If no papers are mentioned that's fine - you don't need to extract any! Just return an empty list. - -Do not make up or guess ANY extra information. Only extract what exactly is in the text.""" # noqa: E501 - -prompt = ChatPromptTemplate.from_messages([("system", template), ("human", "{input}")]) - - -# Function output schema -class Paper(BaseModel): - """Information about papers mentioned.""" - - title: str - author: Optional[str] - - -class Info(BaseModel): - """Information to extract""" - - papers: List[Paper] - - -# Function definition -model = AnthropicFunctions() -function = [convert_pydantic_to_openai_function(Info)] - -chain = ( - prompt - | model.bind(functions=function, function_call={"name": "Info"}) - | JsonKeyOutputFunctionsParser(key_name="papers") -) diff --git a/templates/extraction-anthropic-functions/pyproject.toml b/templates/extraction-anthropic-functions/pyproject.toml deleted file mode 100644 index bba32946ef7..00000000000 --- a/templates/extraction-anthropic-functions/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "extraction-anthropic-functions" -version = "0.1.0" -description = "Use Anthropic function calling for tasks like extraction or tagging" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -anthropic = ">=0.5.0" -langchainhub = ">=0.1.13" -langchain-experimental = ">=0.0.54" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "extraction_anthropic_functions" -export_attr = "chain" - -[tool.templates-hub] -use-case = "extraction" -author = "LangChain" -integrations = ["Anthropic", "Function Calling"] -tags = ["function-calling", "tagging", "extraction"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/extraction-anthropic-functions/tests/__init__.py b/templates/extraction-anthropic-functions/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/extraction-openai-functions/LICENSE b/templates/extraction-openai-functions/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/extraction-openai-functions/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/extraction-openai-functions/README.md b/templates/extraction-openai-functions/README.md deleted file mode 100644 index 286f87c9401..00000000000 --- a/templates/extraction-openai-functions/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# Extraction - OpenAI functions - -This template uses [OpenAI function calling](https://python.langchain.com/docs/modules/chains/how_to/openai_functions) for extraction of structured output from unstructured input text. - -The extraction output schema can be set in `chain.py`. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package extraction-openai-functions -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add extraction-openai-functions -``` - -And add the following code to your `server.py` file: -```python -from extraction_openai_functions import chain as extraction_openai_functions_chain - -add_routes(app, extraction_openai_functions_chain, path="/extraction-openai-functions") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/extraction-openai-functions/playground](http://127.0.0.1:8000/extraction-openai-functions/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/extraction-openai-functions") -``` -By default, this package is set to extract the title and author of papers, as specified in the `chain.py` file. 
- -LLM is leveraged by the OpenAI function by default. diff --git a/templates/extraction-openai-functions/extraction_openai_functions.ipynb b/templates/extraction-openai-functions/extraction_openai_functions.ipynb deleted file mode 100644 index 98ee676a927..00000000000 --- a/templates/extraction-openai-functions/extraction_openai_functions.ipynb +++ /dev/null @@ -1,104 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "16f2c32e", - "metadata": {}, - "source": [ - "## Document Loading\n", - "\n", - "Load a blog post on agents." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "c9fadce0", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.document_loaders import WebBaseLoader\n", - "\n", - "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", - "text = loader.load()" - ] - }, - { - "cell_type": "markdown", - "id": "4086be03", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/extraction_openai_functions\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "ed507784", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "oai_function = RemoteRunnable(\"http://0.0.0.0:8001/extraction_openai_functions\")" - ] - }, - { - "cell_type": "markdown", - "id": "68046695", - "metadata": {}, - "source": [ - "The function wille extract paper titles and authors from an input." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "6dace748", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'title': 'Chain of Thought', 'author': 'Wei et al. 2022'},\n", - " {'title': 'Tree of Thoughts', 'author': 'Yao et al. 2023'},\n", - " {'title': 'LLM+P', 'author': 'Liu et al. 
2023'}]" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "oai_function.invoke({\"input\": text[0].page_content[0:4000]})" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "langserve", - "language": "python", - "name": "langserve" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/extraction-openai-functions/extraction_openai_functions/__init__.py b/templates/extraction-openai-functions/extraction_openai_functions/__init__.py deleted file mode 100644 index d2350f5de2c..00000000000 --- a/templates/extraction-openai-functions/extraction_openai_functions/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from extraction_openai_functions.chain import chain - -__all__ = ["chain"] diff --git a/templates/extraction-openai-functions/extraction_openai_functions/chain.py b/templates/extraction-openai-functions/extraction_openai_functions/chain.py deleted file mode 100644 index 6624f3e6b7b..00000000000 --- a/templates/extraction-openai-functions/extraction_openai_functions/chain.py +++ /dev/null @@ -1,47 +0,0 @@ -import json -from typing import List, Optional - -from langchain.utils.openai_functions import convert_pydantic_to_openai_function -from langchain_community.chat_models import ChatOpenAI -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel - -template = """A article will be passed to you. Extract from it all papers that are mentioned by this article. - -Do not extract the name of the article itself. If no papers are mentioned that's fine - you don't need to extract any! Just return an empty list. - -Do not make up or guess ANY extra information. 
Only extract what exactly is in the text.""" # noqa: E501 - -prompt = ChatPromptTemplate.from_messages([("system", template), ("human", "{input}")]) - - -# Function output schema -class Paper(BaseModel): - """Information about papers mentioned.""" - - title: str - author: Optional[str] - - -class Info(BaseModel): - """Information to extract""" - - papers: List[Paper] - - -# Function definition -model = ChatOpenAI() -function = [convert_pydantic_to_openai_function(Info)] -chain = ( - prompt - | model.bind(functions=function, function_call={"name": "Info"}) - | ( - lambda x: json.loads(x.additional_kwargs["function_call"]["arguments"])[ - "papers" - ] - ) -) - -# chain = prompt | model.bind( -# functions=function, function_call={"name": "Info"} -# ) | JsonKeyOutputFunctionsParser(key_name="papers") diff --git a/templates/extraction-openai-functions/pyproject.toml b/templates/extraction-openai-functions/pyproject.toml deleted file mode 100644 index 67014a54743..00000000000 --- a/templates/extraction-openai-functions/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "extraction-openai-functions" -version = "0.1.0" -description = "Use OpenAI function calling for tasks like extraction or tagging" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "extraction_openai_functions" -export_attr = "chain" - -[tool.templates-hub] -use-case = "extraction" -author = "LangChain" -integrations = ["OpenAI", "Function Calling"] -tags = ["function-calling", "tagging", "extraction"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/extraction-openai-functions/tests/__init__.py b/templates/extraction-openai-functions/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/gemini-functions-agent/LICENSE b/templates/gemini-functions-agent/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/gemini-functions-agent/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/templates/gemini-functions-agent/README.md b/templates/gemini-functions-agent/README.md deleted file mode 100644 index d7ed4ad8429..00000000000 --- a/templates/gemini-functions-agent/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Gemini functions - agent - -This template creates an agent that uses `Google Gemini function calling` to communicate its decisions on what actions to take. - -This example creates an agent that optionally looks up information on the internet using `Tavily's` search engine. - -[See an example LangSmith trace here](https://smith.langchain.com/public/0ebf1bd6-b048-4019-b4de-25efe8d3d18c/r) - -## Environment Setup - -The following environment variables need to be set: - -Set the `TAVILY_API_KEY` environment variable to access Tavily - -Set the `GOOGLE_API_KEY` environment variable to access the Google Gemini APIs. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package gemini-functions-agent -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add gemini-functions-agent -``` - -And add the following code to your `server.py` file: -```python -from gemini_functions_agent import agent_executor as gemini_functions_agent_chain - -add_routes(app, gemini_functions_agent_chain, path="/openai-functions-agent") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/gemini-functions-agent/playground](http://127.0.0.1:8000/gemini-functions-agent/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/gemini-functions-agent") -``` \ No newline at end of file diff --git a/templates/gemini-functions-agent/gemini_functions_agent/__init__.py b/templates/gemini-functions-agent/gemini_functions_agent/__init__.py deleted file mode 100644 index da671951902..00000000000 --- a/templates/gemini-functions-agent/gemini_functions_agent/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from gemini_functions_agent.agent import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/gemini-functions-agent/gemini_functions_agent/agent.py b/templates/gemini-functions-agent/gemini_functions_agent/agent.py deleted file mode 100644 index fb65f4a8acc..00000000000 --- a/templates/gemini-functions-agent/gemini_functions_agent/agent.py +++ /dev/null @@ -1,69 +0,0 @@ -from typing import List, Tuple - -from langchain.agents import AgentExecutor -from langchain.agents.format_scratchpad import format_to_openai_function_messages -from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser -from 
langchain_community.tools.tavily_search import TavilySearchResults -from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_google_genai import ChatGoogleGenerativeAI - -# Create the tool -search = TavilySearchAPIWrapper() -description = """"A search engine optimized for comprehensive, accurate, \ -and trusted results. Useful for when you need to answer questions \ -about current events or about recent information. \ -Input should be a search query. \ -If the user is asking about something that you don't know about, \ -you should probably use this tool to see if that can provide any information.""" -tavily_tool = TavilySearchResults(api_wrapper=search, description=description) - -tools = [tavily_tool] - -llm = ChatGoogleGenerativeAI(temperature=0, model="gemini-pro") - -prompt = ChatPromptTemplate.from_messages( - [ - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{input}"), - MessagesPlaceholder(variable_name="agent_scratchpad"), - ] -) - -llm_with_tools = llm.bind(functions=tools) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]): - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -agent = ( - { - "input": lambda x: x["input"], - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - "agent_scratchpad": lambda x: format_to_openai_function_messages( - x["intermediate_steps"] - ), - } - | prompt - | llm_with_tools - | OpenAIFunctionsAgentOutputParser() -) - - -class AgentInput(BaseModel): - input: str - chat_history: List[Tuple[str, str]] = Field( - ..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}} - ) - - -agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True).with_types( - input_type=AgentInput -) diff --git a/templates/gemini-functions-agent/pyproject.toml b/templates/gemini-functions-agent/pyproject.toml deleted file mode 100644 index e6410fe7878..00000000000 --- a/templates/gemini-functions-agent/pyproject.toml +++ /dev/null @@ -1,29 +0,0 @@ -[tool.poetry] -name = "gemini-functions-agent" -version = "0.1.0" -description = "Agent using Gemini function calling to execute functions, including search" -authors = ["Harrison Chase"] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.9,<4.0" -langchain = "^0.1" -tavily-python = "^0.1.9" -langchain-google-genai = ">=0.0.8,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "gemini_functions_agent" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["Google", "Tavily"] -tags = ["search", "agents", "function-calling"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/gemini-functions-agent/tests/__init__.py b/templates/gemini-functions-agent/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/guardrails-output-parser/LICENSE b/templates/guardrails-output-parser/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/guardrails-output-parser/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/guardrails-output-parser/README.md b/templates/guardrails-output-parser/README.md deleted file mode 100644 index 2d3ebac5ddb..00000000000 --- a/templates/guardrails-output-parser/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Guardrails - output parser - -This template uses [guardrails-ai](https://github.com/guardrails-ai/guardrails) to validate LLM output. - -The `GuardrailsOutputParser` is set in `chain.py`. - -The default example protects against profanity. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package guardrails-output-parser -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add guardrails-output-parser -``` - -And add the following code to your `server.py` file: -```python -from guardrails_output_parser.chain import chain as guardrails_output_parser_chain - -add_routes(app, guardrails_output_parser_chain, path="/guardrails-output-parser") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/guardrails-output-parser/playground](http://127.0.0.1:8000/guardrails-output-parser/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/guardrails-output-parser") -``` - -If Guardrails does not find any profanity, then the translated output is returned as is. If Guardrails does find profanity, then an empty string is returned. 
diff --git a/templates/guardrails-output-parser/guardrails_output_parser/__init__.py b/templates/guardrails-output-parser/guardrails_output_parser/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/guardrails-output-parser/guardrails_output_parser/chain.py b/templates/guardrails-output-parser/guardrails_output_parser/chain.py deleted file mode 100644 index 0e26500850d..00000000000 --- a/templates/guardrails-output-parser/guardrails_output_parser/chain.py +++ /dev/null @@ -1,40 +0,0 @@ -from langchain.output_parsers import GuardrailsOutputParser -from langchain_community.llms import OpenAI -from langchain_core.prompts import PromptTemplate - -# Define rail string - -rail_str = """ - - - - - - - Translate the given statement into English: - - ${statement_to_be_translated} - - ${gr.complete_json_suffix} - - -""" - - -# Create the GuardrailsOutputParser object from the rail string -output_parser = GuardrailsOutputParser.from_rail_string(rail_str) - -# Define the prompt, model and chain -prompt = PromptTemplate( - template=output_parser.guard.prompt.escape(), - input_variables=output_parser.guard.prompt.variable_names, -) - -chain = prompt | OpenAI() | output_parser - -# This is needed because GuardrailsOutputParser does not have an inferrable type -chain = chain.with_types(output_type=dict) diff --git a/templates/guardrails-output-parser/pyproject.toml b/templates/guardrails-output-parser/pyproject.toml deleted file mode 100644 index 7f20b0f7c84..00000000000 --- a/templates/guardrails-output-parser/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "guardrails-output-parser" -version = "0.0.1" -description = "Use guardrails-ai to validate LLM output" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.9,<3.13" -langchain = "^0.1" -openai = "<2" -guardrails-ai = "^0.2.4" -alt-profanity-check = "^1.3.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "guardrails_output_parser.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "validation" -author = "LangChain" -integrations = ["Guardrails", "OpenAI"] -tags = ["moderation"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/guardrails-output-parser/tests/__init__.py b/templates/guardrails-output-parser/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/hybrid-search-weaviate/LICENSE b/templates/hybrid-search-weaviate/LICENSE deleted file mode 100644 index d0af411b99a..00000000000 --- a/templates/hybrid-search-weaviate/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/templates/hybrid-search-weaviate/README.md b/templates/hybrid-search-weaviate/README.md deleted file mode 100644 index 84950bc4083..00000000000 --- a/templates/hybrid-search-weaviate/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Hybrid search - Weaviate - -This template shows you how to use the hybrid search feature in `Weaviate` vector store. -Hybrid search combines multiple search algorithms to improve the accuracy and relevance of search results. - -`Weaviate` uses both sparse and dense vectors to represent the meaning and context of search queries and documents. -The results use a combination of `bm25` and `vector search ranking` to return the top results. - -## Configurations -Connect to your hosted Weaviate Vectorstore by setting a few env variables in `chain.py`: - -* `WEAVIATE_ENVIRONMENT` -* `WEAVIATE_API_KEY` - -You will also need to set your `OPENAI_API_KEY` to use the OpenAI models. - -## Get Started -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package hybrid-search-weaviate -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add hybrid-search-weaviate -``` - -And add the following code to your `server.py` file: -```python -from hybrid_search_weaviate import chain as hybrid_search_weaviate_chain - -add_routes(app, hybrid_search_weaviate_chain, path="/hybrid-search-weaviate") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/hybrid-search-weaviate/playground](http://127.0.0.1:8000/hybrid-search-weaviate/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/hybrid-search-weaviate") -``` diff --git a/templates/hybrid-search-weaviate/hybrid_search_weaviate.ipynb b/templates/hybrid-search-weaviate/hybrid_search_weaviate.ipynb deleted file mode 100644 index 11416f01b10..00000000000 --- a/templates/hybrid-search-weaviate/hybrid_search_weaviate.ipynb +++ /dev/null @@ -1,57 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "8692a430", - "metadata": {}, - "source": [ - "# Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag-weaviate\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41db5e30", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_weaviate = RemoteRunnable(\"http://localhost:8000/rag-weaviate\")\n", - "rag_app_weaviate.invoke(\"How does agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.11.6 64-bit", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.6" - }, - "vscode": { - "interpreter": { - "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/hybrid-search-weaviate/hybrid_search_weaviate/__init__.py b/templates/hybrid-search-weaviate/hybrid_search_weaviate/__init__.py deleted file mode 100644 index 1a99a5b3fe6..00000000000 --- a/templates/hybrid-search-weaviate/hybrid_search_weaviate/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from hybrid_search_weaviate.chain import chain - -__all__ = ["chain"] diff --git a/templates/hybrid-search-weaviate/hybrid_search_weaviate/chain.py b/templates/hybrid-search-weaviate/hybrid_search_weaviate/chain.py deleted file mode 100644 index 0e7b2c1cb6c..00000000000 --- a/templates/hybrid-search-weaviate/hybrid_search_weaviate/chain.py +++ /dev/null @@ -1,68 +0,0 @@ -import os - -import weaviate -from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever -from langchain_community.chat_models import ChatOpenAI -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -# Check env vars -if os.environ.get("WEAVIATE_API_KEY", None) is None: - raise Exception("Missing `WEAVIATE_API_KEY` environment variable.") - -if os.environ.get("WEAVIATE_ENVIRONMENT", None) 
is None: - raise Exception("Missing `WEAVIATE_ENVIRONMENT` environment variable.") - -if os.environ.get("WEAVIATE_URL", None) is None: - raise Exception("Missing `WEAVIATE_URL` environment variable.") - -if os.environ.get("OPENAI_API_KEY", None) is None: - raise Exception("Missing `OPENAI_API_KEY` environment variable.") - -# Initialize the retriever -WEAVIATE_INDEX_NAME = os.environ.get("WEAVIATE_INDEX", "langchain-test") -WEAVIATE_URL = os.getenv("WEAVIATE_URL") -auth_client_secret = (weaviate.AuthApiKey(api_key=os.getenv("WEAVIATE_API_KEY")),) -client = weaviate.Client( - url=WEAVIATE_URL, - additional_headers={ - "X-Openai-Api-Key": os.getenv("OPENAI_API_KEY"), - }, -) -retriever = WeaviateHybridSearchRetriever( - client=client, - index_name=WEAVIATE_INDEX_NAME, - text_key="text", - attributes=[], - create_schema_if_missing=True, -) - -# # Ingest code - you may need to run this the first time -# # Load -# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -# data = loader.load() -# -# # Split -# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -# all_splits = text_splitter.split_documents(data) -# -# # Add to vectorDB -# retriever.add_documents(all_splits) - - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) diff --git a/templates/hybrid-search-weaviate/pyproject.toml b/templates/hybrid-search-weaviate/pyproject.toml deleted file mode 100644 index 73e580e2084..00000000000 --- a/templates/hybrid-search-weaviate/pyproject.toml +++ /dev/null @@ -1,37 +0,0 @@ -[tool.poetry] -name = "hybrid-search-weaviate" -version = "0.1.0" -description = "Improve accuracy and relevance with Weaviate hybrid search" -authors = ["Erika Cardenas "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -weaviate-client = ">=3.24.2" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -[tool.poetry.group.dev.dependencies.python-dotenv] -extras = [ - "cli", -] -version = "^1.0.0" - -[tool.langserve] -export_module = "hybrid_search_weaviate" -export_attr = "chain" - -[tool.templates-hub] -use-case = "search" -author = "Weaviate" -integrations = ["Weaviate", "OpenAI"] -tags = ["hybrid-search", "vectordb"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/hybrid-search-weaviate/tests/__init__.py b/templates/hybrid-search-weaviate/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/hyde/LICENSE b/templates/hyde/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/hyde/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/hyde/README.md b/templates/hyde/README.md deleted file mode 100644 index 1fffd8d408b..00000000000 --- a/templates/hyde/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# Hypothetical Document Embeddings (HyDE) - -This template uses `HyDE` with RAG. - -`Hyde` is a retrieval method that stands for `Hypothetical Document Embeddings`. It is a method used to enhance retrieval by generating a hypothetical document for an incoming query. - -The document is then embedded, and that embedding is utilized to look up real documents that are similar to the hypothetical document. - -The underlying concept is that the hypothetical document may be closer in the embedding space than the query. - -For a more detailed description, see the[Precise Zero-Shot Dense Retrieval without Relevance Labels](https://arxiv.org/abs/2212.10496) paper. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package hyde -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add hyde -``` - -And add the following code to your `server.py` file: -```python -from hyde.chain import chain as hyde_chain - -add_routes(app, hyde_chain, path="/hyde") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/hyde/playground](http://127.0.0.1:8000/hyde/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/hyde") -``` - diff --git a/templates/hyde/hyde/__init__.py b/templates/hyde/hyde/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/hyde/hyde/chain.py b/templates/hyde/hyde/chain.py deleted file mode 100644 index 1f24323fe4b..00000000000 --- a/templates/hyde/hyde/chain.py +++ /dev/null @@ -1,75 +0,0 @@ -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel - -from hyde.prompts import hyde_prompt - -# Example for document loading (from url), splitting, and creating vectostore - -""" -# Load -from langchain_community.document_loaders import WebBaseLoader -loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -data = loader.load() - -# Split -from langchain_text_splitters import RecursiveCharacterTextSplitter -text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -all_splits = text_splitter.split_documents(data) - -# Add to vectorDB -vectorstore = Chroma.from_documents(documents=all_splits, - collection_name="rag-chroma", - embedding=OpenAIEmbeddings(), - ) -retriever = vectorstore.as_retriever() -""" - -# Embed a single document as a test -vectorstore = Chroma.from_texts( - ["harrison worked at kensho"], - collection_name="rag-chroma", - embedding=OpenAIEmbeddings(), -) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# LLM -model = ChatOpenAI() - -# Query transformation chain -# This transforms the query into the hypothetical document -hyde_chain = hyde_prompt | model | StrOutputParser() - -# RAG chain -chain = ( - RunnableParallel( - { - # Generate a hypothetical document and then pass it to the retriever - "context": hyde_chain | retriever, - "question": lambda x: x["question"], - } - ) - | prompt - | model - | StrOutputParser() -) - - -# Add input types for playground -class ChainInput(BaseModel): - question: str - - -chain = chain.with_types(input_type=ChainInput) diff --git a/templates/hyde/hyde/prompts.py b/templates/hyde/hyde/prompts.py deleted file mode 100644 index 70cc2aac14e..00000000000 --- a/templates/hyde/hyde/prompts.py +++ /dev/null @@ -1,19 +0,0 @@ -from langchain_core.prompts.prompt import PromptTemplate - -# There are a few different templates to choose from -# These are just different ways to generate 
hypothetical documents -web_search_template = """Please write a passage to answer the question -Question: {question} -Passage:""" -sci_fact_template = """Please write a scientific paper passage to support/refute the claim -Claim: {question} -Passage:""" # noqa: E501 -fiqa_template = """Please write a financial article passage to answer the question -Question: {question} -Passage:""" -trec_news_template = """Please write a news passage about the topic. -Topic: {question} -Passage:""" - -# For the sake of this example we will use the web search template -hyde_prompt = PromptTemplate.from_template(web_search_template) diff --git a/templates/hyde/pyproject.toml b/templates/hyde/pyproject.toml deleted file mode 100644 index fe57dd4f7c9..00000000000 --- a/templates/hyde/pyproject.toml +++ /dev/null @@ -1,41 +0,0 @@ -[tool.poetry] -name = "hyde" -version = "0.0.1" -description = "Retrieval with Hypothetical Document Embeddings (HyDE)" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -langchain-chroma = "^0.1.2" -tiktoken = "^0.5.1" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -poethepoet = "^0.24.1" -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "hyde.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "ChromaDB"] -tags = ["paper"] - -[tool.poe.tasks.start] -cmd = "uvicorn langchain_cli.dev_scripts:create_demo_server --reload --port $port --host $host" -args = [ - { name = "port", help = "port to run on", default = "8000" }, - { name = "host", help = "host to run on", default = "127.0.0.1" }, -] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/hyde/tests/__init__.py b/templates/hyde/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/intel-rag-xeon/README.md b/templates/intel-rag-xeon/README.md deleted file mode 100644 index cb8b5d9b90f..00000000000 --- a/templates/intel-rag-xeon/README.md +++ /dev/null @@ -1,99 +0,0 @@ -# RAG - Intel Xeon - -This template performs RAG using `Chroma` and `Hugging Face Text Generation Inference` -on `Intel® Xeon® Scalable` Processors. -`Intel® Xeon® Scalable` processors feature built-in accelerators for more performance-per-core and unmatched AI performance, with advanced security technologies for the most in-demand workload requirements—all while offering the greatest cloud choice and application portability, please check [Intel® Xeon® Scalable Processors](https://www.intel.com/content/www/us/en/products/details/processors/xeon/scalable.html). - -## Environment Setup -To use [🤗 text-generation-inference](https://github.com/huggingface/text-generation-inference) on Intel® Xeon® Scalable Processors, please follow these steps: - - -### Launch a local server instance on Intel Xeon Server: -```bash -model=Intel/neural-chat-7b-v3-3 -volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run - -docker run --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.4 --model-id $model -``` - -For gated models such as `LLAMA-2`, you will have to pass -e HUGGING_FACE_HUB_TOKEN=\ to the docker run command above with a valid Hugging Face Hub read token. 
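
If you prefer to verify the endpoint from Python rather than with the `curl` command shown further down, a minimal sketch using `requests` looks like this; it assumes the TGI container started above is listening on `localhost:8080` and sends the same `/generate` payload as the curl example.

```python
import requests

# Same request as the curl check below, sent from Python instead.
response = requests.post(
    "http://localhost:8080/generate",
    json={
        "inputs": "Which NFL team won the Super Bowl in the 2010 season?",
        "parameters": {"max_new_tokens": 128, "do_sample": True},
    },
    timeout=60,
)
response.raise_for_status()

# TGI returns the completion under the `generated_text` key.
print(response.json()["generated_text"])
```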
- -Please follow this link [huggingface token](https://huggingface.co/docs/hub/security-tokens) to get the access token ans export `HUGGINGFACEHUB_API_TOKEN` environment with the token. - -```bash -export HUGGINGFACEHUB_API_TOKEN= -``` - -Send a request to check if the endpoint is working: - -```bash -curl localhost:8080/generate -X POST -d '{"inputs":"Which NFL team won the Super Bowl in the 2010 season?","parameters":{"max_new_tokens":128, "do_sample": true}}' -H 'Content-Type: application/json' -``` - -More details please refer to [text-generation-inference](https://github.com/huggingface/text-generation-inference). - - -## Populating with data - -If you want to populate the DB with some example data, you can run the below commands: -```shell -poetry install -poetry run python ingest.py -``` - -The script process and stores sections from Edgar 10k filings data for Nike `nke-10k-2023.pdf` into a Chroma database. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package intel-rag-xeon -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add intel-rag-xeon -``` - -And add the following code to your `server.py` file: -```python -from intel_rag_xeon import chain as xeon_rag_chain - -add_routes(app, xeon_rag_chain, path="/intel-rag-xeon") -``` - -(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/intel-rag-xeon/playground](http://127.0.0.1:8000/intel-rag-xeon/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/intel-rag-xeon") -``` diff --git a/templates/intel-rag-xeon/data/nke-10k-2023.pdf b/templates/intel-rag-xeon/data/nke-10k-2023.pdf deleted file mode 100644 index 6ade8863e80..00000000000 Binary files a/templates/intel-rag-xeon/data/nke-10k-2023.pdf and /dev/null differ diff --git a/templates/intel-rag-xeon/ingest.py b/templates/intel-rag-xeon/ingest.py deleted file mode 100644 index 1d293630957..00000000000 --- a/templates/intel-rag-xeon/ingest.py +++ /dev/null @@ -1,49 +0,0 @@ -import os - -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain_chroma import Chroma -from langchain_community.document_loaders import UnstructuredFileLoader -from langchain_community.embeddings import HuggingFaceEmbeddings -from langchain_core.documents import Document - - -def ingest_documents(): - """ - Ingest PDF to Redis from the data/ directory that - contains Edgar 10k filings data for Nike. 
- """ - # Load list of pdfs - data_path = "data/" - doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0] - - print("Parsing 10k filing doc for NIKE", doc) - - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=1500, chunk_overlap=100, add_start_index=True - ) - loader = UnstructuredFileLoader(doc, mode="single", strategy="fast") - chunks = loader.load_and_split(text_splitter) - - print("Done preprocessing. Created", len(chunks), "chunks of the original pdf") - - # Create vectorstore - embedder = HuggingFaceEmbeddings( - model_name="sentence-transformers/all-MiniLM-L6-v2" - ) - - documents = [] - for chunk in chunks: - doc = Document(page_content=chunk.page_content, metadata=chunk.metadata) - documents.append(doc) - - # Add to vectorDB - _ = Chroma.from_documents( - documents=documents, - collection_name="xeon-rag", - embedding=embedder, - persist_directory="/tmp/xeon_rag_db", - ) - - -if __name__ == "__main__": - ingest_documents() diff --git a/templates/intel-rag-xeon/intel_rag_xeon.ipynb b/templates/intel-rag-xeon/intel_rag_xeon.ipynb deleted file mode 100644 index 29cf2b45cde..00000000000 --- a/templates/intel-rag-xeon/intel_rag_xeon.ipynb +++ /dev/null @@ -1,62 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to RAG App\n", - "\n", - "Assuming you are already running this server:\n", - "```bash\n", - "langserve start\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "gaudi_rag = RemoteRunnable(\"http://localhost:8000/intel-rag-xeon\")\n", - "\n", - "print(gaudi_rag.invoke(\"What was Nike's revenue in 2023?\"))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "07ae0005", - "metadata": {}, - "outputs": [], - "source": [ - "print(gaudi_rag.invoke(\"How many employees work at Nike?\"))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.6" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/intel-rag-xeon/intel_rag_xeon/__init__.py b/templates/intel-rag-xeon/intel_rag_xeon/__init__.py deleted file mode 100644 index 16f967a4383..00000000000 --- a/templates/intel-rag-xeon/intel_rag_xeon/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from intel_rag_xeon.chain import chain - -__all__ = ["chain"] diff --git a/templates/intel-rag-xeon/intel_rag_xeon/chain.py b/templates/intel-rag-xeon/intel_rag_xeon/chain.py deleted file mode 100644 index 0312003136f..00000000000 --- a/templates/intel-rag-xeon/intel_rag_xeon/chain.py +++ /dev/null @@ -1,72 +0,0 @@ -from langchain.callbacks import streaming_stdout -from langchain_chroma import Chroma -from langchain_community.embeddings import HuggingFaceEmbeddings -from langchain_community.llms import HuggingFaceEndpoint -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_core.vectorstores import VectorStoreRetriever - - -# Make this look 
better in the docs. -class Question(BaseModel): - __root__: str - - -# Init Embeddings -embedder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2") - -knowledge_base = Chroma( - persist_directory="/tmp/xeon_rag_db", - embedding_function=embedder, - collection_name="xeon-rag", -) -query = "What was Nike's revenue in 2023?" -docs = knowledge_base.similarity_search(query) -print(docs[0].page_content) -retriever = VectorStoreRetriever( - vectorstore=knowledge_base, search_type="mmr", search_kwargs={"k": 1, "fetch_k": 5} -) - -# Define our prompt -template = """ -Use the following pieces of context from retrieved -dataset to answer the question. Do not make up an answer if there is no -context provided to help answer it. - -Context: ---------- -{context} - ---------- -Question: {question} ---------- - -Answer: -""" - - -prompt = ChatPromptTemplate.from_template(template) - - -ENDPOINT_URL = "http://localhost:8080" -callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()] -model = HuggingFaceEndpoint( - endpoint_url=ENDPOINT_URL, - max_new_tokens=512, - top_k=10, - top_p=0.95, - typical_p=0.95, - temperature=0.01, - repetition_penalty=1.03, - streaming=True, -) - -# RAG Chain -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -).with_types(input_type=Question) diff --git a/templates/intel-rag-xeon/pyproject.toml b/templates/intel-rag-xeon/pyproject.toml deleted file mode 100644 index a753ed375ae..00000000000 --- a/templates/intel-rag-xeon/pyproject.toml +++ /dev/null @@ -1,51 +0,0 @@ -[tool.poetry] -name = "intel-rag-xeon" -version = "0.0.1" -description = "Run a RAG app on Intel Xeon Scalable Processors" -authors = [ - "Liang Lv ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.9,<3.13" -langchain = "^0.1" -fastapi = "^0.104.0" -sse-starlette = "^1.6.5" -sentence-transformers = "2.2.2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -beautifulsoup4 = ">=4.12.2" - -[tool.poetry.dependencies.unstructured] -version = "^0.10.27" -extras = [ - "pdf", -] - -[tool.poetry.group.dev.dependencies] -poethepoet = "^0.24.1" -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "intel_rag_xeon.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Intel" -integrations = ["Intel", "HuggingFace"] -tags = ["vectordbs"] - -[tool.poe.tasks.start] -cmd = "uvicorn langchain_cli.dev_scripts:create_demo_server --reload --port $port --host $host" -args = [ - { name = "port", help = "port to run on", default = "8000" }, - { name = "host", help = "host to run on", default = "127.0.0.1" }, -] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" \ No newline at end of file diff --git a/templates/intel-rag-xeon/tests/__init__.py b/templates/intel-rag-xeon/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/llama2-functions/README.md b/templates/llama2-functions/README.md deleted file mode 100644 index 8dcc5a2f164..00000000000 --- a/templates/llama2-functions/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Llama.cpp functions - -This template performs extraction of structured data from unstructured data using [Llama.cpp package with the LLaMA2 model that supports a specified JSON output schema](https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md). - -The extraction schema can be set in `chain.py`. 
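
Because the model is instructed to reply with JSON that matches the schema in `chain.py`, client code usually parses the raw string output before using it. The sketch below is a minimal, hedged example of that step: it assumes `REPLICATE_API_TOKEN` is already set (see Environment Setup below) and that the package is installed locally so the chain can be imported directly.

```python
import json

from llama2_functions import chain

# The chain returns a raw string that should contain JSON matching the schema
# declared in chain.py (question_1 / question_2 / question_3).
raw = chain.invoke({"question": "How does agent memory work?"})

try:
    parsed = json.loads(raw)
    print(parsed)
except json.JSONDecodeError:
    # Models occasionally drift from the schema, so keep the raw output around.
    print("Model output was not valid JSON:", raw)
```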
- -## Environment Setup - -This will use a [LLaMA2-13b model hosted by Replicate](https://replicate.com/andreasjansson/llama-2-13b-chat-gguf/versions). - -Ensure that `REPLICATE_API_TOKEN` is set in your environment. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package llama2-functions -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add llama2-functions -``` - -And add the following code to your `server.py` file: -```python -from llama2_functions import chain as llama2_functions_chain - -add_routes(app, llama2_functions_chain, path="/llama2-functions") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/llama2-functions/playground](http://127.0.0.1:8000/llama2-functions/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/llama2-functions") -``` diff --git a/templates/llama2-functions/llama2-functions.ipynb b/templates/llama2-functions/llama2-functions.ipynb deleted file mode 100644 index 411f4bd241f..00000000000 --- a/templates/llama2-functions/llama2-functions.ipynb +++ /dev/null @@ -1,52 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "9faf648c-541e-4368-82a8-96287dbf34de", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/llama2_functions\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2dfe28bb-6112-459b-a77d-013964b65409", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "llama2_function = RemoteRunnable(\"http://0.0.0.0:8001/llama2_functions\")\n", - "llama2_function.invoke({\"question\": \"How does agent memory work?\"})" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/llama2-functions/llama2_functions/__init__.py b/templates/llama2-functions/llama2_functions/__init__.py deleted file mode 100644 index 6b64bab82b9..00000000000 --- a/templates/llama2-functions/llama2_functions/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from llama2_functions.chain import chain - 
-__all__ = ["chain"] diff --git a/templates/llama2-functions/llama2_functions/chain.py b/templates/llama2-functions/llama2_functions/chain.py deleted file mode 100644 index a7580394d99..00000000000 --- a/templates/llama2-functions/llama2_functions/chain.py +++ /dev/null @@ -1,42 +0,0 @@ -from langchain_community.llms import Replicate -from langchain_core.prompts import ChatPromptTemplate - -# LLM -replicate_id = "andreasjansson/llama-2-13b-chat-gguf:60ec5dda9ff9ee0b6f786c9d1157842e6ab3cc931139ad98fe99e08a35c5d4d4" # noqa: E501 -model = Replicate( - model=replicate_id, - model_kwargs={"temperature": 0.8, "max_length": 500, "top_p": 0.95}, -) - -# Prompt with output schema specification -template = """You are an AI language model assistant. Your task is to generate 3 different versions of the given user / - question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user / - question, your goal is to help the user overcome some of the limitations of distance-based similarity search. / - Respond with json that adheres to the following jsonschema: -{{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": {{ - "question_1": {{ - "type": "string", - "description": "First version of the user question." - }}, - "question_2": {{ - "type": "string", - "description": "Second version of the user question." - }}, - "question_3": {{ - "type": "string", - "description": "Third version of the user question." - }} - }}, - "required": ["question_1","question_2","question_3"], - "additionalProperties": false -}}""" # noqa: E501 - -prompt = ChatPromptTemplate.from_messages( - [("system", template), ("human", "{question}")] -) - -# Chain -chain = prompt | model diff --git a/templates/llama2-functions/pyproject.toml b/templates/llama2-functions/pyproject.toml deleted file mode 100644 index 1c7b413b293..00000000000 --- a/templates/llama2-functions/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "llama2-functions" -version = "0.1.0" -description = "Extraction with a JSON-output LLaMA2 model" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -replicate = ">=0.15.4" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "llama2_functions" -export_attr = "chain" - -[tool.templates-hub] -use-case = "extraction" -author = "LangChain" -integrations = ["Llama.cpp", "Replicate"] -tags = ["local-llm", "function-calling"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/llama2-functions/tests/__init__.py b/templates/llama2-functions/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/mongo-parent-document-retrieval/LICENSE b/templates/mongo-parent-document-retrieval/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/mongo-parent-document-retrieval/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/mongo-parent-document-retrieval/README.md b/templates/mongo-parent-document-retrieval/README.md deleted file mode 100644 index 2a843d08f91..00000000000 --- a/templates/mongo-parent-document-retrieval/README.md +++ /dev/null @@ -1,178 +0,0 @@ -# MongoDB - Parent-Document Retrieval RAG - -This template performs RAG using `MongoDB` and `OpenAI`. -It does a more advanced form of RAG called `Parent-Document Retrieval`. - -In this form of retrieval, a large document is first split into medium-sized chunks. -From there, those medium size chunks are split into small chunks. -Embeddings are created for the small chunks. -When a query comes in, an embedding is created for that query and compared to the small chunks. -But rather than passing the small chunks directly to the LLM for generation, the medium-sized chunks -from where the smaller chunks came are passed. -This helps enable finer-grained search, but then passing of larger context (which can be useful during generation). - -## Environment Setup - -You should export two environment variables, one being your MongoDB URI, the other being your OpenAI API KEY. -If you do not have a MongoDB URI, see the `Setup Mongo` section at the bottom for instructions on how to do so. - -```shell -export MONGO_URI=... -export OPENAI_API_KEY=... -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package mongo-parent-document-retrieval -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add mongo-parent-document-retrieval -``` - -And add the following code to your `server.py` file: -```python -from mongo_parent_document_retrieval import chain as mongo_parent_document_retrieval_chain - -add_routes(app, mongo_parent_document_retrieval_chain, path="/mongo-parent-document-retrieval") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you DO NOT already have a Mongo Search Index you want to connect to, see `MongoDB Setup` section below before proceeding. -Note that because Parent Document Retrieval uses a different indexing strategy, it's likely you will want to run this new setup. - -If you DO have a MongoDB Search index you want to connect to, edit the connection details in `mongo_parent_document_retrieval/chain.py` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/mongo-parent-document-retrieval/playground](http://127.0.0.1:8000/mongo-parent-document-retrieval/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/mongo-parent-document-retrieval") -``` - -For additional context, please refer to [this notebook](https://colab.research.google.com/drive/1cr2HBAHyBmwKUerJq2if0JaNhy-hIq7I#scrollTo=TZp7_CBfxTOB). - - -## MongoDB Setup - -Use this step if you need to setup your MongoDB account and ingest data. -We will first follow the standard MongoDB Atlas setup instructions [here](https://www.mongodb.com/docs/atlas/getting-started/). - -1. Create an account (if not already done) -2. Create a new project (if not already done) -3. Locate your MongoDB URI. - -This can be done by going to the deployment overview page and connecting to you database - -![Screenshot highlighting the 'Connect' button in MongoDB Atlas.](_images/connect.png) "MongoDB Atlas Connect Button" - -We then look at the drivers available - -![Screenshot showing the MongoDB Atlas drivers section for connecting to the database.](_images/driver.png) "MongoDB Atlas Drivers Section" - -Among which we will see our URI listed - -![Screenshot displaying the MongoDB Atlas URI in the connection instructions.](_images/uri.png) "MongoDB Atlas URI Display" - -Let's then set that as an environment variable locally: - -```shell -export MONGO_URI=... -``` - -4. Let's also set an environment variable for OpenAI (which we will use as an LLM) - -```shell -export OPENAI_API_KEY=... -``` - -5. Let's now ingest some data! We can do that by moving into this directory and running the code in `ingest.py`, eg: - -```shell -python ingest.py -``` - -Note that you can (and should!) change this to ingest data of your choice - -6. We now need to set up a vector index on our data. 
- -We can first connect to the cluster where our database lives - -![cluster.png](_images%2Fcluster.png) - -We can then navigate to where all our collections are listed - -![collections.png](_images%2Fcollections.png) - -We can then find the collection we want and look at the search indexes for that collection - -![search-indexes.png](_images%2Fsearch-indexes.png) - -That should likely be empty, and we want to create a new one: - -![create.png](_images%2Fcreate.png) - -We will use the JSON editor to create it - -![json_editor.png](_images%2Fjson_editor.png) - -And we will paste the following JSON in: - -```text -{ - "mappings": { - "dynamic": true, - "fields": { - "doc_level": [ - { - "type": "token" - } - ], - "embedding": { - "dimensions": 1536, - "similarity": "cosine", - "type": "knnVector" - } - } - } -} -``` -![json.png](_images%2Fjson.png) - -From there, hit "Next" and then "Create Search Index". It will take a little bit but you should then have an index over your data! - diff --git a/templates/mongo-parent-document-retrieval/ingest.py b/templates/mongo-parent-document-retrieval/ingest.py deleted file mode 100644 index 0c8e1bb294c..00000000000 --- a/templates/mongo-parent-document-retrieval/ingest.py +++ /dev/null @@ -1,59 +0,0 @@ -import os -import uuid - -from langchain_community.document_loaders import PyPDFLoader -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import MongoDBAtlasVectorSearch -from langchain_text_splitters import RecursiveCharacterTextSplitter -from pymongo import MongoClient - -PARENT_DOC_ID_KEY = "parent_doc_id" - - -def parent_child_splitter(data, id_key=PARENT_DOC_ID_KEY): - parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) - # This text splitter is used to create the child documents - # It should create documents smaller than the parent - child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) - documents = parent_splitter.split_documents(data) - doc_ids = [str(uuid.uuid4()) for _ in documents] - - docs = [] - for i, doc in enumerate(documents): - _id = doc_ids[i] - sub_docs = child_splitter.split_documents([doc]) - for _doc in sub_docs: - _doc.metadata[id_key] = _id - _doc.metadata["doc_level"] = "child" - docs.extend(sub_docs) - doc.metadata[id_key] = _id - doc.metadata["doc_level"] = "parent" - return documents, docs - - -MONGO_URI = os.environ["MONGO_URI"] - -# Note that if you change this, you also need to change it in `rag_mongo/chain.py` -DB_NAME = "langchain-test-2" -COLLECTION_NAME = "test" -ATLAS_VECTOR_SEARCH_INDEX_NAME = "default" -EMBEDDING_FIELD_NAME = "embedding" -client = MongoClient(MONGO_URI) -db = client[DB_NAME] -MONGODB_COLLECTION = db[COLLECTION_NAME] - -if __name__ == "__main__": - # Load docs - loader = PyPDFLoader("https://arxiv.org/pdf/2303.08774.pdf") - data = loader.load() - - # Split docs - parent_docs, child_docs = parent_child_splitter(data) - - # Insert the documents in MongoDB Atlas Vector Search - _ = MongoDBAtlasVectorSearch.from_documents( - documents=parent_docs + child_docs, - embedding=OpenAIEmbeddings(disallowed_special=()), - collection=MONGODB_COLLECTION, - index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME, - ) diff --git a/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/__init__.py b/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/__init__.py deleted file mode 100644 index 01e197dabbe..00000000000 --- a/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/__init__.py +++ 
/dev/null @@ -1,3 +0,0 @@ -from mongo_parent_document_retrieval.chain import chain - -__all__ = ["chain"] diff --git a/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py b/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py deleted file mode 100644 index 47ab21ccabb..00000000000 --- a/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py +++ /dev/null @@ -1,91 +0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import MongoDBAtlasVectorSearch -from langchain_core.documents import Document -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from pymongo import MongoClient - -MONGO_URI = os.environ["MONGO_URI"] -PARENT_DOC_ID_KEY = "parent_doc_id" -# Note that if you change this, you also need to change it in `rag_mongo/chain.py` -DB_NAME = "langchain-test-2" -COLLECTION_NAME = "test" -ATLAS_VECTOR_SEARCH_INDEX_NAME = "default" -EMBEDDING_FIELD_NAME = "embedding" -client = MongoClient(MONGO_URI) -db = client[DB_NAME] -MONGODB_COLLECTION = db[COLLECTION_NAME] - - -vector_search = MongoDBAtlasVectorSearch.from_connection_string( - MONGO_URI, - DB_NAME + "." + COLLECTION_NAME, - OpenAIEmbeddings(disallowed_special=()), - index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME, -) - - -def retrieve(query: str): - results = vector_search.similarity_search( - query, - k=4, - pre_filter={"doc_level": {"$eq": "child"}}, - post_filter_pipeline=[ - {"$project": {"embedding": 0}}, - { - "$lookup": { - "from": COLLECTION_NAME, - "localField": PARENT_DOC_ID_KEY, - "foreignField": PARENT_DOC_ID_KEY, - "as": "parent_context", - "pipeline": [ - {"$match": {"doc_level": "parent"}}, - {"$limit": 1}, - {"$project": {"embedding": 0}}, - ], - } - }, - ], - ) - parent_docs = [] - parent_doc_ids = set() - for result in results: - res = result.metadata["parent_context"][0] - text = res.pop("text") - # This causes serialization issues. 
- res.pop("_id") - parent_doc = Document(page_content=text, metadata=res) - if parent_doc.metadata[PARENT_DOC_ID_KEY] not in parent_doc_ids: - parent_doc_ids.add(parent_doc.metadata[PARENT_DOC_ID_KEY]) - parent_docs.append(parent_doc) - return parent_docs - - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - RunnableParallel({"context": retrieve, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/mongo-parent-document-retrieval/pyproject.toml b/templates/mongo-parent-document-retrieval/pyproject.toml deleted file mode 100644 index e1403ed540a..00000000000 --- a/templates/mongo-parent-document-retrieval/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "mongo-parent-document-retrieval" -version = "0.0.1" -description = "RAG using MongoDB and OpenAI" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -pymongo = "^4.6.0" -pypdf = "^4.0.0" -tiktoken = "^0.5.1" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "mongo_parent_document_retrieval" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["MongoDB", "OpenAI"] -tags = ["vectordb"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/mongo-parent-document-retrieval/tests/__init__.py b/templates/mongo-parent-document-retrieval/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/neo4j-advanced-rag/README.md b/templates/neo4j-advanced-rag/README.md deleted file mode 100644 index f1c8316609d..00000000000 --- a/templates/neo4j-advanced-rag/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# Neo4j - advanced RAG - -This template allows you to balance precise embeddings and context retention -by implementing advanced retrieval strategies. - -## Strategies - -1. **Typical RAG**: - - Traditional method where the exact data indexed is the data retrieved. -2. **Parent retriever**: - - Instead of indexing entire documents, data is divided into smaller chunks, referred to as Parent and Child documents. - - Child documents are indexed for better representation of specific concepts, while parent documents is retrieved to ensure context retention. -3. **Hypothetical Questions**: - - Documents are processed to determine potential questions they might answer. - - These questions are then indexed for better representation of specific concepts, while parent documents are retrieved to ensure context retention. -4. **Summaries**: - - Instead of indexing the entire document, a summary of the document is created and indexed. - - Similarly, the parent document is retrieved in a RAG application. - -## Environment Setup - -You need to define the following environment variables - -``` -OPENAI_API_KEY= -NEO4J_URI= -NEO4J_USERNAME= -NEO4J_PASSWORD= -``` - -## Populating with data - -If you want to populate the DB with some example data, you can run `python ingest.py`. 
-The script process and stores sections of the text from the file `dune.txt` into a Neo4j graph database. -First, the text is divided into larger chunks ("parents") and then further subdivided into smaller chunks ("children"), where both parent and child chunks overlap slightly to maintain context. -After storing these chunks in the database, embeddings for the child nodes are computed using OpenAI's embeddings and stored back in the graph for future retrieval or analysis. -For every parent node, hypothetical questions and summaries are generated, embedded, and added to the database. -Additionally, a vector index for each retrieval strategy is created for efficient querying of these embeddings. - -*Note that ingestion can take a minute or two due to LLMs velocity of generating hypothetical questions and summaries.* - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U "langchain-cli[serve]" -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package neo4j-advanced-rag -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add neo4j-advanced-rag -``` - -And add the following code to your `server.py` file: -```python -from neo4j_advanced_rag import chain as neo4j_advanced_chain - -add_routes(app, neo4j_advanced_chain, path="/neo4j-advanced-rag") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/neo4j-advanced-rag/playground](http://127.0.0.1:8000/neo4j-advanced-rag/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/neo4j-advanced-rag") -``` diff --git a/templates/neo4j-advanced-rag/dune.txt b/templates/neo4j-advanced-rag/dune.txt deleted file mode 100644 index 1bb7deb0ee3..00000000000 --- a/templates/neo4j-advanced-rag/dune.txt +++ /dev/null @@ -1,95 +0,0 @@ -Dune is a 1965 epic science fiction novel by American author Frank Herbert, originally published as two separate serials in Analog magazine. It tied with Roger Zelazny's This Immortal for the Hugo Award in 1966 and it won the inaugural Nebula Award for Best Novel. It is the first installment of the Dune Chronicles. It is one of the world's best-selling science fiction novels.Dune is set in the distant future in a feudal interstellar society in which various noble houses control planetary fiefs. It tells the story of young Paul Atreides, whose family accepts the stewardship of the planet Arrakis. While the planet is an inhospitable and sparsely populated desert wasteland, it is the only source of melange, or "spice", a drug that extends life and enhances mental abilities. 
Melange is also necessary for space navigation, which requires a kind of multidimensional awareness and foresight that only the drug provides. As melange can only be produced on Arrakis, control of the planet is a coveted and dangerous undertaking. The story explores the multilayered interactions of politics, religion, ecology, technology, and human emotion, as the factions of the empire confront each other in a struggle for the control of Arrakis and its spice. -Herbert wrote five sequels: Dune Messiah, Children of Dune, God Emperor of Dune, Heretics of Dune, and Chapterhouse: Dune. Following Herbert's death in 1986, his son Brian Herbert and author Kevin J. Anderson continued the series in over a dozen additional novels since 1999. -Adaptations of the novel to cinema have been notoriously difficult and complicated. In the 1970s, cult filmmaker Alejandro Jodorowsky attempted to make a film based on the novel. After three years of development, the project was canceled due to a constantly growing budget. In 1984, a film adaptation directed by David Lynch was released to mostly negative responses from critics and failure at the box office, although it later developed a cult following. The book was also adapted into the 2000 Sci-Fi Channel miniseries Frank Herbert's Dune and its 2003 sequel Frank Herbert's Children of Dune (the latter of which combines the events of Dune Messiah and Children of Dune). A second film adaptation directed by Denis Villeneuve was released on October 21, 2021, to positive reviews. It grossed $401 million worldwide and went on to be nominated for ten Academy Awards, winning six. Villeneuve's film covers roughly the first half of the original novel; a sequel, which will cover the remaining story, will be released in March 2024. -The series has also been used as the basis for several board, role-playing, and video games. -Since 2009, the names of planets from the Dune novels have been adopted for the real-life nomenclature of plains and other features on Saturn's moon Titan. - - -== Origins == -After his novel The Dragon in the Sea was published in 1957, Herbert traveled to Florence, Oregon, at the north end of the Oregon Dunes. Here, the United States Department of Agriculture was attempting to use poverty grasses to stabilize the sand dunes. Herbert claimed in a letter to his literary agent, Lurton Blassingame, that the moving dunes could "swallow whole cities, lakes, rivers, highways." Herbert's article on the dunes, "They Stopped the Moving Sands", was never completed (and only published decades later in The Road to Dune), but its research sparked Herbert's interest in ecology and deserts.Herbert further drew inspiration from Native American mentors like "Indian Henry" (as Herbert referred to the man to his son; likely a Henry Martin of the Hoh tribe) and Howard Hansen. Both Martin and Hansen grew up on the Quileute reservation near Herbert's hometown. According to historian Daniel Immerwahr, Hansen regularly shared his writing with Herbert. "White men are eating the earth," Hansen told Herbert in 1958, after sharing a piece on the effect of logging on the Quileute reservation. "They're gonna turn this whole planet into a wasteland, just like North Africa." The world could become a "big dune," Herbert responded in agreement.Herbert was also interested in the idea of the superhero mystique and messiahs. 
He believed that feudalism was a natural condition humans fell into, where some led and others gave up the responsibility of making decisions and just followed orders. He found that desert environments have historically given birth to several major religions with messianic impulses. He decided to join his interests together so he could play religious and ecological ideas against each other. In addition, he was influenced by the story of T. E. Lawrence and the "messianic overtones" in Lawrence's involvement in the Arab Revolt during World War I. In an early version of Dune, the hero was actually very similar to Lawrence of Arabia, but Herbert decided the plot was too straightforward and added more layers to his story.Herbert drew heavy inspiration also from Lesley Blanch's The Sabres of Paradise (1960), a narrative history recounting a mid-19th century conflict in the Caucasus between rugged Islamized caucasian tribes and the expansive Russian Empire. Language used on both sides of that conflict become terms in Herbert's world—chakobsa, a Caucasian hunting language, becomes a battle language of humans spread across the galaxy; kanly, a word for blood feud in the 19th century Caucasus, represents a feud between Dune's noble Houses; sietch and tabir are both words for camp borrowed from Ukrainian Cossacks (of the Pontic–Caspian steppe).Herbert also borrowed some lines which Blanch stated were Caucasian proverbs. "To kill with the point lacked artistry", used by Blanch to describe the Caucasus peoples' love of swordsmanship, becomes in Dune "Killing with the tip lacks artistry", a piece of advice given to a young Paul during his training. "Polish comes from the city, wisdom from the hills", a Caucasian aphorism, turns into a desert expression: "Polish comes from the cities, wisdom from the desert". - -Another significant source of inspiration for Dune was Herbert's experiences with psilocybin and his hobby of cultivating mushrooms, according to mycologist Paul Stamets's account of meeting Herbert in the 1980s:Frank went on to tell me that much of the premise of Dune—the magic spice (spores) that allowed the bending of space (tripping), the giant sand worms (maggots digesting mushrooms), the eyes of the Fremen (the cerulean blue of Psilocybe mushrooms), the mysticism of the female spiritual warriors, the Bene Gesserits (influenced by the tales of Maria Sabina and the sacred mushroom cults of Mexico)—came from his perception of the fungal life cycle, and his imagination was stimulated through his experiences with the use of magic mushrooms.Herbert spent the next five years researching, writing, and revising. He published a three-part serial Dune World in the monthly Analog, from December 1963 to February 1964. The serial was accompanied by several illustrations that were not published again. After an interval of a year, he published the much slower-paced five-part The Prophet of Dune in the January–May 1965 issues. The first serial became "Book 1: Dune" in the final published Dune novel, and the second serial was divided into "Book Two: Muad'dib" and "Book Three: The Prophet". The serialized version was expanded, reworked, and submitted to more than twenty publishers, each of whom rejected it. The novel, Dune, was finally accepted and published in August 1965 by Chilton Books, a printing house better known for publishing auto repair manuals. Sterling Lanier, an editor at Chilton, had seen Herbert's manuscript and had urged his company to take a risk in publishing the book. 
However, the first printing, priced at $5.95 (equivalent to $55.25 in 2022), did not sell well and was poorly received by critics as being atypical of science fiction at the time. Chilton considered the publication of Dune a write-off and Lanier was fired. Over the course of time, the book gained critical acclaim, and its popularity spread by word-of-mouth to allow Herbert to start working full time on developing the sequels to Dune, elements of which were already written alongside Dune.At first Herbert considered using Mars as setting for his novel, but eventually decided to use a fictional planet instead. His son Brian said that "Readers would have too many preconceived ideas about that planet, due to the number of stories that had been written about it."Herbert dedicated his work "to the people whose labors go beyond ideas into the realm of 'real materials'—to the dry-land ecologists, wherever they may be, in whatever time they work, this effort at prediction is dedicated in humility and admiration." - - -== Plot == -Duke Leto Atreides of House Atreides, ruler of the ocean planet Caladan, is assigned by the Padishah Emperor Shaddam IV to serve as fief ruler of the planet Arrakis. Although Arrakis is a harsh and inhospitable desert planet, it is of enormous importance because it is the only planetary source of melange, or the "spice", a unique and incredibly valuable substance that extends human youth, vitality and lifespan. It is also through the consumption of spice that Spacing Guild Navigators are able to effect safe interstellar travel. Shaddam, jealous of Duke Leto Atreides's rising popularity in the Landsraad, sees House Atreides as a potential future rival and threat, so conspires with House Harkonnen, the former stewards of Arrakis and the longstanding enemies of House Atreides, to destroy Leto and his family after their arrival. Leto is aware his assignment is a trap of some kind, but is compelled to obey the Emperor's orders anyway. -Leto's concubine Lady Jessica is an acolyte of the Bene Gesserit, an exclusively female group that pursues mysterious political aims and wields seemingly superhuman physical and mental abilities, such as the ability to control their bodies down to the cellular level, and also decide the sex of their children. Though Jessica was instructed by the Bene Gesserit to bear a daughter as part of their breeding program, out of love for Leto she bore a son, Paul. From a young age, Paul has been trained in warfare by Leto's aides, the elite soldiers Duncan Idaho and Gurney Halleck. Thufir Hawat, the Duke's Mentat (human computers, able to store vast amounts of data and perform advanced calculations on demand), has instructed Paul in the ways of political intrigue. Jessica has also trained her son in Bene Gesserit disciplines. -Paul's prophetic dreams interest Jessica's superior, the Reverend Mother Gaius Helen Mohiam, who subjects Paul to the deadly gom jabbar test. Holding a poisonous needle to his neck ready to strike should he be unable to resist the impulse to withdraw his hand from the nerve induction box, she tests Paul's self-control to overcome the extreme psychological pain he is being subjected to through the box. -Leto, Jessica, and Paul travel with their household to occupy Arrakeen, the capital on Arrakis formerly held by House Harkonnen. 
Leto learns of the dangers involved in harvesting the spice, which is protected by giant sandworms, and seeks to negotiate with the planet's native Fremen people, seeing them as a valuable ally rather than foes. Soon after the Atreides's arrival, Harkonnen forces attack, joined by the Emperor's ferocious Sardaukar troops in disguise. Leto is betrayed by his personal physician, the Suk doctor Wellington Yueh, who delivers a drugged Leto to the Baron Vladimir Harkonnen and his twisted Mentat, Piter De Vries. Yueh, however, arranges for Jessica and Paul to escape into the desert, where they are presumed dead by the Harkonnens. Yueh replaces one of Leto's teeth with a poison gas capsule, hoping Leto can kill the Baron during their encounter. The Baron narrowly avoids the gas due to his shield, which kills Leto, De Vries, and the others in the room. The Baron forces Hawat to take over De Vries's position by dosing him with a long-lasting, fatal poison and threatening to withhold the regular antidote doses unless he obeys. While he follows the Baron's orders, Hawat works secretly to undermine the Harkonnens. -Having fled into the desert, Paul is exposed to high concentrations of spice and has visions through which he realizes he has significant powers (as a result of the Bene Gesserit breeding scheme). He foresees potential futures in which he lives among the planet's native Fremen before leading them on a Holy Jihad across the known universe. -It is revealed Jessica is the daughter of Baron Harkonnen, a secret kept from her by the Bene Gesserit. After being captured by Fremen, Paul and Jessica are accepted into the Fremen community of Sietch Tabr, and teach the Fremen the Bene Gesserit fighting technique known as the "weirding way". Paul proves his manhood by killing a Fremen named Jamis in a ritualistic crysknife fight and chooses the Fremen name Muad'Dib, while Jessica opts to undergo a ritual to become a Reverend Mother by drinking the poisonous Water of Life. Pregnant with Leto's daughter, she inadvertently causes the unborn child, Alia, to become infused with the same powers in the womb. Paul takes a Fremen lover, Chani, and has a son with her, Leto II. -Two years pass and Paul's powerful prescience manifests, which confirms for the Fremen that he is their prophesied messiah, a legend planted by the Bene Gesserit's Missionaria Protectiva. Paul embraces his father's belief that the Fremen could be a powerful fighting force to take back Arrakis, but also sees that if he does not control them, their jihad could consume the entire universe. Word of the new Fremen leader reaches both Baron Harkonnen and the Emperor as spice production falls due to their increasingly destructive raids. The Baron encourages his brutish nephew Glossu Rabban to rule with an iron fist, hoping the contrast with his shrewder nephew Feyd-Rautha will make the latter popular among the people of Arrakis when he eventually replaces Rabban. The Emperor, suspecting the Baron of trying to create troops more powerful than the Sardaukar to seize power, sends spies to monitor activity on Arrakis. Hawat uses the opportunity to sow seeds of doubt in the Baron about the Emperor's true plans, putting further strain on their alliance. -Gurney, having survived the Harkonnen coup becomes a smuggler, reuniting with Paul and Jessica after a Fremen raid on his harvester. Believing Jessica to be the traitor, Gurney threatens to kill her, but is stopped by Paul. 
Paul did not foresee Gurney's attack, and concludes he must increase his prescience by drinking the Water of Life, which is traditionally fatal to males. Paul falls into unconsciousness for three weeks after drinking the poison, but when he wakes, he has clairvoyance across time and space: he is the Kwisatz Haderach, the ultimate goal of the Bene Gesserit breeding program. -Paul senses the Emperor and Baron are amassing fleets around Arrakis to quell the Fremen rebellion, and prepares the Fremen for a major offensive against the Harkonnen troops. The Emperor arrives with the Baron on Arrakis. The Emperor's troops seize a Fremen outpost, killing many including young Leto II, while Alia is captured and taken to the Emperor. Under cover of an electric storm, which shorts out the Emperor's troops' defensive shields, Paul and the Fremen, riding giant sandworms, assault the capital while Alia assassinates the Baron and escapes. The Fremen quickly defeat both the Harkonnen and Sardaukar troops. -Paul faces the Emperor, threatening to destroy spice production forever unless Shaddam abdicates the throne. Feyd-Rautha attempts to stop Paul by challenging him to a ritualistic knife fight, during which he attempts to cheat and kill Paul with a poison spur in his belt. Paul gains the upper hand and kills him. The Emperor reluctantly cedes the throne to Paul and promises his daughter Princess Irulan's hand in marriage. As Paul takes control of the Empire, he realizes that while he has achieved his goal, he is no longer able to stop the Fremen jihad, as their belief in him is too powerful to restrain. - - -== Characters == -House AtreidesPaul Atreides, the Duke's son, and main character of the novel -Duke Leto Atreides, head of House Atreides -Lady Jessica, Bene Gesserit and concubine of the Duke, mother of Paul and Alia -Alia Atreides, Paul's younger sister -Thufir Hawat, Mentat and Master of Assassins to House Atreides -Gurney Halleck, staunchly loyal troubadour warrior of the Atreides -Duncan Idaho, Swordmaster for House Atreides, graduate of the Ginaz School -Wellington Yueh, Suk doctor for the Atreides who is secretly working for House HarkonnenHouse HarkonnenBaron Vladimir Harkonnen, head of House Harkonnen -Piter De Vries, twisted Mentat -Feyd-Rautha, nephew and heir-presumptive of the Baron -Glossu "Beast" Rabban, also called Rabban Harkonnen, older nephew of the Baron -Iakin Nefud, Captain of the GuardHouse CorrinoShaddam IV, Padishah Emperor of the Known Universe (the Imperium) -Princess Irulan, Shaddam's eldest daughter and heir, also a historian -Count Fenring, the Emperor's closest friend, advisor, and "errand boy"Bene GesseritReverend Mother Gaius Helen Mohiam, Proctor Superior of the Bene Gesserit school and the Emperor's Truthsayer -Lady Margot Fenring, Bene Gesserit wife of Count FenringFremenThe Fremen, native inhabitants of Arrakis -Stilgar, Fremen leader of Sietch Tabr -Chani, Paul's Fremen concubine and a Sayyadina (female acolyte) of Sietch Tabr -Dr. 
Liet-Kynes, the Imperial Planetologist on Arrakis and father of Chani, as well as a revered figure among the Fremen -The Shadout Mapes, head housekeeper of imperial residence on Arrakis -Jamis, Fremen killed by Paul in ritual duel -Harah, wife of Jamis and later servant to Paul who helps raise Alia among the Fremen -Reverend Mother Ramallo, religious leader of Sietch TabrSmugglersEsmar Tuek, a powerful smuggler and the father of Staban Tuek -Staban Tuek, the son of Esmar Tuek and a powerful smuggler who befriends and takes in Gurney Halleck and his surviving men after the attack on the Atreides - - -== Themes and influences == -The Dune series is a landmark of science fiction. Herbert deliberately suppressed technology in his Dune universe so he could address the politics of humanity, rather than the future of humanity's technology. For example, a key pre-history event to the novel's present is the "Butlerian Jihad", in which all robots and computers were destroyed, eliminating these common elements to science fiction from the novel as to allow focus on humanity. Dune considers the way humans and their institutions might change over time. Director John Harrison, who adapted Dune for Syfy's 2000 miniseries, called the novel a universal and timeless reflection of "the human condition and its moral dilemmas", and said: - -A lot of people refer to Dune as science fiction. I never do. I consider it an epic adventure in the classic storytelling tradition, a story of myth and legend not unlike the Morte d'Arthur or any messiah story. It just happens to be set in the future ... The story is actually more relevant today than when Herbert wrote it. In the 1960s, there were just these two colossal superpowers duking it out. Today we're living in a more feudal, corporatized world more akin to Herbert's universe of separate families, power centers and business interests, all interrelated and kept together by the one commodity necessary to all. -But Dune has also been called a mix of soft and hard science fiction since "the attention to ecology is hard, the anthropology and the psychic abilities are soft." Hard elements include the ecology of Arrakis, suspensor technology, weapon systems, and ornithopters, while soft elements include issues relating to religion, physical and mental training, cultures, politics, and psychology.Herbert said Paul's messiah figure was inspired by the Arthurian legend, and that the scarcity of water on Arrakis was a metaphor for oil, as well as air and water itself, and for the shortages of resources caused by overpopulation. Novelist Brian Herbert, his son and biographer, wrote: - -Dune is a modern-day conglomeration of familiar myths, a tale in which great sandworms guard a precious treasure of melange, the geriatric spice that represents, among other things, the finite resource of oil. The planet Arrakis features immense, ferocious worms that are like dragons of lore, with "great teeth" and a "bellows breath of cinnamon." This resembles the myth described by an unknown English poet in Beowulf, the compelling tale of a fearsome fire dragon who guarded a great treasure hoard in a lair under cliffs, at the edge of the sea. The desert of Frank Herbert's classic novel is a vast ocean of sand, with giant worms diving into the depths, the mysterious and unrevealed domain of Shai-hulud. Dune tops are like the crests of waves, and there are powerful sandstorms out there, creating extreme danger. 
On Arrakis, life is said to emanate from the Maker (Shai-hulud) in the desert-sea; similarly all life on Earth is believed to have evolved from our oceans. Frank Herbert drew parallels, used spectacular metaphors, and extrapolated present conditions into world systems that seem entirely alien at first blush. But close examination reveals they aren't so different from systems we know … and the book characters of his imagination are not so different from people familiar to us. -Each chapter of Dune begins with an epigraph excerpted from the fictional writings of the character Princess Irulan. In forms such as diary entries, historical commentary, biography, quotations and philosophy, these writings set tone and provide exposition, context and other details intended to enhance understanding of Herbert's complex fictional universe and themes. They act as foreshadowing and invite the reader to keep reading to close the gap between what the epigraph says and what is happening in the main narrative. The epigraphs also give the reader the feeling that the world they are reading about is epically distanced, since Irulan writes about an idealized image of Paul as if he had already passed into memory. Brian Herbert wrote: "Dad told me that you could follow any of the novel's layers as you read it, and then start the book all over again, focusing on an entirely different layer. At the end of the book, he intentionally left loose ends and said he did this to send the readers spinning out of the story with bits and pieces of it still clinging to them, so that they would want to go back and read it again." - - -=== Middle-Eastern and Islamic references === -Due to the similarities between some of Herbert's terms and ideas and actual words and concepts in the Arabic language, as well as the series' "Islamic undertones" and themes, a Middle-Eastern influence on Herbert's works has been noted repeatedly. In his descriptions of the Fremen culture and language, Herbert uses both authentic Arabic words and Arabic-sounding words. For example, one of the names for the sandworm, Shai-hulud, is derived from Arabic: شيء خلود, romanized: šayʾ ḫulūd, lit. 'immortal thing' or Arabic: شيخ خلود, romanized: šayḫ ḫulūd, lit. 'old man of eternity'. The title of the Fremen housekeeper, the Shadout Mapes, is borrowed from the Arabic: شادوف‎, romanized: šādūf, the Egyptian term for a device used to raise water. In particular, words related to the messianic religion of the Fremen, first implanted by the Bene Gesserit, are taken from Arabic, including Muad'Dib (from Arabic: مؤدب, romanized: muʾaddib, lit. 'educator'), Usul (from Arabic: أصول, romanized: ʾuṣūl, lit. 'fundamental principles'), Shari-a (from Arabic: شريعة, romanized: šarīʿa, lit. 'sharia; path'), Shaitan (from Arabic: شيطان, romanized: šayṭān, lit. 'Shaitan; devil; fiend', and jinn (from Arabic: جن, romanized: ǧinn, lit. 'jinn; spirit; demon; mythical being'). It is likely Herbert relied on second-hand resources such as phrasebooks and desert adventure stories to find these Arabic words and phrases for the Fremen. They are meaningful and carefully chosen, and help create an "imagined desert culture that resonates with exotic sounds, enigmas, and pseudo-Islamic references" and has a distinctly Bedouin aesthetic.As a foreigner who adopts the ways of a desert-dwelling people and then leads them in a military capacity, Paul Atreides bears many similarities to the historical T. E. Lawrence. 
His 1962 biopic Lawrence of Arabia has also been identified as a potential influence. The Sabres of Paradise (1960) has also been identified as a potential influence upon Dune, with its depiction of Imam Shamil and the Islamic culture of the Caucasus inspiring some of the themes, characters, events and terminology of Dune.The environment of the desert planet Arrakis was primarily inspired by the environments of the Middle East. Similarly Arrakis as a bioregion is presented as a particular kind of political site. Herbert has made it resemble a desertified petrostate area. The Fremen people of Arrakis were influenced by the Bedouin tribes of Arabia, and the Mahdi prophecy originates from Islamic eschatology. Inspiration is also adopted from medieval historian Ibn Khaldun's cyclical history and his dynastic concept in North Africa, hinted at by Herbert's reference to Khaldun's book Kitāb al-ʿibar ("The Book of Lessons"). The fictionalized version of the "Kitab al-ibar" in Dune is a combination of a Fremen religious manual and a desert survival book. - - -==== Additional language and historic influences ==== -In addition to Arabic, Dune derives words and names from a variety of other languages, including Hebrew, Navajo, Latin, Dutch ("Landsraad"), Chakobsa, the Nahuatl language of the Aztecs, Greek, Persian, Sanskrit ("prana bindu", "prajna"), Russian, Turkish, Finnish, and Old English. Bene Gesserit is simply the Latin for "It will have been well fought", also carrying the sense of "It will have been well managed", which stands as a statement of the order's goal and as a pledge of faithfulness to that goal. Critics tend to miss the literal meaning of the phrase, some positing that the term is derived from the Latin meaning "it will have been well borne", which interpretation is not well supported by their doctrine in the story.Through the inspiration from The Sabres of Paradise, there are also allusions to the tsarist-era Russian nobility and Cossacks. Frank Herbert stated that bureaucracy that lasted long enough would become a hereditary nobility, and a significant theme behind the aristocratic families in Dune was "aristocratic bureaucracy" which he saw as analogous to the Soviet Union. - - -=== Environmentalism and ecology === -Dune has been called the "first planetary ecology novel on a grand scale". Herbert hoped it would be seen as an "environmental awareness handbook" and said the title was meant to "echo the sound of 'doom'". It was reviewed in the best selling countercultural Whole Earth Catalog in 1968 as a "rich re-readable fantasy with clear portrayal of the fierce environment it takes to cohere a community".After the publication of Silent Spring by Rachel Carson in 1962, science fiction writers began treating the subject of ecological change and its consequences. Dune responded in 1965 with its complex descriptions of Arrakis life, from giant sandworms (for whom water is deadly) to smaller, mouse-like life forms adapted to live with limited water. Dune was followed in its creation of complex and unique ecologies by other science fiction books such as A Door into Ocean (1986) and Red Mars (1992). 
Environmentalists have pointed out that Dune's popularity as a novel depicting a planet as a complex—almost living—thing, in combination with the first images of Earth from space being published in the same time period, strongly influenced environmental movements such as the establishment of the international Earth Day.While the genre of climate fiction was popularized in the 2010s in response to real global climate change, Dune as well as other early science fiction works from authors like J. G. Ballard (The Drowned World) and Kim Stanley Robinson (the Mars trilogy) have retroactively been considered pioneering examples of the genre. - - -=== Declining empires === -The Imperium in Dune contains features of various empires in Europe and the Near East, including the Roman Empire, Holy Roman Empire, and Ottoman Empire. Lorenzo DiTommaso compared Dune's portrayal of the downfall of a galactic empire to Edward Gibbon's Decline and Fall of the Roman Empire, which argues that Christianity allied with the profligacy of the Roman elite led to the fall of Ancient Rome. In "The Articulation of Imperial Decadence and Decline in Epic Science Fiction" (2007), DiTommaso outlines similarities between the two works by highlighting the excesses of the Emperor on his home planet of Kaitain and of the Baron Harkonnen in his palace. The Emperor loses his effectiveness as a ruler through an excess of ceremony and pomp. The hairdressers and attendants he brings with him to Arrakis are even referred to as "parasites". The Baron Harkonnen is similarly corrupt and materially indulgent. Gibbon's Decline and Fall partly blames the fall of Rome on the rise of Christianity. Gibbon claimed that this exotic import from a conquered province weakened the soldiers of Rome and left it open to attack. The Emperor's Sardaukar fighters are little match for the Fremen of Dune not only because of the Sardaukar's overconfidence and the fact that Jessica and Paul have trained the Fremen in their battle tactics, but because of the Fremen's capacity for self-sacrifice. The Fremen put the community before themselves in every instance, while the world outside wallows in luxury at the expense of others.The decline and long peace of the Empire sets the stage for revolution and renewal by genetic mixing of successful and unsuccessful groups through war, a process culminating in the Jihad led by Paul Atreides, described by Frank Herbert as depicting "war as a collective orgasm" (drawing on Norman Walter's 1950 The Sexual Cycle of Human Warfare), themes that would reappear in God Emperor of Dune's Scattering and Leto II's all-female Fish Speaker army. - - -=== Gender dynamics === -Gender dynamics are complex in Dune. Within the Fremen sietch communities, women have almost full equality. They carry weapons and travel in raiding parties with men, fighting when necessary alongside the men. They can take positions of leadership as a Sayyadina or as a Reverend Mother (if she can survive the ritual of ingesting the Water of Life.) Both of these sietch religious leaders are routinely consulted by the all-male Council and can have a decisive voice in all matters of sietch life, security and internal politics. They are also protected by the entire community. Due to the high mortality rate among their men, women outnumber men in most sietches. Polygamy is common, and sexual relationships are voluntary and consensual; as Stilgar says to Jessica, "women among us are not taken against their will." 
-In contrast, the Imperial aristocracy leaves young women of noble birth very little agency. Frequently trained by the Bene Gesserit, they are raised to eventually marry other aristocrats. Marriages between Major and Minor Houses are political tools to forge alliances or heal old feuds; women are given very little say in the matter. Many such marriages are quietly maneuvered by the Bene Gesserit to produce offspring with some genetic characteristics needed by the sisterhood's human-breeding program. In addition, such highly-placed sisters were in a position to subtly influence their husbands' actions in ways that could move the politics of the Imperium toward Bene Gesserit goals. -The gom jabbar test of humanity is administered by the female Bene Gesserit order but rarely to males. The Bene Gesserit have seemingly mastered the unconscious and can play on the unconscious weaknesses of others using the Voice, yet their breeding program seeks after a male Kwisatz Haderach. Their plan is to produce a male who can "possess complete racial memory, both male and female," and look into the black hole in the collective unconscious that they fear. A central theme of the book is the connection, in Jessica's son, of this female aspect with his male aspect. This aligns with concepts in Jungian psychology, which features conscious/unconscious and taking/giving roles associated with males and females, as well as the idea of the collective unconscious. Paul's approach to power consistently requires his upbringing under the matriarchal Bene Gesserit, who operate as a long-dominating shadow government behind all of the great houses and their marriages or divisions. He is trained by Jessica in the Bene Gesserit Way, which includes prana-bindu training in nerve and muscle control and precise perception. Paul also receives Mentat training, thus helping prepare him to be a type of androgynous Kwisatz Haderach, a male Reverend Mother.In a Bene Gesserit test early in the book, it is implied that people are generally "inhuman" in that they irrationally place desire over self-interest and reason. This applies Herbert's philosophy that humans are not created equal, while equal justice and equal opportunity are higher ideals than mental, physical, or moral equality. - - -=== Heroism === -I am showing you the superhero syndrome and your own participation in it. -Throughout Paul's rise to superhuman status, he follows a plotline common to many stories describing the birth of a hero. He has unfortunate circumstances forced onto him. After a long period of hardship and exile, he confronts and defeats the source of evil in his tale. As such, Dune is representative of a general trend beginning in 1960s American science fiction in that it features a character who attains godlike status through scientific means. Eventually, Paul Atreides gains a level of omniscience which allows him to take over the planet and the galaxy, and causes the Fremen of Arrakis to worship him like a god. Author Frank Herbert said in 1979, "The bottom line of the Dune trilogy is: beware of heroes. Much better [to] rely on your own judgment, and your own mistakes." He wrote in 1985, "Dune was aimed at this whole idea of the infallible leader because my view of history says that mistakes made by a leader (or made in a leader's name) are amplified by the numbers who follow without question."Juan A. 
Prieto-Pablos says Herbert achieves a new typology with Paul's superpowers, differentiating the heroes of Dune from earlier heroes such as Superman, van Vogt's Gilbert Gosseyn and Henry Kuttner's telepaths. Unlike previous superheroes who acquire their powers suddenly and accidentally, Paul's are the result of "painful and slow personal progress." And unlike other superheroes of the 1960s—who are the exception among ordinary people in their respective worlds—Herbert's characters grow their powers through "the application of mystical philosophies and techniques." For Herbert, the ordinary person can develop incredible fighting skills (Fremen, Ginaz swordsmen and Sardaukar) or mental abilities (Bene Gesserit, Mentats, Spacing Guild Navigators). - - -=== Zen and religion === - -Early in his newspaper career, Herbert was introduced to Zen by two Jungian psychologists, Ralph and Irene Slattery, who "gave a crucial boost to his thinking". Zen teachings ultimately had "a profound and continuing influence on [Herbert's] work". Throughout the Dune series and particularly in Dune, Herbert employs concepts and forms borrowed from Zen Buddhism. The Fremen are referred to as Zensunni adherents, and many of Herbert's epigraphs are Zen-spirited. In "Dune Genesis", Frank Herbert wrote: - -What especially pleases me is to see the interwoven themes, the fugue like relationships of images that exactly replay the way Dune took shape. As in an Escher lithograph, I involved myself with recurrent themes that turn into paradox. The central paradox concerns the human vision of time. What about Paul's gift of prescience - the Presbyterian fixation? For the Delphic Oracle to perform, it must tangle itself in a web of predestination. Yet predestination negates surprises and, in fact, sets up a mathematically enclosed universe whose limits are always inconsistent, always encountering the unprovable. It's like a koan, a Zen mind breaker. It's like the Cretan Epimenides saying, "All Cretans are liars." -Brian Herbert called the Dune universe "a spiritual melting pot", noting that his father incorporated elements of a variety of religions, including Buddhism, Sufi mysticism and other Islamic belief systems, Catholicism, Protestantism, Judaism, and Hinduism. He added that Frank Herbert's fictional future in which "religious beliefs have combined into interesting forms" represents the author's solution to eliminating arguments between religions, each of which claimed to have "the one and only revelation." 
\ No newline at end of file diff --git a/templates/neo4j-advanced-rag/ingest.py b/templates/neo4j-advanced-rag/ingest.py deleted file mode 100644 index 270a597164a..00000000000 --- a/templates/neo4j-advanced-rag/ingest.py +++ /dev/null @@ -1,201 +0,0 @@ -from pathlib import Path -from typing import List - -from langchain_community.document_loaders import TextLoader -from langchain_community.graphs import Neo4jGraph -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_openai import ChatOpenAI, OpenAIEmbeddings -from langchain_text_splitters import TokenTextSplitter -from neo4j.exceptions import ClientError - -txt_path = Path(__file__).parent / "dune.txt" - -graph = Neo4jGraph() - -# Embeddings & LLM models -embeddings = OpenAIEmbeddings() -embedding_dimension = 1536 -llm = ChatOpenAI(temperature=0) - -# Load the text file -loader = TextLoader(str(txt_path)) -documents = loader.load() - -# Ingest Parent-Child node pairs -parent_splitter = TokenTextSplitter(chunk_size=512, chunk_overlap=24) -child_splitter = TokenTextSplitter(chunk_size=100, chunk_overlap=24) -parent_documents = parent_splitter.split_documents(documents) - -for i, parent in enumerate(parent_documents): - child_documents = child_splitter.split_documents([parent]) - params = { - "parent_text": parent.page_content, - "parent_id": i, - "parent_embedding": embeddings.embed_query(parent.page_content), - "children": [ - { - "text": c.page_content, - "id": f"{i}-{ic}", - "embedding": embeddings.embed_query(c.page_content), - } - for ic, c in enumerate(child_documents) - ], - } - # Ingest data - graph.query( - """ - MERGE (p:Parent {id: $parent_id}) - SET p.text = $parent_text - WITH p - CALL db.create.setVectorProperty(p, 'embedding', $parent_embedding) - YIELD node - WITH p - UNWIND $children AS child - MERGE (c:Child {id: child.id}) - SET c.text = child.text - MERGE (c)<-[:HAS_CHILD]-(p) - WITH c, child - CALL db.create.setVectorProperty(c, 'embedding', child.embedding) - YIELD node - RETURN count(*) - """, - params, - ) - # Create vector index for child - try: - graph.query( - "CALL db.index.vector.createNodeIndex('parent_document', " - "'Child', 'embedding', $dimension, 'cosine')", - {"dimension": embedding_dimension}, - ) - except ClientError: # already exists - pass - # Create vector index for parents - try: - graph.query( - "CALL db.index.vector.createNodeIndex('typical_rag', " - "'Parent', 'embedding', $dimension, 'cosine')", - {"dimension": embedding_dimension}, - ) - except ClientError: # already exists - pass -# Ingest hypothethical questions - - -class Questions(BaseModel): - """Generating hypothetical questions about text.""" - - questions: List[str] = Field( - ..., - description=( - "Generated hypothetical questions based on " "the information from the text" - ), - ) - - -questions_prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - ( - "You are generating hypothetical questions based on the information " - "found in the text. Make sure to provide full context in the generated " - "questions." 
- ), - ), - ( - "human", - ( - "Use the given format to generate hypothetical questions from the " - "following input: {input}" - ), - ), - ] -) - -question_chain = questions_prompt | llm.with_structured_output(Questions) - -for i, parent in enumerate(parent_documents): - questions = question_chain.invoke(parent.page_content).questions - params = { - "parent_id": i, - "questions": [ - {"text": q, "id": f"{i}-{iq}", "embedding": embeddings.embed_query(q)} - for iq, q in enumerate(questions) - if q - ], - } - graph.query( - """ - MERGE (p:Parent {id: $parent_id}) - WITH p - UNWIND $questions AS question - CREATE (q:Question {id: question.id}) - SET q.text = question.text - MERGE (q)<-[:HAS_QUESTION]-(p) - WITH q, question - CALL db.create.setVectorProperty(q, 'embedding', question.embedding) - YIELD node - RETURN count(*) - """, - params, - ) - # Create vector index - try: - graph.query( - "CALL db.index.vector.createNodeIndex('hypothetical_questions', " - "'Question', 'embedding', $dimension, 'cosine')", - {"dimension": embedding_dimension}, - ) - except ClientError: # already exists - pass - -# Ingest summaries - -summary_prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - ( - "You are generating concise and accurate summaries based on the " - "information found in the text." - ), - ), - ( - "human", - ("Generate a summary of the following input: {question}\n" "Summary:"), - ), - ] -) - -summary_chain = summary_prompt | llm - -for i, parent in enumerate(parent_documents): - summary = summary_chain.invoke({"question": parent.page_content}).content - params = { - "parent_id": i, - "summary": summary, - "embedding": embeddings.embed_query(summary), - } - graph.query( - """ - MERGE (p:Parent {id: $parent_id}) - MERGE (p)-[:HAS_SUMMARY]->(s:Summary) - SET s.text = $summary - WITH s - CALL db.create.setVectorProperty(s, 'embedding', $embedding) - YIELD node - RETURN count(*) - """, - params, - ) - # Create vector index - try: - graph.query( - "CALL db.index.vector.createNodeIndex('summary', " - "'Summary', 'embedding', $dimension, 'cosine')", - {"dimension": embedding_dimension}, - ) - except ClientError: # already exists - pass diff --git a/templates/neo4j-advanced-rag/main.py b/templates/neo4j-advanced-rag/main.py deleted file mode 100644 index 2b339491cd6..00000000000 --- a/templates/neo4j-advanced-rag/main.py +++ /dev/null @@ -1,10 +0,0 @@ -from neo4j_advanced_rag.chain import chain - -if __name__ == "__main__": - original_query = "What is the plot of the Dune?" 
- print( - chain.invoke( - {"question": original_query}, - {"configurable": {"strategy": "parent_strategy"}}, - ) - ) diff --git a/templates/neo4j-advanced-rag/neo4j_advanced_rag/__init__.py b/templates/neo4j-advanced-rag/neo4j_advanced_rag/__init__.py deleted file mode 100644 index 39202e0f420..00000000000 --- a/templates/neo4j-advanced-rag/neo4j_advanced_rag/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neo4j_advanced_rag.chain import chain - -__all__ = ["chain"] diff --git a/templates/neo4j-advanced-rag/neo4j_advanced_rag/chain.py b/templates/neo4j-advanced-rag/neo4j_advanced_rag/chain.py deleted file mode 100644 index 1120e2fe3f0..00000000000 --- a/templates/neo4j-advanced-rag/neo4j_advanced_rag/chain.py +++ /dev/null @@ -1,56 +0,0 @@ -from operator import itemgetter - -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import ConfigurableField, RunnableParallel -from langchain_openai import ChatOpenAI - -from neo4j_advanced_rag.retrievers import ( - hypothetic_question_vectorstore, - parent_vectorstore, - summary_vectorstore, - typical_rag, -) - - -def format_docs(docs): - return "\n\n".join(doc.page_content for doc in docs) - - -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -model = ChatOpenAI() - -retriever = typical_rag.as_retriever().configurable_alternatives( - ConfigurableField(id="strategy"), - default_key="typical_rag", - parent_strategy=parent_vectorstore.as_retriever(), - hypothetical_questions=hypothetic_question_vectorstore.as_retriever(), - summary_strategy=summary_vectorstore.as_retriever(), -) - -chain = ( - RunnableParallel( - { - "context": itemgetter("question") | retriever | format_docs, - "question": itemgetter("question"), - } - ) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - question: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py b/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py deleted file mode 100644 index a6574e9f31a..00000000000 --- a/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py +++ /dev/null @@ -1,49 +0,0 @@ -from langchain_community.vectorstores import Neo4jVector -from langchain_openai import OpenAIEmbeddings - -# Typical RAG retriever - -typical_rag = Neo4jVector.from_existing_index( - OpenAIEmbeddings(), index_name="typical_rag" -) - -# Parent retriever - -parent_query = """ -MATCH (node)<-[:HAS_CHILD]-(parent) -WITH parent, max(score) AS score // deduplicate parents -RETURN parent.text AS text, score, {} AS metadata LIMIT 1 -""" - -parent_vectorstore = Neo4jVector.from_existing_index( - OpenAIEmbeddings(), - index_name="parent_document", - retrieval_query=parent_query, -) - -# Hypothetic questions retriever - -hypothetic_question_query = """ -MATCH (node)<-[:HAS_QUESTION]-(parent) -WITH parent, max(score) AS score // deduplicate parents -RETURN parent.text AS text, score, {} AS metadata -""" - -hypothetic_question_vectorstore = Neo4jVector.from_existing_index( - OpenAIEmbeddings(), - index_name="hypothetical_questions", - retrieval_query=hypothetic_question_query, -) -# Summary retriever - -summary_query = """ -MATCH (node)<-[:HAS_SUMMARY]-(parent) -WITH parent, max(score) AS score // deduplicate parents -RETURN 
parent.text AS text, score, {} AS metadata
-"""
-
-summary_vectorstore = Neo4jVector.from_existing_index(
-    OpenAIEmbeddings(),
-    index_name="summary",
-    retrieval_query=summary_query,
-)
diff --git a/templates/neo4j-advanced-rag/pyproject.toml b/templates/neo4j-advanced-rag/pyproject.toml
deleted file mode 100644
index 18d5b3e6deb..00000000000
--- a/templates/neo4j-advanced-rag/pyproject.toml
+++ /dev/null
@@ -1,36 +0,0 @@
-[tool.poetry]
-name = "neo4j-advanced-rag"
-version = "0.1.0"
-description = "Balance precise embeddings and context retention with advanced strategies"
-authors = [
-    "Tomaz Bratanic ",
-]
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = ">=3.8.1,<4.0"
-langchain = "^0.1"
-tiktoken = "^0.5.1"
-openai = "<2"
-neo4j = "^5.14.0"
-langchain-text-splitters = ">=0.0.1,<0.1"
-langchain-openai = "^0.1.1"
-
-[tool.poetry.group.dev.dependencies]
-langchain-cli = ">=0.0.21"
-
-[tool.langserve]
-export_module = "neo4j_advanced_rag"
-export_attr = "chain"
-
-[tool.templates-hub]
-use-case = "rag"
-author = "Neo4j"
-integrations = ["Neo4j", "OpenAI"]
-tags = ["vectordb", "parent", "summarization"]
-
-[build-system]
-requires = [
-    "poetry-core",
-]
-build-backend = "poetry.core.masonry.api"
diff --git a/templates/neo4j-advanced-rag/tests/__init__.py b/templates/neo4j-advanced-rag/tests/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/templates/neo4j-cypher-ft/README.md b/templates/neo4j-cypher-ft/README.md
deleted file mode 100644
index 49c8436dbd8..00000000000
--- a/templates/neo4j-cypher-ft/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# Neo4j Cypher full-text index
-
-This template allows you to interact with a `Neo4j` graph database using natural language, leveraging OpenAI's LLM.
-
-Its main function is to convert natural language questions into `Cypher` queries (the language used to query Neo4j databases), execute these queries, and provide natural language responses based on the query's results.
-
-The package utilizes a `full-text index` for efficient mapping of text values to database entries, thereby enhancing the generation of accurate Cypher statements.
-
-In the provided example, the full-text index is used to map names of people and movies from the user's query to corresponding database entries.
-
-![Workflow diagram showing the process from a user asking a question to generating an answer using the Neo4j knowledge graph and full-text index.](https://raw.githubusercontent.com/langchain-ai/langchain/master/templates/neo4j-cypher-ft/static/workflow.png "Neo4j Cypher Workflow Diagram")
-
-## Environment Setup
-
-The following environment variables need to be set:
-
-```
-OPENAI_API_KEY=
-NEO4J_URI=
-NEO4J_USERNAME=
-NEO4J_PASSWORD=
-```
-
-Additionally, if you wish to populate the DB with some example data, you can run `python ingest.py`.
-This script will populate the database with sample movie data and create a full-text index named `entity`, which is used to map people and movies from user input to database values for precise Cypher statement generation.
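To make the mapping step concrete, the chain runs each extracted entity through the `entity` full-text index, roughly as in the simplified sketch below (adapted from this template's `chain.py`; the misspelled name is only an illustration of the fuzzy matching):

```python
from langchain_community.graphs import Neo4jGraph

# Neo4jGraph reads NEO4J_URI, NEO4J_USERNAME and NEO4J_PASSWORD from the environment
graph = Neo4jGraph()

# Prefix-match a possibly misspelled entity against the `entity` full-text index
response = graph.query(
    "CALL db.index.fulltext.queryNodes('entity', $entity + '*', {limit:1})"
    " YIELD node,score RETURN node.name AS result",
    {"entity": "Tom Cruis"},
)
print(response)  # e.g. [{'result': 'Tom Cruise'}] once ingest.py has been run
```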
- - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package neo4j-cypher-ft -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add neo4j-cypher-ft -``` - -And add the following code to your `server.py` file: -```python -from neo4j_cypher_ft import chain as neo4j_cypher_ft_chain - -add_routes(app, neo4j_cypher_ft_chain, path="/neo4j-cypher-ft") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/neo4j-cypher-ft/playground](http://127.0.0.1:8000/neo4j-cypher-ft/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/neo4j-cypher-ft") -``` diff --git a/templates/neo4j-cypher-ft/ingest.py b/templates/neo4j-cypher-ft/ingest.py deleted file mode 100644 index 99e90dfe419..00000000000 --- a/templates/neo4j-cypher-ft/ingest.py +++ /dev/null @@ -1,21 +0,0 @@ -from langchain_community.graphs import Neo4jGraph - -graph = Neo4jGraph() - -# Import sample data -graph.query( - """ -MERGE (m:Movie {name:"Top Gun"}) -WITH m -UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor -MERGE (a:Person {name:actor}) -MERGE (a)-[:ACTED_IN]->(m) -""" -) - -# Create full text index for entity matching -# on Person and Movie nodes -graph.query( - "CREATE FULLTEXT INDEX entity IF NOT EXISTS" - " FOR (m:Movie|Person) ON EACH [m.title, m.name]" -) diff --git a/templates/neo4j-cypher-ft/main.py b/templates/neo4j-cypher-ft/main.py deleted file mode 100644 index 490d4542536..00000000000 --- a/templates/neo4j-cypher-ft/main.py +++ /dev/null @@ -1,5 +0,0 @@ -from neo4j_cypher_ft.chain import chain - -if __name__ == "__main__": - original_query = "Did tom cruis act in top gun?" 
- print(chain.invoke({"question": original_query})) diff --git a/templates/neo4j-cypher-ft/neo4j_cypher_ft/__init__.py b/templates/neo4j-cypher-ft/neo4j_cypher_ft/__init__.py deleted file mode 100644 index 56cf88fe137..00000000000 --- a/templates/neo4j-cypher-ft/neo4j_cypher_ft/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neo4j_cypher_ft.chain import chain - -__all__ = ["chain"] diff --git a/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py b/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py deleted file mode 100644 index 5079f216f4f..00000000000 --- a/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py +++ /dev/null @@ -1,167 +0,0 @@ -from typing import List, Optional, Union - -from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema -from langchain_community.graphs import Neo4jGraph -from langchain_core.messages import ( - AIMessage, - SystemMessage, - ToolMessage, -) -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ( - ChatPromptTemplate, - HumanMessagePromptTemplate, - MessagesPlaceholder, -) -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import RunnablePassthrough -from langchain_openai import ChatOpenAI - -# Connection to Neo4j -graph = Neo4jGraph() - -# Cypher validation tool for relationship directions -corrector_schema = [ - Schema(el["start"], el["type"], el["end"]) - for el in graph.structured_schema.get("relationships") -] -cypher_validation = CypherQueryCorrector(corrector_schema) - -# LLMs -cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0) -qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0) - - -# Extract entities from text -class Entities(BaseModel): - """Identifying information about entities.""" - - names: List[str] = Field( - ..., - description="All the person, organization, or business entities that " - "appear in the text", - ) - - -prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are extracting organization and person entities from the text.", - ), - ( - "human", - "Use the given format to extract information from the following " - "input: {question}", - ), - ] -) - - -# Fulltext index query -def map_to_database(entities: Entities) -> Optional[str]: - result = "" - for entity in entities.names: - response = graph.query( - "CALL db.index.fulltext.queryNodes('entity', $entity + '*', {limit:1})" - " YIELD node,score RETURN node.name AS result", - {"entity": entity}, - ) - try: - result += f"{entity} maps to {response[0]['result']} in database\n" - except IndexError: - pass - return result - - -entity_chain = prompt | qa_llm.with_structured_output(Entities) - -# Generate Cypher statement based on natural language input -cypher_template = """Based on the Neo4j graph schema below, write a Cypher query that would answer the user's question: -{schema} -Entities in the question map to the following database values: -{entities_list} -Question: {question} -Cypher query:""" # noqa: E501 - -cypher_prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "Given an input question, convert it to a Cypher query. 
No pre-amble.", - ), - ("human", cypher_template), - ] -) - -cypher_response = ( - RunnablePassthrough.assign(names=entity_chain) - | RunnablePassthrough.assign( - entities_list=lambda x: map_to_database(x["names"]), - schema=lambda _: graph.get_schema, - ) - | cypher_prompt - | cypher_llm.bind(stop=["\nCypherResult:"]) - | StrOutputParser() -) - -# Generate natural language response based on database results -response_system = """You are an assistant that helps to form nice and human -understandable answers based on the provided information from tools. -Do not add any other information that wasn't present in the tools, and use -very concise style in interpreting results! -""" - -response_prompt = ChatPromptTemplate.from_messages( - [ - SystemMessage(content=response_system), - HumanMessagePromptTemplate.from_template("{question}"), - MessagesPlaceholder(variable_name="function_response"), - ] -) - - -def get_function_response( - query: str, question: str -) -> List[Union[AIMessage, ToolMessage]]: - context = graph.query(cypher_validation(query)) - TOOL_ID = "call_H7fABDuzEau48T10Qn0Lsh0D" - messages = [ - AIMessage( - content="", - additional_kwargs={ - "tool_calls": [ - { - "id": TOOL_ID, - "function": { - "arguments": '{"question":"' + question + '"}', - "name": "GetInformation", - }, - "type": "function", - } - ] - }, - ), - ToolMessage(content=str(context), tool_call_id=TOOL_ID), - ] - return messages - - -chain = ( - RunnablePassthrough.assign(query=cypher_response) - | RunnablePassthrough.assign( - function_response=lambda x: get_function_response(x["query"], x["question"]) - ) - | response_prompt - | qa_llm - | StrOutputParser() -) - -# Add typing for input - - -class Question(BaseModel): - question: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/neo4j-cypher-ft/pyproject.toml b/templates/neo4j-cypher-ft/pyproject.toml deleted file mode 100644 index 53efa93b494..00000000000 --- a/templates/neo4j-cypher-ft/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "neo4j-cypher-ft" -version = "0.1.0" -description = "Interact with a Neo4j graph database using natural language" -authors = [ - "Tomaz Bratanic ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -neo4j = ">5.12" -openai = "<2" -langchain-community = "^0.0.33" -langchain-openai = "^0.1.3" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "neo4j_cypher_ft" -export_attr = "chain" - -[tool.templates-hub] -use-case = "query" -author = "Neo4j" -integrations = ["Neo4j", "OpenAI"] -tags = ["search", "graph-database", "query"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/neo4j-cypher-ft/static/workflow.png b/templates/neo4j-cypher-ft/static/workflow.png deleted file mode 100644 index 1ec85d1c09f..00000000000 Binary files a/templates/neo4j-cypher-ft/static/workflow.png and /dev/null differ diff --git a/templates/neo4j-cypher-ft/tests/__init__.py b/templates/neo4j-cypher-ft/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/neo4j-cypher-memory/README.md b/templates/neo4j-cypher-memory/README.md deleted file mode 100644 index f91a9f0fd4e..00000000000 --- a/templates/neo4j-cypher-memory/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# Neo4j Cypher memory - -This template allows you to have conversations with a `Neo4j` graph database in natural language, using an OpenAI LLM. 
-It transforms a natural language question into a `Cypher` query (used to fetch data from Neo4j databases), executes the query, and provides a natural language response based on the query results. -Additionally, it features a `conversational memory` module that stores the dialogue history in the Neo4j graph database. -The conversation memory is uniquely maintained for each user session, ensuring personalized interactions. -To facilitate this, please supply both the `user_id` and `session_id` when using the conversation chain. - -![Workflow diagram illustrating the process of a user asking a question, generating a Cypher query, retrieving conversational history, executing the query on a Neo4j database, generating an answer, and storing conversational memory.](https://raw.githubusercontent.com/langchain-ai/langchain/master/templates/neo4j-cypher-memory/static/workflow.png) "Neo4j Cypher Memory Workflow Diagram" - -## Environment Setup - -Define the following environment variables: - -``` -OPENAI_API_KEY= -NEO4J_URI= -NEO4J_USERNAME= -NEO4J_PASSWORD= -``` - -## Neo4j database setup - -There are a number of ways to set up a Neo4j database. - -### Neo4j Aura - -Neo4j AuraDB is a fully managed cloud graph database service. -Create a free instance on [Neo4j Aura](https://neo4j.com/cloud/platform/aura-graph-database?utm_source=langchain&utm_content=langserve). -When you initiate a free database instance, you'll receive credentials to access the database. - -## Populating with data - -If you want to populate the DB with some example data, you can run `python ingest.py`. -This script will populate the database with sample movie data. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package neo4j-cypher-memory -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add neo4j-cypher-memory -``` - -And add the following code to your `server.py` file: -```python -from neo4j_cypher_memory import chain as neo4j_cypher_memory_chain - -add_routes(app, neo4j_cypher_memory_chain, path="/neo4j-cypher-memory") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/neo4j-cypher-memory/playground](http://127.0.0.1:8000/neo4j-cypher-memory/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/neo4j-cypher-memory") -``` diff --git a/templates/neo4j-cypher-memory/ingest.py b/templates/neo4j-cypher-memory/ingest.py deleted file mode 100644 index bbd4c6f57fd..00000000000 --- a/templates/neo4j-cypher-memory/ingest.py +++ /dev/null @@ -1,16 +0,0 @@ -from langchain_community.graphs import Neo4jGraph - -graph = Neo4jGraph() - -graph.query( - """ -MERGE (m:Movie {name:"Top Gun"}) -WITH m -UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor -MERGE (a:Actor {name:actor}) -MERGE (a)-[:ACTED_IN]->(m) -WITH a -WHERE a.name = "Tom Cruise" -MERGE (a)-[:ACTED_IN]->(:Movie {name:"Mission Impossible"}) -""" -) diff --git a/templates/neo4j-cypher-memory/main.py b/templates/neo4j-cypher-memory/main.py deleted file mode 100644 index 735fac99650..00000000000 --- a/templates/neo4j-cypher-memory/main.py +++ /dev/null @@ -1,23 +0,0 @@ -from neo4j_cypher_memory.chain import chain - -if __name__ == "__main__": - original_query = "Who played in Top Gun?" - print( - chain.invoke( - { - "question": original_query, - "user_id": "user_123", - "session_id": "session_1", - } - ) - ) - follow_up_query = "Did they play in any other movies?"
- print( - chain.invoke( - { - "question": follow_up_query, - "user_id": "user_123", - "session_id": "session_1", - } - ) - ) diff --git a/templates/neo4j-cypher-memory/neo4j_cypher_memory/__init__.py b/templates/neo4j-cypher-memory/neo4j_cypher_memory/__init__.py deleted file mode 100644 index 769c2921587..00000000000 --- a/templates/neo4j-cypher-memory/neo4j_cypher_memory/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neo4j_cypher_memory.chain import chain - -__all__ = ["chain"] diff --git a/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py b/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py deleted file mode 100644 index cbdbd5b7151..00000000000 --- a/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py +++ /dev/null @@ -1,182 +0,0 @@ -from typing import Any, Dict, List, Union - -from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema -from langchain.memory import ChatMessageHistory -from langchain_community.graphs import Neo4jGraph -from langchain_core.messages import ( - AIMessage, - SystemMessage, - ToolMessage, -) -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ( - ChatPromptTemplate, - HumanMessagePromptTemplate, - MessagesPlaceholder, -) -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough -from langchain_openai import ChatOpenAI - -# Connection to Neo4j -graph = Neo4jGraph() - -# Cypher validation tool for relationship directions -corrector_schema = [ - Schema(el["start"], el["type"], el["end"]) - for el in graph.structured_schema.get("relationships") -] -cypher_validation = CypherQueryCorrector(corrector_schema) - -# LLMs -cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0) -qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0) - - -def convert_messages(input: List[Dict[str, Any]]) -> ChatMessageHistory: - history = ChatMessageHistory() - for item in input: - history.add_user_message(item["result"]["question"]) - history.add_ai_message(item["result"]["answer"]) - return history - - -def get_history(input: Dict[str, Any]) -> ChatMessageHistory: - input.pop("question") - # Lookback conversation window - window = 3 - data = graph.query( - """ - MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}), - (s)-[:LAST_MESSAGE]->(last_message) - MATCH p=(last_message)<-[:NEXT*0..""" - + str(window) - + """]-() - WITH p, length(p) AS length - ORDER BY length DESC LIMIT 1 - UNWIND reverse(nodes(p)) AS node - MATCH (node)-[:HAS_ANSWER]->(answer) - RETURN {question:node.text, answer:answer.text} AS result - """, - params=input, - ) - history = convert_messages(data) - return history.messages - - -def save_history(input): - print(input) - if input.get("function_response"): - input.pop("function_response") - # store history to database - graph.query( - """MERGE (u:User {id: $user_id}) -WITH u -OPTIONAL MATCH (u)-[:HAS_SESSION]->(s:Session{id: $session_id}), - (s)-[l:LAST_MESSAGE]->(last_message) -FOREACH (_ IN CASE WHEN last_message IS NULL THEN [1] ELSE [] END | -CREATE (u)-[:HAS_SESSION]->(s1:Session {id:$session_id}), - (s1)-[:LAST_MESSAGE]->(q:Question {text:$question, cypher:$query, date:datetime()}), - (q)-[:HAS_ANSWER]->(:Answer {text:$output})) -FOREACH (_ IN CASE WHEN last_message IS NOT NULL THEN [1] ELSE [] END | -CREATE (last_message)-[:NEXT]->(q:Question - {text:$question, cypher:$query, date:datetime()}), - (q)-[:HAS_ANSWER]->(:Answer {text:$output}), - (s)-[:LAST_MESSAGE]->(q) -DELETE l) """, - 
params=input, - ) - - # Return LLM response to the chain - return input["output"] - - -# Generate Cypher statement based on natural language input -cypher_template = """This is important for my career. -Based on the Neo4j graph schema below, write a Cypher query that would answer the user's question: -{schema} - -Question: {question} -Cypher query:""" # noqa: E501 - -cypher_prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "Given an input question, convert it to a Cypher query. No pre-amble.", - ), - MessagesPlaceholder(variable_name="history"), - ("human", cypher_template), - ] -) - -cypher_response = ( - RunnablePassthrough.assign(schema=lambda _: graph.get_schema, history=get_history) - | cypher_prompt - | cypher_llm.bind(stop=["\nCypherResult:"]) - | StrOutputParser() -) - -# Generate natural language response based on database results -response_system = """You are an assistant that helps to form nice and human -understandable answers based on the provided information from tools. -Do not add any other information that wasn't present in the tools, and use -very concise style in interpreting results! -""" - -response_prompt = ChatPromptTemplate.from_messages( - [ - SystemMessage(content=response_system), - HumanMessagePromptTemplate.from_template("{question}"), - MessagesPlaceholder(variable_name="function_response"), - ] -) - - -def get_function_response( - query: str, question: str -) -> List[Union[AIMessage, ToolMessage]]: - context = graph.query(cypher_validation(query)) - TOOL_ID = "call_H7fABDuzEau48T10Qn0Lsh0D" - messages = [ - AIMessage( - content="", - additional_kwargs={ - "tool_calls": [ - { - "id": TOOL_ID, - "function": { - "arguments": '{"question":"' + question + '"}', - "name": "GetInformation", - }, - "type": "function", - } - ] - }, - ), - ToolMessage(content=str(context), tool_call_id=TOOL_ID), - ] - return messages - - -chain = ( - RunnablePassthrough.assign(query=cypher_response) - | RunnablePassthrough.assign( - function_response=lambda x: get_function_response(x["query"], x["question"]), - ) - | RunnablePassthrough.assign( - output=response_prompt | qa_llm | StrOutputParser(), - ) - | save_history -) - -# Add typing for input - - -class Question(BaseModel): - question: str - user_id: str - session_id: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/neo4j-cypher-memory/pyproject.toml b/templates/neo4j-cypher-memory/pyproject.toml deleted file mode 100644 index 6f628e8d7e3..00000000000 --- a/templates/neo4j-cypher-memory/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "neo4j-cypher-memory" -version = "0.1.0" -description = "Conversational interface for a Neo4j graph database" -authors = [ - "Tomaz Bratanic ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -neo4j = ">5.12" -openai = "<2" -langchain-community = "^0.0.33" -langchain-openai = "^0.1.3" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "neo4j_cypher_memory" -export_attr = "chain" - -[tool.templates-hub] -use-case = "query" -author = "Neo4j" -integrations = ["Neo4j", "OpenAI"] -tags = ["conversation", "graph-database"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/neo4j-cypher-memory/static/workflow.png b/templates/neo4j-cypher-memory/static/workflow.png deleted file mode 100644 index 29babf9e130..00000000000 Binary files 
a/templates/neo4j-cypher-memory/static/workflow.png and /dev/null differ diff --git a/templates/neo4j-cypher-memory/tests/__init__.py b/templates/neo4j-cypher-memory/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/neo4j-cypher/README.md b/templates/neo4j-cypher/README.md deleted file mode 100644 index ba1fb4ee603..00000000000 --- a/templates/neo4j-cypher/README.md +++ /dev/null @@ -1,94 +0,0 @@ -# Neo4j Cypher - -This template allows you to interact with a `Neo4j` graph database -in natural language, using an `OpenAI` LLM. - -It transforms a natural language question into a `Cypher` query -(used to fetch data from `Neo4j` databases), executes the query, -and provides a natural language response based on the query results. - -![Diagram showing the workflow of a user asking a question, which is processed by a Cypher generating chain, resulting in a Cypher query to the Neo4j Knowledge Graph, and then an answer generating chain that provides a generated answer based on the information from the graph.](https://raw.githubusercontent.com/langchain-ai/langchain/master/templates/neo4j-cypher/static/workflow.png) "Neo4j Cypher Workflow Diagram" - -## Environment Setup - -Define the following environment variables: - -``` -OPENAI_API_KEY= -NEO4J_URI= -NEO4J_USERNAME= -NEO4J_PASSWORD= -``` - -## Neo4j database setup - -There are a number of ways to set up a Neo4j database. - -### Neo4j Aura - -Neo4j AuraDB is a fully managed cloud graph database service. -Create a free instance on [Neo4j Aura](https://neo4j.com/cloud/platform/aura-graph-database?utm_source=langchain&utm_content=langserve). -When you initiate a free database instance, you'll receive credentials to access the database. - -## Populating with data - -If you want to populate the DB with some example data, you can run `python ingest.py`. -This script will populate the database with sample movie data. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package neo4j-cypher -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add neo4j-cypher -``` - -And add the following code to your `server.py` file: -```python -from neo4j_cypher import chain as neo4j_cypher_chain - -add_routes(app, neo4j_cypher_chain, path="/neo4j-cypher") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/neo4j_cypher/playground](http://127.0.0.1:8000/neo4j_cypher/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/neo4j-cypher") -``` diff --git a/templates/neo4j-cypher/ingest.py b/templates/neo4j-cypher/ingest.py deleted file mode 100644 index 3b097b9462e..00000000000 --- a/templates/neo4j-cypher/ingest.py +++ /dev/null @@ -1,13 +0,0 @@ -from langchain_community.graphs import Neo4jGraph - -graph = Neo4jGraph() - -graph.query( - """ -MERGE (m:Movie {name:"Top Gun"}) -WITH m -UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor -MERGE (a:Actor {name:actor}) -MERGE (a)-[:ACTED_IN]->(m) -""" -) diff --git a/templates/neo4j-cypher/main.py b/templates/neo4j-cypher/main.py deleted file mode 100644 index 611cbe2aff9..00000000000 --- a/templates/neo4j-cypher/main.py +++ /dev/null @@ -1,5 +0,0 @@ -from neo4j_cypher.chain import chain - -if __name__ == "__main__": - original_query = "Who played in Top Gun?" - print(chain.invoke({"question": original_query})) diff --git a/templates/neo4j-cypher/neo4j_cypher/__init__.py b/templates/neo4j-cypher/neo4j_cypher/__init__.py deleted file mode 100644 index c18e08325b6..00000000000 --- a/templates/neo4j-cypher/neo4j_cypher/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neo4j_cypher.chain import chain - -__all__ = ["chain"] diff --git a/templates/neo4j-cypher/neo4j_cypher/chain.py b/templates/neo4j-cypher/neo4j_cypher/chain.py deleted file mode 100644 index effa02fb63f..00000000000 --- a/templates/neo4j-cypher/neo4j_cypher/chain.py +++ /dev/null @@ -1,118 +0,0 @@ -from typing import List, Union - -from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema -from langchain_community.graphs import Neo4jGraph -from langchain_core.messages import ( - AIMessage, - SystemMessage, - ToolMessage, -) -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ( - ChatPromptTemplate, - HumanMessagePromptTemplate, - MessagesPlaceholder, -) -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough -from langchain_openai import ChatOpenAI - -# Connection to Neo4j -graph = Neo4jGraph() - -# Cypher validation tool for relationship directions -corrector_schema = [ - Schema(el["start"], el["type"], el["end"]) - for el in graph.structured_schema.get("relationships") -] -cypher_validation = CypherQueryCorrector(corrector_schema) - -# LLMs -cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0) -qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0) - -# Generate Cypher statement based on natural language input -cypher_template = """Based on the Neo4j graph schema below, write a Cypher query that would answer the user's question: -{schema} - -Question: {question} -Cypher query:""" # noqa: E501 - -cypher_prompt = ChatPromptTemplate.from_messages( - [ - ( - 
"system", - "Given an input question, convert it to a Cypher query. No pre-amble.", - ), - ("human", cypher_template), - ] -) - -cypher_response = ( - RunnablePassthrough.assign( - schema=lambda _: graph.get_schema, - ) - | cypher_prompt - | cypher_llm.bind(stop=["\nCypherResult:"]) - | StrOutputParser() -) - -response_system = """You are an assistant that helps to form nice and human -understandable answers based on the provided information from tools. -Do not add any other information that wasn't present in the tools, and use -very concise style in interpreting results! -""" - -response_prompt = ChatPromptTemplate.from_messages( - [ - SystemMessage(content=response_system), - HumanMessagePromptTemplate.from_template("{question}"), - MessagesPlaceholder(variable_name="function_response"), - ] -) - - -def get_function_response( - query: str, question: str -) -> List[Union[AIMessage, ToolMessage]]: - context = graph.query(cypher_validation(query)) - TOOL_ID = "call_H7fABDuzEau48T10Qn0Lsh0D" - messages = [ - AIMessage( - content="", - additional_kwargs={ - "tool_calls": [ - { - "id": TOOL_ID, - "function": { - "arguments": '{"question":"' + question + '"}', - "name": "GetInformation", - }, - "type": "function", - } - ] - }, - ), - ToolMessage(content=str(context), tool_call_id=TOOL_ID), - ] - return messages - - -chain = ( - RunnablePassthrough.assign(query=cypher_response) - | RunnablePassthrough.assign( - function_response=lambda x: get_function_response(x["query"], x["question"]) - ) - | response_prompt - | qa_llm - | StrOutputParser() -) - -# Add typing for input - - -class Question(BaseModel): - question: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/neo4j-cypher/pyproject.toml b/templates/neo4j-cypher/pyproject.toml deleted file mode 100644 index cd889d06451..00000000000 --- a/templates/neo4j-cypher/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "neo4j-cypher" -version = "0.1.0" -description = "Natural language interface for a Neo4j graph database" -authors = [ - "Tomaz Bratanic ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -neo4j = ">5.12" -openai = "<2" -langchain-openai = "^0.1.3" -langchain-community = "^0.0.33" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "neo4j_cypher" -export_attr = "chain" - -[tool.templates-hub] -use-case = "query" -author = "Neo4j" -integrations = ["Neo4j", "OpenAI"] -tags = ["search", "graph-database"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/neo4j-cypher/static/workflow.png b/templates/neo4j-cypher/static/workflow.png deleted file mode 100644 index efae6a31393..00000000000 Binary files a/templates/neo4j-cypher/static/workflow.png and /dev/null differ diff --git a/templates/neo4j-cypher/tests/__init__.py b/templates/neo4j-cypher/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/neo4j-generation/README.md b/templates/neo4j-generation/README.md deleted file mode 100644 index dff09155d0c..00000000000 --- a/templates/neo4j-generation/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# Neo4j AuraDB - generation - -This template pairs LLM-based knowledge graph extraction with `Neo4j AuraDB`, -a fully managed cloud graph database. - -You can create a free instance on [Neo4j Aura](https://neo4j.com/cloud/platform/aura-graph-database?utm_source=langchain&utm_content=langserve). 
- -When you initiate a free database instance, you'll receive credentials to access the database. - -This template is flexible and allows users to guide the extraction process by specifying a list of node labels and relationship types. - -For more details on the functionality and capabilities of this package, please refer to [this blog post](https://blog.langchain.dev/constructing-knowledge-graphs-from-text-using-openai-functions/). - -## Environment Setup - -You need to set the following environment variables: - -``` -OPENAI_API_KEY= -NEO4J_URI= -NEO4J_USERNAME= -NEO4J_PASSWORD= -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package neo4j-generation -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add neo4j-generation -``` - -And add the following code to your `server.py` file: -```python -from neo4j_generation.chain import chain as neo4j_generation_chain - -add_routes(app, neo4j_generation_chain, path="/neo4j-generation") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/neo4j-generation/playground](http://127.0.0.1:8000/neo4j-generation/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/neo4j-generation") -``` diff --git a/templates/neo4j-generation/main.py b/templates/neo4j-generation/main.py deleted file mode 100644 index 578a18013fe..00000000000 --- a/templates/neo4j-generation/main.py +++ /dev/null @@ -1,13 +0,0 @@ -from neo4j_generation.chain import chain - -if __name__ == "__main__": - text = "Harrison works at LangChain, which is located in San Francisco" - allowed_nodes = ["Person", "Organization", "Location"] - allowed_relationships = ["WORKS_AT", "LOCATED_IN"] - print( - chain( - text, - allowed_nodes=allowed_nodes, - allowed_relationships=allowed_relationships, - ) - ) diff --git a/templates/neo4j-generation/neo4j_generation/__init__.py b/templates/neo4j-generation/neo4j_generation/__init__.py deleted file mode 100644 index 87f97d321fe..00000000000 --- a/templates/neo4j-generation/neo4j_generation/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neo4j_generation.chain import chain - -__all__ = ["chain"] diff --git a/templates/neo4j-generation/neo4j_generation/chain.py b/templates/neo4j-generation/neo4j_generation/chain.py deleted file mode 100644 index 4235fa3d01f..00000000000 --- a/templates/neo4j-generation/neo4j_generation/chain.py +++ /dev/null @@ -1,44 +0,0 @@ -from typing import List, Optional - -from langchain_community.graphs import Neo4jGraph -from 
langchain_core.documents import Document -from langchain_experimental.graph_transformers import LLMGraphTransformer -from langchain_openai import ChatOpenAI - -graph = Neo4jGraph() - - -llm = ChatOpenAI(model="gpt-3.5-turbo-16k", temperature=0) - - -def chain( - text: str, - allowed_nodes: Optional[List[str]] = None, - allowed_relationships: Optional[List[str]] = None, -) -> str: - """ - Process the given text to extract graph data and constructs a graph document from the extracted information. - The constructed graph document is then added to the graph. - - Parameters: - - text (str): The input text from which the information will be extracted to construct the graph. - - allowed_nodes (Optional[List[str]]): A list of node labels to guide the extraction process. - If not provided, extraction won't have specific restriction on node labels. - - allowed_relationships (Optional[List[str]]): A list of relationship types to guide the extraction process. - If not provided, extraction won't have specific restriction on relationship types. - - Returns: - str: A confirmation message indicating the completion of the graph construction. - """ # noqa: E501 - # Construct document based on text - documents = [Document(page_content=text)] - # Extract graph data using OpenAI functions - llm_graph_transformer = LLMGraphTransformer( - llm=llm, - allowed_nodes=allowed_nodes, - allowed_relationships=allowed_relationships, - ) - graph_documents = llm_graph_transformer.convert_to_graph_documents(documents) - # Store information into a graph - graph.add_graph_documents(graph_documents) - return "Graph construction finished" diff --git a/templates/neo4j-generation/pyproject.toml b/templates/neo4j-generation/pyproject.toml deleted file mode 100644 index 6b1809363a1..00000000000 --- a/templates/neo4j-generation/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[tool.poetry] -name = "neo4j-generation" -version = "0.0.1" -description = "Knowledge graph extraction with Neo4j AuraDB" -authors = [ - "Tomaz Bratanic ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -neo4j = "^5.12.0" -langchain-openai = "^0.0.8" -langchain-community = "^0.0.28" -langchain-experimental = "^0.0.54" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "neo4j_generation.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "extraction" -author = "Neo4j" -integrations = ["Neo4j", "OpenAI"] -tags = ["graph-database", "search"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/neo4j-generation/tests/__init__.py b/templates/neo4j-generation/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/neo4j-parent/README.md b/templates/neo4j-parent/README.md deleted file mode 100644 index 630866c57dc..00000000000 --- a/templates/neo4j-parent/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# Neo4j - hybrid parent-child retrieval - -This template allows you to balance precise embeddings and context retention -by splitting documents into smaller chunks and retrieving their original -or larger text information. - -Using a `Neo4j` vector index, the package queries child nodes using -vector similarity search and retrieves the corresponding parent's text -by defining an appropriate `retrieval_query` parameter. 
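
To make the retrieval step above concrete, the same parent-child pattern can be sketched with `Neo4jVector.from_existing_index` and a custom `retrieval_query` that walks from each matched child chunk to its parent and returns the parent's text. This is a minimal illustrative sketch under stated assumptions, not the package's own chain: the `HAS_CHILD` relationship, the `parent.text` property, and the sample question are assumptions made for the example, while the index name `retrieval` matches the one created by the ingest step described below, and the Neo4j credentials are read from the environment variables listed in this README.

```python
from langchain_community.vectorstores import Neo4jVector
from langchain_openai import OpenAIEmbeddings

# Walk from each matched child chunk ("node") to its parent and return the
# parent's full text. Grouping on the parent with max(score) deduplicates
# parents that were reached through several matching children.
# NOTE: HAS_CHILD and parent.text are illustrative assumptions, not
# necessarily the schema used by this package.
retrieval_query = """
MATCH (node)<-[:HAS_CHILD]-(parent)
WITH parent, max(score) AS score
RETURN parent.text AS text, score, {} AS metadata
"""

vectorstore = Neo4jVector.from_existing_index(
    OpenAIEmbeddings(),
    index_name="retrieval",  # vector index over the child-chunk embeddings
    retrieval_query=retrieval_query,
)
retriever = vectorstore.as_retriever()

# Example query; any question over the ingested text would work here.
docs = retriever.invoke("Who wrote Dune?")
```

Returning the parent rather than the matched child is the design choice that balances precise child-level embeddings with the larger context an LLM needs to answer well.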
- -## Environment Setup - -You need to define the following environment variables - -``` -OPENAI_API_KEY= -NEO4J_URI= -NEO4J_USERNAME= -NEO4J_PASSWORD= -``` - -## Populating with data - -If you want to populate the DB with some example data, you can run `python ingest.py`. -The script process and stores sections of the text from the file `dune.txt` into a Neo4j graph database. -First, the text is divided into larger chunks ("parents") and then further subdivided into smaller chunks ("children"), where both parent and child chunks overlap slightly to maintain context. -After storing these chunks in the database, embeddings for the child nodes are computed using OpenAI's embeddings and stored back in the graph for future retrieval or analysis. -Additionally, a vector index named `retrieval` is created for efficient querying of these embeddings. - - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package neo4j-parent -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add neo4j-parent -``` - -And add the following code to your `server.py` file: -```python -from neo4j_parent import chain as neo4j_parent_chain - -add_routes(app, neo4j_parent_chain, path="/neo4j-parent") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/neo4j-parent/playground](http://127.0.0.1:8000/neo4j-parent/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/neo4j-parent") -``` diff --git a/templates/neo4j-parent/dune.txt b/templates/neo4j-parent/dune.txt deleted file mode 100644 index 3417aaac3fc..00000000000 --- a/templates/neo4j-parent/dune.txt +++ /dev/null @@ -1,228 +0,0 @@ -Dune is a 1965 epic science fiction novel by American author Frank Herbert, originally published as two separate serials in Analog magazine. It tied with Roger Zelazny's This Immortal for the Hugo Award in 1966 and it won the inaugural Nebula Award for Best Novel. It is the first installment of the Dune Chronicles. It is one of the world's best-selling science fiction novels.Dune is set in the distant future in a feudal interstellar society in which various noble houses control planetary fiefs. It tells the story of young Paul Atreides, whose family accepts the stewardship of the planet Arrakis. While the planet is an inhospitable and sparsely populated desert wasteland, it is the only source of melange, or "spice", a drug that extends life and enhances mental abilities. 
Melange is also necessary for space navigation, which requires a kind of multidimensional awareness and foresight that only the drug provides. As melange can only be produced on Arrakis, control of the planet is a coveted and dangerous undertaking. The story explores the multilayered interactions of politics, religion, ecology, technology, and human emotion, as the factions of the empire confront each other in a struggle for the control of Arrakis and its spice. -Herbert wrote five sequels: Dune Messiah, Children of Dune, God Emperor of Dune, Heretics of Dune, and Chapterhouse: Dune. Following Herbert's death in 1986, his son Brian Herbert and author Kevin J. Anderson continued the series in over a dozen additional novels since 1999. -Adaptations of the novel to cinema have been notoriously difficult and complicated. In the 1970s, cult filmmaker Alejandro Jodorowsky attempted to make a film based on the novel. After three years of development, the project was canceled due to a constantly growing budget. In 1984, a film adaptation directed by David Lynch was released to mostly negative responses from critics and failure at the box office, although it later developed a cult following. The book was also adapted into the 2000 Sci-Fi Channel miniseries Frank Herbert's Dune and its 2003 sequel Frank Herbert's Children of Dune (the latter of which combines the events of Dune Messiah and Children of Dune). A second film adaptation directed by Denis Villeneuve was released on October 21, 2021, to positive reviews. It grossed $401 million worldwide and went on to be nominated for ten Academy Awards, winning six. Villeneuve's film covers roughly the first half of the original novel; a sequel, which will cover the remaining story, will be released in March 2024. -The series has also been used as the basis for several board, role-playing, and video games. -Since 2009, the names of planets from the Dune novels have been adopted for the real-life nomenclature of plains and other features on Saturn's moon Titan. - - -== Origins == -After his novel The Dragon in the Sea was published in 1957, Herbert traveled to Florence, Oregon, at the north end of the Oregon Dunes. Here, the United States Department of Agriculture was attempting to use poverty grasses to stabilize the sand dunes. Herbert claimed in a letter to his literary agent, Lurton Blassingame, that the moving dunes could "swallow whole cities, lakes, rivers, highways." Herbert's article on the dunes, "They Stopped the Moving Sands", was never completed (and only published decades later in The Road to Dune), but its research sparked Herbert's interest in ecology and deserts.Herbert further drew inspiration from Native American mentors like "Indian Henry" (as Herbert referred to the man to his son; likely a Henry Martin of the Hoh tribe) and Howard Hansen. Both Martin and Hansen grew up on the Quileute reservation near Herbert's hometown. According to historian Daniel Immerwahr, Hansen regularly shared his writing with Herbert. "White men are eating the earth," Hansen told Herbert in 1958, after sharing a piece on the effect of logging on the Quileute reservation. "They're gonna turn this whole planet into a wasteland, just like North Africa." The world could become a "big dune," Herbert responded in agreement.Herbert was also interested in the idea of the superhero mystique and messiahs. 
He believed that feudalism was a natural condition humans fell into, where some led and others gave up the responsibility of making decisions and just followed orders. He found that desert environments have historically given birth to several major religions with messianic impulses. He decided to join his interests together so he could play religious and ecological ideas against each other. In addition, he was influenced by the story of T. E. Lawrence and the "messianic overtones" in Lawrence's involvement in the Arab Revolt during World War I. In an early version of Dune, the hero was actually very similar to Lawrence of Arabia, but Herbert decided the plot was too straightforward and added more layers to his story.Herbert drew heavy inspiration also from Lesley Blanch's The Sabres of Paradise (1960), a narrative history recounting a mid-19th century conflict in the Caucasus between rugged Islamized caucasian tribes and the expansive Russian Empire. Language used on both sides of that conflict become terms in Herbert's world—chakobsa, a Caucasian hunting language, becomes a battle language of humans spread across the galaxy; kanly, a word for blood feud in the 19th century Caucasus, represents a feud between Dune's noble Houses; sietch and tabir are both words for camp borrowed from Ukrainian Cossacks (of the Pontic–Caspian steppe).Herbert also borrowed some lines which Blanch stated were Caucasian proverbs. "To kill with the point lacked artistry", used by Blanch to describe the Caucasus peoples' love of swordsmanship, becomes in Dune "Killing with the tip lacks artistry", a piece of advice given to a young Paul during his training. "Polish comes from the city, wisdom from the hills", a Caucasian aphorism, turns into a desert expression: "Polish comes from the cities, wisdom from the desert". - -Another significant source of inspiration for Dune was Herbert's experiences with psilocybin and his hobby of cultivating mushrooms, according to mycologist Paul Stamets's account of meeting Herbert in the 1980s:Frank went on to tell me that much of the premise of Dune—the magic spice (spores) that allowed the bending of space (tripping), the giant sand worms (maggots digesting mushrooms), the eyes of the Fremen (the cerulean blue of Psilocybe mushrooms), the mysticism of the female spiritual warriors, the Bene Gesserits (influenced by the tales of Maria Sabina and the sacred mushroom cults of Mexico)—came from his perception of the fungal life cycle, and his imagination was stimulated through his experiences with the use of magic mushrooms.Herbert spent the next five years researching, writing, and revising. He published a three-part serial Dune World in the monthly Analog, from December 1963 to February 1964. The serial was accompanied by several illustrations that were not published again. After an interval of a year, he published the much slower-paced five-part The Prophet of Dune in the January–May 1965 issues. The first serial became "Book 1: Dune" in the final published Dune novel, and the second serial was divided into "Book Two: Muad'dib" and "Book Three: The Prophet". The serialized version was expanded, reworked, and submitted to more than twenty publishers, each of whom rejected it. The novel, Dune, was finally accepted and published in August 1965 by Chilton Books, a printing house better known for publishing auto repair manuals. Sterling Lanier, an editor at Chilton, had seen Herbert's manuscript and had urged his company to take a risk in publishing the book. 
However, the first printing, priced at $5.95 (equivalent to $55.25 in 2022), did not sell well and was poorly received by critics as being atypical of science fiction at the time. Chilton considered the publication of Dune a write-off and Lanier was fired. Over the course of time, the book gained critical acclaim, and its popularity spread by word-of-mouth to allow Herbert to start working full time on developing the sequels to Dune, elements of which were already written alongside Dune.At first Herbert considered using Mars as setting for his novel, but eventually decided to use a fictional planet instead. His son Brian said that "Readers would have too many preconceived ideas about that planet, due to the number of stories that had been written about it."Herbert dedicated his work "to the people whose labors go beyond ideas into the realm of 'real materials'—to the dry-land ecologists, wherever they may be, in whatever time they work, this effort at prediction is dedicated in humility and admiration." - - -== Plot == -Duke Leto Atreides of House Atreides, ruler of the ocean planet Caladan, is assigned by the Padishah Emperor Shaddam IV to serve as fief ruler of the planet Arrakis. Although Arrakis is a harsh and inhospitable desert planet, it is of enormous importance because it is the only planetary source of melange, or the "spice", a unique and incredibly valuable substance that extends human youth, vitality and lifespan. It is also through the consumption of spice that Spacing Guild Navigators are able to effect safe interstellar travel. Shaddam, jealous of Duke Leto Atreides's rising popularity in the Landsraad, sees House Atreides as a potential future rival and threat, so conspires with House Harkonnen, the former stewards of Arrakis and the longstanding enemies of House Atreides, to destroy Leto and his family after their arrival. Leto is aware his assignment is a trap of some kind, but is compelled to obey the Emperor's orders anyway. -Leto's concubine Lady Jessica is an acolyte of the Bene Gesserit, an exclusively female group that pursues mysterious political aims and wields seemingly superhuman physical and mental abilities, such as the ability to control their bodies down to the cellular level, and also decide the sex of their children. Though Jessica was instructed by the Bene Gesserit to bear a daughter as part of their breeding program, out of love for Leto she bore a son, Paul. From a young age, Paul has been trained in warfare by Leto's aides, the elite soldiers Duncan Idaho and Gurney Halleck. Thufir Hawat, the Duke's Mentat (human computers, able to store vast amounts of data and perform advanced calculations on demand), has instructed Paul in the ways of political intrigue. Jessica has also trained her son in Bene Gesserit disciplines. -Paul's prophetic dreams interest Jessica's superior, the Reverend Mother Gaius Helen Mohiam, who subjects Paul to the deadly gom jabbar test. Holding a poisonous needle to his neck ready to strike should he be unable to resist the impulse to withdraw his hand from the nerve induction box, she tests Paul's self-control to overcome the extreme psychological pain he is being subjected to through the box. -Leto, Jessica, and Paul travel with their household to occupy Arrakeen, the capital on Arrakis formerly held by House Harkonnen. 
Leto learns of the dangers involved in harvesting the spice, which is protected by giant sandworms, and seeks to negotiate with the planet's native Fremen people, seeing them as a valuable ally rather than foes. Soon after the Atreides's arrival, Harkonnen forces attack, joined by the Emperor's ferocious Sardaukar troops in disguise. Leto is betrayed by his personal physician, the Suk doctor Wellington Yueh, who delivers a drugged Leto to the Baron Vladimir Harkonnen and his twisted Mentat, Piter De Vries. Yueh, however, arranges for Jessica and Paul to escape into the desert, where they are presumed dead by the Harkonnens. Yueh replaces one of Leto's teeth with a poison gas capsule, hoping Leto can kill the Baron during their encounter. The Baron narrowly avoids the gas due to his shield, which kills Leto, De Vries, and the others in the room. The Baron forces Hawat to take over De Vries's position by dosing him with a long-lasting, fatal poison and threatening to withhold the regular antidote doses unless he obeys. While he follows the Baron's orders, Hawat works secretly to undermine the Harkonnens. -Having fled into the desert, Paul is exposed to high concentrations of spice and has visions through which he realizes he has significant powers (as a result of the Bene Gesserit breeding scheme). He foresees potential futures in which he lives among the planet's native Fremen before leading them on a Holy Jihad across the known universe. -It is revealed Jessica is the daughter of Baron Harkonnen, a secret kept from her by the Bene Gesserit. After being captured by Fremen, Paul and Jessica are accepted into the Fremen community of Sietch Tabr, and teach the Fremen the Bene Gesserit fighting technique known as the "weirding way". Paul proves his manhood by killing a Fremen named Jamis in a ritualistic crysknife fight and chooses the Fremen name Muad'Dib, while Jessica opts to undergo a ritual to become a Reverend Mother by drinking the poisonous Water of Life. Pregnant with Leto's daughter, she inadvertently causes the unborn child, Alia, to become infused with the same powers in the womb. Paul takes a Fremen lover, Chani, and has a son with her, Leto II. -Two years pass and Paul's powerful prescience manifests, which confirms for the Fremen that he is their prophesied messiah, a legend planted by the Bene Gesserit's Missionaria Protectiva. Paul embraces his father's belief that the Fremen could be a powerful fighting force to take back Arrakis, but also sees that if he does not control them, their jihad could consume the entire universe. Word of the new Fremen leader reaches both Baron Harkonnen and the Emperor as spice production falls due to their increasingly destructive raids. The Baron encourages his brutish nephew Glossu Rabban to rule with an iron fist, hoping the contrast with his shrewder nephew Feyd-Rautha will make the latter popular among the people of Arrakis when he eventually replaces Rabban. The Emperor, suspecting the Baron of trying to create troops more powerful than the Sardaukar to seize power, sends spies to monitor activity on Arrakis. Hawat uses the opportunity to sow seeds of doubt in the Baron about the Emperor's true plans, putting further strain on their alliance. -Gurney, having survived the Harkonnen coup becomes a smuggler, reuniting with Paul and Jessica after a Fremen raid on his harvester. Believing Jessica to be the traitor, Gurney threatens to kill her, but is stopped by Paul. 
Paul did not foresee Gurney's attack, and concludes he must increase his prescience by drinking the Water of Life, which is traditionally fatal to males. Paul falls into unconsciousness for three weeks after drinking the poison, but when he wakes, he has clairvoyance across time and space: he is the Kwisatz Haderach, the ultimate goal of the Bene Gesserit breeding program. -Paul senses the Emperor and Baron are amassing fleets around Arrakis to quell the Fremen rebellion, and prepares the Fremen for a major offensive against the Harkonnen troops. The Emperor arrives with the Baron on Arrakis. The Emperor's troops seize a Fremen outpost, killing many including young Leto II, while Alia is captured and taken to the Emperor. Under cover of an electric storm, which shorts out the Emperor's troops' defensive shields, Paul and the Fremen, riding giant sandworms, assault the capital while Alia assassinates the Baron and escapes. The Fremen quickly defeat both the Harkonnen and Sardaukar troops. -Paul faces the Emperor, threatening to destroy spice production forever unless Shaddam abdicates the throne. Feyd-Rautha attempts to stop Paul by challenging him to a ritualistic knife fight, during which he attempts to cheat and kill Paul with a poison spur in his belt. Paul gains the upper hand and kills him. The Emperor reluctantly cedes the throne to Paul and promises his daughter Princess Irulan's hand in marriage. As Paul takes control of the Empire, he realizes that while he has achieved his goal, he is no longer able to stop the Fremen jihad, as their belief in him is too powerful to restrain. - - -== Characters == -House AtreidesPaul Atreides, the Duke's son, and main character of the novel -Duke Leto Atreides, head of House Atreides -Lady Jessica, Bene Gesserit and concubine of the Duke, mother of Paul and Alia -Alia Atreides, Paul's younger sister -Thufir Hawat, Mentat and Master of Assassins to House Atreides -Gurney Halleck, staunchly loyal troubadour warrior of the Atreides -Duncan Idaho, Swordmaster for House Atreides, graduate of the Ginaz School -Wellington Yueh, Suk doctor for the Atreides who is secretly working for House HarkonnenHouse HarkonnenBaron Vladimir Harkonnen, head of House Harkonnen -Piter De Vries, twisted Mentat -Feyd-Rautha, nephew and heir-presumptive of the Baron -Glossu "Beast" Rabban, also called Rabban Harkonnen, older nephew of the Baron -Iakin Nefud, Captain of the GuardHouse CorrinoShaddam IV, Padishah Emperor of the Known Universe (the Imperium) -Princess Irulan, Shaddam's eldest daughter and heir, also a historian -Count Fenring, the Emperor's closest friend, advisor, and "errand boy"Bene GesseritReverend Mother Gaius Helen Mohiam, Proctor Superior of the Bene Gesserit school and the Emperor's Truthsayer -Lady Margot Fenring, Bene Gesserit wife of Count FenringFremenThe Fremen, native inhabitants of Arrakis -Stilgar, Fremen leader of Sietch Tabr -Chani, Paul's Fremen concubine and a Sayyadina (female acolyte) of Sietch Tabr -Dr. 
Liet-Kynes, the Imperial Planetologist on Arrakis and father of Chani, as well as a revered figure among the Fremen -The Shadout Mapes, head housekeeper of imperial residence on Arrakis -Jamis, Fremen killed by Paul in ritual duel -Harah, wife of Jamis and later servant to Paul who helps raise Alia among the Fremen -Reverend Mother Ramallo, religious leader of Sietch TabrSmugglersEsmar Tuek, a powerful smuggler and the father of Staban Tuek -Staban Tuek, the son of Esmar Tuek and a powerful smuggler who befriends and takes in Gurney Halleck and his surviving men after the attack on the Atreides - - -== Themes and influences == -The Dune series is a landmark of science fiction. Herbert deliberately suppressed technology in his Dune universe so he could address the politics of humanity, rather than the future of humanity's technology. For example, a key pre-history event to the novel's present is the "Butlerian Jihad", in which all robots and computers were destroyed, eliminating these common elements to science fiction from the novel as to allow focus on humanity. Dune considers the way humans and their institutions might change over time. Director John Harrison, who adapted Dune for Syfy's 2000 miniseries, called the novel a universal and timeless reflection of "the human condition and its moral dilemmas", and said: - -A lot of people refer to Dune as science fiction. I never do. I consider it an epic adventure in the classic storytelling tradition, a story of myth and legend not unlike the Morte d'Arthur or any messiah story. It just happens to be set in the future ... The story is actually more relevant today than when Herbert wrote it. In the 1960s, there were just these two colossal superpowers duking it out. Today we're living in a more feudal, corporatized world more akin to Herbert's universe of separate families, power centers and business interests, all interrelated and kept together by the one commodity necessary to all. -But Dune has also been called a mix of soft and hard science fiction since "the attention to ecology is hard, the anthropology and the psychic abilities are soft." Hard elements include the ecology of Arrakis, suspensor technology, weapon systems, and ornithopters, while soft elements include issues relating to religion, physical and mental training, cultures, politics, and psychology.Herbert said Paul's messiah figure was inspired by the Arthurian legend, and that the scarcity of water on Arrakis was a metaphor for oil, as well as air and water itself, and for the shortages of resources caused by overpopulation. Novelist Brian Herbert, his son and biographer, wrote: - -Dune is a modern-day conglomeration of familiar myths, a tale in which great sandworms guard a precious treasure of melange, the geriatric spice that represents, among other things, the finite resource of oil. The planet Arrakis features immense, ferocious worms that are like dragons of lore, with "great teeth" and a "bellows breath of cinnamon." This resembles the myth described by an unknown English poet in Beowulf, the compelling tale of a fearsome fire dragon who guarded a great treasure hoard in a lair under cliffs, at the edge of the sea. The desert of Frank Herbert's classic novel is a vast ocean of sand, with giant worms diving into the depths, the mysterious and unrevealed domain of Shai-hulud. Dune tops are like the crests of waves, and there are powerful sandstorms out there, creating extreme danger. 
On Arrakis, life is said to emanate from the Maker (Shai-hulud) in the desert-sea; similarly all life on Earth is believed to have evolved from our oceans. Frank Herbert drew parallels, used spectacular metaphors, and extrapolated present conditions into world systems that seem entirely alien at first blush. But close examination reveals they aren't so different from systems we know … and the book characters of his imagination are not so different from people familiar to us. -Each chapter of Dune begins with an epigraph excerpted from the fictional writings of the character Princess Irulan. In forms such as diary entries, historical commentary, biography, quotations and philosophy, these writings set tone and provide exposition, context and other details intended to enhance understanding of Herbert's complex fictional universe and themes. They act as foreshadowing and invite the reader to keep reading to close the gap between what the epigraph says and what is happening in the main narrative. The epigraphs also give the reader the feeling that the world they are reading about is epically distanced, since Irulan writes about an idealized image of Paul as if he had already passed into memory. Brian Herbert wrote: "Dad told me that you could follow any of the novel's layers as you read it, and then start the book all over again, focusing on an entirely different layer. At the end of the book, he intentionally left loose ends and said he did this to send the readers spinning out of the story with bits and pieces of it still clinging to them, so that they would want to go back and read it again." - - -=== Middle-Eastern and Islamic references === -Due to the similarities between some of Herbert's terms and ideas and actual words and concepts in the Arabic language, as well as the series' "Islamic undertones" and themes, a Middle-Eastern influence on Herbert's works has been noted repeatedly. In his descriptions of the Fremen culture and language, Herbert uses both authentic Arabic words and Arabic-sounding words. For example, one of the names for the sandworm, Shai-hulud, is derived from Arabic: شيء خلود, romanized: šayʾ ḫulūd, lit. 'immortal thing' or Arabic: شيخ خلود, romanized: šayḫ ḫulūd, lit. 'old man of eternity'. The title of the Fremen housekeeper, the Shadout Mapes, is borrowed from the Arabic: شادوف‎, romanized: šādūf, the Egyptian term for a device used to raise water. In particular, words related to the messianic religion of the Fremen, first implanted by the Bene Gesserit, are taken from Arabic, including Muad'Dib (from Arabic: مؤدب, romanized: muʾaddib, lit. 'educator'), Usul (from Arabic: أصول, romanized: ʾuṣūl, lit. 'fundamental principles'), Shari-a (from Arabic: شريعة, romanized: šarīʿa, lit. 'sharia; path'), Shaitan (from Arabic: شيطان, romanized: šayṭān, lit. 'Shaitan; devil; fiend', and jinn (from Arabic: جن, romanized: ǧinn, lit. 'jinn; spirit; demon; mythical being'). It is likely Herbert relied on second-hand resources such as phrasebooks and desert adventure stories to find these Arabic words and phrases for the Fremen. They are meaningful and carefully chosen, and help create an "imagined desert culture that resonates with exotic sounds, enigmas, and pseudo-Islamic references" and has a distinctly Bedouin aesthetic.As a foreigner who adopts the ways of a desert-dwelling people and then leads them in a military capacity, Paul Atreides bears many similarities to the historical T. E. Lawrence. 
His 1962 biopic Lawrence of Arabia has also been identified as a potential influence. The Sabres of Paradise (1960) has also been identified as a potential influence upon Dune, with its depiction of Imam Shamil and the Islamic culture of the Caucasus inspiring some of the themes, characters, events and terminology of Dune.The environment of the desert planet Arrakis was primarily inspired by the environments of the Middle East. Similarly Arrakis as a bioregion is presented as a particular kind of political site. Herbert has made it resemble a desertified petrostate area. The Fremen people of Arrakis were influenced by the Bedouin tribes of Arabia, and the Mahdi prophecy originates from Islamic eschatology. Inspiration is also adopted from medieval historian Ibn Khaldun's cyclical history and his dynastic concept in North Africa, hinted at by Herbert's reference to Khaldun's book Kitāb al-ʿibar ("The Book of Lessons"). The fictionalized version of the "Kitab al-ibar" in Dune is a combination of a Fremen religious manual and a desert survival book. - - -==== Additional language and historic influences ==== -In addition to Arabic, Dune derives words and names from a variety of other languages, including Hebrew, Navajo, Latin, Dutch ("Landsraad"), Chakobsa, the Nahuatl language of the Aztecs, Greek, Persian, Sanskrit ("prana bindu", "prajna"), Russian, Turkish, Finnish, and Old English. Bene Gesserit is simply the Latin for "It will have been well fought", also carrying the sense of "It will have been well managed", which stands as a statement of the order's goal and as a pledge of faithfulness to that goal. Critics tend to miss the literal meaning of the phrase, some positing that the term is derived from the Latin meaning "it will have been well borne", which interpretation is not well supported by their doctrine in the story.Through the inspiration from The Sabres of Paradise, there are also allusions to the tsarist-era Russian nobility and Cossacks. Frank Herbert stated that bureaucracy that lasted long enough would become a hereditary nobility, and a significant theme behind the aristocratic families in Dune was "aristocratic bureaucracy" which he saw as analogous to the Soviet Union. - - -=== Environmentalism and ecology === -Dune has been called the "first planetary ecology novel on a grand scale". Herbert hoped it would be seen as an "environmental awareness handbook" and said the title was meant to "echo the sound of 'doom'". It was reviewed in the best selling countercultural Whole Earth Catalog in 1968 as a "rich re-readable fantasy with clear portrayal of the fierce environment it takes to cohere a community".After the publication of Silent Spring by Rachel Carson in 1962, science fiction writers began treating the subject of ecological change and its consequences. Dune responded in 1965 with its complex descriptions of Arrakis life, from giant sandworms (for whom water is deadly) to smaller, mouse-like life forms adapted to live with limited water. Dune was followed in its creation of complex and unique ecologies by other science fiction books such as A Door into Ocean (1986) and Red Mars (1992). 
Environmentalists have pointed out that Dune's popularity as a novel depicting a planet as a complex—almost living—thing, in combination with the first images of Earth from space being published in the same time period, strongly influenced environmental movements such as the establishment of the international Earth Day.While the genre of climate fiction was popularized in the 2010s in response to real global climate change, Dune as well as other early science fiction works from authors like J. G. Ballard (The Drowned World) and Kim Stanley Robinson (the Mars trilogy) have retroactively been considered pioneering examples of the genre. - - -=== Declining empires === -The Imperium in Dune contains features of various empires in Europe and the Near East, including the Roman Empire, Holy Roman Empire, and Ottoman Empire. Lorenzo DiTommaso compared Dune's portrayal of the downfall of a galactic empire to Edward Gibbon's Decline and Fall of the Roman Empire, which argues that Christianity allied with the profligacy of the Roman elite led to the fall of Ancient Rome. In "The Articulation of Imperial Decadence and Decline in Epic Science Fiction" (2007), DiTommaso outlines similarities between the two works by highlighting the excesses of the Emperor on his home planet of Kaitain and of the Baron Harkonnen in his palace. The Emperor loses his effectiveness as a ruler through an excess of ceremony and pomp. The hairdressers and attendants he brings with him to Arrakis are even referred to as "parasites". The Baron Harkonnen is similarly corrupt and materially indulgent. Gibbon's Decline and Fall partly blames the fall of Rome on the rise of Christianity. Gibbon claimed that this exotic import from a conquered province weakened the soldiers of Rome and left it open to attack. The Emperor's Sardaukar fighters are little match for the Fremen of Dune not only because of the Sardaukar's overconfidence and the fact that Jessica and Paul have trained the Fremen in their battle tactics, but because of the Fremen's capacity for self-sacrifice. The Fremen put the community before themselves in every instance, while the world outside wallows in luxury at the expense of others.The decline and long peace of the Empire sets the stage for revolution and renewal by genetic mixing of successful and unsuccessful groups through war, a process culminating in the Jihad led by Paul Atreides, described by Frank Herbert as depicting "war as a collective orgasm" (drawing on Norman Walter's 1950 The Sexual Cycle of Human Warfare), themes that would reappear in God Emperor of Dune's Scattering and Leto II's all-female Fish Speaker army. - - -=== Gender dynamics === -Gender dynamics are complex in Dune. Within the Fremen sietch communities, women have almost full equality. They carry weapons and travel in raiding parties with men, fighting when necessary alongside the men. They can take positions of leadership as a Sayyadina or as a Reverend Mother (if she can survive the ritual of ingesting the Water of Life.) Both of these sietch religious leaders are routinely consulted by the all-male Council and can have a decisive voice in all matters of sietch life, security and internal politics. They are also protected by the entire community. Due to the high mortality rate among their men, women outnumber men in most sietches. Polygamy is common, and sexual relationships are voluntary and consensual; as Stilgar says to Jessica, "women among us are not taken against their will." 
-In contrast, the Imperial aristocracy leaves young women of noble birth very little agency. Frequently trained by the Bene Gesserit, they are raised to eventually marry other aristocrats. Marriages between Major and Minor Houses are political tools to forge alliances or heal old feuds; women are given very little say in the matter. Many such marriages are quietly maneuvered by the Bene Gesserit to produce offspring with some genetic characteristics needed by the sisterhood's human-breeding program. In addition, such highly-placed sisters were in a position to subtly influence their husbands' actions in ways that could move the politics of the Imperium toward Bene Gesserit goals. -The gom jabbar test of humanity is administered by the female Bene Gesserit order but rarely to males. The Bene Gesserit have seemingly mastered the unconscious and can play on the unconscious weaknesses of others using the Voice, yet their breeding program seeks after a male Kwisatz Haderach. Their plan is to produce a male who can "possess complete racial memory, both male and female," and look into the black hole in the collective unconscious that they fear. A central theme of the book is the connection, in Jessica's son, of this female aspect with his male aspect. This aligns with concepts in Jungian psychology, which features conscious/unconscious and taking/giving roles associated with males and females, as well as the idea of the collective unconscious. Paul's approach to power consistently requires his upbringing under the matriarchal Bene Gesserit, who operate as a long-dominating shadow government behind all of the great houses and their marriages or divisions. He is trained by Jessica in the Bene Gesserit Way, which includes prana-bindu training in nerve and muscle control and precise perception. Paul also receives Mentat training, thus helping prepare him to be a type of androgynous Kwisatz Haderach, a male Reverend Mother.In a Bene Gesserit test early in the book, it is implied that people are generally "inhuman" in that they irrationally place desire over self-interest and reason. This applies Herbert's philosophy that humans are not created equal, while equal justice and equal opportunity are higher ideals than mental, physical, or moral equality. - - -=== Heroism === -I am showing you the superhero syndrome and your own participation in it. -Throughout Paul's rise to superhuman status, he follows a plotline common to many stories describing the birth of a hero. He has unfortunate circumstances forced onto him. After a long period of hardship and exile, he confronts and defeats the source of evil in his tale. As such, Dune is representative of a general trend beginning in 1960s American science fiction in that it features a character who attains godlike status through scientific means. Eventually, Paul Atreides gains a level of omniscience which allows him to take over the planet and the galaxy, and causes the Fremen of Arrakis to worship him like a god. Author Frank Herbert said in 1979, "The bottom line of the Dune trilogy is: beware of heroes. Much better [to] rely on your own judgment, and your own mistakes." He wrote in 1985, "Dune was aimed at this whole idea of the infallible leader because my view of history says that mistakes made by a leader (or made in a leader's name) are amplified by the numbers who follow without question."Juan A. 
Prieto-Pablos says Herbert achieves a new typology with Paul's superpowers, differentiating the heroes of Dune from earlier heroes such as Superman, van Vogt's Gilbert Gosseyn and Henry Kuttner's telepaths. Unlike previous superheroes who acquire their powers suddenly and accidentally, Paul's are the result of "painful and slow personal progress." And unlike other superheroes of the 1960s—who are the exception among ordinary people in their respective worlds—Herbert's characters grow their powers through "the application of mystical philosophies and techniques." For Herbert, the ordinary person can develop incredible fighting skills (Fremen, Ginaz swordsmen and Sardaukar) or mental abilities (Bene Gesserit, Mentats, Spacing Guild Navigators). - - -=== Zen and religion === - -Early in his newspaper career, Herbert was introduced to Zen by two Jungian psychologists, Ralph and Irene Slattery, who "gave a crucial boost to his thinking". Zen teachings ultimately had "a profound and continuing influence on [Herbert's] work". Throughout the Dune series and particularly in Dune, Herbert employs concepts and forms borrowed from Zen Buddhism. The Fremen are referred to as Zensunni adherents, and many of Herbert's epigraphs are Zen-spirited. In "Dune Genesis", Frank Herbert wrote: - -What especially pleases me is to see the interwoven themes, the fugue like relationships of images that exactly replay the way Dune took shape. As in an Escher lithograph, I involved myself with recurrent themes that turn into paradox. The central paradox concerns the human vision of time. What about Paul's gift of prescience - the Presbyterian fixation? For the Delphic Oracle to perform, it must tangle itself in a web of predestination. Yet predestination negates surprises and, in fact, sets up a mathematically enclosed universe whose limits are always inconsistent, always encountering the unprovable. It's like a koan, a Zen mind breaker. It's like the Cretan Epimenides saying, "All Cretans are liars." -Brian Herbert called the Dune universe "a spiritual melting pot", noting that his father incorporated elements of a variety of religions, including Buddhism, Sufi mysticism and other Islamic belief systems, Catholicism, Protestantism, Judaism, and Hinduism. He added that Frank Herbert's fictional future in which "religious beliefs have combined into interesting forms" represents the author's solution to eliminating arguments between religions, each of which claimed to have "the one and only revelation." - - -=== Asimov's Foundation === -Tim O'Reilly suggests that Herbert also wrote Dune as a counterpoint to Isaac Asimov's Foundation series. In his monograph on Frank Herbert, O'Reilly wrote that "Dune is clearly a commentary on the Foundation trilogy. Herbert has taken a look at the same imaginative situation that provoked Asimov's classic—the decay of a galactic empire—and restated it in a way that draws on different assumptions and suggests radically different conclusions. The twist he has introduced into Dune is that the Mule, not the Foundation, is his hero." According to O'Reilly, Herbert bases the Bene Gesserit on the scientific shamans of the Foundation, though they use biological rather than statistical science. In contrast to the Foundation series and its praise of science and rationality, Dune proposes that the unconscious and unexpected are actually what are needed for humanity.Both Herbert and Asimov explore the implications of prescience (i.e., visions of the future) both psychologically and socially. 
The Foundation series deploys a broadly determinist approach to prescient vision rooted in mathematical reasoning on a macroscopic social level. Dune, by contrast, invents a biologically rooted power of prescience that becomes determinist when the user actively relies on it to navigate past an undefined threshold of detail. Herbert's eugenically produced and spice-enhanced prescience is also personalized to individual actors whose roles in later books constrain each other's visions, rendering the future more or less mutable as time progresses. In what might be a comment on Foundation, Herbert's most powerfully prescient being in God Emperor of Dune laments the boredom engendered by prescience, and values surprises, especially regarding one's death, as a psychological necessity. However, both works contain a similar theme of the restoration of civilization and seem to make the fundamental assumption that "political maneuvering, the need to control material resources, and friendship or mating bonds will be fundamentally the same in the future as they are now."
-
-
-== Critical reception ==
-Dune tied with Roger Zelazny's This Immortal for the Hugo Award in 1966 and won the inaugural Nebula Award for Best Novel. Reviews of the novel have been largely positive, and Dune is considered by some critics to be the best science fiction book ever written. The novel has been translated into dozens of languages, and has sold almost 20 million copies. Dune has been regularly cited as one of the world's best-selling science fiction novels. Arthur C. Clarke described Dune as "unique" and wrote, "I know nothing comparable to it except The Lord of the Rings." Robert A. Heinlein described the novel as "powerful, convincing, and most ingenious." It was described as "one of the monuments of modern science fiction" by the Chicago Tribune, and P. Schuyler Miller called Dune "one of the landmarks of modern science fiction ... an amazing feat of creation." The Washington Post described it as "a portrayal of an alien society more complete and deeply detailed than any other author in the field has managed ... a story absorbing equally for its action and philosophical vistas ... An astonishing science fiction phenomenon." Algis Budrys praised Dune for the vividness of its imagined setting, saying "The time lives. It breathes, it speaks, and Herbert has smelt it in his nostrils". He found that the novel, however, "turns flat and tails off at the end. ... [T]ruly effective villains simply simper and melt; fierce men and cunning statesmen and seeresses all bend before this new Messiah". Budrys faulted in particular Herbert's decision to kill Paul's infant son offstage, with no apparent emotional impact, saying "you cannot be so busy saving a world that you cannot hear an infant shriek". After criticizing unrealistic science fiction, Carl Sagan in 1978 listed Dune as among stories "that are so tautly constructed, so rich in the accommodating details of an unfamiliar society that they sweep me along before I have even a chance to be critical". The Louisville Times wrote, "Herbert's creation of this universe, with its intricate development and analysis of ecology, religion, politics, and philosophy, remains one of the supreme and seminal achievements in science fiction."
Writing for The New Yorker, Jon Michaud praised Herbert's "clever authorial decision" to exclude robots and computers ("two staples of the genre") from his fictional universe, but suggested that this may be one explanation why Dune lacks "true fandom among science-fiction fans" to the extent that it "has not penetrated popular culture in the way that The Lord of the Rings and Star Wars have". Tamara I. Hladik wrote that the story "crafts a universe where lesser novels promulgate excuses for sequels. All its rich elements are in balance and plausible—not the patchwork confederacy of made-up languages, contrived customs, and meaningless histories that are the hallmark of so many other, lesser novels." On November 5, 2019, BBC News included Dune on its list of the 100 most influential novels. J. R. R. Tolkien refused to review Dune, on the grounds that he disliked it "with some intensity" and thus felt it would be unfair to Herbert, another working author, if he gave an honest review of the book.
-
-
-== First edition prints and manuscripts ==
-The first edition of Dune is one of the most valuable in science fiction book collecting. Copies have been sold for more than $10,000 at auction. The Chilton first edition of the novel is 9+1⁄4 inches (235 mm) tall, with bluish green boards and a price of $5.95 on the dust jacket, and notes Toronto as the Canadian publisher on the copyright page. Up to this point, Chilton had been publishing only automobile repair manuals. California State University, Fullerton's Pollak Library has several of Herbert's draft manuscripts of Dune and other works, with the author's notes, in their Frank Herbert Archives.
-
-
-== Sequels and prequels ==
-
-After Dune proved to be a critical and financial success for Herbert, he was able to devote himself full time to writing additional novels in the series. He had already drafted parts of the second and third while writing Dune. The series included Dune Messiah (1969), Children of Dune (1976), God Emperor of Dune (1981), Heretics of Dune (1984), and Chapterhouse: Dune (1985), each sequentially continuing the narrative from Dune. Herbert died on February 11, 1986. Herbert's son, Brian Herbert, had found several thousand pages of notes left by his father that outlined ideas for other narratives related to Dune. Brian Herbert enlisted author Kevin J. Anderson to help build out prequel novels to the events of Dune. Brian Herbert's and Anderson's Dune prequels began publication in 1999, and have led to additional stories that take place between those of Frank Herbert's books. The notes for what would have been Dune 7 also enabled them to publish Hunters of Dune (2006) and Sandworms of Dune (2007), sequels to Frank Herbert's final novel Chapterhouse: Dune, which complete the chronological progression of his original series, and wrap up storylines that began in Heretics of Dune.
-
-
-== Adaptations ==
-
-Dune has been considered an "unfilmable" and "uncontainable" work to adapt from novel to film or other visual medium. As Wired described it, "It has four appendices and a glossary of its own gibberish, and its action takes place on two planets, one of which is a desert overrun by worms the size of airport runways. Lots of important people die or try to kill each other, and they're all tethered to about eight entangled subplots." There have been several attempts to achieve this difficult conversion with varying degrees of success.
- - -=== Early stalled attempts === -In 1971, the production company Apjac International (APJ) (headed by Arthur P. Jacobs) optioned the rights to film Dune. As Jacobs was busy with other projects, such as the sequel to Planet of the Apes, Dune was delayed for another year. Jacobs' first choice for director was David Lean, but he turned down the offer. Charles Jarrott was also considered to direct. Work was also under way on a script while the hunt for a director continued. Initially, the first treatment had been handled by Robert Greenhut, the producer who had lobbied Jacobs to make the movie in the first place, but subsequently Rospo Pallenberg was approached to write the script, with shooting scheduled to begin in 1974. However, Jacobs died in 1973. -In December 1974, a French consortium led by Jean-Paul Gibon purchased the film rights from APJ, with Alejandro Jodorowsky set to direct. In 1975, Jodorowsky planned to film the story as a 14-hour feature, set to star his own son Brontis Jodorowsky in the lead role of Paul Atreides, Salvador Dalí as Shaddam IV, Padishah Emperor, Amanda Lear as Princess Irulan, Orson Welles as Baron Vladimir Harkonnen, Gloria Swanson as Reverend Mother Gaius Helen Mohiam, David Carradine as Duke Leto Atreides, Geraldine Chaplin as Lady Jessica, Alain Delon as Duncan Idaho, Hervé Villechaize as Gurney Halleck, Udo Kier as Piter De Vries, and Mick Jagger as Feyd-Rautha. It was at first proposed to score the film with original music by Karlheinz Stockhausen, Henry Cow, and Magma; later on, the soundtrack was to be provided by Pink Floyd. Jodorowsky set up a pre-production unit in Paris consisting of Chris Foss, a British artist who designed covers for science fiction periodicals, Jean Giraud (Moebius), a French illustrator who created and also wrote and drew for Metal Hurlant magazine, and H. R. Giger. Moebius began designing creatures and characters for the film, while Foss was brought in to design the film's space ships and hardware. Giger began designing the Harkonnen Castle based on Moebius's storyboards. Dan O'Bannon was to head the special effects department.Dalí was cast as the Emperor. Dalí later demanded to be paid $100,000 per hour; Jodorowsky agreed, but tailored Dalí's part to be filmed in one hour, drafting plans for other scenes of the emperor to use a mechanical mannequin as substitute for Dalí. According to Giger, Dalí was "later invited to leave the film because of his pro-Franco statements". Just as the storyboards, designs, and script were finished, the financial backing dried up. Frank Herbert traveled to Europe in 1976 to find that $2 million of the $9.5 million budget had already been spent in pre-production, and that Jodorowsky's script would result in a 14-hour movie ("It was the size of a phone book", Herbert later recalled). Jodorowsky took creative liberties with the source material, but Herbert said that he and Jodorowsky had an amicable relationship. Jodorowsky said in 1985 that he found the Dune story mythical and had intended to recreate it rather than adapt the novel; though he had an "enthusiastic admiration" for Herbert, Jodorowsky said he had done everything possible to distance the author and his input from the project. Although Jodorowsky was embittered by the experience, he said the Dune project changed his life, and some of the ideas were used in his and Moebius's The Incal. O'Bannon entered a psychiatric hospital after the production failed, then worked on 13 scripts, the last of which became Alien. 
A 2013 documentary, Jodorowsky's Dune, was made about Jodorowsky's failed attempt at an adaptation.
-In 1976, Dino De Laurentiis acquired the rights from Gibon's consortium. De Laurentiis commissioned Herbert to write a new screenplay in 1978; the script Herbert turned in was 175 pages long, the equivalent of nearly three hours of screen time. De Laurentiis then hired director Ridley Scott in 1979, with Rudy Wurlitzer writing the screenplay and H. R. Giger retained from the Jodorowsky production; Scott and Giger had also just worked together on the film Alien, after O'Bannon recommended the artist. Scott intended to split the novel into two movies. He worked on three drafts of the script, using The Battle of Algiers as a point of reference, before moving on to direct another science fiction film, Blade Runner (1982). As he recalls, the pre-production process was slow, and finishing the project would have been even more time-intensive:
-
-But after seven months I dropped out of Dune, by then Rudy Wurlitzer had come up with a first-draft script which I felt was a decent distillation of Frank Herbert's. But I also realised Dune was going to take a lot more work—at least two and a half years' worth. And I didn't have the heart to attack that because my older brother Frank unexpectedly died of cancer while I was prepping the De Laurentiis picture. Frankly, that freaked me out. So I went to Dino and told him the Dune script was his.
-—From Ridley Scott: The Making of his Movies by Paul M. Sammon
-
-
-=== 1984 film by David Lynch ===
-
-In 1981, the nine-year film rights were set to expire. De Laurentiis re-negotiated the rights from the author, adding to them the rights to the Dune sequels (written and unwritten). After seeing The Elephant Man, De Laurentiis' daughter Raffaella decided that David Lynch should direct the movie. Around that time Lynch received several other directing offers, including Return of the Jedi. He agreed to direct Dune and write the screenplay even though he had not read the book, was not familiar with the story, and had never been interested in science fiction. Lynch worked on the script for six months with Eric Bergren and Christopher De Vore. The team produced two drafts of the script before splitting over creative differences. Lynch would subsequently work on five more drafts. Production was troubled by problems at the Mexican studio that hampered the film's timeline. Lynch ended up producing a nearly three-hour long film, but at the demands of Universal Pictures, the film's distributor, he cut it back to about two hours, hastily filming additional scenes to make up for some of the cut footage. This first film of Dune, directed by Lynch, was released in 1984, nearly 20 years after the book's publication. Though Herbert said the book's depth and symbolism seemed to intimidate many filmmakers, he was pleased with the film, saying that "They've got it. It begins as Dune does. And I hear my dialogue all the way through. There are some interpretations and liberties, but you're gonna come out knowing you've seen Dune." Reviews of the film were negative, saying that it was incomprehensible to those unfamiliar with the book, and that fans would be disappointed by the way it strayed from the book's plot. Upon release for television and other forms of home media, Universal opted to reintroduce much of the footage that Lynch had cut, creating a version over three hours long with extensive monologue exposition.
Lynch was extremely displeased with this move, demanded that Universal replace his name on these cuts with the pseudonym "Alan Smithee", and has generally distanced himself from the film since.
-
-
-=== 2000 miniseries by John Harrison ===
-
-In 2000, John Harrison adapted the novel into Frank Herbert's Dune, a miniseries which premiered on the American Sci-Fi Channel. As of 2004, the miniseries was one of the three highest-rated programs broadcast on the Sci-Fi Channel.
-
-
-=== Further film attempts ===
-In 2008, Paramount Pictures announced that they would produce a new film based on the book, with Peter Berg attached to direct. Producer Kevin Misher, who spent a year securing the rights from the Herbert estate, was to be joined by Richard Rubinstein and John Harrison (of both Sci-Fi Channel miniseries) as well as Sarah Aubrey and Mike Messina. The producers stated that they were going for a "faithful adaptation" of the novel, and considered "its theme of finite ecological resources particularly timely." Science fiction author Kevin J. Anderson and Frank Herbert's son Brian Herbert, who had together written multiple Dune sequels and prequels since 1999, were attached to the project as technical advisors. In October 2009, Berg dropped out of the project, later saying that it "for a variety of reasons wasn't the right thing" for him. Subsequently, with a script draft by Joshua Zetumer, Paramount reportedly sought a new director who could do the film for under $175 million. In 2010, Pierre Morel was signed on to direct, with screenwriter Chase Palmer incorporating Morel's vision of the project into Zetumer's original draft. By November 2010, Morel left the project. Paramount finally dropped plans for a remake in March 2011.
-
-
-=== Films by Denis Villeneuve ===
-
-In November 2016, Legendary Entertainment acquired the film and TV rights for Dune. Variety reported in December 2016 that Denis Villeneuve was in negotiations to direct the project, which was confirmed in February 2017. In April 2017, Legendary announced that Eric Roth would write the screenplay. Villeneuve explained in March 2018 that his adaptation would be split into two films, with the first installment scheduled to begin production in 2019. Casting includes Timothée Chalamet as Paul Atreides, Dave Bautista as Rabban, Stellan Skarsgård as Baron Harkonnen, Rebecca Ferguson as Lady Jessica, Charlotte Rampling as Reverend Mother Mohiam, Oscar Isaac as Duke Leto Atreides, Zendaya as Chani, Javier Bardem as Stilgar, Josh Brolin as Gurney Halleck, Jason Momoa as Duncan Idaho, David Dastmalchian as Piter De Vries, Chang Chen as Dr. Yueh, and Stephen Henderson as Thufir Hawat. Warner Bros. Pictures distributed the film, which had its initial premiere on September 3, 2021, at the Venice Film Festival, and wide release in both theaters and streaming on HBO Max on October 21, 2021, as part of Warner Bros.'s approach to handling the impact of the COVID-19 pandemic on the film industry. The film received "generally favorable reviews" on Metacritic. It has gone on to win multiple awards and was named one of the 10 best films of 2021 by the National Board of Review, as well as by the American Film Institute in its annual top 10 list. The film went on to be nominated for ten Academy Awards, winning six, the most wins of the night for any film in contention. A sequel, Dune: Part Two, was scheduled for release on November 3, 2023, but will now instead be released on March 15, 2024, amid the 2023 SAG-AFTRA strike.
- - -=== Audiobooks === -In 1993, Recorded Books Inc. released a 20-disc audiobook narrated by George Guidall. In 2007, Audio Renaissance released an audio book narrated by Simon Vance with some parts performed by Scott Brick, Orlagh Cassidy, Euan Morton, and other performers. - - -== Cultural influence == -Dune has been widely influential, inspiring numerous novels, music, films, television, games, and comic books. It is considered one of the greatest and most influential science fiction novels of all time, with numerous modern science fiction works such as Star Wars owing their existence to Dune. Dune has also been referenced in numerous other works of popular culture, including Star Trek, Chronicles of Riddick, The Kingkiller Chronicle and Futurama. Dune was cited as a source of inspiration for Hayao Miyazaki's anime film Nausicaä of the Valley of the Wind (1984) for its post-apocalyptic world.Dune was parodied in 1984's National Lampoon's Doon by Ellis Weiner, which William F. Touponce called "something of a tribute to Herbert's success on college campuses", noting that "the only other book to have been so honored is Tolkien's The Lord of the Rings," which was parodied by The Harvard Lampoon in 1969. - - -=== Music === -In 1978, French electronic musician Richard Pinhas released the nine-track Dune-inspired album Chronolyse, which includes the seven-part Variations sur le thème des Bene Gesserit. -In 1979, German electronic music pioneer Klaus Schulze released an LP titled Dune featuring motifs and lyrics inspired by the novel. -A similar musical project, Visions of Dune, was released also in 1979 by Zed (a pseudonym of French electronic musician Bernard Sjazner). -Heavy metal band Iron Maiden wrote the song "To Tame a Land" based on the Dune story. It appears as the closing track to their 1983 album Piece of Mind. The original working title of the song was "Dune"; however, the band was denied permission to use it, with Frank Herbert's agents stating "Frank Herbert doesn't like rock bands, particularly heavy rock bands, and especially bands like Iron Maiden". -Dune inspired the German happy hardcore band Dune, who have released several albums with space travel-themed songs. -The progressive hardcore band Shai Hulud took their name from Dune. -"Traveller in Time", from the 1991 Blind Guardian album Tales from the Twilight World, is based mostly on Paul Atreides' visions of future and past. -The title of the 1993 Fear Factory album Fear is The Mindkiller is a quote from the "litany against fear". -The song "Near Fantastica", from the Matthew Good album Avalanche, makes reference to the "litany against fear", repeating "can't feel fear, fear's the mind killer" through a section of the song. -In the Fatboy Slim song "Weapon of Choice", the line "If you walk without rhythm/You won't attract the worm" is a near quotation from the sections of novel in which Stilgar teaches Paul to ride sandworms. -Dune also inspired the 1999 album The 2nd Moon by the German death metal band Golem, which is a concept album about the series. -Dune influenced Thirty Seconds to Mars on their self-titled debut album. -The Youngblood Brass Band's song "Is an Elegy" on Center:Level:Roar references "Muad'Dib", "Arrakis" and other elements from the novel. -The debut album of Canadian musician Grimes, called Geidi Primes, is a concept album based on Dune. -Japanese singer Kenshi Yonezu, released a song titled "Dune", also known as "Sand Planet". 
The song was released in 2017, and it was created using the voice synthesizer Hatsune Miku for her 10th anniversary.
-"Fear is the Mind Killer", a song released in 2018 by Zheani (an Australian rapper), uses a quote from Dune.
-"Litany Against Fear" is a spoken track released in 2018 on the album 'Eight' by Zheani. She recites an extract from Dune.
-Sleep's 2018 album The Sciences features a song, "Giza Butler", that references several aspects of Dune.
-Tool's 2019 album Fear Inoculum has a song entitled "Litanie contre la peur (Litany against fear)".
-"Rare to Wake", from Shannon Lay's album Geist (2019), is inspired by Dune.
-Heavy metal band Diamond Head based the song "The Sleeper" and its prelude, both off the album The Coffin Train, on the series.
-
-
-=== Games ===
-
-There have been a number of games based on the book, starting with the strategy–adventure game Dune (1992). The most important game adaptation is Dune II (1992), which established the conventions of modern real-time strategy games and is considered to be among the most influential video games of all time. The online game Lost Souls includes Dune-derived elements, including sandworms and melange—addiction to which can produce psychic talents. The 2016 game Enter the Gungeon features the spice melange as a random item which gives the player progressively stronger abilities and penalties with repeated uses, mirroring the long-term effects melange has on users. Rick Priestley cites Dune as a major influence on his 1987 wargame, Warhammer 40,000. In 2023, Funcom announced Dune: Awakening, an upcoming massively multiplayer online game set in the universe of Dune.
-
-
-=== Space exploration ===
-The Apollo 15 astronauts named a small crater on Earth's Moon after the novel during the 1971 mission, and the name was formally adopted by the International Astronomical Union in 1973. Since 2009, the names of planets from the Dune novels have been adopted for the real-world nomenclature of plains and other features on Saturn's moon Titan, like Arrakis Planitia.
-
-
-== See also ==
-Soft science fiction – Sub-genre of science fiction emphasizing "soft" sciences or human emotions
-Hydraulic empire – Government by control of access to water
-
-
-== References ==
-
-
-== Further reading ==
-Clute, John; Nicholls, Peter (1995). The Encyclopedia of Science Fiction. New York: St. Martin's Press. p. 1386. ISBN 978-0-312-13486-0.
-Clute, John; Nicholls, Peter (1995). The Multimedia Encyclopedia of Science Fiction (CD-ROM). Danbury, CT: Grolier. ISBN 978-0-7172-3999-3.
-Huddleston, Tom. The Worlds of Dune: The Places and Cultures That Inspired Frank Herbert. Minneapolis: Quarto Publishing Group UK, 2023.
-Jakubowski, Maxim; Edwards, Malcolm (1983). The Complete Book of Science Fiction and Fantasy Lists. St Albans, Herts, UK: Granada Publishing Ltd. p. 350. ISBN 978-0-586-05678-3.
-Kennedy, Kara. Frank Herbert's Dune: A Critical Companion. Cham, Switzerland: Palgrave Macmillan, 2022.
-Kennedy, Kara. Women's Agency in the Dune Universe: Tracing Women's Liberation through Science Fiction. Cham, Switzerland: Palgrave Macmillan, 2020.
-Nardi, Dominic J. & N. Trevor Brierly, eds. Discovering Dune: Essays on Frank Herbert's Epic Saga. Jefferson, NC: McFarland & Co., 2022.
-Nicholas, Jeffery, ed. Dune and Philosophy: Weirding Way of Mentat. Chicago: Open Court, 2011.
-Nicholls, Peter (1979). The Encyclopedia of Science Fiction. St Albans, Herts, UK: Granada Publishing Ltd. p. 672. ISBN 978-0-586-05380-5.
-O'Reilly, Timothy. Frank Herbert.
New York: Frederick Ungar, 1981. -Pringle, David (1990). The Ultimate Guide to Science Fiction. London: Grafton Books Ltd. p. 407. ISBN 978-0-246-13635-0. -Tuck, Donald H. (1974). The Encyclopedia of Science Fiction and Fantasy. Chicago: Advent. p. 136. ISBN 978-0-911682-20-5. -Williams, Kevin C. The Wisdom of the Sand: Philosophy and Frank Herbert's Dune. New York: Hampton Press, 2013. - - -== External links == - -Official website for Dune and its sequels -Dune title listing at the Internet Speculative Fiction Database -Turner, Paul (October 1973). "Vertex Interviews Frank Herbert" (Interview). Vol. 1, no. 4. Archived from the original on May 19, 2009. -Spark Notes: Dune, detailed study guide -DuneQuotes.com – Collection of quotes from the Dune series -Dune by Frank Herbert, reviewed by Ted Gioia (Conceptual Fiction) -"Frank Herbert Biography and Bibliography at LitWeb.net". www.litweb.net. Archived from the original on April 2, 2009. Retrieved January 2, 2009. -Works of Frank Herbert at Curlie -Timberg, Scott (April 18, 2010). "Frank Herbert's Dune holds timely – and timeless – appeal". Los Angeles Times. Archived from the original on December 3, 2013. Retrieved November 27, 2013. -Walton, Jo (January 12, 2011). "In league with the future: Frank Herbert's Dune (Review)". Tor.com. Retrieved November 27, 2013. -Leonard, Andrew (June 4, 2015). "To Save California, Read Dune". Nautilus. Archived from the original on November 4, 2017. Retrieved June 15, 2015. -Dune by Frank Herbert – Foreshadowing & Dedication at Fact Behind Fiction -Frank Herbert by Tim O'Reilly -DuneScholar.com – Collection of scholarly essays \ No newline at end of file diff --git a/templates/neo4j-parent/ingest.py b/templates/neo4j-parent/ingest.py deleted file mode 100644 index 4ec90888979..00000000000 --- a/templates/neo4j-parent/ingest.py +++ /dev/null @@ -1,47 +0,0 @@ -from pathlib import Path - -from langchain_community.document_loaders import TextLoader -from langchain_community.embeddings.openai import OpenAIEmbeddings -from langchain_community.graphs import Neo4jGraph -from langchain_community.vectorstores import Neo4jVector -from langchain_text_splitters import TokenTextSplitter - -txt_path = Path(__file__).parent / "dune.txt" - -graph = Neo4jGraph() - -# Load the text file -loader = TextLoader(str(txt_path)) -documents = loader.load() - -# Define chunking strategy -parent_splitter = TokenTextSplitter(chunk_size=512, chunk_overlap=24) -child_splitter = TokenTextSplitter(chunk_size=100, chunk_overlap=24) - -# Store parent-child patterns into graph -parent_documents = parent_splitter.split_documents(documents) -for parent in parent_documents: - child_documents = child_splitter.split_documents([parent]) - params = { - "parent": parent.page_content, - "children": [c.page_content for c in child_documents], - } - graph.query( - """ - CREATE (p:Parent {text: $parent}) - WITH p - UNWIND $children AS child - CREATE (c:Child {text: child}) - CREATE (c)-[:HAS_PARENT]->(p) - """, - params, - ) - -# Calculate embedding values on the child nodes -Neo4jVector.from_existing_graph( - OpenAIEmbeddings(), - index_name="retrieval", - node_label="Child", - text_node_properties=["text"], - embedding_node_property="embedding", -) diff --git a/templates/neo4j-parent/main.py b/templates/neo4j-parent/main.py deleted file mode 100644 index ac52947e3f7..00000000000 --- a/templates/neo4j-parent/main.py +++ /dev/null @@ -1,5 +0,0 @@ -from neo4j_parent.chain import chain - -if __name__ == "__main__": - original_query = "What is the plot of the 
Dune?" - print(chain.invoke(original_query)) diff --git a/templates/neo4j-parent/neo4j_parent/__init__.py b/templates/neo4j-parent/neo4j_parent/__init__.py deleted file mode 100644 index 0e89831116e..00000000000 --- a/templates/neo4j-parent/neo4j_parent/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neo4j_parent.chain import chain - -__all__ = ["chain"] diff --git a/templates/neo4j-parent/neo4j_parent/chain.py b/templates/neo4j-parent/neo4j_parent/chain.py deleted file mode 100644 index 8a7ee27b1f7..00000000000 --- a/templates/neo4j-parent/neo4j_parent/chain.py +++ /dev/null @@ -1,52 +0,0 @@ -from langchain_community.vectorstores import Neo4jVector -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_openai import ChatOpenAI, OpenAIEmbeddings - -retrieval_query = """ -MATCH (node)-[:HAS_PARENT]->(parent) -WITH parent, max(score) AS score // deduplicate parents -RETURN parent.text AS text, score, {} AS metadata -""" - - -def format_docs(docs): - return "\n\n".join(doc.page_content for doc in docs) - - -vectorstore = Neo4jVector.from_existing_index( - OpenAIEmbeddings(), - index_name="retrieval", - node_label="Child", - embedding_node_property="embedding", - retrieval_query=retrieval_query, -) -retriever = vectorstore.as_retriever() - -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -model = ChatOpenAI() - -chain = ( - RunnableParallel( - {"context": retriever | format_docs, "question": RunnablePassthrough()} - ) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/neo4j-parent/pyproject.toml b/templates/neo4j-parent/pyproject.toml deleted file mode 100644 index e9c8bcd0d50..00000000000 --- a/templates/neo4j-parent/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[tool.poetry] -name = "neo4j-parent" -version = "0.1.0" -description = "Balance precise embeddings and context retention with Neo4j hybrid search" -authors = [ - "Tomaz Bratanic ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -tiktoken = "^0.5.1" -openai = "<2" -neo4j = "^5.14.0" -langchain-text-splitters = ">=0.0.1,<0.1" -langchain-openai = "^0.1.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "neo4j_parent" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Neo4j" -integrations = ["Neo4j", "OpenAI"] -tags = ["hybrid-search", "graph-database"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/neo4j-parent/tests/__init__.py b/templates/neo4j-parent/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/neo4j-semantic-layer/README.md b/templates/neo4j-semantic-layer/README.md deleted file mode 100644 index ca47c4bb4b6..00000000000 --- a/templates/neo4j-semantic-layer/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# Neo4j - Semantic Layer - -This template is designed to implement an agent capable of interacting with a graph database like `Neo4j` through a semantic layer using `OpenAI function calling`. 
-
The semantic layer equips the agent with a suite of robust tools, allowing it to interact with the graph database based on the user's intent.
-Learn more about the semantic layer template in the [corresponding blog post](https://medium.com/towards-data-science/enhancing-interaction-between-language-models-and-graph-databases-via-a-semantic-layer-0a78ad3eba49).
-
-![Diagram illustrating the workflow of the Neo4j semantic layer with an agent interacting with tools like Information, Recommendation, and Memory, connected to a knowledge graph.](https://raw.githubusercontent.com/langchain-ai/langchain/master/templates/neo4j-semantic-layer/static/workflow.png "Neo4j Semantic Layer Workflow Diagram")
-
-## Tools
-
-The agent utilizes several tools to interact with the `Neo4j` graph database effectively:
-
-1. **Information tool**:
- - Retrieves data about movies or individuals, ensuring the agent has access to the latest and most relevant information.
-2. **Recommendation Tool**:
- - Provides movie recommendations based upon user preferences and input.
-3. **Memory Tool**:
- - Stores information about user preferences in the knowledge graph, allowing for a personalized experience over multiple interactions.
-
-## Environment Setup
-
-You need to define the following environment variables:
-
-```
-OPENAI_API_KEY=
-NEO4J_URI=
-NEO4J_USERNAME=
-NEO4J_PASSWORD=
-```
-
-## Populating with data
-
-If you want to populate the DB with an example movie dataset, you can run `python ingest.py`.
-The script imports information about movies and their ratings by users.
-Additionally, the script creates two [fulltext indices](https://neo4j.com/docs/cypher-manual/current/indexes-for-full-text-search/), which are used to map information from user input to the database.
-
-## Usage
-
-To use this package, you should first have the LangChain CLI installed:
-
-```shell
-pip install -U "langchain-cli[serve]"
-```
-
-To create a new LangChain project and install this as the only package, you can do:
-
-```shell
-langchain app new my-app --package neo4j-semantic-layer
-```
-
-If you want to add this to an existing project, you can just run:
-
-```shell
-langchain app add neo4j-semantic-layer
-```
-
-And add the following code to your `server.py` file:
-```python
-from neo4j_semantic_layer import agent_executor as neo4j_semantic_agent
-
-add_routes(app, neo4j_semantic_agent, path="/neo4j-semantic-layer")
-```
-
-(Optional) Let's now configure LangSmith.
-LangSmith will help us trace, monitor and debug LangChain applications.
-You can sign up for LangSmith [here](https://smith.langchain.com/).
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/neo4j-semantic-layer/playground](http://127.0.0.1:8000/neo4j-semantic-layer/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/neo4j-semantic-layer") -``` diff --git a/templates/neo4j-semantic-layer/ingest.py b/templates/neo4j-semantic-layer/ingest.py deleted file mode 100644 index 973b4c93c9c..00000000000 --- a/templates/neo4j-semantic-layer/ingest.py +++ /dev/null @@ -1,59 +0,0 @@ -from langchain_community.graphs import Neo4jGraph - -# Instantiate connection to Neo4j -graph = Neo4jGraph() - -# Define unique constraints -graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (m:Movie) REQUIRE m.id IS UNIQUE;") -graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (u:User) REQUIRE u.id IS UNIQUE;") -graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (p:Person) REQUIRE p.name IS UNIQUE;") -graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (g:Genre) REQUIRE g.name IS UNIQUE;") - -# Import movie information - -movies_query = """ -LOAD CSV WITH HEADERS FROM -'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies.csv' -AS row -CALL { - WITH row - MERGE (m:Movie {id:row.movieId}) - SET m.released = date(row.released), - m.title = row.title, - m.imdbRating = toFloat(row.imdbRating) - FOREACH (director in split(row.director, '|') | - MERGE (p:Person {name:trim(director)}) - MERGE (p)-[:DIRECTED]->(m)) - FOREACH (actor in split(row.actors, '|') | - MERGE (p:Person {name:trim(actor)}) - MERGE (p)-[:ACTED_IN]->(m)) - FOREACH (genre in split(row.genres, '|') | - MERGE (g:Genre {name:trim(genre)}) - MERGE (m)-[:IN_GENRE]->(g)) -} IN TRANSACTIONS -""" - -graph.query(movies_query) - -# Import rating information -rating_query = """ -LOAD CSV WITH HEADERS FROM -'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/ratings.csv' -AS row -CALL { - WITH row - MATCH (m:Movie {id:row.movieId}) - MERGE (u:User {id:row.userId}) - MERGE (u)-[r:RATED]->(m) - SET r.rating = toFloat(row.rating), - r.timestamp = row.timestamp -} IN TRANSACTIONS OF 10000 ROWS -""" - -graph.query(rating_query) - -# Define fulltext indices -graph.query("CREATE FULLTEXT INDEX movie IF NOT EXISTS FOR (m:Movie) ON EACH [m.title]") -graph.query( - "CREATE FULLTEXT INDEX person IF NOT EXISTS FOR (p:Person) ON EACH [p.name]" -) diff --git a/templates/neo4j-semantic-layer/main.py b/templates/neo4j-semantic-layer/main.py deleted file mode 100644 index 681c6a20d6e..00000000000 --- a/templates/neo4j-semantic-layer/main.py +++ /dev/null @@ -1,17 +0,0 @@ -from neo4j_semantic_layer import agent_executor - -if __name__ == "__main__": - original_query = "What do you know about person John?" - followup_query = "John Travolta" - chat_history = [ - ( - "What do you know about person John?", - "I found multiple people named John. Could you please specify " - "which one you are interested in? Here are some options:" - "\n\n1. 
John Travolta\n2. John McDonough", - ) - ] - print(agent_executor.invoke({"input": original_query})) - print( - agent_executor.invoke({"input": followup_query, "chat_history": chat_history}) - ) diff --git a/templates/neo4j-semantic-layer/neo4j_semantic_layer/__init__.py b/templates/neo4j-semantic-layer/neo4j_semantic_layer/__init__.py deleted file mode 100644 index ad5e940ccad..00000000000 --- a/templates/neo4j-semantic-layer/neo4j_semantic_layer/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neo4j_semantic_layer.agent import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/neo4j-semantic-layer/neo4j_semantic_layer/agent.py b/templates/neo4j-semantic-layer/neo4j_semantic_layer/agent.py deleted file mode 100644 index 79fbae96b7f..00000000000 --- a/templates/neo4j-semantic-layer/neo4j_semantic_layer/agent.py +++ /dev/null @@ -1,71 +0,0 @@ -from typing import List, Tuple - -from langchain.agents import AgentExecutor -from langchain.agents.format_scratchpad import format_to_openai_function_messages -from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser -from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain.pydantic_v1 import BaseModel, Field -from langchain.tools.render import format_tool_to_openai_function -from langchain_community.chat_models import ChatOpenAI -from langchain_core.messages import AIMessage, HumanMessage - -from neo4j_semantic_layer.information_tool import InformationTool -from neo4j_semantic_layer.memory_tool import MemoryTool -from neo4j_semantic_layer.recommendation_tool import RecommenderTool - -llm = ChatOpenAI(temperature=0, model="gpt-4") -tools = [InformationTool(), RecommenderTool(), MemoryTool()] - -llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools]) - -prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are a helpful assistant that finds information about movies " - " and recommends them. If tools require follow up questions, " - "make sure to ask the user for clarification. 
Make sure to include any " - "available options that need to be clarified in the follow up questions", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{input}"), - MessagesPlaceholder(variable_name="agent_scratchpad"), - ] -) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]): - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -agent = ( - { - "input": lambda x: x["input"], - "chat_history": lambda x: ( - _format_chat_history(x["chat_history"]) if x.get("chat_history") else [] - ), - "agent_scratchpad": lambda x: format_to_openai_function_messages( - x["intermediate_steps"] - ), - } - | prompt - | llm_with_tools - | OpenAIFunctionsAgentOutputParser() -) - - -# Add typing for input -class AgentInput(BaseModel): - input: str - chat_history: List[Tuple[str, str]] = Field( - ..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}} - ) - - -agent_executor = AgentExecutor(agent=agent, tools=tools).with_types( - input_type=AgentInput -) diff --git a/templates/neo4j-semantic-layer/neo4j_semantic_layer/information_tool.py b/templates/neo4j-semantic-layer/neo4j_semantic_layer/information_tool.py deleted file mode 100644 index 780bf15f22d..00000000000 --- a/templates/neo4j-semantic-layer/neo4j_semantic_layer/information_tool.py +++ /dev/null @@ -1,74 +0,0 @@ -from typing import Optional, Type - -from langchain.callbacks.manager import ( - AsyncCallbackManagerForToolRun, - CallbackManagerForToolRun, -) - -# Import things that are needed generically -from langchain.pydantic_v1 import BaseModel, Field -from langchain_core.tools import BaseTool - -from neo4j_semantic_layer.utils import get_candidates, graph - -description_query = """ -MATCH (m:Movie|Person) -WHERE m.title = $candidate OR m.name = $candidate -MATCH (m)-[r:ACTED_IN|DIRECTED|HAS_GENRE]-(t) -WITH m, type(r) as type, collect(coalesce(t.name, t.title)) as names -WITH m, type+": "+reduce(s="", n IN names | s + n + ", ") as types -WITH m, collect(types) as contexts -WITH m, "type:" + labels(m)[0] + "\ntitle: "+ coalesce(m.title, m.name) - + "\nyear: "+coalesce(m.released,"") +"\n" + - reduce(s="", c in contexts | s + substring(c, 0, size(c)-2) +"\n") as context -RETURN context LIMIT 1 -""" - - -def get_information(entity: str, type: str) -> str: - candidates = get_candidates(entity, type) - if not candidates: - return "No information was found about the movie or person in the database" - elif len(candidates) > 1: - newline = "\n" - return ( - "Need additional information, which of these " - f"did you mean: {newline + newline.join(str(d) for d in candidates)}" - ) - data = graph.query( - description_query, params={"candidate": candidates[0]["candidate"]} - ) - return data[0]["context"] - - -class InformationInput(BaseModel): - entity: str = Field(description="movie or a person mentioned in the question") - entity_type: str = Field( - description="type of the entity. 
Available options are 'movie' or 'person'" - ) - - -class InformationTool(BaseTool): - name = "Information" - description = ( - "useful for when you need to answer questions about various actors or movies" - ) - args_schema: Type[BaseModel] = InformationInput - - def _run( - self, - entity: str, - entity_type: str, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: - """Use the tool.""" - return get_information(entity, entity_type) - - async def _arun( - self, - entity: str, - entity_type: str, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> str: - """Use the tool asynchronously.""" - return get_information(entity, entity_type) diff --git a/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py b/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py deleted file mode 100644 index b1c43ef6535..00000000000 --- a/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py +++ /dev/null @@ -1,72 +0,0 @@ -from typing import Optional, Type - -from langchain.callbacks.manager import ( - AsyncCallbackManagerForToolRun, - CallbackManagerForToolRun, -) - -# Import things that are needed generically -from langchain.pydantic_v1 import BaseModel, Field -from langchain_core.tools import BaseTool - -from neo4j_semantic_layer.utils import get_candidates, get_user_id, graph - -store_rating_query = """ -MERGE (u:User {userId:$user_id}) -WITH u -UNWIND $candidates as row -MATCH (m:Movie {title: row.candidate}) -MERGE (u)-[r:RATED]->(m) -SET r.rating = toFloat($rating) -RETURN distinct 'Noted' AS response -""" - - -def store_movie_rating(movie: str, rating: int): - user_id = get_user_id() - candidates = get_candidates(movie, "movie") - if not candidates: - return "This movie is not in our database" - response = graph.query( - store_rating_query, - params={"user_id": user_id, "candidates": candidates, "rating": rating}, - ) - try: - return response[0]["response"] - except Exception as e: - print(e) - return "Something went wrong" - - -class MemoryInput(BaseModel): - movie: str = Field(description="movie the user liked") - rating: int = Field( - description=( - "Rating from 1 to 5, where one represents heavy dislike " - "and 5 represent the user loved the movie" - ) - ) - - -class MemoryTool(BaseTool): - name = "Memory" - description = "useful for memorizing which movies the user liked" - args_schema: Type[BaseModel] = MemoryInput - - def _run( - self, - movie: str, - rating: int, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: - """Use the tool.""" - return store_movie_rating(movie, rating) - - async def _arun( - self, - movie: str, - rating: int, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> str: - """Use the tool asynchronously.""" - return store_movie_rating(movie, rating) diff --git a/templates/neo4j-semantic-layer/neo4j_semantic_layer/recommendation_tool.py b/templates/neo4j-semantic-layer/neo4j_semantic_layer/recommendation_tool.py deleted file mode 100644 index 6849b376ecd..00000000000 --- a/templates/neo4j-semantic-layer/neo4j_semantic_layer/recommendation_tool.py +++ /dev/null @@ -1,143 +0,0 @@ -from typing import Optional, Type - -from langchain.callbacks.manager import ( - AsyncCallbackManagerForToolRun, - CallbackManagerForToolRun, -) -from langchain.pydantic_v1 import BaseModel, Field -from langchain_core.tools import BaseTool - -from neo4j_semantic_layer.utils import get_candidates, get_user_id, graph - -recommendation_query_db_history = """ - MERGE (u:User {userId:$user_id}) 
- WITH u - // get recommendation candidates - OPTIONAL MATCH (u)-[r1:RATED]->()<-[r2:RATED]-()-[r3:RATED]->(recommendation) - WHERE r1.rating > 3.5 AND r2.rating > 3.5 AND r3.rating > 3.5 - AND NOT EXISTS {(u)-[:RATED]->(recommendation)} - // rank and limit recommendations - WITH u, recommendation, count(*) AS count - ORDER BY count DESC LIMIT 3 - RETURN recommendation.title AS movie -""" - -recommendation_query_genre = """ -MATCH (m:Movie)-[:IN_GENRE]->(g:Genre {name:$genre}) -// filter out already seen movies by the user -WHERE NOT EXISTS { - (m)<-[:RATED]-(:User {userId:$user_id}) -} -// rank and limit recommendations -WITH m -ORDER BY m.imdbRating DESC LIMIT 3 -RETURN m.title AS movie -""" - - -def recommendation_query_movie(genre: bool) -> str: - return f""" -MATCH (m1:Movie)<-[r1:RATED]-()-[r2:RATED]->(m2:Movie) -WHERE r1.rating > 3.5 AND r2.rating > 3.5 and m1.title IN $movieTitles -// filter out already seen movies by the user -AND NOT EXISTS {{ - (m2)<-[:RATED]-(:User {{userId:$user_id}}) -}} -{'AND EXISTS {(m2)-[:IN_GENRE]->(:Genre {name:$genre})}' if genre else ''} -// rank and limit recommendations -WITH m2, count(*) AS count -ORDER BY count DESC LIMIT 3 -RETURN m2.title As movie -""" - - -def recommend_movie(movie: Optional[str] = None, genre: Optional[str] = None) -> str: - """ - Recommends movies based on user's history and preference - for a specific movie and/or genre. - Returns: - str: A string containing a list of recommended movies, or an error message. - """ - user_id = get_user_id() - params = {"user_id": user_id, "genre": genre} - if not movie and not genre: - # Try to recommend a movie based on the information in the db - response = graph.query(recommendation_query_db_history, params) - try: - return ", ".join([el["movie"] for el in response]) - except Exception: - return "Can you tell us about some of the movies you liked?" - if not movie and genre: - # Recommend top voted movies in the genre the user haven't seen before - response = graph.query(recommendation_query_genre, params) - try: - return ", ".join([el["movie"] for el in response]) - except Exception: - return "Something went wrong" - - candidates = get_candidates(movie, "movie") - if not candidates: - return "The movie you mentioned wasn't found in the database" - params["movieTitles"] = [el["candidate"] for el in candidates] - query = recommendation_query_movie(bool(genre)) - response = graph.query(query, params) - try: - return ", ".join([el["movie"] for el in response]) - except Exception: - return "Something went wrong" - - -all_genres = [ - "Action", - "Adventure", - "Animation", - "Children", - "Comedy", - "Crime", - "Documentary", - "Drama", - "Fantasy", - "Film-Noir", - "Horror", - "IMAX", - "Musical", - "Mystery", - "Romance", - "Sci-Fi", - "Thriller", - "War", - "Western", -] - - -class RecommenderInput(BaseModel): - movie: Optional[str] = Field(description="movie used for recommendation") - genre: Optional[str] = Field( - description=( - "genre used for recommendation. 
Available options are:" f"{all_genres}" - ) - ) - - -class RecommenderTool(BaseTool): - name = "Recommender" - description = "useful for when you need to recommend a movie" - args_schema: Type[BaseModel] = RecommenderInput - - def _run( - self, - movie: Optional[str] = None, - genre: Optional[str] = None, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: - """Use the tool.""" - return recommend_movie(movie, genre) - - async def _arun( - self, - movie: Optional[str] = None, - genre: Optional[str] = None, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> str: - """Use the tool asynchronously.""" - return recommend_movie(movie, genre) diff --git a/templates/neo4j-semantic-layer/neo4j_semantic_layer/utils.py b/templates/neo4j-semantic-layer/neo4j_semantic_layer/utils.py deleted file mode 100644 index ae83928bd51..00000000000 --- a/templates/neo4j-semantic-layer/neo4j_semantic_layer/utils.py +++ /dev/null @@ -1,84 +0,0 @@ -from typing import Dict, List - -from langchain_community.graphs import Neo4jGraph - -graph = Neo4jGraph() - - -def get_user_id() -> int: - """ - Placeholder for a function that would normally retrieve - a user's ID - """ - return 1 - - -def remove_lucene_chars(text: str) -> str: - """Remove Lucene special characters""" - special_chars = [ - "+", - "-", - "&", - "|", - "!", - "(", - ")", - "{", - "}", - "[", - "]", - "^", - '"', - "~", - "*", - "?", - ":", - "\\", - ] - for char in special_chars: - if char in text: - text = text.replace(char, " ") - return text.strip() - - -def generate_full_text_query(input: str) -> str: - """ - Generate a full-text search query for a given input string. - - This function constructs a query string suitable for a full-text search. - It processes the input string by splitting it into words and appending a - similarity threshold (~0.8) to each word, then combines them using the AND - operator. Useful for mapping movies and people from user questions - to database values, and allows for some misspelings. - """ - full_text_query = "" - words = [el for el in remove_lucene_chars(input).split() if el] - for word in words[:-1]: - full_text_query += f" {word}~0.8 AND" - full_text_query += f" {words[-1]}~0.8" - return full_text_query.strip() - - -candidate_query = """ -CALL db.index.fulltext.queryNodes($index, $fulltextQuery, {limit: $limit}) -YIELD node -RETURN coalesce(node.name, node.title) AS candidate, - [el in labels(node) WHERE el IN ['Person', 'Movie'] | el][0] AS label -""" - - -def get_candidates(input: str, type: str, limit: int = 3) -> List[Dict[str, str]]: - """ - Retrieve a list of candidate entities from database based on the input string. - - This function queries the Neo4j database using a full-text search. It takes the - input string, generates a full-text query, and executes this query against the - specified index in the database. The function returns a list of candidates - matching the query, with each candidate being a dictionary containing their name - (or title) and label (either 'Person' or 'Movie'). 
- """ - ft_query = generate_full_text_query(input) - candidates = graph.query( - candidate_query, {"fulltextQuery": ft_query, "index": type, "limit": limit} - ) - return candidates diff --git a/templates/neo4j-semantic-layer/pyproject.toml b/templates/neo4j-semantic-layer/pyproject.toml deleted file mode 100644 index e7a4e411197..00000000000 --- a/templates/neo4j-semantic-layer/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[tool.poetry] -name = "neo4j-semantic-layer" -version = "0.1.0" -description = "Build a semantic layer to allow an agent to interact with a graph database in consistent and robust way." -authors = [ - "Tomaz Bratanic ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -neo4j = "^5.14.0" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "neo4j_semantic_layer" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "semantic_layer" -author = "Neo4j" -integrations = ["Neo4j", "OpenAI"] -tags = ["search", "graph-database", "function-calling"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/neo4j-semantic-layer/static/workflow.png b/templates/neo4j-semantic-layer/static/workflow.png deleted file mode 100644 index 8816fcecd62..00000000000 Binary files a/templates/neo4j-semantic-layer/static/workflow.png and /dev/null differ diff --git a/templates/neo4j-semantic-layer/tests/__init__.py b/templates/neo4j-semantic-layer/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/neo4j-semantic-ollama/README.md b/templates/neo4j-semantic-ollama/README.md deleted file mode 100644 index 637e994cda5..00000000000 --- a/templates/neo4j-semantic-ollama/README.md +++ /dev/null @@ -1,122 +0,0 @@ -# Neo4j, Ollama - Semantic Layer - -This template is designed to implement an agent capable of interacting with a -graph database like `Neo4j` through a semantic layer using `Mixtral` as -a JSON-based agent. -The semantic layer equips the agent with a suite of robust tools, -allowing it to interact with the graph database based on the user's intent. -Learn more about the semantic layer template in the -[corresponding blog post](https://medium.com/towards-data-science/enhancing-interaction-between-language-models-and-graph-databases-via-a-semantic-layer-0a78ad3eba49) and specifically about [Mixtral agents with `Ollama` package](https://blog.langchain.dev/json-based-agents-with-ollama-and-langchain/). - -![Diagram illustrating the workflow of the Neo4j semantic layer with an agent interacting with tools like Information, Recommendation, and Memory, connected to a knowledge graph.](https://raw.githubusercontent.com/langchain-ai/langchain/master/templates/neo4j-semantic-ollama/static/workflow.png) "Neo4j Semantic Layer Workflow Diagram" - -## Tools - -The agent utilizes several tools to interact with the Neo4j graph database effectively: - -1. **Information tool**: - - Retrieves data about movies or individuals, ensuring the agent has access to the latest and most relevant information. -2. **Recommendation Tool**: - - Provides movie recommendations based upon user preferences and input. -3. **Memory Tool**: - - Stores information about user preferences in the knowledge graph, allowing for a personalized experience over multiple interactions. -4. **Smalltalk Tool**: - - Allows an agent to deal with smalltalk. 
-
-## Environment Setup
-
-Before using this template, you need to set up Ollama and a Neo4j database.
-
-1. Follow instructions [here](https://python.langchain.com/docs/integrations/chat/ollama) to download Ollama.
-
-2. Download your LLM of interest:
-
-    * This package uses `mixtral`: `ollama pull mixtral`
-    * You can choose from many LLMs [here](https://ollama.ai/library)
-
-You need to define the following environment variables:
-
-```
-OLLAMA_BASE_URL=
-NEO4J_URI=
-NEO4J_USERNAME=
-NEO4J_PASSWORD=
-```
-
-Typically for a local Ollama installation:
-
-```shell
-export OLLAMA_BASE_URL="http://127.0.0.1:11434"
-```
-
-## Populating with data
-
-If you want to populate the DB with an example movie dataset, you can run `python ingest.py`.
-The script imports information about movies and their ratings by users.
-Additionally, the script creates two [fulltext indices](https://neo4j.com/docs/cypher-manual/current/indexes-for-full-text-search/), which are used to map information from user input to the database.
-
-As an alternative, you can use the demo Neo4j recommendations database:
-```shell
-export NEO4J_URI="neo4j+s://demo.neo4jlabs.com"
-export NEO4J_USERNAME="recommendations"
-export NEO4J_PASSWORD="recommendations"
-export NEO4J_DATABASE="recommendations"
-```
-
-## Usage
-
-To use this package, you should first have the LangChain CLI installed:
-
-```shell
-pip install -U "langchain-cli[serve]"
-```
-
-To create a new LangChain project and install this as the only package, you can do:
-
-```shell
-langchain app new my-app --package neo4j-semantic-ollama
-```
-
-If you want to add this to an existing project, you can just run:
-
-```shell
-langchain app add neo4j-semantic-ollama
-```
-
-And, from within the project, add the following code to your `app/server.py` file, replacing the `add_routes(app, NotImplemented)` section:
-```python
-from neo4j_semantic_ollama import agent_executor as neo4j_semantic_agent
-
-add_routes(app, neo4j_semantic_agent, path="/neo4j-semantic-ollama")
-```
-
-(Optional) Let's now configure LangSmith.
-LangSmith will help us trace, monitor and debug LangChain applications.
-You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section
-
-```shell
-export LANGCHAIN_TRACING_V2=true
-export LANGCHAIN_API_KEY=
-export LANGCHAIN_PROJECT= # if not specified, defaults to "default"
-```
-
-If you are inside the top-level project directory, then you can spin up a LangServe instance directly by:
-
-```shell
-langchain serve
-```
-
-This will start the FastAPI app with a server running locally at
-[http://localhost:8000](http://localhost:8000)
-
-We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
-We can access the playground at [http://127.0.0.1:8000/neo4j-semantic-ollama/playground](http://127.0.0.1:8000/neo4j-semantic-ollama/playground)
-
-We can access the template from code with:
-
-```python
-from langserve.client import RemoteRunnable
-
-runnable = RemoteRunnable("http://localhost:8000/neo4j-semantic-ollama")
-```
diff --git a/templates/neo4j-semantic-ollama/ingest.py b/templates/neo4j-semantic-ollama/ingest.py
deleted file mode 100644
index 973b4c93c9c..00000000000
--- a/templates/neo4j-semantic-ollama/ingest.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from langchain_community.graphs import Neo4jGraph
-
-# Instantiate connection to Neo4j
-graph = Neo4jGraph()
-
-# Define unique constraints
-graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (m:Movie) REQUIRE m.id IS UNIQUE;")
-graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (u:User) REQUIRE u.id IS UNIQUE;")
-graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (p:Person) REQUIRE p.name IS UNIQUE;")
-graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (g:Genre) REQUIRE g.name IS UNIQUE;")
-
-# Import movie information
-
-movies_query = """
-LOAD CSV WITH HEADERS FROM
-'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies.csv'
-AS row
-CALL {
-    WITH row
-    MERGE (m:Movie {id:row.movieId})
-    SET m.released = date(row.released),
-        m.title = row.title,
-        m.imdbRating = toFloat(row.imdbRating)
-    FOREACH (director in split(row.director, '|') |
-        MERGE (p:Person {name:trim(director)})
-        MERGE (p)-[:DIRECTED]->(m))
-    FOREACH (actor in split(row.actors, '|') |
-        MERGE (p:Person {name:trim(actor)})
-        MERGE (p)-[:ACTED_IN]->(m))
-    FOREACH (genre in split(row.genres, '|') |
-        MERGE (g:Genre {name:trim(genre)})
-        MERGE (m)-[:IN_GENRE]->(g))
-} IN TRANSACTIONS
-"""
-
-graph.query(movies_query)
-
-# Import rating information
-rating_query = """
-LOAD CSV WITH HEADERS FROM
-'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/ratings.csv'
-AS row
-CALL {
-    WITH row
-    MATCH (m:Movie {id:row.movieId})
-    MERGE (u:User {id:row.userId})
-    MERGE (u)-[r:RATED]->(m)
-    SET r.rating = toFloat(row.rating),
-        r.timestamp = row.timestamp
-} IN TRANSACTIONS OF 10000 ROWS
-"""
-
-graph.query(rating_query)
-
-# Define fulltext indices
-graph.query("CREATE FULLTEXT INDEX movie IF NOT EXISTS FOR (m:Movie) ON EACH [m.title]")
-graph.query(
-    "CREATE FULLTEXT INDEX person IF NOT EXISTS FOR (p:Person) ON EACH [p.name]"
-)
diff --git a/templates/neo4j-semantic-ollama/main.py b/templates/neo4j-semantic-ollama/main.py
deleted file mode 100644
index d6ee3a91969..00000000000
--- a/templates/neo4j-semantic-ollama/main.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from neo4j_semantic_ollama import agent_executor
-
-if __name__ == "__main__":
-    original_query = "What do you know about person John?"
-    followup_query = "John Travolta"
-    chat_history = [
-        (
-            "What do you know about person John?",
-            "I found multiple people named John. Could you please specify "
-            "which one you are interested in? Here are some options:"
-            "\n\n1. John Travolta\n2. John McDonough",
-        )
-    ]
-    print(agent_executor.invoke({"input": original_query}))
-    print(
-        agent_executor.invoke({"input": followup_query, "chat_history": chat_history})
-    )
diff --git a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/__init__.py b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/__init__.py
deleted file mode 100644
index 0b540e6327f..00000000000
--- a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from neo4j_semantic_ollama.agent import agent_executor
-
-__all__ = ["agent_executor"]
diff --git a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/agent.py b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/agent.py
deleted file mode 100644
index 08d8fdc2a17..00000000000
--- a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/agent.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import os
-from typing import List, Tuple
-
-from langchain.agents import AgentExecutor
-from langchain.agents.format_scratchpad import format_log_to_messages
-from langchain.agents.output_parsers import (
-    ReActJsonSingleInputOutputParser,
-)
-from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
-from langchain.pydantic_v1 import BaseModel, Field
-from langchain.tools.render import render_text_description_and_args
-from langchain_community.chat_models import ChatOllama
-from langchain_core.messages import AIMessage, HumanMessage
-
-from neo4j_semantic_ollama.information_tool import InformationTool
-from neo4j_semantic_ollama.memory_tool import MemoryTool
-from neo4j_semantic_ollama.recommendation_tool import RecommenderTool
-from neo4j_semantic_ollama.smalltalk_tool import SmalltalkTool
-
-llm = ChatOllama(
-    model="mixtral",
-    temperature=0,
-    base_url=os.environ["OLLAMA_BASE_URL"],
-    streaming=True,
-)
-chat_model_with_stop = llm.bind(stop=["\nObservation"])
-tools = [InformationTool(), RecommenderTool(), MemoryTool(), SmalltalkTool()]
-
-# Inspiration taken from hub.pull("hwchase17/react-json")
-system_message = f"""Answer the following questions as best you can.
-You can answer directly if the user is greeting you or similar.
-Otherwise, you have access to the following tools:
-
-{render_text_description_and_args(tools).replace('{', '{{').replace('}', '}}')}
-
-The way you use the tools is by specifying a json blob.
-Specifically, this json should have an `action` key (with the name of the tool to use)
-and an `action_input` key (with the input to the tool going here).
-The only values that should be in the "action" field are: {[t.name for t in tools]}
-The $JSON_BLOB should only contain a SINGLE action,
-do NOT return a list of multiple actions.
-Here is an example of a valid $JSON_BLOB:
-```
-{{{{
-    "action": $TOOL_NAME,
-    "action_input": $INPUT
-}}}}
-```
-The $JSON_BLOB must always be enclosed with triple backticks!
-
-ALWAYS use the following format:
-Question: the input question you must answer
-Thought: you should always think about what to do
-Action:```
-$JSON_BLOB
-```
-Observation: the result of the action...
-(this Thought/Action/Observation can repeat N times)
-Thought: I now know the final answer
-Final Answer: the final answer to the original input question
-
-Begin! Reminder to always use the exact characters `Final Answer` when responding.
-""" - -prompt = ChatPromptTemplate.from_messages( - [ - ( - "user", - system_message, - ), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{input}"), - MessagesPlaceholder(variable_name="agent_scratchpad"), - ] -) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]): - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -agent = ( - { - "input": lambda x: x["input"], - "agent_scratchpad": lambda x: format_log_to_messages(x["intermediate_steps"]), - "chat_history": lambda x: ( - _format_chat_history(x["chat_history"]) if x.get("chat_history") else [] - ), - } - | prompt - | chat_model_with_stop - | ReActJsonSingleInputOutputParser() -) - - -# Add typing for input -class AgentInput(BaseModel): - input: str - chat_history: List[Tuple[str, str]] = Field( - ..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}} - ) - - -agent_executor = AgentExecutor(agent=agent, tools=tools).with_types( - input_type=AgentInput -) diff --git a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/information_tool.py b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/information_tool.py deleted file mode 100644 index e4f61b33ae6..00000000000 --- a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/information_tool.py +++ /dev/null @@ -1,74 +0,0 @@ -from typing import Optional, Type - -from langchain.callbacks.manager import ( - AsyncCallbackManagerForToolRun, - CallbackManagerForToolRun, -) - -# Import things that are needed generically -from langchain.pydantic_v1 import BaseModel, Field -from langchain_core.tools import BaseTool - -from neo4j_semantic_ollama.utils import get_candidates, graph - -description_query = """ -MATCH (m:Movie|Person) -WHERE m.title = $candidate OR m.name = $candidate -MATCH (m)-[r:ACTED_IN|DIRECTED|HAS_GENRE]-(t) -WITH m, type(r) as type, collect(coalesce(t.name, t.title)) as names -WITH m, type+": "+reduce(s="", n IN names | s + n + ", ") as types -WITH m, collect(types) as contexts -WITH m, "type:" + labels(m)[0] + "\ntitle: "+ coalesce(m.title, m.name) - + "\nyear: "+coalesce(m.released,"") +"\n" + - reduce(s="", c in contexts | s + substring(c, 0, size(c)-2) +"\n") as context -RETURN context LIMIT 1 -""" - - -def get_information(entity: str, type: str) -> str: - candidates = get_candidates(entity, type) - if not candidates: - return "No information was found about the movie or person in the database" - elif len(candidates) > 1: - newline = "\n" - return ( - "Need additional information, which of these " - f"did you mean: {newline + newline.join(str(d) for d in candidates)}" - ) - data = graph.query( - description_query, params={"candidate": candidates[0]["candidate"]} - ) - return data[0]["context"] - - -class InformationInput(BaseModel): - entity: str = Field(description="movie or a person mentioned in the question") - entity_type: str = Field( - description="type of the entity. 
Available options are 'movie' or 'person'" - ) - - -class InformationTool(BaseTool): - name = "Information" - description = ( - "useful for when you need to answer questions about various actors or movies" - ) - args_schema: Type[BaseModel] = InformationInput - - def _run( - self, - entity: str, - entity_type: str, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: - """Use the tool.""" - return get_information(entity, entity_type) - - async def _arun( - self, - entity: str, - entity_type: str, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> str: - """Use the tool asynchronously.""" - return get_information(entity, entity_type) diff --git a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py deleted file mode 100644 index 3e5ff478ac3..00000000000 --- a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py +++ /dev/null @@ -1,73 +0,0 @@ -from typing import Optional, Type - -from langchain.callbacks.manager import ( - AsyncCallbackManagerForToolRun, - CallbackManagerForToolRun, -) - -# Import things that are needed generically -from langchain.pydantic_v1 import BaseModel, Field -from langchain_core.tools import BaseTool - -from neo4j_semantic_ollama.utils import get_candidates, get_user_id, graph - -store_rating_query = """ -MERGE (u:User {userId:$user_id}) -WITH u -UNWIND $candidates as row -MATCH (m:Movie {title: row.candidate}) -MERGE (u)-[r:RATED]->(m) -SET r.rating = toFloat($rating) -RETURN distinct - 'Create a final answer saying that preference has been stored' AS response -""" - - -def store_movie_rating(movie: str, rating: int): - user_id = get_user_id() - candidates = get_candidates(movie, "movie") - if not candidates: - return "This movie is not in our database" - response = graph.query( - store_rating_query, - params={"user_id": user_id, "candidates": candidates, "rating": rating}, - ) - try: - return response[0]["response"] - except Exception as e: - print(e) - return "Something went wrong" - - -class MemoryInput(BaseModel): - movie: str = Field(description="movie the user liked") - rating: int = Field( - description=( - "Rating from 1 to 5, where one represents heavy dislike " - "and 5 represent the user loved the movie" - ) - ) - - -class MemoryTool(BaseTool): - name = "Memory" - description = "useful for memorizing which movies the user liked" - args_schema: Type[BaseModel] = MemoryInput - - def _run( - self, - movie: str, - rating: int, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: - """Use the tool.""" - return store_movie_rating(movie, rating) - - async def _arun( - self, - movie: str, - rating: int, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> str: - """Use the tool asynchronously.""" - return store_movie_rating(movie, rating) diff --git a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/recommendation_tool.py b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/recommendation_tool.py deleted file mode 100644 index 211216705ab..00000000000 --- a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/recommendation_tool.py +++ /dev/null @@ -1,164 +0,0 @@ -from typing import Optional, Type - -from langchain.callbacks.manager import ( - AsyncCallbackManagerForToolRun, - CallbackManagerForToolRun, -) -from langchain.pydantic_v1 import BaseModel, Field -from langchain_core.tools import BaseTool - -from neo4j_semantic_ollama.utils import get_candidates, get_user_id, graph - 
-recommendation_query_db_history = """ - MERGE (u:User {userId:$user_id}) - WITH u - // get recommendation candidates - OPTIONAL MATCH (u)-[r1:RATED]->()<-[r2:RATED]-()-[r3:RATED]->(recommendation) - WHERE r1.rating > 3.5 AND r2.rating > 3.5 AND r3.rating > 3.5 - AND NOT EXISTS {(u)-[:RATED]->(recommendation)} - // rank and limit recommendations - WITH u, recommendation, count(*) AS count - ORDER BY count DESC LIMIT 3 -RETURN 'title:' + recommendation.title + '\nactors:' + -apoc.text.join([(recommendation)<-[:ACTED_IN]-(a) | a.name], ',') + -'\ngenre:' + apoc.text.join([(recommendation)-[:IN_GENRE]->(a) | a.name], ',') -AS movie -""" - -recommendation_query_genre = """ -MATCH (m:Movie)-[:IN_GENRE]->(g:Genre {name:$genre}) -// filter out already seen movies by the user -WHERE NOT EXISTS { - (m)<-[:RATED]-(:User {userId:$user_id}) -} -// rank and limit recommendations -WITH m AS recommendation -ORDER BY recommendation.imdbRating DESC LIMIT 3 -RETURN 'title:' + recommendation.title + '\nactors:' + -apoc.text.join([(recommendation)<-[:ACTED_IN]-(a) | a.name], ',') + -'\ngenre:' + apoc.text.join([(recommendation)-[:IN_GENRE]->(a) | a.name], ',') -AS movie -""" - - -def recommendation_query_movie(genre: bool) -> str: - return f""" -MATCH (m1:Movie)<-[r1:RATED]-()-[r2:RATED]->(m2:Movie) -WHERE r1.rating > 3.5 AND r2.rating > 3.5 and m1.title IN $movieTitles -// filter out already seen movies by the user -AND NOT EXISTS {{ - (m2)<-[:RATED]-(:User {{userId:$user_id}}) -}} -{'AND EXISTS {(m2)-[:IN_GENRE]->(:Genre {name:$genre})}' if genre else ''} -// rank and limit recommendations -WITH m2 AS recommendation, count(*) AS count -ORDER BY count DESC LIMIT 3 -RETURN 'title:' + recommendation.title + '\nactors:' + -apoc.text.join([(recommendation)<-[:ACTED_IN]-(a) | a.name], ',') + -'\ngenre:' + apoc.text.join([(recommendation)-[:IN_GENRE]->(a) | a.name], ',') -AS movie -""" - - -nl = "\n" - - -def recommend_movie(movie: Optional[str] = None, genre: Optional[str] = None) -> str: - """ - Recommends movies based on user's history and preference - for a specific movie and/or genre. - Returns: - str: A string containing a list of recommended movies, or an error message. - """ - user_id = get_user_id() - params = {"user_id": user_id, "genre": genre} - if not movie and not genre: - # Try to recommend a movie based on the information in the db - response = graph.query(recommendation_query_db_history, params) - try: - return ( - 'Recommended movies are: ' - f'{f"###Movie {nl}".join([el["movie"] for el in response])}' - ) - except Exception: - return "Can you tell us about some of the movies you liked?" 
- if not movie and genre: - # Recommend top voted movies in the genre the user haven't seen before - response = graph.query(recommendation_query_genre, params) - try: - return ( - 'Recommended movies are: ' - f'{f"###Movie {nl}".join([el["movie"] for el in response])}' - ) - except Exception: - return "Something went wrong" - - candidates = get_candidates(movie, "movie") - if not candidates: - return "The movie you mentioned wasn't found in the database" - params["movieTitles"] = [el["candidate"] for el in candidates] - query = recommendation_query_movie(bool(genre)) - response = graph.query(query, params) - try: - return ( - 'Recommended movies are: ' - f'{f"###Movie {nl}".join([el["movie"] for el in response])}' - ) - except Exception: - return "Something went wrong" - - -all_genres = [ - "Action", - "Adventure", - "Animation", - "Children", - "Comedy", - "Crime", - "Documentary", - "Drama", - "Fantasy", - "Film-Noir", - "Horror", - "IMAX", - "Musical", - "Mystery", - "Romance", - "Sci-Fi", - "Thriller", - "War", - "Western", -] - - -class RecommenderInput(BaseModel): - movie: Optional[str] = Field(description="movie used for recommendation") - genre: Optional[str] = Field( - description=( - "genre used for recommendation. Available options are:" f"{all_genres}" - ) - ) - - -class RecommenderTool(BaseTool): - name = "Recommender" - description = "useful for when you need to recommend a movie" - args_schema: Type[BaseModel] = RecommenderInput - - def _run( - self, - movie: Optional[str] = None, - genre: Optional[str] = None, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: - """Use the tool.""" - return recommend_movie(movie, genre) - - async def _arun( - self, - movie: Optional[str] = None, - genre: Optional[str] = None, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> str: - """Use the tool asynchronously.""" - return recommend_movie(movie, genre) diff --git a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/smalltalk_tool.py b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/smalltalk_tool.py deleted file mode 100644 index 7efefa74dbb..00000000000 --- a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/smalltalk_tool.py +++ /dev/null @@ -1,39 +0,0 @@ -from typing import Optional, Type - -from langchain.callbacks.manager import ( - AsyncCallbackManagerForToolRun, - CallbackManagerForToolRun, -) -from langchain.pydantic_v1 import BaseModel, Field -from langchain_core.tools import BaseTool - -response = ( - "Create a final answer that says if they " - "have any questions about movies or actors" -) - - -class SmalltalkInput(BaseModel): - query: Optional[str] = Field(description="user query") - - -class SmalltalkTool(BaseTool): - name = "Smalltalk" - description = "useful for when user greets you or wants to smalltalk" - args_schema: Type[BaseModel] = SmalltalkInput - - def _run( - self, - query: Optional[str] = None, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: - """Use the tool.""" - return response - - async def _arun( - self, - query: Optional[str] = None, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> str: - """Use the tool asynchronously.""" - return response diff --git a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/utils.py b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/utils.py deleted file mode 100644 index ae83928bd51..00000000000 --- a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/utils.py +++ /dev/null @@ -1,84 +0,0 @@ -from typing import Dict, 
List - -from langchain_community.graphs import Neo4jGraph - -graph = Neo4jGraph() - - -def get_user_id() -> int: - """ - Placeholder for a function that would normally retrieve - a user's ID - """ - return 1 - - -def remove_lucene_chars(text: str) -> str: - """Remove Lucene special characters""" - special_chars = [ - "+", - "-", - "&", - "|", - "!", - "(", - ")", - "{", - "}", - "[", - "]", - "^", - '"', - "~", - "*", - "?", - ":", - "\\", - ] - for char in special_chars: - if char in text: - text = text.replace(char, " ") - return text.strip() - - -def generate_full_text_query(input: str) -> str: - """ - Generate a full-text search query for a given input string. - - This function constructs a query string suitable for a full-text search. - It processes the input string by splitting it into words and appending a - similarity threshold (~0.8) to each word, then combines them using the AND - operator. Useful for mapping movies and people from user questions - to database values, and allows for some misspelings. - """ - full_text_query = "" - words = [el for el in remove_lucene_chars(input).split() if el] - for word in words[:-1]: - full_text_query += f" {word}~0.8 AND" - full_text_query += f" {words[-1]}~0.8" - return full_text_query.strip() - - -candidate_query = """ -CALL db.index.fulltext.queryNodes($index, $fulltextQuery, {limit: $limit}) -YIELD node -RETURN coalesce(node.name, node.title) AS candidate, - [el in labels(node) WHERE el IN ['Person', 'Movie'] | el][0] AS label -""" - - -def get_candidates(input: str, type: str, limit: int = 3) -> List[Dict[str, str]]: - """ - Retrieve a list of candidate entities from database based on the input string. - - This function queries the Neo4j database using a full-text search. It takes the - input string, generates a full-text query, and executes this query against the - specified index in the database. The function returns a list of candidates - matching the query, with each candidate being a dictionary containing their name - (or title) and label (either 'Person' or 'Movie'). - """ - ft_query = generate_full_text_query(input) - candidates = graph.query( - candidate_query, {"fulltextQuery": ft_query, "index": type, "limit": limit} - ) - return candidates diff --git a/templates/neo4j-semantic-ollama/pyproject.toml b/templates/neo4j-semantic-ollama/pyproject.toml deleted file mode 100644 index e112e3fad9d..00000000000 --- a/templates/neo4j-semantic-ollama/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[tool.poetry] -name = "neo4j-semantic-ollama" -version = "0.1.0" -description = "Build a semantic layer with Mixtral via Ollama to allow an agent to interact with a graph database in consistent and robust way." 
-authors = [
-    "Tomaz Bratanic ",
-]
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = ">=3.8.1,<4.0"
-langchain = "^0.1"
-openai = "<2"
-neo4j = "^5.14.0"
-
-[tool.poetry.group.dev.dependencies]
-langchain-cli = ">=0.0.21"
-
-[tool.langserve]
-export_module = "neo4j_semantic_ollama"
-export_attr = "agent_executor"
-
-[tool.templates-hub]
-use-case = "semantic_layer"
-author = "Neo4j"
-integrations = ["Neo4j", "Ollama"]
-tags = ["search", "graph-database", "function-calling"]
-
-[build-system]
-requires = [
-    "poetry-core",
-]
-build-backend = "poetry.core.masonry.api"
diff --git a/templates/neo4j-semantic-ollama/static/workflow.png b/templates/neo4j-semantic-ollama/static/workflow.png
deleted file mode 100644
index f62b720beef..00000000000
Binary files a/templates/neo4j-semantic-ollama/static/workflow.png and /dev/null differ
diff --git a/templates/neo4j-semantic-ollama/tests/__init__.py b/templates/neo4j-semantic-ollama/tests/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/templates/neo4j-vector-memory/README.md b/templates/neo4j-vector-memory/README.md
deleted file mode 100644
index 1a34739020a..00000000000
--- a/templates/neo4j-vector-memory/README.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# Neo4j - vector memory
-
-This template allows you to integrate an LLM with a vector-based
-retrieval system using `Neo4j` as the vector store.
-
-Additionally, it uses the graph capabilities of the `Neo4j` database to
-store and retrieve the dialogue history of a specific user's session.
-
-Having the dialogue history stored as a graph not only allows for
-seamless conversational flows but also gives you the ability
-to analyze user behavior and text chunk retrieval through graph analytics.
-
-
-## Environment Setup
-
-You need to define the following environment variables:
-
-```
-OPENAI_API_KEY=
-NEO4J_URI=
-NEO4J_USERNAME=
-NEO4J_PASSWORD=
-```
-
-## Populating with data
-
-If you want to populate the DB with some example data, you can run `python ingest.py`.
-The script processes and stores sections of the text from the file `dune.txt` into a Neo4j graph database.
-Additionally, a vector index named `dune` is created for efficient querying of these embeddings.
-
-
-## Usage
-
-To use this package, you should first have the LangChain CLI installed:
-
-```shell
-pip install -U langchain-cli
-```
-
-To create a new LangChain project and install this as the only package, you can do:
-
-```shell
-langchain app new my-app --package neo4j-vector-memory
-```
-
-If you want to add this to an existing project, you can just run:
-
-```shell
-langchain app add neo4j-vector-memory
-```
-
-And add the following code to your `server.py` file:
-```python
-from neo4j_vector_memory import chain as neo4j_vector_memory_chain
-
-add_routes(app, neo4j_vector_memory_chain, path="/neo4j-vector-memory")
-```
-
-(Optional) Let's now configure LangSmith.
-LangSmith will help us trace, monitor and debug LangChain applications.
-You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section
-
-```shell
-export LANGCHAIN_TRACING_V2=true
-export LANGCHAIN_API_KEY=
-export LANGCHAIN_PROJECT= # if not specified, defaults to "default"
-```
-
-If you are inside this directory, then you can spin up a LangServe instance directly by:
-
-```shell
-langchain serve
-```
-
-This will start the FastAPI app with a server running locally at
-[http://localhost:8000](http://localhost:8000)
-
-We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
-We can access the playground at [http://127.0.0.1:8000/neo4j-vector-memory/playground](http://127.0.0.1:8000/neo4j-vector-memory/playground)
-
-We can access the template from code with:
-
-```python
-from langserve.client import RemoteRunnable
-
-runnable = RemoteRunnable("http://localhost:8000/neo4j-vector-memory")
-```
diff --git a/templates/neo4j-vector-memory/dune.txt b/templates/neo4j-vector-memory/dune.txt
deleted file mode 100644
index 3417aaac3fc..00000000000
--- a/templates/neo4j-vector-memory/dune.txt
+++ /dev/null
@@ -1,228 +0,0 @@
-Dune is a 1965 epic science fiction novel by American author Frank Herbert, originally published as two separate serials in Analog magazine. It tied with Roger Zelazny's This Immortal for the Hugo Award in 1966 and it won the inaugural Nebula Award for Best Novel. It is the first installment of the Dune Chronicles. It is one of the world's best-selling science fiction novels.Dune is set in the distant future in a feudal interstellar society in which various noble houses control planetary fiefs. It tells the story of young Paul Atreides, whose family accepts the stewardship of the planet Arrakis. While the planet is an inhospitable and sparsely populated desert wasteland, it is the only source of melange, or "spice", a drug that extends life and enhances mental abilities. Melange is also necessary for space navigation, which requires a kind of multidimensional awareness and foresight that only the drug provides. As melange can only be produced on Arrakis, control of the planet is a coveted and dangerous undertaking. The story explores the multilayered interactions of politics, religion, ecology, technology, and human emotion, as the factions of the empire confront each other in a struggle for the control of Arrakis and its spice.
-Herbert wrote five sequels: Dune Messiah, Children of Dune, God Emperor of Dune, Heretics of Dune, and Chapterhouse: Dune. Following Herbert's death in 1986, his son Brian Herbert and author Kevin J. Anderson continued the series in over a dozen additional novels since 1999.
-Adaptations of the novel to cinema have been notoriously difficult and complicated. In the 1970s, cult filmmaker Alejandro Jodorowsky attempted to make a film based on the novel. After three years of development, the project was canceled due to a constantly growing budget. In 1984, a film adaptation directed by David Lynch was released to mostly negative responses from critics and failure at the box office, although it later developed a cult following. The book was also adapted into the 2000 Sci-Fi Channel miniseries Frank Herbert's Dune and its 2003 sequel Frank Herbert's Children of Dune (the latter of which combines the events of Dune Messiah and Children of Dune). A second film adaptation directed by Denis Villeneuve was released on October 21, 2021, to positive reviews. It grossed $401 million worldwide and went on to be nominated for ten Academy Awards, winning six. 
Villeneuve's film covers roughly the first half of the original novel; a sequel, which will cover the remaining story, will be released in March 2024. -The series has also been used as the basis for several board, role-playing, and video games. -Since 2009, the names of planets from the Dune novels have been adopted for the real-life nomenclature of plains and other features on Saturn's moon Titan. - - -== Origins == -After his novel The Dragon in the Sea was published in 1957, Herbert traveled to Florence, Oregon, at the north end of the Oregon Dunes. Here, the United States Department of Agriculture was attempting to use poverty grasses to stabilize the sand dunes. Herbert claimed in a letter to his literary agent, Lurton Blassingame, that the moving dunes could "swallow whole cities, lakes, rivers, highways." Herbert's article on the dunes, "They Stopped the Moving Sands", was never completed (and only published decades later in The Road to Dune), but its research sparked Herbert's interest in ecology and deserts.Herbert further drew inspiration from Native American mentors like "Indian Henry" (as Herbert referred to the man to his son; likely a Henry Martin of the Hoh tribe) and Howard Hansen. Both Martin and Hansen grew up on the Quileute reservation near Herbert's hometown. According to historian Daniel Immerwahr, Hansen regularly shared his writing with Herbert. "White men are eating the earth," Hansen told Herbert in 1958, after sharing a piece on the effect of logging on the Quileute reservation. "They're gonna turn this whole planet into a wasteland, just like North Africa." The world could become a "big dune," Herbert responded in agreement.Herbert was also interested in the idea of the superhero mystique and messiahs. He believed that feudalism was a natural condition humans fell into, where some led and others gave up the responsibility of making decisions and just followed orders. He found that desert environments have historically given birth to several major religions with messianic impulses. He decided to join his interests together so he could play religious and ecological ideas against each other. In addition, he was influenced by the story of T. E. Lawrence and the "messianic overtones" in Lawrence's involvement in the Arab Revolt during World War I. In an early version of Dune, the hero was actually very similar to Lawrence of Arabia, but Herbert decided the plot was too straightforward and added more layers to his story.Herbert drew heavy inspiration also from Lesley Blanch's The Sabres of Paradise (1960), a narrative history recounting a mid-19th century conflict in the Caucasus between rugged Islamized caucasian tribes and the expansive Russian Empire. Language used on both sides of that conflict become terms in Herbert's world—chakobsa, a Caucasian hunting language, becomes a battle language of humans spread across the galaxy; kanly, a word for blood feud in the 19th century Caucasus, represents a feud between Dune's noble Houses; sietch and tabir are both words for camp borrowed from Ukrainian Cossacks (of the Pontic–Caspian steppe).Herbert also borrowed some lines which Blanch stated were Caucasian proverbs. "To kill with the point lacked artistry", used by Blanch to describe the Caucasus peoples' love of swordsmanship, becomes in Dune "Killing with the tip lacks artistry", a piece of advice given to a young Paul during his training. 
"Polish comes from the city, wisdom from the hills", a Caucasian aphorism, turns into a desert expression: "Polish comes from the cities, wisdom from the desert". - -Another significant source of inspiration for Dune was Herbert's experiences with psilocybin and his hobby of cultivating mushrooms, according to mycologist Paul Stamets's account of meeting Herbert in the 1980s:Frank went on to tell me that much of the premise of Dune—the magic spice (spores) that allowed the bending of space (tripping), the giant sand worms (maggots digesting mushrooms), the eyes of the Fremen (the cerulean blue of Psilocybe mushrooms), the mysticism of the female spiritual warriors, the Bene Gesserits (influenced by the tales of Maria Sabina and the sacred mushroom cults of Mexico)—came from his perception of the fungal life cycle, and his imagination was stimulated through his experiences with the use of magic mushrooms.Herbert spent the next five years researching, writing, and revising. He published a three-part serial Dune World in the monthly Analog, from December 1963 to February 1964. The serial was accompanied by several illustrations that were not published again. After an interval of a year, he published the much slower-paced five-part The Prophet of Dune in the January–May 1965 issues. The first serial became "Book 1: Dune" in the final published Dune novel, and the second serial was divided into "Book Two: Muad'dib" and "Book Three: The Prophet". The serialized version was expanded, reworked, and submitted to more than twenty publishers, each of whom rejected it. The novel, Dune, was finally accepted and published in August 1965 by Chilton Books, a printing house better known for publishing auto repair manuals. Sterling Lanier, an editor at Chilton, had seen Herbert's manuscript and had urged his company to take a risk in publishing the book. However, the first printing, priced at $5.95 (equivalent to $55.25 in 2022), did not sell well and was poorly received by critics as being atypical of science fiction at the time. Chilton considered the publication of Dune a write-off and Lanier was fired. Over the course of time, the book gained critical acclaim, and its popularity spread by word-of-mouth to allow Herbert to start working full time on developing the sequels to Dune, elements of which were already written alongside Dune.At first Herbert considered using Mars as setting for his novel, but eventually decided to use a fictional planet instead. His son Brian said that "Readers would have too many preconceived ideas about that planet, due to the number of stories that had been written about it."Herbert dedicated his work "to the people whose labors go beyond ideas into the realm of 'real materials'—to the dry-land ecologists, wherever they may be, in whatever time they work, this effort at prediction is dedicated in humility and admiration." - - -== Plot == -Duke Leto Atreides of House Atreides, ruler of the ocean planet Caladan, is assigned by the Padishah Emperor Shaddam IV to serve as fief ruler of the planet Arrakis. Although Arrakis is a harsh and inhospitable desert planet, it is of enormous importance because it is the only planetary source of melange, or the "spice", a unique and incredibly valuable substance that extends human youth, vitality and lifespan. It is also through the consumption of spice that Spacing Guild Navigators are able to effect safe interstellar travel. 
Shaddam, jealous of Duke Leto Atreides's rising popularity in the Landsraad, sees House Atreides as a potential future rival and threat, so conspires with House Harkonnen, the former stewards of Arrakis and the longstanding enemies of House Atreides, to destroy Leto and his family after their arrival. Leto is aware his assignment is a trap of some kind, but is compelled to obey the Emperor's orders anyway. -Leto's concubine Lady Jessica is an acolyte of the Bene Gesserit, an exclusively female group that pursues mysterious political aims and wields seemingly superhuman physical and mental abilities, such as the ability to control their bodies down to the cellular level, and also decide the sex of their children. Though Jessica was instructed by the Bene Gesserit to bear a daughter as part of their breeding program, out of love for Leto she bore a son, Paul. From a young age, Paul has been trained in warfare by Leto's aides, the elite soldiers Duncan Idaho and Gurney Halleck. Thufir Hawat, the Duke's Mentat (human computers, able to store vast amounts of data and perform advanced calculations on demand), has instructed Paul in the ways of political intrigue. Jessica has also trained her son in Bene Gesserit disciplines. -Paul's prophetic dreams interest Jessica's superior, the Reverend Mother Gaius Helen Mohiam, who subjects Paul to the deadly gom jabbar test. Holding a poisonous needle to his neck ready to strike should he be unable to resist the impulse to withdraw his hand from the nerve induction box, she tests Paul's self-control to overcome the extreme psychological pain he is being subjected to through the box. -Leto, Jessica, and Paul travel with their household to occupy Arrakeen, the capital on Arrakis formerly held by House Harkonnen. Leto learns of the dangers involved in harvesting the spice, which is protected by giant sandworms, and seeks to negotiate with the planet's native Fremen people, seeing them as a valuable ally rather than foes. Soon after the Atreides's arrival, Harkonnen forces attack, joined by the Emperor's ferocious Sardaukar troops in disguise. Leto is betrayed by his personal physician, the Suk doctor Wellington Yueh, who delivers a drugged Leto to the Baron Vladimir Harkonnen and his twisted Mentat, Piter De Vries. Yueh, however, arranges for Jessica and Paul to escape into the desert, where they are presumed dead by the Harkonnens. Yueh replaces one of Leto's teeth with a poison gas capsule, hoping Leto can kill the Baron during their encounter. The Baron narrowly avoids the gas due to his shield, which kills Leto, De Vries, and the others in the room. The Baron forces Hawat to take over De Vries's position by dosing him with a long-lasting, fatal poison and threatening to withhold the regular antidote doses unless he obeys. While he follows the Baron's orders, Hawat works secretly to undermine the Harkonnens. -Having fled into the desert, Paul is exposed to high concentrations of spice and has visions through which he realizes he has significant powers (as a result of the Bene Gesserit breeding scheme). He foresees potential futures in which he lives among the planet's native Fremen before leading them on a Holy Jihad across the known universe. -It is revealed Jessica is the daughter of Baron Harkonnen, a secret kept from her by the Bene Gesserit. After being captured by Fremen, Paul and Jessica are accepted into the Fremen community of Sietch Tabr, and teach the Fremen the Bene Gesserit fighting technique known as the "weirding way". 
Paul proves his manhood by killing a Fremen named Jamis in a ritualistic crysknife fight and chooses the Fremen name Muad'Dib, while Jessica opts to undergo a ritual to become a Reverend Mother by drinking the poisonous Water of Life. Pregnant with Leto's daughter, she inadvertently causes the unborn child, Alia, to become infused with the same powers in the womb. Paul takes a Fremen lover, Chani, and has a son with her, Leto II. -Two years pass and Paul's powerful prescience manifests, which confirms for the Fremen that he is their prophesied messiah, a legend planted by the Bene Gesserit's Missionaria Protectiva. Paul embraces his father's belief that the Fremen could be a powerful fighting force to take back Arrakis, but also sees that if he does not control them, their jihad could consume the entire universe. Word of the new Fremen leader reaches both Baron Harkonnen and the Emperor as spice production falls due to their increasingly destructive raids. The Baron encourages his brutish nephew Glossu Rabban to rule with an iron fist, hoping the contrast with his shrewder nephew Feyd-Rautha will make the latter popular among the people of Arrakis when he eventually replaces Rabban. The Emperor, suspecting the Baron of trying to create troops more powerful than the Sardaukar to seize power, sends spies to monitor activity on Arrakis. Hawat uses the opportunity to sow seeds of doubt in the Baron about the Emperor's true plans, putting further strain on their alliance. -Gurney, having survived the Harkonnen coup becomes a smuggler, reuniting with Paul and Jessica after a Fremen raid on his harvester. Believing Jessica to be the traitor, Gurney threatens to kill her, but is stopped by Paul. Paul did not foresee Gurney's attack, and concludes he must increase his prescience by drinking the Water of Life, which is traditionally fatal to males. Paul falls into unconsciousness for three weeks after drinking the poison, but when he wakes, he has clairvoyance across time and space: he is the Kwisatz Haderach, the ultimate goal of the Bene Gesserit breeding program. -Paul senses the Emperor and Baron are amassing fleets around Arrakis to quell the Fremen rebellion, and prepares the Fremen for a major offensive against the Harkonnen troops. The Emperor arrives with the Baron on Arrakis. The Emperor's troops seize a Fremen outpost, killing many including young Leto II, while Alia is captured and taken to the Emperor. Under cover of an electric storm, which shorts out the Emperor's troops' defensive shields, Paul and the Fremen, riding giant sandworms, assault the capital while Alia assassinates the Baron and escapes. The Fremen quickly defeat both the Harkonnen and Sardaukar troops. -Paul faces the Emperor, threatening to destroy spice production forever unless Shaddam abdicates the throne. Feyd-Rautha attempts to stop Paul by challenging him to a ritualistic knife fight, during which he attempts to cheat and kill Paul with a poison spur in his belt. Paul gains the upper hand and kills him. The Emperor reluctantly cedes the throne to Paul and promises his daughter Princess Irulan's hand in marriage. As Paul takes control of the Empire, he realizes that while he has achieved his goal, he is no longer able to stop the Fremen jihad, as their belief in him is too powerful to restrain. 
- - -== Characters == -House AtreidesPaul Atreides, the Duke's son, and main character of the novel -Duke Leto Atreides, head of House Atreides -Lady Jessica, Bene Gesserit and concubine of the Duke, mother of Paul and Alia -Alia Atreides, Paul's younger sister -Thufir Hawat, Mentat and Master of Assassins to House Atreides -Gurney Halleck, staunchly loyal troubadour warrior of the Atreides -Duncan Idaho, Swordmaster for House Atreides, graduate of the Ginaz School -Wellington Yueh, Suk doctor for the Atreides who is secretly working for House HarkonnenHouse HarkonnenBaron Vladimir Harkonnen, head of House Harkonnen -Piter De Vries, twisted Mentat -Feyd-Rautha, nephew and heir-presumptive of the Baron -Glossu "Beast" Rabban, also called Rabban Harkonnen, older nephew of the Baron -Iakin Nefud, Captain of the GuardHouse CorrinoShaddam IV, Padishah Emperor of the Known Universe (the Imperium) -Princess Irulan, Shaddam's eldest daughter and heir, also a historian -Count Fenring, the Emperor's closest friend, advisor, and "errand boy"Bene GesseritReverend Mother Gaius Helen Mohiam, Proctor Superior of the Bene Gesserit school and the Emperor's Truthsayer -Lady Margot Fenring, Bene Gesserit wife of Count FenringFremenThe Fremen, native inhabitants of Arrakis -Stilgar, Fremen leader of Sietch Tabr -Chani, Paul's Fremen concubine and a Sayyadina (female acolyte) of Sietch Tabr -Dr. Liet-Kynes, the Imperial Planetologist on Arrakis and father of Chani, as well as a revered figure among the Fremen -The Shadout Mapes, head housekeeper of imperial residence on Arrakis -Jamis, Fremen killed by Paul in ritual duel -Harah, wife of Jamis and later servant to Paul who helps raise Alia among the Fremen -Reverend Mother Ramallo, religious leader of Sietch TabrSmugglersEsmar Tuek, a powerful smuggler and the father of Staban Tuek -Staban Tuek, the son of Esmar Tuek and a powerful smuggler who befriends and takes in Gurney Halleck and his surviving men after the attack on the Atreides - - -== Themes and influences == -The Dune series is a landmark of science fiction. Herbert deliberately suppressed technology in his Dune universe so he could address the politics of humanity, rather than the future of humanity's technology. For example, a key pre-history event to the novel's present is the "Butlerian Jihad", in which all robots and computers were destroyed, eliminating these common elements to science fiction from the novel as to allow focus on humanity. Dune considers the way humans and their institutions might change over time. Director John Harrison, who adapted Dune for Syfy's 2000 miniseries, called the novel a universal and timeless reflection of "the human condition and its moral dilemmas", and said: - -A lot of people refer to Dune as science fiction. I never do. I consider it an epic adventure in the classic storytelling tradition, a story of myth and legend not unlike the Morte d'Arthur or any messiah story. It just happens to be set in the future ... The story is actually more relevant today than when Herbert wrote it. In the 1960s, there were just these two colossal superpowers duking it out. Today we're living in a more feudal, corporatized world more akin to Herbert's universe of separate families, power centers and business interests, all interrelated and kept together by the one commodity necessary to all. -But Dune has also been called a mix of soft and hard science fiction since "the attention to ecology is hard, the anthropology and the psychic abilities are soft." 
Hard elements include the ecology of Arrakis, suspensor technology, weapon systems, and ornithopters, while soft elements include issues relating to religion, physical and mental training, cultures, politics, and psychology.Herbert said Paul's messiah figure was inspired by the Arthurian legend, and that the scarcity of water on Arrakis was a metaphor for oil, as well as air and water itself, and for the shortages of resources caused by overpopulation. Novelist Brian Herbert, his son and biographer, wrote: - -Dune is a modern-day conglomeration of familiar myths, a tale in which great sandworms guard a precious treasure of melange, the geriatric spice that represents, among other things, the finite resource of oil. The planet Arrakis features immense, ferocious worms that are like dragons of lore, with "great teeth" and a "bellows breath of cinnamon." This resembles the myth described by an unknown English poet in Beowulf, the compelling tale of a fearsome fire dragon who guarded a great treasure hoard in a lair under cliffs, at the edge of the sea. The desert of Frank Herbert's classic novel is a vast ocean of sand, with giant worms diving into the depths, the mysterious and unrevealed domain of Shai-hulud. Dune tops are like the crests of waves, and there are powerful sandstorms out there, creating extreme danger. On Arrakis, life is said to emanate from the Maker (Shai-hulud) in the desert-sea; similarly all life on Earth is believed to have evolved from our oceans. Frank Herbert drew parallels, used spectacular metaphors, and extrapolated present conditions into world systems that seem entirely alien at first blush. But close examination reveals they aren't so different from systems we know … and the book characters of his imagination are not so different from people familiar to us. -Each chapter of Dune begins with an epigraph excerpted from the fictional writings of the character Princess Irulan. In forms such as diary entries, historical commentary, biography, quotations and philosophy, these writings set tone and provide exposition, context and other details intended to enhance understanding of Herbert's complex fictional universe and themes. They act as foreshadowing and invite the reader to keep reading to close the gap between what the epigraph says and what is happening in the main narrative. The epigraphs also give the reader the feeling that the world they are reading about is epically distanced, since Irulan writes about an idealized image of Paul as if he had already passed into memory. Brian Herbert wrote: "Dad told me that you could follow any of the novel's layers as you read it, and then start the book all over again, focusing on an entirely different layer. At the end of the book, he intentionally left loose ends and said he did this to send the readers spinning out of the story with bits and pieces of it still clinging to them, so that they would want to go back and read it again." - - -=== Middle-Eastern and Islamic references === -Due to the similarities between some of Herbert's terms and ideas and actual words and concepts in the Arabic language, as well as the series' "Islamic undertones" and themes, a Middle-Eastern influence on Herbert's works has been noted repeatedly. In his descriptions of the Fremen culture and language, Herbert uses both authentic Arabic words and Arabic-sounding words. For example, one of the names for the sandworm, Shai-hulud, is derived from Arabic: شيء خلود, romanized: šayʾ ḫulūd, lit. 
'immortal thing' or Arabic: شيخ خلود, romanized: šayḫ ḫulūd, lit. 'old man of eternity'. The title of the Fremen housekeeper, the Shadout Mapes, is borrowed from the Arabic: شادوف‎, romanized: šādūf, the Egyptian term for a device used to raise water. In particular, words related to the messianic religion of the Fremen, first implanted by the Bene Gesserit, are taken from Arabic, including Muad'Dib (from Arabic: مؤدب, romanized: muʾaddib, lit. 'educator'), Usul (from Arabic: أصول, romanized: ʾuṣūl, lit. 'fundamental principles'), Shari-a (from Arabic: شريعة, romanized: šarīʿa, lit. 'sharia; path'), Shaitan (from Arabic: شيطان, romanized: šayṭān, lit. 'Shaitan; devil; fiend', and jinn (from Arabic: جن, romanized: ǧinn, lit. 'jinn; spirit; demon; mythical being'). It is likely Herbert relied on second-hand resources such as phrasebooks and desert adventure stories to find these Arabic words and phrases for the Fremen. They are meaningful and carefully chosen, and help create an "imagined desert culture that resonates with exotic sounds, enigmas, and pseudo-Islamic references" and has a distinctly Bedouin aesthetic.As a foreigner who adopts the ways of a desert-dwelling people and then leads them in a military capacity, Paul Atreides bears many similarities to the historical T. E. Lawrence. His 1962 biopic Lawrence of Arabia has also been identified as a potential influence. The Sabres of Paradise (1960) has also been identified as a potential influence upon Dune, with its depiction of Imam Shamil and the Islamic culture of the Caucasus inspiring some of the themes, characters, events and terminology of Dune.The environment of the desert planet Arrakis was primarily inspired by the environments of the Middle East. Similarly Arrakis as a bioregion is presented as a particular kind of political site. Herbert has made it resemble a desertified petrostate area. The Fremen people of Arrakis were influenced by the Bedouin tribes of Arabia, and the Mahdi prophecy originates from Islamic eschatology. Inspiration is also adopted from medieval historian Ibn Khaldun's cyclical history and his dynastic concept in North Africa, hinted at by Herbert's reference to Khaldun's book Kitāb al-ʿibar ("The Book of Lessons"). The fictionalized version of the "Kitab al-ibar" in Dune is a combination of a Fremen religious manual and a desert survival book. - - -==== Additional language and historic influences ==== -In addition to Arabic, Dune derives words and names from a variety of other languages, including Hebrew, Navajo, Latin, Dutch ("Landsraad"), Chakobsa, the Nahuatl language of the Aztecs, Greek, Persian, Sanskrit ("prana bindu", "prajna"), Russian, Turkish, Finnish, and Old English. Bene Gesserit is simply the Latin for "It will have been well fought", also carrying the sense of "It will have been well managed", which stands as a statement of the order's goal and as a pledge of faithfulness to that goal. Critics tend to miss the literal meaning of the phrase, some positing that the term is derived from the Latin meaning "it will have been well borne", which interpretation is not well supported by their doctrine in the story.Through the inspiration from The Sabres of Paradise, there are also allusions to the tsarist-era Russian nobility and Cossacks. Frank Herbert stated that bureaucracy that lasted long enough would become a hereditary nobility, and a significant theme behind the aristocratic families in Dune was "aristocratic bureaucracy" which he saw as analogous to the Soviet Union. 
- - -=== Environmentalism and ecology === -Dune has been called the "first planetary ecology novel on a grand scale". Herbert hoped it would be seen as an "environmental awareness handbook" and said the title was meant to "echo the sound of 'doom'". It was reviewed in the best selling countercultural Whole Earth Catalog in 1968 as a "rich re-readable fantasy with clear portrayal of the fierce environment it takes to cohere a community".After the publication of Silent Spring by Rachel Carson in 1962, science fiction writers began treating the subject of ecological change and its consequences. Dune responded in 1965 with its complex descriptions of Arrakis life, from giant sandworms (for whom water is deadly) to smaller, mouse-like life forms adapted to live with limited water. Dune was followed in its creation of complex and unique ecologies by other science fiction books such as A Door into Ocean (1986) and Red Mars (1992). Environmentalists have pointed out that Dune's popularity as a novel depicting a planet as a complex—almost living—thing, in combination with the first images of Earth from space being published in the same time period, strongly influenced environmental movements such as the establishment of the international Earth Day.While the genre of climate fiction was popularized in the 2010s in response to real global climate change, Dune as well as other early science fiction works from authors like J. G. Ballard (The Drowned World) and Kim Stanley Robinson (the Mars trilogy) have retroactively been considered pioneering examples of the genre. - - -=== Declining empires === -The Imperium in Dune contains features of various empires in Europe and the Near East, including the Roman Empire, Holy Roman Empire, and Ottoman Empire. Lorenzo DiTommaso compared Dune's portrayal of the downfall of a galactic empire to Edward Gibbon's Decline and Fall of the Roman Empire, which argues that Christianity allied with the profligacy of the Roman elite led to the fall of Ancient Rome. In "The Articulation of Imperial Decadence and Decline in Epic Science Fiction" (2007), DiTommaso outlines similarities between the two works by highlighting the excesses of the Emperor on his home planet of Kaitain and of the Baron Harkonnen in his palace. The Emperor loses his effectiveness as a ruler through an excess of ceremony and pomp. The hairdressers and attendants he brings with him to Arrakis are even referred to as "parasites". The Baron Harkonnen is similarly corrupt and materially indulgent. Gibbon's Decline and Fall partly blames the fall of Rome on the rise of Christianity. Gibbon claimed that this exotic import from a conquered province weakened the soldiers of Rome and left it open to attack. The Emperor's Sardaukar fighters are little match for the Fremen of Dune not only because of the Sardaukar's overconfidence and the fact that Jessica and Paul have trained the Fremen in their battle tactics, but because of the Fremen's capacity for self-sacrifice. 
The Fremen put the community before themselves in every instance, while the world outside wallows in luxury at the expense of others.The decline and long peace of the Empire sets the stage for revolution and renewal by genetic mixing of successful and unsuccessful groups through war, a process culminating in the Jihad led by Paul Atreides, described by Frank Herbert as depicting "war as a collective orgasm" (drawing on Norman Walter's 1950 The Sexual Cycle of Human Warfare), themes that would reappear in God Emperor of Dune's Scattering and Leto II's all-female Fish Speaker army. - - -=== Gender dynamics === -Gender dynamics are complex in Dune. Within the Fremen sietch communities, women have almost full equality. They carry weapons and travel in raiding parties with men, fighting when necessary alongside the men. They can take positions of leadership as a Sayyadina or as a Reverend Mother (if she can survive the ritual of ingesting the Water of Life.) Both of these sietch religious leaders are routinely consulted by the all-male Council and can have a decisive voice in all matters of sietch life, security and internal politics. They are also protected by the entire community. Due to the high mortality rate among their men, women outnumber men in most sietches. Polygamy is common, and sexual relationships are voluntary and consensual; as Stilgar says to Jessica, "women among us are not taken against their will." -In contrast, the Imperial aristocracy leaves young women of noble birth very little agency. Frequently trained by the Bene Gesserit, they are raised to eventually marry other aristocrats. Marriages between Major and Minor Houses are political tools to forge alliances or heal old feuds; women are given very little say in the matter. Many such marriages are quietly maneuvered by the Bene Gesserit to produce offspring with some genetic characteristics needed by the sisterhood's human-breeding program. In addition, such highly-placed sisters were in a position to subtly influence their husbands' actions in ways that could move the politics of the Imperium toward Bene Gesserit goals. -The gom jabbar test of humanity is administered by the female Bene Gesserit order but rarely to males. The Bene Gesserit have seemingly mastered the unconscious and can play on the unconscious weaknesses of others using the Voice, yet their breeding program seeks after a male Kwisatz Haderach. Their plan is to produce a male who can "possess complete racial memory, both male and female," and look into the black hole in the collective unconscious that they fear. A central theme of the book is the connection, in Jessica's son, of this female aspect with his male aspect. This aligns with concepts in Jungian psychology, which features conscious/unconscious and taking/giving roles associated with males and females, as well as the idea of the collective unconscious. Paul's approach to power consistently requires his upbringing under the matriarchal Bene Gesserit, who operate as a long-dominating shadow government behind all of the great houses and their marriages or divisions. He is trained by Jessica in the Bene Gesserit Way, which includes prana-bindu training in nerve and muscle control and precise perception. Paul also receives Mentat training, thus helping prepare him to be a type of androgynous Kwisatz Haderach, a male Reverend Mother.In a Bene Gesserit test early in the book, it is implied that people are generally "inhuman" in that they irrationally place desire over self-interest and reason. 
This applies Herbert's philosophy that humans are not created equal, while equal justice and equal opportunity are higher ideals than mental, physical, or moral equality. - - -=== Heroism === -I am showing you the superhero syndrome and your own participation in it. -Throughout Paul's rise to superhuman status, he follows a plotline common to many stories describing the birth of a hero. He has unfortunate circumstances forced onto him. After a long period of hardship and exile, he confronts and defeats the source of evil in his tale. As such, Dune is representative of a general trend beginning in 1960s American science fiction in that it features a character who attains godlike status through scientific means. Eventually, Paul Atreides gains a level of omniscience which allows him to take over the planet and the galaxy, and causes the Fremen of Arrakis to worship him like a god. Author Frank Herbert said in 1979, "The bottom line of the Dune trilogy is: beware of heroes. Much better [to] rely on your own judgment, and your own mistakes." He wrote in 1985, "Dune was aimed at this whole idea of the infallible leader because my view of history says that mistakes made by a leader (or made in a leader's name) are amplified by the numbers who follow without question."Juan A. Prieto-Pablos says Herbert achieves a new typology with Paul's superpowers, differentiating the heroes of Dune from earlier heroes such as Superman, van Vogt's Gilbert Gosseyn and Henry Kuttner's telepaths. Unlike previous superheroes who acquire their powers suddenly and accidentally, Paul's are the result of "painful and slow personal progress." And unlike other superheroes of the 1960s—who are the exception among ordinary people in their respective worlds—Herbert's characters grow their powers through "the application of mystical philosophies and techniques." For Herbert, the ordinary person can develop incredible fighting skills (Fremen, Ginaz swordsmen and Sardaukar) or mental abilities (Bene Gesserit, Mentats, Spacing Guild Navigators). - - -=== Zen and religion === - -Early in his newspaper career, Herbert was introduced to Zen by two Jungian psychologists, Ralph and Irene Slattery, who "gave a crucial boost to his thinking". Zen teachings ultimately had "a profound and continuing influence on [Herbert's] work". Throughout the Dune series and particularly in Dune, Herbert employs concepts and forms borrowed from Zen Buddhism. The Fremen are referred to as Zensunni adherents, and many of Herbert's epigraphs are Zen-spirited. In "Dune Genesis", Frank Herbert wrote: - -What especially pleases me is to see the interwoven themes, the fugue like relationships of images that exactly replay the way Dune took shape. As in an Escher lithograph, I involved myself with recurrent themes that turn into paradox. The central paradox concerns the human vision of time. What about Paul's gift of prescience - the Presbyterian fixation? For the Delphic Oracle to perform, it must tangle itself in a web of predestination. Yet predestination negates surprises and, in fact, sets up a mathematically enclosed universe whose limits are always inconsistent, always encountering the unprovable. It's like a koan, a Zen mind breaker. It's like the Cretan Epimenides saying, "All Cretans are liars." 
-Brian Herbert called the Dune universe "a spiritual melting pot", noting that his father incorporated elements of a variety of religions, including Buddhism, Sufi mysticism and other Islamic belief systems, Catholicism, Protestantism, Judaism, and Hinduism. He added that Frank Herbert's fictional future in which "religious beliefs have combined into interesting forms" represents the author's solution to eliminating arguments between religions, each of which claimed to have "the one and only revelation." - - -=== Asimov's Foundation === -Tim O'Reilly suggests that Herbert also wrote Dune as a counterpoint to Isaac Asimov's Foundation series. In his monograph on Frank Herbert, O'Reilly wrote that "Dune is clearly a commentary on the Foundation trilogy. Herbert has taken a look at the same imaginative situation that provoked Asimov's classic—the decay of a galactic empire—and restated it in a way that draws on different assumptions and suggests radically different conclusions. The twist he has introduced into Dune is that the Mule, not the Foundation, is his hero." According to O'Reilly, Herbert bases the Bene Gesserit on the scientific shamans of the Foundation, though they use biological rather than statistical science. In contrast to the Foundation series and its praise of science and rationality, Dune proposes that the unconscious and unexpected are actually what are needed for humanity.Both Herbert and Asimov explore the implications of prescience (i.e., visions of the future) both psychologically and socially. The Foundation series deploys a broadly determinist approach to prescient vision rooted in mathematical reasoning on a macroscopic social level. Dune, by contrast, invents a biologically rooted power of prescience that becomes determinist when the user actively relies on it to navigate past an undefined threshold of detail. Herbert’s eugenically produced and spice-enhanced prescience is also personalized to individual actors whose roles in later books constrain each other's visions, rendering the future more or less mutable as time progresses. In what might be a comment on Foundation, Herbert's most powerfully prescient being in God Emperor of Dune laments the boredom engendered by prescience, and values surprises, especially regarding one's death, as a psychological necessity.However, both works contain a similar theme of the restoration of civilization and seem to make the fundamental assumption that "political maneuvering, the need to control material resources, and friendship or mating bonds will be fundamentally the same in the future as they are now." - - -== Critical reception == -Dune tied with Roger Zelazny's This Immortal for the Hugo Award in 1966 and won the inaugural Nebula Award for Best Novel. Reviews of the novel have been largely positive, and Dune is considered by some critics to be the best science fiction book ever written. The novel has been translated into dozens of languages, and has sold almost 20 million copies. Dune has been regularly cited as one of the world's best-selling science fiction novels.Arthur C. Clarke described Dune as "unique" and wrote, "I know nothing comparable to it except The Lord of the Rings." Robert A. Heinlein described the novel as "powerful, convincing, and most ingenious." It was described as "one of the monuments of modern science fiction" by the Chicago Tribune, and P. Schuyler Miller called Dune "one of the landmarks of modern science fiction ... an amazing feat of creation." 
The Washington Post described it as "a portrayal of an alien society more complete and deeply detailed than any other author in the field has managed ... a story absorbing equally for its action and philosophical vistas ... An astonishing science fiction phenomenon." Algis Budrys praised Dune for the vividness of its imagined setting, saying "The time lives. It breathes, it speaks, and Herbert has smelt it in his nostrils". He found that the novel, however, "turns flat and tails off at the end. ... [T]ruly effective villains simply simper and melt; fierce men and cunning statesmen and seeresses all bend before this new Messiah". Budrys faulted in particular Herbert's decision to kill Paul's infant son offstage, with no apparent emotional impact, saying "you cannot be so busy saving a world that you cannot hear an infant shriek". After criticizing unrealistic science fiction, Carl Sagan in 1978 listed Dune as among stories "that are so tautly constructed, so rich in the accommodating details of an unfamiliar society that they sweep me along before I have even a chance to be critical".The Louisville Times wrote, "Herbert's creation of this universe, with its intricate development and analysis of ecology, religion, politics, and philosophy, remains one of the supreme and seminal achievements in science fiction." Writing for The New Yorker, Jon Michaud praised Herbert's "clever authorial decision" to exclude robots and computers ("two staples of the genre") from his fictional universe, but suggested that this may be one explanation why Dune lacks "true fandom among science-fiction fans" to the extent that it "has not penetrated popular culture in the way that The Lord of the Rings and Star Wars have". Tamara I. Hladik wrote that the story "crafts a universe where lesser novels promulgate excuses for sequels. All its rich elements are in balance and plausible—not the patchwork confederacy of made-up languages, contrived customs, and meaningless histories that are the hallmark of so many other, lesser novels."On November 5, 2019, the BBC News listed Dune on its list of the 100 most influential novels.J. R. R. Tolkien refused to review Dune, on the grounds that he disliked it "with some intensity" and thus felt it would be unfair to Herbert, another working author, if he gave an honest review of the book. - - -== First edition prints and manuscripts == -The first edition of Dune is one of the most valuable in science fiction book collecting. Copies have been sold for more than $10,000 at auction. The Chilton first edition of the novel is 9+1⁄4 inches (235 mm) tall, with bluish green boards and a price of $5.95 on the dust jacket, and notes Toronto as the Canadian publisher on the copyright page. Up to this point, Chilton had been publishing only automobile repair manuals.California State University, Fullerton's Pollak Library has several of Herbert's draft manuscripts of Dune and other works, with the author's notes, in their Frank Herbert Archives. - - -== Sequels and prequels == - -After Dune proved to be a critical and financial success for Herbert, he was able to devote himself full time to writing additional novels in the series. He had already drafted parts of the second and third while writing Dune. The series included Dune Messiah (1969), Children of Dune (1976), God Emperor of Dune (1981), Heretics of Dune (1984), and Chapterhouse: Dune (1985), each sequentially continuing on the narrative from Dune. 
Herbert died on February 11, 1986.Herbert's son, Brian Herbert, had found several thousand pages of notes left by his father that outlined ideas for other narratives related to Dune. Brian Herbert enlisted author Kevin J. Anderson to help build out prequel novels to the events of Dune. Brian Herbert's and Anderson's Dune prequels first started publication in 1999, and have led to additional stories that take place between those of Frank Herbert's books. The notes for what would have been Dune 7 also enabled them to publish Hunters of Dune (2006) and Sandworms of Dune (2007), sequels to Frank Herbert's final novel Chapterhouse: Dune, which complete the chronological progression of his original series, and wrap up storylines that began in Heretics of Dune. - - -== Adaptations == - -Dune has been considered as an "unfilmable" and "uncontainable" work to adapt from novel to film or other visual medium. Described by Wired, "It has four appendices and a glossary of its own gibberish, and its action takes place on two planets, one of which is a desert overrun by worms the size of airport runways. Lots of important people die or try to kill each other, and they're all tethered to about eight entangled subplots." There have been several attempts to achieve this difficult conversion with various degrees of success. - - -=== Early stalled attempts === -In 1971, the production company Apjac International (APJ) (headed by Arthur P. Jacobs) optioned the rights to film Dune. As Jacobs was busy with other projects, such as the sequel to Planet of the Apes, Dune was delayed for another year. Jacobs' first choice for director was David Lean, but he turned down the offer. Charles Jarrott was also considered to direct. Work was also under way on a script while the hunt for a director continued. Initially, the first treatment had been handled by Robert Greenhut, the producer who had lobbied Jacobs to make the movie in the first place, but subsequently Rospo Pallenberg was approached to write the script, with shooting scheduled to begin in 1974. However, Jacobs died in 1973. -In December 1974, a French consortium led by Jean-Paul Gibon purchased the film rights from APJ, with Alejandro Jodorowsky set to direct. In 1975, Jodorowsky planned to film the story as a 14-hour feature, set to star his own son Brontis Jodorowsky in the lead role of Paul Atreides, Salvador Dalí as Shaddam IV, Padishah Emperor, Amanda Lear as Princess Irulan, Orson Welles as Baron Vladimir Harkonnen, Gloria Swanson as Reverend Mother Gaius Helen Mohiam, David Carradine as Duke Leto Atreides, Geraldine Chaplin as Lady Jessica, Alain Delon as Duncan Idaho, Hervé Villechaize as Gurney Halleck, Udo Kier as Piter De Vries, and Mick Jagger as Feyd-Rautha. It was at first proposed to score the film with original music by Karlheinz Stockhausen, Henry Cow, and Magma; later on, the soundtrack was to be provided by Pink Floyd. Jodorowsky set up a pre-production unit in Paris consisting of Chris Foss, a British artist who designed covers for science fiction periodicals, Jean Giraud (Moebius), a French illustrator who created and also wrote and drew for Metal Hurlant magazine, and H. R. Giger. Moebius began designing creatures and characters for the film, while Foss was brought in to design the film's space ships and hardware. Giger began designing the Harkonnen Castle based on Moebius's storyboards. Dan O'Bannon was to head the special effects department.Dalí was cast as the Emperor. 
Dalí later demanded to be paid $100,000 per hour; Jodorowsky agreed, but tailored Dalí's part to be filmed in one hour, drafting plans for other scenes of the emperor to use a mechanical mannequin as substitute for Dalí. According to Giger, Dalí was "later invited to leave the film because of his pro-Franco statements". Just as the storyboards, designs, and script were finished, the financial backing dried up. Frank Herbert traveled to Europe in 1976 to find that $2 million of the $9.5 million budget had already been spent in pre-production, and that Jodorowsky's script would result in a 14-hour movie ("It was the size of a phone book", Herbert later recalled). Jodorowsky took creative liberties with the source material, but Herbert said that he and Jodorowsky had an amicable relationship. Jodorowsky said in 1985 that he found the Dune story mythical and had intended to recreate it rather than adapt the novel; though he had an "enthusiastic admiration" for Herbert, Jodorowsky said he had done everything possible to distance the author and his input from the project. Although Jodorowsky was embittered by the experience, he said the Dune project changed his life, and some of the ideas were used in his and Moebius's The Incal. O'Bannon entered a psychiatric hospital after the production failed, then worked on 13 scripts, the last of which became Alien. A 2013 documentary, Jodorowsky's Dune, was made about Jodorowsky's failed attempt at an adaptation. -In 1976, Dino De Laurentiis acquired the rights from Gibon's consortium. De Laurentiis commissioned Herbert to write a new screenplay in 1978; the script Herbert turned in was 175 pages long, the equivalent of nearly three hours of screen time. De Laurentiis then hired director Ridley Scott in 1979, with Rudy Wurlitzer writing the screenplay and H. R. Giger retained from the Jodorowsky production; Scott and Giger had also just worked together on the film Alien, after O'Bannon recommended the artist. Scott intended to split the novel into two movies. He worked on three drafts of the script, using The Battle of Algiers as a point of reference, before moving on to direct another science fiction film, Blade Runner (1982). As he recalls, the pre-production process was slow, and finishing the project would have been even more time-intensive: - -But after seven months I dropped out of Dune, by then Rudy Wurlitzer had come up with a first-draft script which I felt was a decent distillation of Frank Herbert's. But I also realised Dune was going to take a lot more work—at least two and a half years' worth. And I didn't have the heart to attack that because my older brother Frank unexpectedly died of cancer while I was prepping the De Laurentiis picture. Frankly, that freaked me out. So I went to Dino and told him the Dune script was his. -—From Ridley Scott: The Making of his Movies by Paul M. Sammon - - -=== 1984 film by David Lynch === - -In 1981, the nine-year film rights were set to expire. De Laurentiis re-negotiated the rights from the author, adding to them the rights to the Dune sequels (written and unwritten). After seeing The Elephant Man, De Laurentiis' daughter Raffaella decided that David Lynch should direct the movie. Around that time Lynch received several other directing offers, including Return of the Jedi. He agreed to direct Dune and write the screenplay even though he had not read the book, was not familiar with the story, or even been interested in science fiction. 
Lynch worked on the script for six months with Eric Bergren and Christopher De Vore. The team produced two drafts of the script before splitting up over creative differences. Lynch would subsequently work on five more drafts. Production was troubled by problems at the Mexican studio, which hampered the film's timeline. Lynch ended up producing a nearly three-hour-long film, but at the demand of Universal Pictures, the film's distributor, he cut it back to about two hours, hastily filming additional scenes to make up for some of the cut footage. This first film of Dune, directed by Lynch, was released in 1984, nearly 20 years after the book's publication. Though Herbert said the book's depth and symbolism seemed to intimidate many filmmakers, he was pleased with the film, saying that "They've got it. It begins as Dune does. And I hear my dialogue all the way through. There are some interpretations and liberties, but you're gonna come out knowing you've seen Dune." Reviews of the film were negative, saying that it was incomprehensible to those unfamiliar with the book, and that fans would be disappointed by the way it strayed from the book's plot. Upon release for television and other forms of home media, Universal opted to reintroduce much of the footage that Lynch had cut, creating a version over three hours long with extensive monologue exposition. Lynch was extremely displeased with this move, demanded that Universal replace his name on these cuts with the pseudonym "Alan Smithee", and has generally distanced himself from the film since.
-
-
-=== 2000 miniseries by John Harrison ===
-
-In 2000, John Harrison adapted the novel into Frank Herbert's Dune, a miniseries which premiered on the American Sci-Fi Channel. As of 2004, the miniseries was one of the three highest-rated programs broadcast on the Sci-Fi Channel.
-
-
-=== Further film attempts ===
-In 2008, Paramount Pictures announced that they would produce a new film based on the book, with Peter Berg attached to direct. Producer Kevin Misher, who spent a year securing the rights from the Herbert estate, was to be joined by Richard Rubinstein and John Harrison (of both Sci-Fi Channel miniseries) as well as Sarah Aubrey and Mike Messina. The producers stated that they were going for a "faithful adaptation" of the novel, and considered "its theme of finite ecological resources particularly timely." Science fiction author Kevin J. Anderson and Frank Herbert's son Brian Herbert, who had together written multiple Dune sequels and prequels since 1999, were attached to the project as technical advisors. In October 2009, Berg dropped out of the project, later saying that it "for a variety of reasons wasn't the right thing" for him. Subsequently, with a script draft by Joshua Zetumer, Paramount reportedly sought a new director who could do the film for under $175 million. In 2010, Pierre Morel was signed on to direct, with screenwriter Chase Palmer incorporating Morel's vision of the project into Zetumer's original draft. By November 2010, Morel left the project. Paramount finally dropped plans for a remake in March 2011.
-
-
-=== Films by Denis Villeneuve ===
-
-In November 2016, Legendary Entertainment acquired the film and TV rights for Dune. Variety reported in December 2016 that Denis Villeneuve was in negotiations to direct the project, which was confirmed in February 2017. In April 2017, Legendary announced that Eric Roth would write the screenplay.
Villeneuve explained in March 2018 that his adaptation will be split into two films, with the first installment scheduled to begin production in 2019. Casting includes Timothée Chalamet as Paul Atreides, Dave Bautista as Rabban, Stellan Skarsgård as Baron Harkonnen, Rebecca Ferguson as Lady Jessica, Charlotte Rampling as Reverend Mother Mohiam, Oscar Isaac as Duke Leto Atreides, Zendaya as Chani, Javier Bardem as Stilgar, Josh Brolin as Gurney Halleck, Jason Momoa as Duncan Idaho, David Dastmalchian as Piter De Vries, Chang Chen as Dr. Yueh, and Stephen Henderson as Thufir Hawat. Warner Bros. Pictures distributed the film, which had its initial premiere on September 3, 2021, at the Venice Film Festival, and wide release in both theaters and streaming on HBO Max on October 21, 2021, as part of Warner Bros.'s approach to handling the impact of the COVID-19 pandemic on the film industry. The film received "generally favorable reviews" on Metacritic. It has gone on to win multiple awards and was named by the National Board of Review as one of the 10 best films of 2021, as well as the American Film Institute in their annual top 10 list. The film went on to be nominated for ten Academy Awards, winning six, the most wins of the night for any film in contention.A sequel, Dune: Part Two, was scheduled for release on November 3, 2023, but will now instead be released on March 15th 2024 amid the 2023 SAG-AFTRA strike. - - -=== Audiobooks === -In 1993, Recorded Books Inc. released a 20-disc audiobook narrated by George Guidall. In 2007, Audio Renaissance released an audio book narrated by Simon Vance with some parts performed by Scott Brick, Orlagh Cassidy, Euan Morton, and other performers. - - -== Cultural influence == -Dune has been widely influential, inspiring numerous novels, music, films, television, games, and comic books. It is considered one of the greatest and most influential science fiction novels of all time, with numerous modern science fiction works such as Star Wars owing their existence to Dune. Dune has also been referenced in numerous other works of popular culture, including Star Trek, Chronicles of Riddick, The Kingkiller Chronicle and Futurama. Dune was cited as a source of inspiration for Hayao Miyazaki's anime film Nausicaä of the Valley of the Wind (1984) for its post-apocalyptic world.Dune was parodied in 1984's National Lampoon's Doon by Ellis Weiner, which William F. Touponce called "something of a tribute to Herbert's success on college campuses", noting that "the only other book to have been so honored is Tolkien's The Lord of the Rings," which was parodied by The Harvard Lampoon in 1969. - - -=== Music === -In 1978, French electronic musician Richard Pinhas released the nine-track Dune-inspired album Chronolyse, which includes the seven-part Variations sur le thème des Bene Gesserit. -In 1979, German electronic music pioneer Klaus Schulze released an LP titled Dune featuring motifs and lyrics inspired by the novel. -A similar musical project, Visions of Dune, was released also in 1979 by Zed (a pseudonym of French electronic musician Bernard Sjazner). -Heavy metal band Iron Maiden wrote the song "To Tame a Land" based on the Dune story. It appears as the closing track to their 1983 album Piece of Mind. The original working title of the song was "Dune"; however, the band was denied permission to use it, with Frank Herbert's agents stating "Frank Herbert doesn't like rock bands, particularly heavy rock bands, and especially bands like Iron Maiden". 
-Dune inspired the German happy hardcore band Dune, who have released several albums with space travel-themed songs.
-The progressive hardcore band Shai Hulud took their name from Dune.
-"Traveller in Time", from the 1991 Blind Guardian album Tales from the Twilight World, is based mostly on Paul Atreides' visions of future and past.
-The title of the 1993 Fear Factory album Fear Is the Mindkiller is a quote from the "litany against fear".
-The song "Near Fantastica", from the Matthew Good album Avalanche, makes reference to the "litany against fear", repeating "can't feel fear, fear's the mind killer" through a section of the song.
-In the Fatboy Slim song "Weapon of Choice", the line "If you walk without rhythm/You won't attract the worm" is a near quotation from the sections of the novel in which Stilgar teaches Paul to ride sandworms.
-Dune also inspired the 1999 album The 2nd Moon by the German death metal band Golem, which is a concept album about the series.
-Dune influenced Thirty Seconds to Mars on their self-titled debut album.
-The Youngblood Brass Band's song "Is an Elegy" on Center:Level:Roar references "Muad'Dib", "Arrakis" and other elements from the novel.
-The debut album of Canadian musician Grimes, called Geidi Primes, is a concept album based on Dune.
-Japanese singer Kenshi Yonezu released a song titled "Dune", also known as "Sand Planet". The song was released in 2017 and was created using the voice synthesizer Hatsune Miku for her 10th anniversary.
-"Fear is the Mind Killer", a song released in 2018 by Zheani (an Australian rapper), uses a quote from Dune.
-"Litany Against Fear" is a spoken track released in 2018 on the album Eight by Zheani. She recites an extract from Dune.
-Sleep's 2018 album The Sciences features a song, "Giza Butler", that references several aspects of Dune.
-Tool's 2019 album Fear Inoculum has a song entitled "Litanie contre la peur (Litany against fear)".
-"Rare to Wake", from Shannon Lay's album Geist (2019), is inspired by Dune.
-Heavy metal band Diamond Head based the song "The Sleeper" and its prelude, both off the album The Coffin Train, on the series.
-
-
-=== Games ===
-
-There have been a number of games based on the book, starting with the strategy–adventure game Dune (1992). The most important game adaptation is Dune II (1992), which established the conventions of modern real-time strategy games and is considered to be among the most influential video games of all time. The online game Lost Souls includes Dune-derived elements, including sandworms and melange—addiction to which can produce psychic talents. The 2016 game Enter the Gungeon features the spice melange as a random item which gives the player progressively stronger abilities and penalties with repeated uses, mirroring the long-term effects melange has on users. Rick Priestley cites Dune as a major influence on his 1987 wargame, Warhammer 40,000. In 2023, Funcom announced Dune: Awakening, an upcoming massively multiplayer online game set in the universe of Dune.
-
-
-=== Space exploration ===
-The Apollo 15 astronauts named a small crater on Earth's Moon after the novel during the 1971 mission, and the name was formally adopted by the International Astronomical Union in 1973. Since 2009, the names of planets from the Dune novels have been adopted for the real-world nomenclature of plains and other features on Saturn's moon Titan, like Arrakis Planitia.
- - -== See also == -Soft science fiction – Sub-genre of science fiction emphasizing "soft" sciences or human emotions -Hydraulic empire – Government by control of access to water - - -== References == - - -== Further reading == -Clute, John; Nicholls, Peter (1995). The Encyclopedia of Science Fiction. New York: St. Martin's Press. p. 1386. ISBN 978-0-312-13486-0. -Clute, John; Nicholls, Peter (1995). The Multimedia Encyclopedia of Science Fiction (CD-ROM). Danbury, CT: Grolier. ISBN 978-0-7172-3999-3. -Huddleston, Tom. The Worlds of Dune: The Places and Cultures That Inspired Frank Herbert. Minneapolis: Quarto Publishing Group UK, 2023. -Jakubowski, Maxim; Edwards, Malcolm (1983). The Complete Book of Science Fiction and Fantasy Lists. St Albans, Herts, UK: Granada Publishing Ltd. p. 350. ISBN 978-0-586-05678-3. -Kennedy, Kara. Frank Herbert's Dune: A Critical Companion. Cham, Switzerland: Palgrave Macmillan, 2022. -Kennedy, Kara. Women's Agency in the Dune Universe: Tracing Women's Liberation through Science Fiction. Cham, Switzerland: Palgrave Macmillan, 2020. -Nardi, Dominic J. & N. Trevor Brierly, eds. Discovering Dune: Essays on Frank Herbert's Epic Saga. Jefferson, NC: McFarland & Co., 2022. -Nicholas, Jeffery, ed. Dune and Philosophy: Weirding Way of Mentat. Chicago: Open Court, 2011. -Nicholls, Peter (1979). The Encyclopedia of Science Fiction. St Albans, Herts, UK: Granada Publishing Ltd. p. 672. ISBN 978-0-586-05380-5. -O’Reilly, Timothy. Frank Herbert. New York: Frederick Ungar, 1981. -Pringle, David (1990). The Ultimate Guide to Science Fiction. London: Grafton Books Ltd. p. 407. ISBN 978-0-246-13635-0. -Tuck, Donald H. (1974). The Encyclopedia of Science Fiction and Fantasy. Chicago: Advent. p. 136. ISBN 978-0-911682-20-5. -Williams, Kevin C. The Wisdom of the Sand: Philosophy and Frank Herbert's Dune. New York: Hampton Press, 2013. - - -== External links == - -Official website for Dune and its sequels -Dune title listing at the Internet Speculative Fiction Database -Turner, Paul (October 1973). "Vertex Interviews Frank Herbert" (Interview). Vol. 1, no. 4. Archived from the original on May 19, 2009. -Spark Notes: Dune, detailed study guide -DuneQuotes.com – Collection of quotes from the Dune series -Dune by Frank Herbert, reviewed by Ted Gioia (Conceptual Fiction) -"Frank Herbert Biography and Bibliography at LitWeb.net". www.litweb.net. Archived from the original on April 2, 2009. Retrieved January 2, 2009. -Works of Frank Herbert at Curlie -Timberg, Scott (April 18, 2010). "Frank Herbert's Dune holds timely – and timeless – appeal". Los Angeles Times. Archived from the original on December 3, 2013. Retrieved November 27, 2013. -Walton, Jo (January 12, 2011). "In league with the future: Frank Herbert's Dune (Review)". Tor.com. Retrieved November 27, 2013. -Leonard, Andrew (June 4, 2015). "To Save California, Read Dune". Nautilus. Archived from the original on November 4, 2017. Retrieved June 15, 2015. 
-Dune by Frank Herbert – Foreshadowing & Dedication at Fact Behind Fiction -Frank Herbert by Tim O'Reilly -DuneScholar.com – Collection of scholarly essays \ No newline at end of file diff --git a/templates/neo4j-vector-memory/ingest.py b/templates/neo4j-vector-memory/ingest.py deleted file mode 100644 index fcdb302135f..00000000000 --- a/templates/neo4j-vector-memory/ingest.py +++ /dev/null @@ -1,23 +0,0 @@ -from pathlib import Path - -from langchain_community.document_loaders import TextLoader -from langchain_community.vectorstores import Neo4jVector -from langchain_openai import OpenAIEmbeddings -from langchain_text_splitters import TokenTextSplitter - -txt_path = Path(__file__).parent / "dune.txt" - -# Load the text file -loader = TextLoader(str(txt_path)) -raw_documents = loader.load() - -# Define chunking strategy -splitter = TokenTextSplitter(chunk_size=512, chunk_overlap=24) -documents = splitter.split_documents(raw_documents) - -# Calculate embedding values and store them in the graph -Neo4jVector.from_documents( - documents, - OpenAIEmbeddings(), - index_name="dune", -) diff --git a/templates/neo4j-vector-memory/main.py b/templates/neo4j-vector-memory/main.py deleted file mode 100644 index 732f0954d59..00000000000 --- a/templates/neo4j-vector-memory/main.py +++ /dev/null @@ -1,17 +0,0 @@ -from neo4j_vector_memory.chain import chain - -if __name__ == "__main__": - user_id = "user_id_1" - session_id = "session_id_1" - original_query = "What is the plot of the Dune?" - print( - chain.invoke( - {"question": original_query, "user_id": user_id, "session_id": session_id} - ) - ) - follow_up_query = "Tell me more about Leto" - print( - chain.invoke( - {"question": follow_up_query, "user_id": user_id, "session_id": session_id} - ) - ) diff --git a/templates/neo4j-vector-memory/neo4j_vector_memory/__init__.py b/templates/neo4j-vector-memory/neo4j_vector_memory/__init__.py deleted file mode 100644 index c3d4ad0d4fb..00000000000 --- a/templates/neo4j-vector-memory/neo4j_vector_memory/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neo4j_vector_memory.chain import chain - -__all__ = ["chain"] diff --git a/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py b/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py deleted file mode 100644 index 815fb558abc..00000000000 --- a/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py +++ /dev/null @@ -1,72 +0,0 @@ -from operator import itemgetter - -from langchain_community.vectorstores import Neo4jVector -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ( - ChatPromptTemplate, - MessagesPlaceholder, - PromptTemplate, -) -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough -from langchain_openai import ChatOpenAI, OpenAIEmbeddings - -from neo4j_vector_memory.history import get_history, save_history - -# Define vector retrieval -retrieval_query = "RETURN node.text AS text, score, {id:elementId(node)} AS metadata" -vectorstore = Neo4jVector.from_existing_index( - OpenAIEmbeddings(), index_name="dune", retrieval_query=retrieval_query -) -retriever = vectorstore.as_retriever() - -# Define LLM -llm = ChatOpenAI() - - -# Condense a chat history and follow-up question into a standalone question -condense_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. -Make sure to include all the relevant information. 
-Chat History: -{chat_history} -Follow Up Input: {question} -Standalone question:""" # noqa: E501 -CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(condense_template) - -# RAG answer synthesis prompt -answer_template = """Answer the question based only on the following context: - -{context} -""" - -ANSWER_PROMPT = ChatPromptTemplate.from_messages( - [ - ("system", answer_template), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{question}"), - ] -) - -chain = ( - RunnablePassthrough.assign(chat_history=get_history) - | RunnablePassthrough.assign( - rephrased_question=CONDENSE_QUESTION_PROMPT | llm | StrOutputParser() - ) - | RunnablePassthrough.assign( - context=itemgetter("rephrased_question") | retriever, - ) - | RunnablePassthrough.assign( - output=ANSWER_PROMPT | llm | StrOutputParser(), - ) - | save_history -) - - -# Add typing for input -class Question(BaseModel): - question: str - user_id: str - session_id: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/neo4j-vector-memory/neo4j_vector_memory/history.py b/templates/neo4j-vector-memory/neo4j_vector_memory/history.py deleted file mode 100644 index a88fafc2c7a..00000000000 --- a/templates/neo4j-vector-memory/neo4j_vector_memory/history.py +++ /dev/null @@ -1,79 +0,0 @@ -from typing import Any, Dict, List, Union - -from langchain.memory import ChatMessageHistory -from langchain_community.graphs import Neo4jGraph -from langchain_core.messages import AIMessage, HumanMessage - -graph = Neo4jGraph() - - -def convert_messages(input: List[Dict[str, Any]]) -> ChatMessageHistory: - history = ChatMessageHistory() - for item in input: - history.add_user_message(item["result"]["question"]) - history.add_ai_message(item["result"]["answer"]) - return history - - -def get_history(input: Dict[str, Any]) -> List[Union[HumanMessage, AIMessage]]: - # Lookback conversation window - window = 3 - data = graph.query( - """ - MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}), - (s)-[:LAST_MESSAGE]->(last_message) - MATCH p=(last_message)<-[:NEXT*0..""" - + str(window) - + """]-() - WITH p, length(p) AS length - ORDER BY length DESC LIMIT 1 - UNWIND reverse(nodes(p)) AS node - MATCH (node)-[:HAS_ANSWER]->(answer) - RETURN {question:node.text, answer:answer.text} AS result - """, - params=input, - ) - history = convert_messages(data) - return history.messages - - -def save_history(input: Dict[str, Any]) -> str: - input["context"] = [el.metadata["id"] for el in input["context"]] - has_history = bool(input.pop("chat_history")) - - # store history to database - if has_history: - graph.query( - """ -MATCH (u:User {id: $user_id})-[:HAS_SESSION]->(s:Session{id: $session_id}), - (s)-[l:LAST_MESSAGE]->(last_message) -CREATE (last_message)-[:NEXT]->(q:Question - {text:$question, rephrased:$rephrased_question, date:datetime()}), - (q)-[:HAS_ANSWER]->(:Answer {text:$output}), - (s)-[:LAST_MESSAGE]->(q) -DELETE l -WITH q -UNWIND $context AS c -MATCH (n) WHERE elementId(n) = c -MERGE (q)-[:RETRIEVED]->(n) -""", - params=input, - ) - - else: - graph.query( - """MERGE (u:User {id: $user_id}) -CREATE (u)-[:HAS_SESSION]->(s1:Session {id:$session_id}), - (s1)-[:LAST_MESSAGE]->(q:Question - {text:$question, rephrased:$rephrased_question, date:datetime()}), - (q)-[:HAS_ANSWER]->(:Answer {text:$output}) -WITH q -UNWIND $context AS c -MATCH (n) WHERE elementId(n) = c -MERGE (q)-[:RETRIEVED]->(n) -""", - params=input, - ) - - # Return LLM response to the chain - return input["output"] diff --git 
a/templates/neo4j-vector-memory/pyproject.toml b/templates/neo4j-vector-memory/pyproject.toml deleted file mode 100644 index a9aa8c2f6c7..00000000000 --- a/templates/neo4j-vector-memory/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[tool.poetry] -name = "neo4j-vector-memory" -version = "0.1.0" -description = "Store conversational flows in a Neo4j graph database" -authors = [ - "Tomaz Bratanic ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -tiktoken = "^0.5.1" -openai = "<2" -neo4j = "^5.14.0" -langchain-text-splitters = ">=0.0.1,<0.1" -langchain-openai = "^0.1.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "neo4j_vector_memory" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Neo4j" -integrations = ["Neo4j", "OpenAI"] -tags = ["graph-database", "conversation"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/neo4j-vector-memory/tests/__init__.py b/templates/neo4j-vector-memory/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/nvidia-rag-canonical/LICENSE b/templates/nvidia-rag-canonical/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/nvidia-rag-canonical/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/nvidia-rag-canonical/README.md b/templates/nvidia-rag-canonical/README.md deleted file mode 100644 index 840ae5c0bbf..00000000000 --- a/templates/nvidia-rag-canonical/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# Nvidia, Milvus - canonical RAG - -This template performs RAG using `Milvus` Vector Store -and `NVIDIA` Models (Embedding and Chat). - -## Environment Setup - -You should export your NVIDIA API Key as an environment variable. -If you do not have an NVIDIA API Key, you can create one by following these steps: -1. Create a free account with the [NVIDIA GPU Cloud](https://catalog.ngc.nvidia.com/) service, which hosts AI solution catalogs, containers, models, etc. -2. Navigate to `Catalog > AI Foundation Models > (Model with API endpoint)`. -3. Select the `API` option and click `Generate Key`. -4. Save the generated key as `NVIDIA_API_KEY`. From there, you should have access to the endpoints. - -```shell -export NVIDIA_API_KEY=... 
-``` - -For instructions on hosting the Milvus Vector Store, refer to the section at the bottom. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To use the NVIDIA models, install the Langchain NVIDIA AI Endpoints package: -```shell -pip install -U langchain_nvidia_aiplay -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package nvidia-rag-canonical -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add nvidia-rag-canonical -``` - -And add the following code to your `server.py` file: -```python -from nvidia_rag_canonical import chain as nvidia_rag_canonical_chain - -add_routes(app, nvidia_rag_canonical_chain, path="/nvidia-rag-canonical") -``` - -If you want to set up an ingestion pipeline, you can add the following code to your `server.py` file: -```python -from nvidia_rag_canonical import ingest as nvidia_rag_ingest - -add_routes(app, nvidia_rag_ingest, path="/nvidia-rag-ingest") -``` -Note that for files ingested by the ingestion API, the server will need to be restarted for the newly ingested files to be accessible by the retriever. - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you DO NOT already have a Milvus Vector Store you want to connect to, see `Milvus Setup` section below before proceeding. - -If you DO have a Milvus Vector Store you want to connect to, edit the connection details in `nvidia_rag_canonical/chain.py` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/nvidia-rag-canonical/playground](http://127.0.0.1:8000/nvidia-rag-canonical/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/nvidia-rag-canonical") -``` - - -## Milvus Setup - -Use this step if you need to create a Milvus Vector Store and ingest data. -We will first follow the standard Milvus setup instructions [here](https://milvus.io/docs/install_standalone-docker.md). - -1. Download the Docker Compose YAML file. - ```shell - wget https://github.com/milvus-io/milvus/releases/download/v2.3.3/milvus-standalone-docker-compose.yml -O docker-compose.yml - ``` -2. Start the Milvus Vector Store container - ```shell - sudo docker compose up -d - ``` -3. Install the PyMilvus package to interact with the Milvus container. - ```shell - pip install pymilvus - ``` -4. Let's now ingest some data! We can do that by moving into this directory and running the code in `ingest.py`, eg: - - ```shell - python ingest.py - ``` - - Note that you can (and should!) change this to ingest data of your choice. 
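As a quick end-to-end sanity check, we can drive both routes from a small client script. This is a minimal sketch, assuming the LangServe app is running locally on port 8000 with the `/nvidia-rag-canonical` and `/nvidia-rag-ingest` routes registered as shown above; the sample PDF URL and question are the ones used by the template's own `ingest.py` and notebook:

```python
from langserve.client import RemoteRunnable

# Assumes the add_routes(...) calls shown above, with the server on localhost:8000.
rag = RemoteRunnable("http://localhost:8000/nvidia-rag-canonical")
ingest = RemoteRunnable("http://localhost:8000/nvidia-rag-ingest")

# The ingestion runnable downloads the PDF, splits it into chunks, and writes
# the embeddings into the Milvus collection configured in chain.py.
ingest.invoke("https://www.ssa.gov/news/press/factsheets/basicfact-alt.pdf")

# Restart the server so the retriever can see the newly ingested documents,
# then query the RAG chain with a plain question string.
print(rag.invoke("How many Americans receive Social Security Benefits?"))
```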
diff --git a/templates/nvidia-rag-canonical/ingest.py b/templates/nvidia-rag-canonical/ingest.py deleted file mode 100644 index d7851cfaed3..00000000000 --- a/templates/nvidia-rag-canonical/ingest.py +++ /dev/null @@ -1,39 +0,0 @@ -import getpass -import os - -from langchain.document_loaders import PyPDFLoader -from langchain.vectorstores.milvus import Milvus -from langchain_nvidia_aiplay import NVIDIAEmbeddings -from langchain_text_splitters.character import CharacterTextSplitter - -if os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): - print("Valid NVIDIA_API_KEY already in environment. Delete to reset") -else: - nvapi_key = getpass.getpass("NVAPI Key (starts with nvapi-): ") - assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" - os.environ["NVIDIA_API_KEY"] = nvapi_key - -# Note: if you change this, you should also change it in `nvidia_rag_canonical/chain.py` -EMBEDDING_MODEL = "nvolveqa_40k" -HOST = "127.0.0.1" -PORT = "19530" -COLLECTION_NAME = "test" - -embeddings = NVIDIAEmbeddings(model=EMBEDDING_MODEL) - -if __name__ == "__main__": - # Load docs - loader = PyPDFLoader("https://www.ssa.gov/news/press/factsheets/basicfact-alt.pdf") - data = loader.load() - - # Split docs - text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=100) - docs = text_splitter.split_documents(data) - - # Insert the documents in Milvus Vector Store - vector_db = Milvus.from_documents( - docs, - embeddings, - collection_name=COLLECTION_NAME, - connection_args={"host": HOST, "port": PORT}, - ) diff --git a/templates/nvidia-rag-canonical/nvidia_rag_canonical.ipynb b/templates/nvidia-rag-canonical/nvidia_rag_canonical.ipynb deleted file mode 100644 index eaff8b2e6a6..00000000000 --- a/templates/nvidia-rag-canonical/nvidia_rag_canonical.ipynb +++ /dev/null @@ -1,52 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, nvidia_rag_canonical_chain, path=\"/nvidia_rag_canonical\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://0.0.0.0:8000/nvidia_rag_canonical\")\n", - "rag_app.invoke(\"How many Americans receive Social Security Benefits?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/nvidia-rag-canonical/nvidia_rag_canonical/__init__.py b/templates/nvidia-rag-canonical/nvidia_rag_canonical/__init__.py deleted file mode 100644 index edcb55e32d2..00000000000 --- a/templates/nvidia-rag-canonical/nvidia_rag_canonical/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from nvidia_rag_canonical.chain import chain, ingest - -__all__ = ["chain", "ingest"] diff --git a/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py b/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py deleted file mode 100644 index 78d5e8e28c5..00000000000 --- a/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py 
+++ /dev/null @@ -1,91 +0,0 @@ -import getpass -import os - -from langchain_community.document_loaders import PyPDFLoader -from langchain_community.vectorstores import Milvus -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import ( - RunnableLambda, - RunnableParallel, - RunnablePassthrough, -) -from langchain_nvidia_aiplay import ChatNVIDIA, NVIDIAEmbeddings -from langchain_text_splitters.character import CharacterTextSplitter - -EMBEDDING_MODEL = "nvolveqa_40k" -CHAT_MODEL = "llama2_13b" -HOST = "127.0.0.1" -PORT = "19530" -COLLECTION_NAME = "test" -INGESTION_CHUNK_SIZE = 500 -INGESTION_CHUNK_OVERLAP = 0 - -if os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): - print("Valid NVIDIA_API_KEY already in environment. Delete to reset") -else: - nvapi_key = getpass.getpass("NVAPI Key (starts with nvapi-): ") - assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" - os.environ["NVIDIA_API_KEY"] = nvapi_key - -# Read from Milvus Vector Store -embeddings = NVIDIAEmbeddings(model=EMBEDDING_MODEL) -vectorstore = Milvus( - connection_args={"host": HOST, "port": PORT}, - collection_name=COLLECTION_NAME, - embedding_function=embeddings, -) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """[INST] <> -Use the following context to answer the user's question. If you don't know the answer, -just say that you don't know, don't try to make up an answer. -<> -[INST] Context: {context} Question: {question} Only return the helpful - answer below and nothing else. Helpful answer:[/INST]" -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatNVIDIA(model=CHAT_MODEL) -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) - - -def _ingest(url: str) -> dict: - """Load and ingest the PDF file from the URL""" - - loader = PyPDFLoader(url) - data = loader.load() - - # Split docs - text_splitter = CharacterTextSplitter( - chunk_size=INGESTION_CHUNK_SIZE, chunk_overlap=INGESTION_CHUNK_OVERLAP - ) - docs = text_splitter.split_documents(data) - - # Insert the documents in Milvus Vector Store - _ = Milvus.from_documents( - documents=docs, - embedding=embeddings, - collection_name=COLLECTION_NAME, - connection_args={"host": HOST, "port": PORT}, - ) - return {} - - -ingest = RunnableLambda(_ingest) diff --git a/templates/nvidia-rag-canonical/pyproject.toml b/templates/nvidia-rag-canonical/pyproject.toml deleted file mode 100644 index 59ecf00a81f..00000000000 --- a/templates/nvidia-rag-canonical/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "nvidia-rag-canonical" -version = "0.1.0" -description = "RAG with NVIDIA" -authors = ["Sagar Bogadi Manjunath "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -pymilvus = ">=2.3.0" -langchain-nvidia-aiplay = "^0.0.2" -pypdf = ">=4.0.1" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "nvidia_rag_canonical" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["Milvus", "NVIDIA"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] 
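As an aside on the `nvidia-rag-canonical` package removed above: it exported two runnables, `chain` and `ingest`. A minimal sketch (not part of the template itself) of how they were meant to be exercised together, assuming a Milvus instance is reachable at the `HOST`/`PORT` configured in `chain.py` and a valid `NVIDIA_API_KEY` is set:

```python
from nvidia_rag_canonical import chain, ingest

# `ingest` wraps _ingest(url) in a RunnableLambda, so it takes the PDF URL directly.
ingest.invoke("https://www.ssa.gov/news/press/factsheets/basicfact-alt.pdf")

# `chain` accepts a plain question string (see the Question input type in chain.py).
print(chain.invoke("How many Americans receive Social Security Benefits?"))
```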
-build-backend = "poetry.core.masonry.api" diff --git a/templates/nvidia-rag-canonical/tests/__init__.py b/templates/nvidia-rag-canonical/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/openai-functions-agent-gmail/.gitignore b/templates/openai-functions-agent-gmail/.gitignore deleted file mode 100644 index e5bd0caf327..00000000000 --- a/templates/openai-functions-agent-gmail/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -client_secret*.json -credentials.json -token.json - diff --git a/templates/openai-functions-agent-gmail/LICENSE b/templates/openai-functions-agent-gmail/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/openai-functions-agent-gmail/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/openai-functions-agent-gmail/README.md b/templates/openai-functions-agent-gmail/README.md deleted file mode 100644 index d6af630f272..00000000000 --- a/templates/openai-functions-agent-gmail/README.md +++ /dev/null @@ -1,108 +0,0 @@ -# OpenAI functions - Gmail agent - -Ever struggled to reach inbox zero? - -Using this template, you can create and customize your very own AI assistant -to manage your `Gmail` account. Using the default `Gmail` tools, -it can read, search through, and draft emails to respond on your behalf. -It also has access to a `Tavily` search engine so it can search for -relevant information about any topics or people in the email -thread before writing, ensuring the drafts include all -the relevant information needed to sound well-informed. - -![Animated GIF showing the interface of the Gmail Agent Playground with a cursor interacting with the input field.](./static/gmail-agent-playground.gif) "Gmail Agent Playground Interface" - -## Details - -This assistant uses OpenAI's [function calling](https://python.langchain.com/docs/modules/chains/how_to/openai_functions) support to reliably select and invoke the tools you've provided - -This template also imports directly from [langchain-core](https://pypi.org/project/langchain-core/) and [`langchain-community`](https://pypi.org/project/langchain-community/) where appropriate. We have restructured LangChain to let you select the specific integrations needed for your use case. 
While you can still import from `langchain` (we are making this transition backwards-compatible), we have separated the homes of most of the classes to reflect ownership and to make your dependency lists lighter. Most of the integrations you need can be found in the `langchain-community` package, and if you are just using the core expression language API's, you can even build solely based on `langchain-core`. - -## Environment Setup - -The following environment variables need to be set: - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -Set the `TAVILY_API_KEY` environment variable to access Tavily search. - -Create a [`credentials.json`](https://developers.google.com/gmail/api/quickstart/python#authorize_credentials_for_a_desktop_application) file containing your OAuth client ID from Gmail. To customize authentication, see the [Customize Auth](#customize-auth) section below. - -_*Note:* The first time you run this app, it will force you to go through a user authentication flow._ - -(Optional): Set `GMAIL_AGENT_ENABLE_SEND` to `true` (or modify the `agent.py` file in this template) to give it access to the "Send" tool. This will give your assistant permissions to send emails on your behalf without your explicit review, which is not recommended. - - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package openai-functions-agent-gmail -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add openai-functions-agent-gmail -``` - -And add the following code to your `server.py` file: -```python -from openai_functions_agent import agent_executor as openai_functions_agent_chain - -add_routes(app, openai_functions_agent_chain, path="/openai-functions-agent-gmail") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
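To make the package-split note above concrete, here is a small illustrative sketch (not from the template) of a pipeline built only from `langchain-core` imports; the "model" is a stand-in `RunnableLambda`, so no provider integration package is required to run it:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda

prompt = ChatPromptTemplate.from_messages([("human", "Summarize: {text}")])

# Stand-in for a chat model; a real app would put e.g. ChatOpenAI here,
# which lives in a separate integration package.
fake_model = RunnableLambda(lambda prompt_value: "stub: " + prompt_value.to_string())

chain = prompt | fake_model | StrOutputParser()
print(chain.invoke({"text": "the LangChain package split"}))
```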
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/openai-functions-agent-gmail/playground](http://127.0.0.1:8000/openai-functions-agent/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/openai-functions-agent-gmail") -``` - -## Customize Auth - -``` -from langchain_community.tools.gmail.utils import build_resource_service, get_gmail_credentials - -# Can review scopes here https://developers.google.com/gmail/api/auth/scopes -# For instance, readonly scope is 'https://www.googleapis.com/auth/gmail.readonly' -credentials = get_gmail_credentials( - token_file="token.json", - scopes=["https://mail.google.com/"], - client_secrets_file="credentials.json", -) -api_resource = build_resource_service(credentials=credentials) -toolkit = GmailToolkit(api_resource=api_resource) -``` \ No newline at end of file diff --git a/templates/openai-functions-agent-gmail/main.py b/templates/openai-functions-agent-gmail/main.py deleted file mode 100644 index 90fcfcc30df..00000000000 --- a/templates/openai-functions-agent-gmail/main.py +++ /dev/null @@ -1,9 +0,0 @@ -from openai_functions_agent.agent import agent_executor - -if __name__ == "__main__": - question = ( - "Write a draft response to LangChain's last email. " - "First do background research on the sender and topics to make sure you" - " understand the context, then write the draft." 
- ) - print(agent_executor.invoke({"input": question, "chat_history": []})) diff --git a/templates/openai-functions-agent-gmail/openai_functions_agent/__init__.py b/templates/openai-functions-agent-gmail/openai_functions_agent/__init__.py deleted file mode 100644 index 1d97eae2490..00000000000 --- a/templates/openai-functions-agent-gmail/openai_functions_agent/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from openai_functions_agent.agent import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/openai-functions-agent-gmail/openai_functions_agent/agent.py b/templates/openai-functions-agent-gmail/openai_functions_agent/agent.py deleted file mode 100644 index 3614a838222..00000000000 --- a/templates/openai-functions-agent-gmail/openai_functions_agent/agent.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -from typing import List, Tuple - -from langchain.agents import AgentExecutor -from langchain.agents.format_scratchpad import format_to_openai_function_messages -from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser -from langchain_community.chat_models import ChatOpenAI -from langchain_community.tools.convert_to_openai import format_tool_to_openai_function -from langchain_community.tools.gmail import ( - GmailCreateDraft, - GmailGetMessage, - GmailGetThread, - GmailSearch, - GmailSendMessage, -) -from langchain_community.tools.gmail.utils import build_resource_service -from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.tools.convert import tool - - -@tool -def search_engine(query: str, max_results: int = 5) -> str: - """"A search engine optimized for comprehensive, accurate, \ -and trusted results. Useful for when you need to answer questions \ -about current events or about recent information. \ -Input should be a search query. \ -If the user is asking about something that you don't know about, \ -you should probably use this tool to see if that can provide any information.""" - return TavilySearchAPIWrapper().results(query, max_results=max_results) - - -# Create the tools -tools = [ - GmailCreateDraft(), - GmailGetMessage(), - GmailGetThread(), - GmailSearch(), - search_engine, -] -if os.environ.get("GMAIL_AGENT_ENABLE_SEND") == "true": - tools.append(GmailSendMessage()) -current_user = ( - build_resource_service().users().getProfile(userId="me").execute()["emailAddress"] -) -assistant_system_message = """You are a helpful assistant aiding a user with their \ -emails. 
Use tools (only if necessary) to best answer \ -the users questions.\n\nCurrent user: {user}""" -prompt = ChatPromptTemplate.from_messages( - [ - ("system", assistant_system_message), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{input}"), - MessagesPlaceholder(variable_name="agent_scratchpad"), - ] -).partial(user=current_user) - - -llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=0) -llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools]) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]): - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -agent = ( - { - "input": lambda x: x["input"], - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - "agent_scratchpad": lambda x: format_to_openai_function_messages( - x["intermediate_steps"] - ), - } - | prompt - | llm_with_tools - | OpenAIFunctionsAgentOutputParser() -) - - -class AgentInput(BaseModel): - input: str - chat_history: List[Tuple[str, str]] = Field( - ..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}} - ) - - -agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True).with_types( - input_type=AgentInput -) diff --git a/templates/openai-functions-agent-gmail/pyproject.toml b/templates/openai-functions-agent-gmail/pyproject.toml deleted file mode 100644 index 26a0bb07934..00000000000 --- a/templates/openai-functions-agent-gmail/pyproject.toml +++ /dev/null @@ -1,41 +0,0 @@ -[tool.poetry] -name = "openai-functions-agent-gmail" -version = "0.1.0" -description = "Agent using OpenAI function calling to execute functions, including Gmail managing" -authors = [ - "Lance Martin ", -] -readme = "README.md" -packages = [ - { include = "openai-functions-agent" }, -] - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -langchain-core = "^0.1" -langchain-community = ">=0.0.1,<0.1.0" -google-api-python-client = "^2.110.0" -google-auth-oauthlib = "^1.1.0" -google-auth-httplib2 = "^0.1.1" -bs4 = "^0.0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "openai_functions_agent" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["OpenAI", "Tavily"] -tags = ["search", "agents", "function-calling"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/openai-functions-agent-gmail/static/gmail-agent-playground.gif b/templates/openai-functions-agent-gmail/static/gmail-agent-playground.gif deleted file mode 100644 index d6720c37b76..00000000000 Binary files a/templates/openai-functions-agent-gmail/static/gmail-agent-playground.gif and /dev/null differ diff --git a/templates/openai-functions-agent-gmail/tests/__init__.py b/templates/openai-functions-agent-gmail/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/openai-functions-agent/LICENSE b/templates/openai-functions-agent/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/openai-functions-agent/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/openai-functions-agent/README.md b/templates/openai-functions-agent/README.md deleted file mode 100644 index 8fffdfc204b..00000000000 --- a/templates/openai-functions-agent/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# OpenAI functions - agent - -This template creates an agent that uses `OpenAI function calling` to communicate its decisions on what actions to take. - -This example creates an agent that can optionally look up information on the internet using `Tavily`'s search engine. - -## Environment Setup - -The following environment variables need to be set: - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -Set the `TAVILY_API_KEY` environment variable to access Tavily. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package openai-functions-agent -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add openai-functions-agent -``` - -And add the following code to your `server.py` file: -```python -from openai_functions_agent import agent_executor as openai_functions_agent_chain - -add_routes(app, openai_functions_agent_chain, path="/openai-functions-agent") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
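Once the route above is mounted, the deployed agent can be called like any other Runnable. A short sketch (the endpoint path and history contents are illustrative) showing the input shape the agent expects; note that `chat_history` is a list of (human, ai) string pairs, matching the `AgentInput` type defined in this template's `agent.py`:

```python
from langserve.client import RemoteRunnable

agent = RemoteRunnable("http://localhost:8000/openai-functions-agent")

result = agent.invoke(
    {
        "input": "And who did they beat in the final?",
        "chat_history": [
            ("who won the womens world cup in 2023?", "Spain won the 2023 Women's World Cup."),
        ],
    }
)
print(result)
```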
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/openai-functions-agent/playground](http://127.0.0.1:8000/openai-functions-agent/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/openai-functions-agent") -``` \ No newline at end of file diff --git a/templates/openai-functions-agent/main.py b/templates/openai-functions-agent/main.py deleted file mode 100644 index f0e1f5963f9..00000000000 --- a/templates/openai-functions-agent/main.py +++ /dev/null @@ -1,5 +0,0 @@ -from openai_functions_agent.agent import agent_executor - -if __name__ == "__main__": - question = "who won the womens world cup in 2023?" - print(agent_executor.invoke({"input": question, "chat_history": []})) diff --git a/templates/openai-functions-agent/openai_functions_agent/__init__.py b/templates/openai-functions-agent/openai_functions_agent/__init__.py deleted file mode 100644 index 1d97eae2490..00000000000 --- a/templates/openai-functions-agent/openai_functions_agent/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from openai_functions_agent.agent import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/openai-functions-agent/openai_functions_agent/agent.py b/templates/openai-functions-agent/openai_functions_agent/agent.py deleted file mode 100644 index ad41da7ea39..00000000000 --- a/templates/openai-functions-agent/openai_functions_agent/agent.py +++ /dev/null @@ -1,72 +0,0 @@ -from typing import List, Tuple - -from langchain.agents import AgentExecutor -from langchain.agents.format_scratchpad import format_to_openai_function_messages -from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser -from langchain_community.chat_models import ChatOpenAI -from langchain_community.tools.convert_to_openai import format_tool_to_openai_function -from langchain_community.tools.tavily_search import TavilySearchResults -from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel, Field - -# Create the tool -search = TavilySearchAPIWrapper() -description = """"A search engine optimized for comprehensive, accurate, \ -and trusted results. Useful for when you need to answer questions \ -about current events or about recent information. \ -Input should be a search query. \ -If the user is asking about something that you don't know about, \ -you should probably use this tool to see if that can provide any information.""" -tavily_tool = TavilySearchResults(api_wrapper=search, description=description) - -tools = [tavily_tool] - -llm = ChatOpenAI(temperature=0) -assistant_system_message = """You are a helpful assistant. 
\ -Use tools (only if necessary) to best answer the users questions.""" -prompt = ChatPromptTemplate.from_messages( - [ - ("system", assistant_system_message), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{input}"), - MessagesPlaceholder(variable_name="agent_scratchpad"), - ] -) - -llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools]) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]): - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -agent = ( - { - "input": lambda x: x["input"], - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - "agent_scratchpad": lambda x: format_to_openai_function_messages( - x["intermediate_steps"] - ), - } - | prompt - | llm_with_tools - | OpenAIFunctionsAgentOutputParser() -) - - -class AgentInput(BaseModel): - input: str - chat_history: List[Tuple[str, str]] = Field( - ..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}} - ) - - -agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True).with_types( - input_type=AgentInput -) diff --git a/templates/openai-functions-agent/pyproject.toml b/templates/openai-functions-agent/pyproject.toml deleted file mode 100644 index a49bfdfda56..00000000000 --- a/templates/openai-functions-agent/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[tool.poetry] -name = "openai-functions-agent" -version = "0.1.0" -description = "Agent using OpenAI function calling to execute functions, including search" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tavily-python = "^0.1.9" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "openai_functions_agent" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["OpenAI", "Tavily"] -tags = ["search", "agents", "function-calling"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/openai-functions-agent/tests/__init__.py b/templates/openai-functions-agent/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/openai-functions-tool-retrieval-agent/.gitignore b/templates/openai-functions-tool-retrieval-agent/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/openai-functions-tool-retrieval-agent/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/openai-functions-tool-retrieval-agent/LICENSE b/templates/openai-functions-tool-retrieval-agent/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/openai-functions-tool-retrieval-agent/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/openai-functions-tool-retrieval-agent/README.md b/templates/openai-functions-tool-retrieval-agent/README.md deleted file mode 100644 index 973ecc9478f..00000000000 --- a/templates/openai-functions-tool-retrieval-agent/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# OpenAI functions - tool retrieval agent - -The novel idea introduced in this template is the idea of using retrieval to select the set of tools to use to answer an agent query. This is useful when you have many many tools to select from. You cannot put the description of all the tools in the prompt (because of context length issues) so instead you dynamically select the N tools you do want to consider using at run time. - -In this template we will create a somewhat contrived example. We will have one legitimate tool (search) and then 99 fake tools which are just nonsense. We will then add a step in the prompt template that takes the user input and retrieves tool relevant to the query. - -This template is based on [this Agent How-To](https://python.langchain.com/v0.2/docs/templates/openai-functions-agent/). - -## Environment Setup - -The following environment variables need to be set: - -Set the `OPENAI_API_KEY` environment variable to access the `OpenAI` models. - -Set the `TAVILY_API_KEY` environment variable to access `Tavily`. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package openai-functions-tool-retrieval-agent -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add openai-functions-tool-retrieval-agent -``` - -And add the following code to your `server.py` file: -```python -from openai_functions_tool_retrieval_agent import agent_executor as openai_functions_tool_retrieval_agent_chain - -add_routes(app, openai_functions_tool_retrieval_agent_chain, path="/openai-functions-tool-retrieval-agent") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
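The retrieval step described above is the heart of this template. Condensed from the `agent.py` that follows, the mechanism is simply a vector store over tool descriptions; a sketch with a smaller, made-up tool set:

```python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.tools import Tool

# A handful of placeholder tools standing in for the template's 99 fake tools.
tools = [
    Tool(
        name=f"foo-{i}",
        func=lambda q: "foo",
        description=f"a silly function that gets info about the number {i}",
    )
    for i in range(10)
]

# Index each tool's description so the relevant tools can be retrieved per query.
docs = [
    Document(page_content=t.description, metadata={"index": i})
    for i, t in enumerate(tools)
]
retriever = FAISS.from_documents(docs, OpenAIEmbeddings()).as_retriever(
    search_kwargs={"k": 3}
)


def get_tools(query: str) -> list:
    return [tools[d.metadata["index"]] for d in retriever.invoke(query)]


print([t.name for t in get_tools("tell me about the number 7")])
```

The retrieved subset is then bound to the model as functions for that single call, which is what keeps the prompt within context limits.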
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/openai-functions-tool-retrieval-agent/playground](http://127.0.0.1:8000/openai-functions-tool-retrieval-agent/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/openai-functions-tool-retrieval-agent") -``` diff --git a/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/__init__.py b/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/__init__.py deleted file mode 100644 index 8acc1a30869..00000000000 --- a/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from openai_functions_tool_retrieval_agent.agent import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py b/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py deleted file mode 100644 index 3d1bbfb8e0e..00000000000 --- a/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py +++ /dev/null @@ -1,122 +0,0 @@ -from typing import Dict, List, Tuple - -from langchain.agents import ( - AgentExecutor, -) -from langchain.agents.format_scratchpad import format_to_openai_functions -from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.tools.convert_to_openai import format_tool_to_openai_function -from langchain_community.tools.tavily_search import TavilySearchResults -from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper -from langchain_community.vectorstores import FAISS -from langchain_core.documents import Document -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.prompts import ( - ChatPromptTemplate, - MessagesPlaceholder, -) -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import Runnable, RunnableLambda, RunnableParallel -from langchain_core.tools import BaseTool, Tool - -# Create the tools -search = TavilySearchAPIWrapper() -description = """"Useful for when you need to answer questions \ -about current events or about recent information.""" -tavily_tool = TavilySearchResults(api_wrapper=search, description=description) - - -def fake_func(inp: str) -> str: - return "foo" - - -fake_tools = [ - Tool( - name=f"foo-{i}", - func=fake_func, - description=("a silly function that gets info " f"about the number {i}"), - ) - for i in range(99) -] -ALL_TOOLS: List[BaseTool] = [tavily_tool] + fake_tools - -# turn tools into documents for indexing -docs = [ - Document(page_content=t.description, metadata={"index": i}) - for i, t in enumerate(ALL_TOOLS) -] - -vector_store = 
FAISS.from_documents(docs, OpenAIEmbeddings()) - -retriever = vector_store.as_retriever() - - -def get_tools(query: str) -> List[Tool]: - docs = retriever.invoke(query) - return [ALL_TOOLS[d.metadata["index"]] for d in docs] - - -assistant_system_message = """You are a helpful assistant. \ -Use tools (only if necessary) to best answer the users questions.""" -assistant_system_message = """You are a helpful assistant. \ -Use tools (only if necessary) to best answer the users questions.""" -prompt = ChatPromptTemplate.from_messages( - [ - ("system", assistant_system_message), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{input}"), - MessagesPlaceholder(variable_name="agent_scratchpad"), - ] -) - - -def llm_with_tools(input: Dict) -> Runnable: - return RunnableLambda(lambda x: x["input"]) | ChatOpenAI(temperature=0).bind( - functions=input["functions"] - ) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]): - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -agent = ( - RunnableParallel( - { - "input": lambda x: x["input"], - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - "agent_scratchpad": lambda x: format_to_openai_functions( - x["intermediate_steps"] - ), - "functions": lambda x: [ - format_tool_to_openai_function(tool) for tool in get_tools(x["input"]) - ], - } - ) - | { - "input": prompt, - "functions": lambda x: x["functions"], - } - | llm_with_tools - | OpenAIFunctionsAgentOutputParser() -) - -# LLM chain consisting of the LLM and a prompt - - -class AgentInput(BaseModel): - input: str - chat_history: List[Tuple[str, str]] = Field( - ..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}} - ) - - -agent_executor = AgentExecutor(agent=agent, tools=ALL_TOOLS).with_types( - input_type=AgentInput -) diff --git a/templates/openai-functions-tool-retrieval-agent/pyproject.toml b/templates/openai-functions-tool-retrieval-agent/pyproject.toml deleted file mode 100644 index 3c6b2582a2e..00000000000 --- a/templates/openai-functions-tool-retrieval-agent/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "openai-functions-tool-retrieval-agent" -version = "0.0.1" -description = "Use large numbers of tools with tool retrieval strategies" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -faiss-cpu = "^1.7.4" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "openai_functions_tool_retrieval_agent" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "agents" -author = "LangChain" -integrations = ["OpenAI", "FAISS"] -tags = ["agents", "function-calling"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/openai-functions-tool-retrieval-agent/tests/__init__.py b/templates/openai-functions-tool-retrieval-agent/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/pii-protected-chatbot/LICENSE b/templates/pii-protected-chatbot/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/pii-protected-chatbot/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/pii-protected-chatbot/README.md b/templates/pii-protected-chatbot/README.md deleted file mode 100644 index 5873715bc50..00000000000 --- a/templates/pii-protected-chatbot/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# PII-protected chatbot - -This template creates a chatbot that flags any incoming -`Personal Identification Information` (`PII`) and doesn't pass it to the LLM. - -It uses the [Microsoft Presidio](https://microsoft.github.io/presidio/), -the Data Protection and De-identification SDK. - -## Environment Setup - -The following environment variables need to be set: - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U "langchain-cli[serve]" -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package pii-protected-chatbot -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add pii-protected-chatbot -``` - -And add the following code to your `server.py` file: -```python -from pii_protected_chatbot.chain import chain as pii_protected_chatbot - -add_routes(app, pii_protected_chatbot, path="/openai-functions-agent") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
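The screening step described above comes down to a single Presidio call. A minimal sketch of what the `_detect_pii` check in this template's `chain.py` does (the sample text is illustrative):

```python
from presidio_analyzer import AnalyzerEngine

analyzer = AnalyzerEngine()

text = "Hi, my phone number is 212-555-5555"
results = analyzer.analyze(text=text, language="en")

if results:
    # e.g. ["PHONE_NUMBER"]; the chain refuses to forward such input to the LLM
    print("PII detected:", [r.entity_type for r in results])
else:
    print("No PII found; the text can be passed to the chat chain.")
```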
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/pii_protected_chatbot/playground](http://127.0.0.1:8000/pii_protected_chatbot/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/pii_protected_chatbot") -``` \ No newline at end of file diff --git a/templates/pii-protected-chatbot/pii_protected_chatbot/__init__.py b/templates/pii-protected-chatbot/pii_protected_chatbot/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/pii-protected-chatbot/pii_protected_chatbot/chain.py b/templates/pii-protected-chatbot/pii_protected_chatbot/chain.py deleted file mode 100644 index 48fb4a3387c..00000000000 --- a/templates/pii-protected-chatbot/pii_protected_chatbot/chain.py +++ /dev/null @@ -1,85 +0,0 @@ -from typing import List, Tuple - -from langchain_community.chat_models import ChatOpenAI -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough -from presidio_analyzer import AnalyzerEngine - - -# Formatting for chat history -def _format_chat_history(chat_history: List[Tuple[str, str]]): - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -# Prompt we will use -_prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are a helpful assistant who speaks like a pirate", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("human", "{text}"), - ] -) - -# Model we will use -_model = ChatOpenAI() - -# Standard conversation chain. 
-chat_chain = ( - { - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - "text": lambda x: x["text"], - } - | _prompt - | _model - | StrOutputParser() -) - -# PII Detection logic -analyzer = AnalyzerEngine() - - -# You can customize this to detect any PII -def _detect_pii(inputs: dict) -> bool: - analyzer_results = analyzer.analyze(text=inputs["text"], language="en") - return bool(analyzer_results) - - -# Add logic to route on whether PII has been detected -def _route_on_pii(inputs: dict): - if inputs["pii_detected"]: - # Response if PII is detected - return "Sorry, I can't answer questions that involve PII" - else: - return chat_chain - - -# Final chain -chain = RunnablePassthrough.assign( - # First detect PII - pii_detected=_detect_pii -) | { - # Then use this information to generate the response - "response": _route_on_pii, - # Return boolean of whether PII is detected so client can decided - # whether or not to include in chat history - "pii_detected": lambda x: x["pii_detected"], -} - - -# Add typing for playground -class ChainInput(BaseModel): - text: str - chat_history: List[Tuple[str, str]] - - -chain = chain.with_types(input_type=ChainInput) diff --git a/templates/pii-protected-chatbot/pyproject.toml b/templates/pii-protected-chatbot/pyproject.toml deleted file mode 100644 index 15fdd1447b2..00000000000 --- a/templates/pii-protected-chatbot/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "pii_protected_chatbot" -version = "0.0.1" -description = "Flag PII before passing it to the LLM" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -presidio-analyzer = "^2.2.350" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "pii_protected_chatbot.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "chatbot" -author = "LangChain" -integrations = ["OpenAI", "Microsoft Presidio"] -tags = ["data", "redaction"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/pii-protected-chatbot/tests/__init__.py b/templates/pii-protected-chatbot/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/pirate-speak-configurable/.gitignore b/templates/pirate-speak-configurable/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/pirate-speak-configurable/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/pirate-speak-configurable/LICENSE b/templates/pirate-speak-configurable/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/pirate-speak-configurable/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/pirate-speak-configurable/README.md b/templates/pirate-speak-configurable/README.md deleted file mode 100644 index 4f6f9b52439..00000000000 --- a/templates/pirate-speak-configurable/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Pirate speak configurable - -This template converts user input into pirate speak. It shows how you can allow -`configurable_alternatives` in the Runnable, allowing you to select from -OpenAI, Anthropic, or Cohere as your LLM Provider in the playground (or via API). - -## Environment Setup - -Set the following environment variables to access all 3 configurable alternative -model providers: - -- `OPENAI_API_KEY` -- `ANTHROPIC_API_KEY` -- `COHERE_API_KEY` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package pirate-speak-configurable -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add pirate-speak-configurable -``` - -And add the following code to your `server.py` file: -```python -from pirate_speak_configurable import chain as pirate_speak_configurable_chain - -add_routes(app, pirate_speak_configurable_chain, path="/pirate-speak-configurable") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
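One thing the README above does not spell out is how a caller picks one of the alternatives per request. A sketch, assuming the package is installed locally and the relevant API keys are set; the `llm_provider` key matches the `ConfigurableField` id declared in this template's `chain.py`:

```python
from pirate_speak_configurable import chain

# Default alternative (OpenAI)
print(chain.invoke({"text": "Good morning, friend!"}))

# Select a different provider for this call only
anthropic_chain = chain.with_config(configurable={"llm_provider": "anthropic"})
print(anthropic_chain.invoke({"text": "Good morning, friend!"}))
```

The playground exposes the same `llm_provider` switch as a dropdown, and LangServe clients can pass the same `configurable` block per request.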
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/pirate-speak-configurable/playground](http://127.0.0.1:8000/pirate-speak-configurable/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/pirate-speak-configurable") -``` \ No newline at end of file diff --git a/templates/pirate-speak-configurable/pirate_speak_configurable/__init__.py b/templates/pirate-speak-configurable/pirate_speak_configurable/__init__.py deleted file mode 100644 index a5bfbc75e6c..00000000000 --- a/templates/pirate-speak-configurable/pirate_speak_configurable/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from pirate_speak_configurable.chain import chain - -__all__ = ["chain"] diff --git a/templates/pirate-speak-configurable/pirate_speak_configurable/chain.py b/templates/pirate-speak-configurable/pirate_speak_configurable/chain.py deleted file mode 100644 index ab9c02e8016..00000000000 --- a/templates/pirate-speak-configurable/pirate_speak_configurable/chain.py +++ /dev/null @@ -1,23 +0,0 @@ -from langchain_community.chat_models import ChatAnthropic, ChatCohere, ChatOpenAI -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import ConfigurableField - -_prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "Translate user input into pirate speak", - ), - ("human", "{text}"), - ] -) -_model = ChatOpenAI().configurable_alternatives( - ConfigurableField(id="llm_provider"), - default_key="openai", - anthropic=ChatAnthropic, - cohere=ChatCohere, -) - -# if you update this, you MUST also update ../pyproject.toml -# with the new `tool.langserve.export_attr` -chain = _prompt | _model diff --git a/templates/pirate-speak-configurable/pyproject.toml b/templates/pirate-speak-configurable/pyproject.toml deleted file mode 100644 index 902051cef41..00000000000 --- a/templates/pirate-speak-configurable/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "pirate-speak-configurable" -version = "0.0.1" -description = "Use Configurable Alternatives to allow clients to choose their Runnables" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -anthropic = "^0.6.0" -cohere = "^4.34" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "pirate_speak_configurable" -export_attr = "chain" - -[tool.templates-hub] -use-case = "chatbot" -author = "LangChain" -integrations = ["Cohere", "Anthropic", "OpenAI"] -tags = ["configurable"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/pirate-speak-configurable/tests/__init__.py b/templates/pirate-speak-configurable/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/pirate-speak/LICENSE b/templates/pirate-speak/LICENSE 
deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/pirate-speak/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/pirate-speak/README.md b/templates/pirate-speak/README.md deleted file mode 100644 index 6738927a6c0..00000000000 --- a/templates/pirate-speak/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Pirate speak - -This template converts user input into `pirate speak`. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package pirate-speak -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add pirate-speak -``` - -And add the following code to your `server.py` file: -```python -from pirate_speak.chain import chain as pirate_speak_chain - -add_routes(app, pirate_speak_chain, path="/pirate-speak") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
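Because the prompt in this template's `chain.py` (shown below) includes a `chat_history` `MessagesPlaceholder`, local invocations need to supply that key explicitly. A short sketch, assuming the package is installed with `pip install -e` and `OPENAI_API_KEY` is set:

```python
from langchain_core.messages import HumanMessage
from pirate_speak.chain import chain

# First turn: no prior history yet
reply = chain.invoke({"text": "Hi there, how are you?", "chat_history": []})
print(reply.content)

# Follow-up turn: pass the earlier exchange back in as messages
followup = chain.invoke(
    {
        "text": "What did I just ask you?",
        "chat_history": [HumanMessage(content="Hi there, how are you?"), reply],
    }
)
print(followup.content)
```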
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/pirate-speak/playground](http://127.0.0.1:8000/pirate-speak/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/pirate-speak") -``` diff --git a/templates/pirate-speak/pirate_speak/__init__.py b/templates/pirate-speak/pirate_speak/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/pirate-speak/pirate_speak/chain.py b/templates/pirate-speak/pirate_speak/chain.py deleted file mode 100644 index 2afd98d9547..00000000000 --- a/templates/pirate-speak/pirate_speak/chain.py +++ /dev/null @@ -1,18 +0,0 @@ -from langchain_community.chat_models import ChatOpenAI -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder - -_prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "Translate user input into pirate speak", - ), - MessagesPlaceholder("chat_history"), - ("human", "{text}"), - ] -) -_model = ChatOpenAI() - -# if you update this, you MUST also update ../pyproject.toml -# with the new `tool.langserve.export_attr` -chain = _prompt | _model diff --git a/templates/pirate-speak/pyproject.toml b/templates/pirate-speak/pyproject.toml deleted file mode 100644 index 8eeba98de26..00000000000 --- a/templates/pirate-speak/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "pirate-speak" -version = "0.0.1" -description = "Get started with a simple template that speaks like a pirate" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -openai = "<2" -langchain-community = ">=0.0.7,<0.2" -langchain-core = ">=0.1.4,<0.2" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "pirate_speak.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "chatbot" -author = "LangChain" -integrations = ["OpenAI"] -tags = ["getting-started"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/pirate-speak/tests/__init__.py b/templates/pirate-speak/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/plate-chain/LICENSE b/templates/plate-chain/LICENSE deleted file mode 100644 index 5bd9633b12a..00000000000 --- a/templates/plate-chain/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2023 SphinxBio, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. diff --git a/templates/plate-chain/README.md b/templates/plate-chain/README.md deleted file mode 100644 index bfe3d1b9edb..00000000000 --- a/templates/plate-chain/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Plate chain - -This template enables parsing of data from `laboratory plates`. - -In the context of biochemistry or molecular biology, laboratory plates are commonly used tools to hold samples in a grid-like format. - -This can parse the resulting data into standardized (e.g., `JSON`) format for further processing. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To utilize plate-chain, you must have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -Creating a new LangChain project and installing plate-chain as the only package can be done with: - -```shell -langchain app new my-app --package plate-chain -``` - -If you wish to add this to an existing project, simply run: - -```shell -langchain app add plate-chain -``` - -Then add the following code to your `server.py` file: - -```python -from plate_chain import chain as plate_chain - -add_routes(app, plate_chain, path="/plate-chain") -``` - -(Optional) For configuring LangSmith, which helps trace, monitor and debug LangChain applications, use the following code: - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you're in this directory, you can start a LangServe instance directly by: - -```shell -langchain serve -``` - -This starts the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -All templates can be viewed at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -Access the playground at [http://127.0.0.1:8000/plate-chain/playground](http://127.0.0.1:8000/plate-chain/playground) - -You can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/plate-chain") -``` \ No newline at end of file diff --git a/templates/plate-chain/examples/byonoy_absolute_example.csv b/templates/plate-chain/examples/byonoy_absolute_example.csv deleted file mode 100644 index 1e73c8e63e7..00000000000 --- a/templates/plate-chain/examples/byonoy_absolute_example.csv +++ /dev/null @@ -1,34 +0,0 @@ -,1,2,3,4,5,6,7,8,9,10,11,12 -A,0.065,0.063,0.061,0.070,0.061,0.060,0.065,0.064,0.063,0.063,0.059,0.073 -B,0.188,0.164,0.213,0.219,0.161,0.176,0.184,0.172,0.176,0.164,0.181,0.173 -C,0.207,0.190,0.218,0.190,0.211,0.182,0.215,0.211,0.187,0.184,0.208,0.171 -D,0.076,0.081,0.093,0.071,0.081,0.075,0.106,0.071,0.073,0.084,0.076,0.081 -E,0.076,0.069,0.082,0.099,0.094,0.072,0.086,0.064,0.070,0.067,0.068,0.074 -F,0.080,0.067,0.077,0.067,0.068,0.066,0.069,0.074,0.068,0.078,0.065,0.066 -G,0.061,0.076,0.063,0.069,0.083,0.074,0.071,0.067,0.066,0.067,0.067,0.068 -H,0.080,0.090,0.074,0.066,0.074,0.075,0.076,0.079,0.071,0.066,0.063,0.069 - - - -,1,2,3,4,5,6,7,8,9,10,11,12 -A,Sample 1,Sample 2,Sample 3,Sample 4,Sample 5,Sample 6,Sample 7,Sample 8,Sample 9,Sample 10,Sample 11,Sample 12 -B,Sample 13,Sample 14,Sample 15,Sample 16,Sample 17,Sample 18,Sample 19,Sample 20,Sample 21,Sample 22,Sample 23,Sample 24 -C,Sample 25,Sample 26,Sample 27,Sample 28,Sample 29,Sample 30,Sample 31,Sample 32,Sample 33,Sample 34,Sample 35,Sample 36 -D,Sample 37,Sample 
38,Sample 39,Sample 40,Sample 41,Sample 42,Sample 43,Sample 44,Sample 45,Sample 46,Sample 47,Sample 48 -E,Sample 49,Sample 50,Sample 51,Sample 52,Sample 53,Sample 54,Sample 55,Sample 56,Sample 57,Sample 58,Sample 59,Sample 60 -F,Sample 61,Sample 62,Sample 63,Sample 64,Sample 65,Sample 66,Sample 67,Sample 68,Sample 69,Sample 70,Sample 71,Sample 72 -G,Sample 73,Sample 74,Sample 75,Sample 76,Sample 77,Sample 78,Sample 79,Sample 80,Sample 81,Sample 82,Sample 83,Sample 84 -H,Sample 85,Sample 86,Sample 87,Sample 88,Sample 89,Sample 90,Sample 91,Sample 92,Sample 93,Sample 94,Sample 95,Sample 96 - - - -Sample Wavelength (nm),540 -Reference Wavelength (nm),-1 - - - -Smart Validation,Pass, -Plate Well Assignment,Pass,Measured values of assigned wells indicate correct plate layout. -Plate Orientation,, -Replicates,, -Standards,, \ No newline at end of file diff --git a/templates/plate-chain/plate_chain/__init__.py b/templates/plate-chain/plate_chain/__init__.py deleted file mode 100644 index 9506bcbd9a5..00000000000 --- a/templates/plate-chain/plate_chain/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from plate_chain.chain import chain - -__all__ = ["chain"] diff --git a/templates/plate-chain/plate_chain/chain.py b/templates/plate-chain/plate_chain/chain.py deleted file mode 100644 index 007f9ed7c26..00000000000 --- a/templates/plate-chain/plate_chain/chain.py +++ /dev/null @@ -1,89 +0,0 @@ -import base64 -import json - -from langchain_community.chat_models import ChatOpenAI -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate -from langchain_core.pydantic_v1 import Field -from langserve import CustomUserType - -from .prompts import ( - AI_REPONSE_DICT, - FULL_PROMPT, - USER_EXAMPLE_DICT, - create_prompt, -) -from .utils import parse_llm_output - -llm = ChatOpenAI(temperature=0, model="gpt-4") - -prompt = ChatPromptTemplate.from_messages( - [ - SystemMessagePromptTemplate.from_template(FULL_PROMPT), - ("human", "{user_example}"), - ("ai", "{ai_response}"), - ("human", "{input}"), - ], -) - - -# ATTENTION: Inherit from CustomUserType instead of BaseModel otherwise -# the server will decode it into a dict instead of a pydantic model. -class FileProcessingRequest(CustomUserType): - """Request including a base64 encoded file.""" - - # The extra field is used to specify a widget for the playground UI. 
- file: str = Field(..., extra={"widget": {"type": "base64file"}}) - num_plates: int = None - num_rows: int = 8 - num_cols: int = 12 - - -def _load_file(request: FileProcessingRequest): - return base64.b64decode(request.file.encode("utf-8")).decode("utf-8") - - -def _load_prompt(request: FileProcessingRequest): - return create_prompt( - num_plates=request.num_plates, - num_rows=request.num_rows, - num_cols=request.num_cols, - ) - - -def _get_col_range_str(request: FileProcessingRequest): - if request.num_cols: - return f"from 1 to {request.num_cols}" - else: - return "" - - -def _get_json_format(request: FileProcessingRequest): - return json.dumps( - [ - { - "row_start": 12, - "row_end": 12 + request.num_rows - 1, - "col_start": 1, - "col_end": 1 + request.num_cols - 1, - "contents": "Entity ID", - } - ] - ) - - -chain = ( - { - # Should add validation to ensure numeric indices - "input": _load_file, - "hint": _load_prompt, - "col_range_str": _get_col_range_str, - "json_format": _get_json_format, - "user_example": lambda x: USER_EXAMPLE_DICT[x.num_rows * x.num_cols], - "ai_response": lambda x: AI_REPONSE_DICT[x.num_rows * x.num_cols], - } - | prompt - | llm - | StrOutputParser() - | parse_llm_output -).with_types(input_type=FileProcessingRequest) diff --git a/templates/plate-chain/plate_chain/prompts.py b/templates/plate-chain/plate_chain/prompts.py deleted file mode 100644 index 9b8eb7380cc..00000000000 --- a/templates/plate-chain/plate_chain/prompts.py +++ /dev/null @@ -1,76 +0,0 @@ -from typing import Optional - -FULL_PROMPT = """# Context -- Plate-based data is rectangular and could be situated anywhere within the dataset. -- The first item in every row is the row index -{hint} - -# Rules -- Ignore all data which is not part of the plate. -- Row identifiers start with a single letter of the alphabet. -- The header row of the plate has monotonically increasing integers {col_range_str}. -- The header row should NOT be considered the starting row of the plate. - -# Output -- Use 0-indexing for row and column numbers. -- Do NOT include the header row or header column in the output calculation. -- Produce your output as JSON. ONLY RETURN THE JSON. The format should be: -```json -{json_format} -``` -""" - -NUM_PLATES_PROMPT = """- There {num_plates_str} in this data.""" -ROWS_PROMPT = """- Each plate has {num_rows} rows.""" -COLS_PROMPT = """- Each plate has {num_cols} columns.""" -GENERIC_PLATES_PROMPT = """ -- There may be multiple plates. -- Plates consist of 24 (4x6), 96 (8x12), 384 (16x24), or 1536 (32x48) wells.
-""" - -HUMAN_24W_PROMPT = "0,,,,,,,\n1,,1,2,3,4,5,6\n2,A,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006\n3,B,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006\n4,C,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006\n5,D,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006\n" # noqa: E501 -AI_24W_RESPONSE = '[{"row_start": 2, "row_end": 5, "col_start": 1, "col_end": 6, "contents": "SB_ID"}]' # noqa: E501 - -HUMAN_96W_PROMPT = "0,,,,,,,,,,,,,\n1,,1,2,3,4,5,6,7,8,9,10,11,12\n2,A,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012\n3,B,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012\n4,C,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012\n5,D,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012\n6,E,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012\n7,F,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012\n8,G,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012\n9,H,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012\n" # noqa: E501 -AI_96W_RESPONSE = '[{"row_start": 2, "row_end": 9, "col_start": 1, "col_end": 12, "contents": "SB_ID"}]' # noqa: E501 - -HUMAN_384W_PROMPT = "0,,,,,,,,,,,,,,,,,,,,,,,,,\n1,,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24\n2,A,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n3,B,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n4,C,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n5,D,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n6,E,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n7,F,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n8,G,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n9,H,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n10,I,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n11,J,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n12,K,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n13,L,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n14,M,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-0
09,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n15,N,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024\n" # noqa: E501 -# should be 15, 23 -AI_384W_RESPONSE = '[{"row_start": 2, "row_end": 17, "col_start": 1, "col_end": 24, "contents": "SB_ID"}]' # noqa: E501 - -HUMAN_1536W_PROMPT = "0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\n1,,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48\n2,A,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n3,B,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n4,C,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n5,D,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n6,E,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n7,F,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n8,G,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n9,H,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n10,I,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024
,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n11,J,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n12,K,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n13,L,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n14,M,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n15,N,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n16,O,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n17,P,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n18,Q,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n19,R,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n20,S,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,S
B-044,SB-045,SB-046,SB-047,SB-048\n21,T,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n22,U,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n23,V,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n24,W,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n25,X,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n26,Y,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n27,Z,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n28,AA,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n29,AB,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n30,AC,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n31,AD,SB-001,SB-002,SB-003,SB-004,SB-005,SB-006,SB-007,SB-008,SB-009,SB-010,SB-011,SB-012,SB-013,
SB-014,SB-015,SB-016,SB-017,SB-018,SB-019,SB-020,SB-021,SB-022,SB-023,SB-024,SB-025,SB-026,SB-027,SB-028,SB-029,SB-030,SB-031,SB-032,SB-033,SB-034,SB-035,SB-036,SB-037,SB-038,SB-039,SB-040,SB-041,SB-042,SB-043,SB-044,SB-045,SB-046,SB-047,SB-048\n" # noqa: E501 -AI_1536W_RESPONSE = '[{"row_start": 2, "row_end": 33, "col_start": 1, "col_end": 48, "contents": "SB_ID"}]' # noqa: E501 - -USER_EXAMPLE_DICT = { - 24: HUMAN_24W_PROMPT, - 96: HUMAN_96W_PROMPT, - 384: HUMAN_384W_PROMPT, - 1536: HUMAN_1536W_PROMPT, -} - -AI_REPONSE_DICT = { - 24: AI_24W_RESPONSE, - 96: AI_96W_RESPONSE, - 384: AI_384W_RESPONSE, - 1536: AI_1536W_RESPONSE, -} - - -def create_prompt( - num_plates: Optional[int] = None, - num_rows: Optional[int] = None, - num_cols: Optional[int] = None, -) -> str: - additional_prompts = [] - if num_plates: - num_plates_str = f"are {num_plates} plates" if num_plates > 1 else "is 1 plate" - additional_prompts.append( - NUM_PLATES_PROMPT.format(num_plates_str=num_plates_str) - ) - if num_rows: - additional_prompts.append(ROWS_PROMPT.format(num_rows=num_rows)) - if num_cols: - additional_prompts.append(COLS_PROMPT.format(num_cols=num_cols)) - return ( - "\n".join(additional_prompts) if additional_prompts else GENERIC_PLATES_PROMPT - ) diff --git a/templates/plate-chain/plate_chain/utils.py b/templates/plate-chain/plate_chain/utils.py deleted file mode 100644 index c2688776342..00000000000 --- a/templates/plate-chain/plate_chain/utils.py +++ /dev/null @@ -1,31 +0,0 @@ -import json - -from langchain_core.pydantic_v1 import BaseModel, Field, conint - - -class LLMPlateResponse(BaseModel): - row_start: conint(ge=0) = Field( - ..., description="The starting row of the plate (0-indexed)" - ) - row_end: conint(ge=0) = Field( - ..., description="The ending row of the plate (0-indexed)" - ) - col_start: conint(ge=0) = Field( - ..., description="The starting column of the plate (0-indexed)" - ) - col_end: conint(ge=0) = Field( - ..., description="The ending column of the plate (0-indexed)" - ) - contents: str - - -def parse_llm_output(result: str): - """ - Based on the prompt we expect the result to be a string that looks like: - - '[{"row_start": 12, "row_end": 19, "col_start": 1, \ - "col_end": 12, "contents": "Entity ID"}]' - - We'll load that JSON and turn it into a Pydantic model - """ - return [LLMPlateResponse(**plate_r) for plate_r in json.loads(result)] diff --git a/templates/plate-chain/pyproject.toml b/templates/plate-chain/pyproject.toml deleted file mode 100644 index d6b412be86f..00000000000 --- a/templates/plate-chain/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "plate-chain" -version = "0.0.1" -description = "Parse data from laboratory plates into standardized formats" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -langserve = ">=0.0.19" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "plate_chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "extraction" -author = "Sphinx Bio" -integrations = ["OpenAI"] -tags = ["bio", "data"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/plate-chain/tests/__init__.py b/templates/plate-chain/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/poetry.lock b/templates/poetry.lock deleted file mode 100644 index 
89b72564fa0..00000000000 --- a/templates/poetry.lock +++ /dev/null @@ -1,1877 +0,0 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. - -[[package]] -name = "aiohttp" -version = "3.9.3" -description = "Async http client/server framework (asyncio)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:939677b61f9d72a4fa2a042a5eee2a99a24001a67c13da113b2e30396567db54"}, - {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f5cd333fcf7590a18334c90f8c9147c837a6ec8a178e88d90a9b96ea03194cc"}, - {file = "aiohttp-3.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82e6aa28dd46374f72093eda8bcd142f7771ee1eb9d1e223ff0fa7177a96b4a5"}, - {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f56455b0c2c7cc3b0c584815264461d07b177f903a04481dfc33e08a89f0c26b"}, - {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bca77a198bb6e69795ef2f09a5f4c12758487f83f33d63acde5f0d4919815768"}, - {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e083c285857b78ee21a96ba1eb1b5339733c3563f72980728ca2b08b53826ca5"}, - {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab40e6251c3873d86ea9b30a1ac6d7478c09277b32e14745d0d3c6e76e3c7e29"}, - {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df822ee7feaaeffb99c1a9e5e608800bd8eda6e5f18f5cfb0dc7eeb2eaa6bbec"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:acef0899fea7492145d2bbaaaec7b345c87753168589cc7faf0afec9afe9b747"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cd73265a9e5ea618014802ab01babf1940cecb90c9762d8b9e7d2cc1e1969ec6"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a78ed8a53a1221393d9637c01870248a6f4ea5b214a59a92a36f18151739452c"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6b0e029353361f1746bac2e4cc19b32f972ec03f0f943b390c4ab3371840aabf"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7cf5c9458e1e90e3c390c2639f1017a0379a99a94fdfad3a1fd966a2874bba52"}, - {file = "aiohttp-3.9.3-cp310-cp310-win32.whl", hash = "sha256:3e59c23c52765951b69ec45ddbbc9403a8761ee6f57253250c6e1536cacc758b"}, - {file = "aiohttp-3.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:055ce4f74b82551678291473f66dc9fb9048a50d8324278751926ff0ae7715e5"}, - {file = "aiohttp-3.9.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6b88f9386ff1ad91ace19d2a1c0225896e28815ee09fc6a8932fded8cda97c3d"}, - {file = "aiohttp-3.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c46956ed82961e31557b6857a5ca153c67e5476972e5f7190015018760938da2"}, - {file = "aiohttp-3.9.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:07b837ef0d2f252f96009e9b8435ec1fef68ef8b1461933253d318748ec1acdc"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad46e6f620574b3b4801c68255492e0159d1712271cc99d8bdf35f2043ec266"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ed3e046ea7b14938112ccd53d91c1539af3e6679b222f9469981e3dac7ba1ce"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:039df344b45ae0b34ac885ab5b53940b174530d4dd8a14ed8b0e2155b9dddccb"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7943c414d3a8d9235f5f15c22ace69787c140c80b718dcd57caaade95f7cd93b"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84871a243359bb42c12728f04d181a389718710129b36b6aad0fc4655a7647d4"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5eafe2c065df5401ba06821b9a054d9cb2848867f3c59801b5d07a0be3a380ae"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9d3c9b50f19704552f23b4eaea1fc082fdd82c63429a6506446cbd8737823da3"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:f033d80bc6283092613882dfe40419c6a6a1527e04fc69350e87a9df02bbc283"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:2c895a656dd7e061b2fd6bb77d971cc38f2afc277229ce7dd3552de8313a483e"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1f5a71d25cd8106eab05f8704cd9167b6e5187bcdf8f090a66c6d88b634802b4"}, - {file = "aiohttp-3.9.3-cp311-cp311-win32.whl", hash = "sha256:50fca156d718f8ced687a373f9e140c1bb765ca16e3d6f4fe116e3df7c05b2c5"}, - {file = "aiohttp-3.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:5fe9ce6c09668063b8447f85d43b8d1c4e5d3d7e92c63173e6180b2ac5d46dd8"}, - {file = "aiohttp-3.9.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:38a19bc3b686ad55804ae931012f78f7a534cce165d089a2059f658f6c91fa60"}, - {file = "aiohttp-3.9.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:770d015888c2a598b377bd2f663adfd947d78c0124cfe7b959e1ef39f5b13869"}, - {file = "aiohttp-3.9.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee43080e75fc92bf36219926c8e6de497f9b247301bbf88c5c7593d931426679"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52df73f14ed99cee84865b95a3d9e044f226320a87af208f068ecc33e0c35b96"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc9b311743a78043b26ffaeeb9715dc360335e5517832f5a8e339f8a43581e4d"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b955ed993491f1a5da7f92e98d5dad3c1e14dc175f74517c4e610b1f2456fb11"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:504b6981675ace64c28bf4a05a508af5cde526e36492c98916127f5a02354d53"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6fe5571784af92b6bc2fda8d1925cccdf24642d49546d3144948a6a1ed58ca5"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ba39e9c8627edc56544c8628cc180d88605df3892beeb2b94c9bc857774848ca"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e5e46b578c0e9db71d04c4b506a2121c0cb371dd89af17a0586ff6769d4c58c1"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:938a9653e1e0c592053f815f7028e41a3062e902095e5a7dc84617c87267ebd5"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:c3452ea726c76e92f3b9fae4b34a151981a9ec0a4847a627c43d71a15ac32aa6"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ff30218887e62209942f91ac1be902cc80cddb86bf00fbc6783b7a43b2bea26f"}, - {file = "aiohttp-3.9.3-cp312-cp312-win32.whl", hash = 
"sha256:38f307b41e0bea3294a9a2a87833191e4bcf89bb0365e83a8be3a58b31fb7f38"}, - {file = "aiohttp-3.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:b791a3143681a520c0a17e26ae7465f1b6f99461a28019d1a2f425236e6eedb5"}, - {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ed621426d961df79aa3b963ac7af0d40392956ffa9be022024cd16297b30c8c"}, - {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f46acd6a194287b7e41e87957bfe2ad1ad88318d447caf5b090012f2c5bb528"}, - {file = "aiohttp-3.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:feeb18a801aacb098220e2c3eea59a512362eb408d4afd0c242044c33ad6d542"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f734e38fd8666f53da904c52a23ce517f1b07722118d750405af7e4123933511"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b40670ec7e2156d8e57f70aec34a7216407848dfe6c693ef131ddf6e76feb672"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fdd215b7b7fd4a53994f238d0f46b7ba4ac4c0adb12452beee724ddd0743ae5d"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:017a21b0df49039c8f46ca0971b3a7fdc1f56741ab1240cb90ca408049766168"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99abf0bba688259a496f966211c49a514e65afa9b3073a1fcee08856e04425b"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:648056db9a9fa565d3fa851880f99f45e3f9a771dd3ff3bb0c048ea83fb28194"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8aacb477dc26797ee089721536a292a664846489c49d3ef9725f992449eda5a8"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:522a11c934ea660ff8953eda090dcd2154d367dec1ae3c540aff9f8a5c109ab4"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5bce0dc147ca85caa5d33debc4f4d65e8e8b5c97c7f9f660f215fa74fc49a321"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b4af9f25b49a7be47c0972139e59ec0e8285c371049df1a63b6ca81fdd216a2"}, - {file = "aiohttp-3.9.3-cp38-cp38-win32.whl", hash = "sha256:298abd678033b8571995650ccee753d9458dfa0377be4dba91e4491da3f2be63"}, - {file = "aiohttp-3.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:69361bfdca5468c0488d7017b9b1e5ce769d40b46a9f4a2eed26b78619e9396c"}, - {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0fa43c32d1643f518491d9d3a730f85f5bbaedcbd7fbcae27435bb8b7a061b29"}, - {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:835a55b7ca49468aaaac0b217092dfdff370e6c215c9224c52f30daaa735c1c1"}, - {file = "aiohttp-3.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06a9b2c8837d9a94fae16c6223acc14b4dfdff216ab9b7202e07a9a09541168f"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abf151955990d23f84205286938796c55ff11bbfb4ccfada8c9c83ae6b3c89a3"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59c26c95975f26e662ca78fdf543d4eeaef70e533a672b4113dd888bd2423caa"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f95511dd5d0e05fd9728bac4096319f80615aaef4acbecb35a990afebe953b0e"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:595f105710293e76b9dc09f52e0dd896bd064a79346234b521f6b968ffdd8e58"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7c8b816c2b5af5c8a436df44ca08258fc1a13b449393a91484225fcb7545533"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f1088fa100bf46e7b398ffd9904f4808a0612e1d966b4aa43baa535d1b6341eb"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f59dfe57bb1ec82ac0698ebfcdb7bcd0e99c255bd637ff613760d5f33e7c81b3"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:361a1026c9dd4aba0109e4040e2aecf9884f5cfe1b1b1bd3d09419c205e2e53d"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:363afe77cfcbe3a36353d8ea133e904b108feea505aa4792dad6585a8192c55a"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e2c45c208c62e955e8256949eb225bd8b66a4c9b6865729a786f2aa79b72e9d"}, - {file = "aiohttp-3.9.3-cp39-cp39-win32.whl", hash = "sha256:f7217af2e14da0856e082e96ff637f14ae45c10a5714b63c77f26d8884cf1051"}, - {file = "aiohttp-3.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:27468897f628c627230dba07ec65dc8d0db566923c48f29e084ce382119802bc"}, - {file = "aiohttp-3.9.3.tar.gz", hash = "sha256:90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7"}, -] - -[package.dependencies] -aiosignal = ">=1.1.2" -async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} -attrs = ">=17.3.0" -frozenlist = ">=1.1.1" -multidict = ">=4.5,<7.0" -yarl = ">=1.0,<2.0" - -[package.extras] -speedups = ["Brotli", "aiodns", "brotlicffi"] - -[[package]] -name = "aiosignal" -version = "1.3.1" -description = "aiosignal: a list of registered asynchronous callbacks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, -] - -[package.dependencies] -frozenlist = ">=1.1.0" - -[[package]] -name = "annotated-types" -version = "0.6.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, -] - -[[package]] -name = "anyio" -version = "4.3.0" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.8" -files = [ - {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, - {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, -] - -[package.dependencies] -exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} - -[package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", 
"trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] - -[[package]] -name = "async-timeout" -version = "4.0.3" -description = "Timeout context manager for asyncio programs" -optional = false -python-versions = ">=3.7" -files = [ - {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, - {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, -] - -[[package]] -name = "attrs" -version = "23.2.0" -description = "Classes Without Boilerplate" -optional = false -python-versions = ">=3.7" -files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, -] - -[package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] - -[[package]] -name = "certifi" -version = "2024.2.2" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, - {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.3.2" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, -] - -[[package]] -name = "click" -version = "8.1.7" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.7" -files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "dataclasses-json" -version = "0.6.4" -description = "Easily serialize dataclasses to and from JSON." 
-optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "dataclasses_json-0.6.4-py3-none-any.whl", hash = "sha256:f90578b8a3177f7552f4e1a6e535e84293cd5da421fcce0642d49c0d7bdf8df2"}, - {file = "dataclasses_json-0.6.4.tar.gz", hash = "sha256:73696ebf24936560cca79a2430cbc4f3dd23ac7bf46ed17f38e5e5e7657a6377"}, -] - -[package.dependencies] -marshmallow = ">=3.18.0,<4.0.0" -typing-inspect = ">=0.4.0,<1" - -[[package]] -name = "docopt" -version = "0.6.2" -description = "Pythonic argument parser, that will make you smile" -optional = false -python-versions = "*" -files = [ - {file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"}, -] - -[[package]] -name = "exceptiongroup" -version = "1.2.0" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "fastapi" -version = "0.110.0" -description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fastapi-0.110.0-py3-none-any.whl", hash = "sha256:87a1f6fb632a218222c5984be540055346a8f5d8a68e8f6fb647b1dc9934de4b"}, - {file = "fastapi-0.110.0.tar.gz", hash = "sha256:266775f0dcc95af9d3ef39bad55cff525329a931d5fd51930aadd4f428bf7ff3"}, -] - -[package.dependencies] -pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" -starlette = ">=0.36.3,<0.37.0" -typing-extensions = ">=4.8.0" - -[package.extras] -all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] - -[[package]] -name = "frozenlist" -version = "1.4.1" -description = "A list-like structure which implements collections.abc.MutableSequence" -optional = false -python-versions = ">=3.8" -files = [ - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, - {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, - {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, - {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = 
"sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, - {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, - {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, - {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, - {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, - {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, - {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, - {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, - {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, - {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, -] - -[[package]] -name = "gitdb" -version = "4.0.11" -description = "Git Object Database" -optional = false -python-versions = ">=3.7" -files = [ - {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, - {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, -] - -[package.dependencies] -smmap = ">=3.0.1,<6" - -[[package]] -name = "gitpython" -version = "3.1.42" -description = "GitPython is a Python library used to interact with Git repositories" -optional = false -python-versions = ">=3.7" -files = [ - {file = "GitPython-3.1.42-py3-none-any.whl", hash = "sha256:1bf9cd7c9e7255f77778ea54359e54ac22a72a5b51288c457c881057b7bb9ecd"}, - {file = "GitPython-3.1.42.tar.gz", hash = "sha256:2d99869e0fef71a73cbd242528105af1d6c1b108c60dfabd994bf292f76c3ceb"}, -] - -[package.dependencies] -gitdb = ">=4.0.1,<5" - -[package.extras] -test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar"] - -[[package]] -name = "greenlet" -version = "3.0.3" -description = "Lightweight in-process concurrent programming" -optional = false -python-versions = ">=3.7" -files = [ - {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, - {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, - {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, - {file = 
"greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, - {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, - {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, - {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, - {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, - {file = 
"greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, - {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, - {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, - {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, - {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, - {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, - {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, - {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, - {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, - {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, -] - -[package.extras] -docs = ["Sphinx", "furo"] -test = ["objgraph", "psutil"] - -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.7" -files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] - -[[package]] -name = "httpcore" -version = "1.0.4" -description = "A minimal low-level HTTP client." -optional = false -python-versions = ">=3.8" -files = [ - {file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"}, - {file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"}, -] - -[package.dependencies] -certifi = "*" -h11 = ">=0.13,<0.15" - -[package.extras] -asyncio = ["anyio (>=4.0,<5.0)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.25.0)"] - -[[package]] -name = "httpx" -version = "0.27.0" -description = "The next generation HTTP client." -optional = false -python-versions = ">=3.8" -files = [ - {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, - {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, -] - -[package.dependencies] -anyio = "*" -certifi = "*" -httpcore = "==1.*" -idna = "*" -sniffio = "*" - -[package.extras] -brotli = ["brotli", "brotlicffi"] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] - -[[package]] -name = "httpx-sse" -version = "0.4.0" -description = "Consume Server-Sent Event (SSE) messages with HTTPX." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, - {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, -] - -[[package]] -name = "idna" -version = "3.6" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, -] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "jsonpatch" -version = "1.33" -description = "Apply JSON-Patches (RFC 6902)" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" -files = [ - {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, - {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, -] - -[package.dependencies] -jsonpointer = ">=1.9" - -[[package]] -name = "jsonpointer" -version = "2.4" -description = "Identify specific nodes in a JSON document (RFC 6901)" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" -files = [ - {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, - {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, -] - -[[package]] -name = "langchain" -version = "0.1.12" -description = "Building applications with LLMs through composability" -optional = false -python-versions = ">=3.8.1,<4.0" -files = [] -develop = true - -[package.dependencies] -aiohttp = "^3.8.3" -async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""} -dataclasses-json = ">= 0.5.7, < 0.7" -jsonpatch = "^1.33" -langchain-community = ">=0.0.28,<0.1" -langchain-core = "^0.1.31" -langchain-text-splitters = ">=0.0.1,<0.1" -langsmith = "^0.1.17" -numpy = "^1" -pydantic = ">=1,<3" -PyYAML = ">=5.3" -requests = "^2" -SQLAlchemy = ">=1.4,<3" -tenacity = "^8.1.0" - -[package.extras] -all = [] -azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] -clarifai = ["clarifai (>=9.1.0)"] -cli = ["typer (>=0.9.0,<0.10.0)"] -cohere = ["cohere (>=4,<5)"] -docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] -embeddings = ["sentence-transformers (>=2,<3)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", 
"beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.0.2,<0.1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] -javascript = ["esprima (>=4.0.1,<5.0.0)"] -llms = ["clarifai (>=9.1.0)", "cohere (>=4,<5)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] -openai = ["openai (<2)", "tiktoken (>=0.3.2,<0.6.0)"] -qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"] -text-helpers = ["chardet (>=5.1.0,<6.0.0)"] - -[package.source] -type = "directory" -url = "../libs/langchain" - -[[package]] -name = "langchain-cli" -version = "0.0.21" -description = "CLI for interacting with LangChain" -optional = false -python-versions = ">=3.8.1,<4.0" -files = [ - {file = "langchain_cli-0.0.21-py3-none-any.whl", hash = "sha256:cd5c83597ef857704db983aa1743d7c2e6da52d634f735a7610630347347583e"}, - {file = "langchain_cli-0.0.21.tar.gz", hash = "sha256:d36a40955533ce0217b9a89c11bf593b18d8b40f2abbc81c9a531eb23f54809f"}, -] - -[package.dependencies] -gitpython = ">=3.1.40,<4.0.0" -langserve = {version = ">=0.0.16", extras = ["all"]} -tomlkit = ">=0.12.2,<0.13.0" -typer = {version = ">=0.9.0,<0.10.0", extras = ["all"]} -uvicorn = ">=0.23.2,<0.24.0" - -[[package]] -name = "langchain-community" -version = "0.0.28" -description = "Community contributed LangChain integrations." 
-optional = false -python-versions = ">=3.8.1,<4.0" -files = [ - {file = "langchain_community-0.0.28-py3-none-any.whl", hash = "sha256:bdb015ac455ae68432ea104628717583dce041e1abdfcefe86e39f034f5e90b8"}, - {file = "langchain_community-0.0.28.tar.gz", hash = "sha256:8664d243a90550fc5ddc137b712034e02c8d43afc8d4cc832ba5842b44c864ce"}, -] - -[package.dependencies] -aiohttp = ">=3.8.3,<4.0.0" -dataclasses-json = ">=0.5.7,<0.7" -langchain-core = ">=0.1.31,<0.2.0" -langsmith = ">=0.1.0,<0.2.0" -numpy = ">=1,<2" -PyYAML = ">=5.3" -requests = ">=2,<3" -SQLAlchemy = ">=1.4,<3" -tenacity = ">=8.1.0,<9.0.0" - -[package.extras] -cli = ["typer (>=0.9.0,<0.10.0)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)", "zhipuai (>=1.0.7,<2.0.0)"] - -[[package]] -name = "langchain-core" -version = "0.1.31" -description = "Building applications with LLMs through composability" -optional = false -python-versions = ">=3.8.1,<4.0" -files = [] -develop = true - -[package.dependencies] -anyio = ">=3,<5" -jsonpatch = "^1.33" -langsmith = "^0.1.0" -packaging = "^23.2" -pydantic = ">=1,<3" -PyYAML = ">=5.3" -requests = "^2" -tenacity = "^8.1.0" - -[package.extras] -extended-testing = ["jinja2 (>=3,<4)"] - -[package.source] -type 
= "directory" -url = "../libs/core" - -[[package]] -name = "langchain-text-splitters" -version = "0.0.1" -description = "LangChain text splitting utilities" -optional = false -python-versions = ">=3.8.1,<4.0" -files = [ - {file = "langchain_text_splitters-0.0.1-py3-none-any.whl", hash = "sha256:f5b802f873f5ff6a8b9259ff34d53ed989666ef4e1582e6d1adb3b5520e3839a"}, - {file = "langchain_text_splitters-0.0.1.tar.gz", hash = "sha256:ac459fa98799f5117ad5425a9330b21961321e30bc19a2a2f9f761ddadd62aa1"}, -] - -[package.dependencies] -langchain-core = ">=0.1.28,<0.2.0" - -[package.extras] -extended-testing = ["lxml (>=5.1.0,<6.0.0)"] - -[[package]] -name = "langserve" -version = "0.0.51" -description = "" -optional = false -python-versions = ">=3.8.1,<4.0.0" -files = [ - {file = "langserve-0.0.51-py3-none-any.whl", hash = "sha256:e735eef2b6fde7e1514f4be8234b9f0727283e639822ca9c25e8ccc2d24e8492"}, - {file = "langserve-0.0.51.tar.gz", hash = "sha256:036c0104c512bcc2c2406ae089ef9e7e718c32c39ebf6dcb2212f168c7d09816"}, -] - -[package.dependencies] -fastapi = {version = ">=0.90.1,<1", optional = true, markers = "extra == \"server\" or extra == \"all\""} -httpx = ">=0.23.0" -httpx-sse = {version = ">=0.3.1", optional = true, markers = "extra == \"client\" or extra == \"all\""} -langchain = ">=0.0.333" -orjson = ">=2" -pydantic = ">=1" -sse-starlette = {version = ">=1.3.0,<2.0.0", optional = true, markers = "extra == \"server\" or extra == \"all\""} - -[package.extras] -all = ["fastapi (>=0.90.1,<1)", "httpx-sse (>=0.3.1)", "sse-starlette (>=1.3.0,<2.0.0)"] -client = ["httpx-sse (>=0.3.1)"] -server = ["fastapi (>=0.90.1,<1)", "sse-starlette (>=1.3.0,<2.0.0)"] - -[[package]] -name = "langsmith" -version = "0.1.24" -description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." -optional = false -python-versions = ">=3.8.1,<4.0" -files = [ - {file = "langsmith-0.1.24-py3-none-any.whl", hash = "sha256:898ef5265bca8fc912f7fbf207e1d69cacd86055faecf6811bd42641e6319840"}, - {file = "langsmith-0.1.24.tar.gz", hash = "sha256:432b829e763f5077df411bc59bb35449813f18174d2ebc8bbbb38427071d5e7d"}, -] - -[package.dependencies] -orjson = ">=3.9.14,<4.0.0" -pydantic = ">=1,<3" -requests = ">=2,<3" - -[[package]] -name = "markdown-it-py" -version = "3.0.0" -description = "Python port of markdown-it. Markdown parsing, done right!" -optional = false -python-versions = ">=3.8" -files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, -] - -[package.dependencies] -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - -[[package]] -name = "marshmallow" -version = "3.21.1" -description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "marshmallow-3.21.1-py3-none-any.whl", hash = "sha256:f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633"}, - {file = "marshmallow-3.21.1.tar.gz", hash = "sha256:4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3"}, -] - -[package.dependencies] -packaging = ">=17.0" - -[package.extras] -dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.2.6)", "sphinx-issues (==4.0.0)", "sphinx-version-warning (==1.1.2)"] -tests = ["pytest", "pytz", "simplejson"] - -[[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, -] - -[[package]] -name = "multidict" -version = "6.0.5" -description = "multidict implementation" -optional = false -python-versions = ">=3.7" -files = [ - {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"}, - {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"}, - {file = "multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc"}, - {file = "multidict-6.0.5-cp310-cp310-win32.whl", hash = "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319"}, - {file = "multidict-6.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8"}, - {file = 
"multidict-6.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba"}, - {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e"}, - {file = "multidict-6.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e"}, - {file = "multidict-6.0.5-cp311-cp311-win32.whl", hash = "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c"}, - {file = "multidict-6.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226"}, - {file = 
"multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda"}, - {file = "multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5"}, - {file = "multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556"}, - {file = "multidict-6.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc"}, - {file = "multidict-6.0.5-cp37-cp37m-win32.whl", hash = "sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee"}, - {file = "multidict-6.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423"}, - {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54"}, - {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d"}, - {file = 
"multidict-6.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44"}, - {file = "multidict-6.0.5-cp38-cp38-win32.whl", hash = "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241"}, - {file = "multidict-6.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c"}, - {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929"}, - {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9"}, - {file = "multidict-6.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c"}, - {file = "multidict-6.0.5-cp39-cp39-win32.whl", hash = "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b"}, - {file = "multidict-6.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755"}, - {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"}, - {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"}, -] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -optional = false -python-versions = ">=3.5" -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "numpy" -version = "1.26.4" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.9" -files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", 
hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, -] - -[[package]] -name = "orjson" -version = "3.9.15" -description = "Fast, correct Python JSON library supporting dataclasses, 
datetimes, and numpy" -optional = false -python-versions = ">=3.8" -files = [ - {file = "orjson-3.9.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d61f7ce4727a9fa7680cd6f3986b0e2c732639f46a5e0156e550e35258aa313a"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4feeb41882e8aa17634b589533baafdceb387e01e117b1ec65534ec724023d04"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fbbeb3c9b2edb5fd044b2a070f127a0ac456ffd079cb82746fc84af01ef021a4"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b66bcc5670e8a6b78f0313bcb74774c8291f6f8aeef10fe70e910b8040f3ab75"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2973474811db7b35c30248d1129c64fd2bdf40d57d84beed2a9a379a6f57d0ab"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fe41b6f72f52d3da4db524c8653e46243c8c92df826ab5ffaece2dba9cccd58"}, - {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4228aace81781cc9d05a3ec3a6d2673a1ad0d8725b4e915f1089803e9efd2b99"}, - {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f7b65bfaf69493c73423ce9db66cfe9138b2f9ef62897486417a8fcb0a92bfe"}, - {file = "orjson-3.9.15-cp310-none-win32.whl", hash = "sha256:2d99e3c4c13a7b0fb3792cc04c2829c9db07838fb6973e578b85c1745e7d0ce7"}, - {file = "orjson-3.9.15-cp310-none-win_amd64.whl", hash = "sha256:b725da33e6e58e4a5d27958568484aa766e825e93aa20c26c91168be58e08cbb"}, - {file = "orjson-3.9.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c8e8fe01e435005d4421f183038fc70ca85d2c1e490f51fb972db92af6e047c2"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87f1097acb569dde17f246faa268759a71a2cb8c96dd392cd25c668b104cad2f"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff0f9913d82e1d1fadbd976424c316fbc4d9c525c81d047bbdd16bd27dd98cfc"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8055ec598605b0077e29652ccfe9372247474375e0e3f5775c91d9434e12d6b1"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6768a327ea1ba44c9114dba5fdda4a214bdb70129065cd0807eb5f010bfcbb5"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12365576039b1a5a47df01aadb353b68223da413e2e7f98c02403061aad34bde"}, - {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:71c6b009d431b3839d7c14c3af86788b3cfac41e969e3e1c22f8a6ea13139404"}, - {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e18668f1bd39e69b7fed19fa7cd1cd110a121ec25439328b5c89934e6d30d357"}, - {file = "orjson-3.9.15-cp311-none-win32.whl", hash = "sha256:62482873e0289cf7313461009bf62ac8b2e54bc6f00c6fabcde785709231a5d7"}, - {file = "orjson-3.9.15-cp311-none-win_amd64.whl", hash = "sha256:b3d336ed75d17c7b1af233a6561cf421dee41d9204aa3cfcc6c9c65cd5bb69a8"}, - {file = "orjson-3.9.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:82425dd5c7bd3adfe4e94c78e27e2fa02971750c2b7ffba648b0f5d5cc016a73"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2c51378d4a8255b2e7c1e5cc430644f0939539deddfa77f6fac7b56a9784160a"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6ae4e06be04dc00618247c4ae3f7c3e561d5bc19ab6941427f6d3722a0875ef7"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcef128f970bb63ecf9a65f7beafd9b55e3aaf0efc271a4154050fc15cdb386e"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b72758f3ffc36ca566ba98a8e7f4f373b6c17c646ff8ad9b21ad10c29186f00d"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c57bc7b946cf2efa67ac55766e41764b66d40cbd9489041e637c1304400494"}, - {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:946c3a1ef25338e78107fba746f299f926db408d34553b4754e90a7de1d44068"}, - {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2f256d03957075fcb5923410058982aea85455d035607486ccb847f095442bda"}, - {file = "orjson-3.9.15-cp312-none-win_amd64.whl", hash = "sha256:5bb399e1b49db120653a31463b4a7b27cf2fbfe60469546baf681d1b39f4edf2"}, - {file = "orjson-3.9.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b17f0f14a9c0ba55ff6279a922d1932e24b13fc218a3e968ecdbf791b3682b25"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f6cbd8e6e446fb7e4ed5bac4661a29e43f38aeecbf60c4b900b825a353276a1"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76bc6356d07c1d9f4b782813094d0caf1703b729d876ab6a676f3aaa9a47e37c"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdfa97090e2d6f73dced247a2f2d8004ac6449df6568f30e7fa1a045767c69a6"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7413070a3e927e4207d00bd65f42d1b780fb0d32d7b1d951f6dc6ade318e1b5a"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9cf1596680ac1f01839dba32d496136bdd5d8ffb858c280fa82bbfeb173bdd40"}, - {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:809d653c155e2cc4fd39ad69c08fdff7f4016c355ae4b88905219d3579e31eb7"}, - {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:920fa5a0c5175ab14b9c78f6f820b75804fb4984423ee4c4f1e6d748f8b22bc1"}, - {file = "orjson-3.9.15-cp38-none-win32.whl", hash = "sha256:2b5c0f532905e60cf22a511120e3719b85d9c25d0e1c2a8abb20c4dede3b05a5"}, - {file = "orjson-3.9.15-cp38-none-win_amd64.whl", hash = "sha256:67384f588f7f8daf040114337d34a5188346e3fae6c38b6a19a2fe8c663a2f9b"}, - {file = "orjson-3.9.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6fc2fe4647927070df3d93f561d7e588a38865ea0040027662e3e541d592811e"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34cbcd216e7af5270f2ffa63a963346845eb71e174ea530867b7443892d77180"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f541587f5c558abd93cb0de491ce99a9ef8d1ae29dd6ab4dbb5a13281ae04cbd"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92255879280ef9c3c0bcb327c5a1b8ed694c290d61a6a532458264f887f052cb"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:05a1f57fb601c426635fcae9ddbe90dfc1ed42245eb4c75e4960440cac667262"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ede0bde16cc6e9b96633df1631fbcd66491d1063667f260a4f2386a098393790"}, - {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e88b97ef13910e5f87bcbc4dd7979a7de9ba8702b54d3204ac587e83639c0c2b"}, - {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57d5d8cf9c27f7ef6bc56a5925c7fbc76b61288ab674eb352c26ac780caa5b10"}, - {file = "orjson-3.9.15-cp39-none-win32.whl", hash = "sha256:001f4eb0ecd8e9ebd295722d0cbedf0748680fb9998d3993abaed2f40587257a"}, - {file = "orjson-3.9.15-cp39-none-win_amd64.whl", hash = "sha256:ea0b183a5fe6b2b45f3b854b0d19c4e932d6f5934ae1f723b07cf9560edd4ec7"}, - {file = "orjson-3.9.15.tar.gz", hash = "sha256:95cae920959d772f30ab36d3b25f83bb0f3be671e986c72ce22f8fa700dae061"}, -] - -[[package]] -name = "packaging" -version = "23.2" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, -] - -[[package]] -name = "pastel" -version = "0.2.1" -description = "Bring colors to your terminal." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pastel-0.2.1-py2.py3-none-any.whl", hash = "sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364"}, - {file = "pastel-0.2.1.tar.gz", hash = "sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d"}, -] - -[[package]] -name = "pluggy" -version = "1.4.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "poethepoet" -version = "0.24.4" -description = "A task runner that works well with poetry." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "poethepoet-0.24.4-py3-none-any.whl", hash = "sha256:fb4ea35d7f40fe2081ea917d2e4102e2310fda2cde78974050ca83896e229075"}, - {file = "poethepoet-0.24.4.tar.gz", hash = "sha256:ff4220843a87c888cbcb5312c8905214701d0af60ac7271795baa8369b428fef"}, -] - -[package.dependencies] -pastel = ">=0.2.1,<0.3.0" -tomli = ">=1.2.2" - -[package.extras] -poetry-plugin = ["poetry (>=1.0,<2.0)"] - -[[package]] -name = "pydantic" -version = "2.6.4" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"}, - {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"}, -] - -[package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.16.3" -typing-extensions = ">=4.6.1" - -[package.extras] -email = ["email-validator (>=2.0.0)"] - -[[package]] -name = "pydantic-core" -version = "2.16.3" -description = "" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, - {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, - {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, - {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, - {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, - {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, - {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, - {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = 
"sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, - {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, - {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, - {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, - {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, - {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, - {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "pygments" -version = "2.17.2" -description = "Pygments is a syntax highlighting package written in Python." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, - {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, -] - -[package.extras] -plugins = ["importlib-metadata"] -windows-terminal = ["colorama (>=0.4.6)"] - -[[package]] -name = "pytest" -version = "7.4.4" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, - {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} - -[package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-watch" -version = "4.2.0" -description = "Local continuous test runner with pytest and watchdog." -optional = false -python-versions = "*" -files = [ - {file = "pytest-watch-4.2.0.tar.gz", hash = "sha256:06136f03d5b361718b8d0d234042f7b2f203910d8568f63df2f866b547b3d4b9"}, -] - -[package.dependencies] -colorama = ">=0.3.3" -docopt = ">=0.4.0" -pytest = ">=2.6.4" -watchdog = ">=0.6.0" - -[[package]] -name = "pyyaml" -version = "6.0.1" -description = "YAML parser and emitter for Python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = 
"PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = 
"sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, -] - -[[package]] -name = "requests" -version = "2.31.0" -description = "Python HTTP for Humans." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "rich" -version = "13.7.1" -description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, - {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, -] - -[package.dependencies] -markdown-it-py = ">=2.2.0" -pygments = ">=2.13.0,<3.0.0" - -[package.extras] -jupyter = ["ipywidgets (>=7.5.1,<9)"] - -[[package]] -name = "ruff" -version = "0.1.15" -description = "An extremely fast Python linter and code formatter, written in Rust." -optional = false -python-versions = ">=3.7" -files = [ - {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"}, - {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"}, - {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"}, - {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = 
"sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"}, - {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"}, - {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"}, -] - -[[package]] -name = "shellingham" -version = "1.5.4" -description = "Tool to Detect Surrounding Shell" -optional = false -python-versions = ">=3.7" -files = [ - {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, - {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, -] - -[[package]] -name = "smmap" -version = "5.0.1" -description = "A pure Python implementation of a sliding window memory map manager" -optional = false -python-versions = ">=3.7" -files = [ - {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, - {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] - -[[package]] -name = "sqlalchemy" -version = "2.0.28" -description = "Database Abstraction Library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0b148ab0438f72ad21cb004ce3bdaafd28465c4276af66df3b9ecd2037bf252"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbda76961eb8f27e6ad3c84d1dc56d5bc61ba8f02bd20fcf3450bd421c2fcc9c"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feea693c452d85ea0015ebe3bb9cd15b6f49acc1a31c28b3c50f4db0f8fb1e71"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5da98815f82dce0cb31fd1e873a0cb30934971d15b74e0d78cf21f9e1b05953f"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5adf383c73f2d49ad15ff363a8748319ff84c371eed59ffd0127355d6ea1da"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56856b871146bfead25fbcaed098269d90b744eea5cb32a952df00d542cdd368"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-win32.whl", hash = "sha256:943aa74a11f5806ab68278284a4ddd282d3fb348a0e96db9b42cb81bf731acdc"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-win_amd64.whl", hash = "sha256:c6c4da4843e0dabde41b8f2e8147438330924114f541949e6318358a56d1875a"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46a3d4e7a472bfff2d28db838669fc437964e8af8df8ee1e4548e92710929adc"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3dd67b5d69794cfe82862c002512683b3db038b99002171f624712fa71aeaa"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61e2e41656a673b777e2f0cbbe545323dbe0d32312f590b1bc09da1de6c2a02"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:0315d9125a38026227f559488fe7f7cee1bd2fbc19f9fd637739dc50bb6380b2"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:af8ce2d31679006e7b747d30a89cd3ac1ec304c3d4c20973f0f4ad58e2d1c4c9"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:81ba314a08c7ab701e621b7ad079c0c933c58cdef88593c59b90b996e8b58fa5"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-win32.whl", hash = "sha256:1ee8bd6d68578e517943f5ebff3afbd93fc65f7ef8f23becab9fa8fb315afb1d"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-win_amd64.whl", hash = "sha256:ad7acbe95bac70e4e687a4dc9ae3f7a2f467aa6597049eeb6d4a662ecd990bb6"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d3499008ddec83127ab286c6f6ec82a34f39c9817f020f75eca96155f9765097"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9b66fcd38659cab5d29e8de5409cdf91e9986817703e1078b2fdaad731ea66f5"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea30da1e76cb1acc5b72e204a920a3a7678d9d52f688f087dc08e54e2754c67"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:124202b4e0edea7f08a4db8c81cc7859012f90a0d14ba2bf07c099aff6e96462"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e23b88c69497a6322b5796c0781400692eca1ae5532821b39ce81a48c395aae9"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b6303bfd78fb3221847723104d152e5972c22367ff66edf09120fcde5ddc2e2"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-win32.whl", hash = "sha256:a921002be69ac3ab2cf0c3017c4e6a3377f800f1fca7f254c13b5f1a2f10022c"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-win_amd64.whl", hash = "sha256:b4a2cf92995635b64876dc141af0ef089c6eea7e05898d8d8865e71a326c0385"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e91b5e341f8c7f1e5020db8e5602f3ed045a29f8e27f7f565e0bdee3338f2c7"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c7b78dfc7278329f27be02c44abc0d69fe235495bb8e16ec7ef1b1a17952db"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3eba73ef2c30695cb7eabcdb33bb3d0b878595737479e152468f3ba97a9c22a4"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5df5d1dafb8eee89384fb7a1f79128118bc0ba50ce0db27a40750f6f91aa99d5"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2858bbab1681ee5406650202950dc8f00e83b06a198741b7c656e63818633526"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-win32.whl", hash = "sha256:9461802f2e965de5cff80c5a13bc945abea7edaa1d29360b485c3d2b56cdb075"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-win_amd64.whl", hash = "sha256:a6bec1c010a6d65b3ed88c863d56b9ea5eeefdf62b5e39cafd08c65f5ce5198b"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:843a882cadebecc655a68bd9a5b8aa39b3c52f4a9a5572a3036fb1bb2ccdc197"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dbb990612c36163c6072723523d2be7c3eb1517bbdd63fe50449f56afafd1133"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7e4baf9161d076b9a7e432fce06217b9bd90cfb8f1d543d6e8c4595627edb9"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e0a5354cb4de9b64bccb6ea33162cb83e03dbefa0d892db88a672f5aad638a75"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fffcc8edc508801ed2e6a4e7b0d150a62196fd28b4e16ab9f65192e8186102b6"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca7b6d99a4541b2ebab4494f6c8c2f947e0df4ac859ced575238e1d6ca5716b"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-win32.whl", hash = "sha256:8c7f10720fc34d14abad5b647bc8202202f4948498927d9f1b4df0fb1cf391b7"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-win_amd64.whl", hash = "sha256:243feb6882b06a2af68ecf4bec8813d99452a1b62ba2be917ce6283852cf701b"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fc4974d3684f28b61b9a90fcb4c41fb340fd4b6a50c04365704a4da5a9603b05"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87724e7ed2a936fdda2c05dbd99d395c91ea3c96f029a033a4a20e008dd876bf"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68722e6a550f5de2e3cfe9da6afb9a7dd15ef7032afa5651b0f0c6b3adb8815d"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:328529f7c7f90adcd65aed06a161851f83f475c2f664a898af574893f55d9e53"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:df40c16a7e8be7413b885c9bf900d402918cc848be08a59b022478804ea076b8"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:426f2fa71331a64f5132369ede5171c52fd1df1bd9727ce621f38b5b24f48750"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-win32.whl", hash = "sha256:33157920b233bc542ce497a81a2e1452e685a11834c5763933b440fedd1d8e2d"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-win_amd64.whl", hash = "sha256:2f60843068e432311c886c5f03c4664acaef507cf716f6c60d5fde7265be9d7b"}, - {file = "SQLAlchemy-2.0.28-py3-none-any.whl", hash = "sha256:78bb7e8da0183a8301352d569900d9d3594c48ac21dc1c2ec6b3121ed8b6c986"}, - {file = "SQLAlchemy-2.0.28.tar.gz", hash = "sha256:dd53b6c4e6d960600fd6532b79ee28e2da489322fcf6648738134587faf767b6"}, -] - -[package.dependencies] -greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} -typing-extensions = ">=4.6.0" - -[package.extras] -aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] -aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] -mssql = ["pyodbc"] -mssql-pymssql = ["pymssql"] -mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910)"] -mysql = ["mysqlclient (>=1.4.0)"] -mysql-connector = ["mysql-connector-python"] -oracle = ["cx_oracle (>=8)"] -oracle-oracledb = ["oracledb (>=1.0.1)"] -postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] -postgresql-pg8000 = ["pg8000 (>=1.29.1)"] -postgresql-psycopg = ["psycopg (>=3.0.7)"] -postgresql-psycopg2binary = ["psycopg2-binary"] -postgresql-psycopg2cffi = ["psycopg2cffi"] -postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] -pymysql = ["pymysql"] -sqlcipher = ["sqlcipher3_binary"] - -[[package]] -name = "sse-starlette" -version = "1.8.2" -description = "SSE plugin for 
Starlette" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sse_starlette-1.8.2-py3-none-any.whl", hash = "sha256:70cc7ef5aca4abe8a25dec1284cce4fe644dd7bf0c406d3e852e516092b7f849"}, - {file = "sse_starlette-1.8.2.tar.gz", hash = "sha256:e0f9b8dec41adc092a0a6e0694334bd3cfd3084c44c497a6ebc1fb4bdd919acd"}, -] - -[package.dependencies] -anyio = "*" -fastapi = "*" -starlette = "*" -uvicorn = "*" - -[[package]] -name = "starlette" -version = "0.36.3" -description = "The little ASGI library that shines." -optional = false -python-versions = ">=3.8" -files = [ - {file = "starlette-0.36.3-py3-none-any.whl", hash = "sha256:13d429aa93a61dc40bf503e8c801db1f1bca3dc706b10ef2434a36123568f044"}, - {file = "starlette-0.36.3.tar.gz", hash = "sha256:90a671733cfb35771d8cc605e0b679d23b992f8dcfad48cc60b38cb29aeb7080"}, -] - -[package.dependencies] -anyio = ">=3.4.0,<5" - -[package.extras] -full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] - -[[package]] -name = "tenacity" -version = "8.2.3" -description = "Retry code until it succeeds" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, - {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, -] - -[package.extras] -doc = ["reno", "sphinx", "tornado (>=4.5)"] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "tomlkit" -version = "0.12.4" -description = "Style preserving TOML library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomlkit-0.12.4-py3-none-any.whl", hash = "sha256:5cd82d48a3dd89dee1f9d64420aa20ae65cfbd00668d6f094d7578a78efbb77b"}, - {file = "tomlkit-0.12.4.tar.gz", hash = "sha256:7ca1cfc12232806517a8515047ba66a19369e71edf2439d0f5824f91032b6cc3"}, -] - -[[package]] -name = "typer" -version = "0.9.0" -description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, - {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, -] - -[package.dependencies] -click = ">=7.1.1,<9.0.0" -colorama = {version = ">=0.4.3,<0.5.0", optional = true, markers = "extra == \"all\""} -rich = {version = ">=10.11.0,<14.0.0", optional = true, markers = "extra == \"all\""} -shellingham = {version = ">=1.3.0,<2.0.0", optional = true, markers = "extra == \"all\""} -typing-extensions = ">=3.7.4.3" - -[package.extras] -all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] -dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] -doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] -test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] - -[[package]] -name = "typing-extensions" -version = "4.10.0" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -files = [ - {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, - {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, -] - -[[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." -optional = false -python-versions = "*" -files = [ - {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, - {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, -] - -[package.dependencies] -mypy-extensions = ">=0.3.0" -typing-extensions = ">=3.7.4" - -[[package]] -name = "urllib3" -version = "2.2.1" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = false -python-versions = ">=3.8" -files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "uvicorn" -version = "0.23.2" -description = "The lightning-fast ASGI server." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "uvicorn-0.23.2-py3-none-any.whl", hash = "sha256:1f9be6558f01239d4fdf22ef8126c39cb1ad0addf76c40e760549d2c2f43ab53"}, - {file = "uvicorn-0.23.2.tar.gz", hash = "sha256:4d3cc12d7727ba72b64d12d3cc7743124074c0a69f7b201512fc50c3e3f1569a"}, -] - -[package.dependencies] -click = ">=7.0" -h11 = ">=0.8" -typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} - -[package.extras] -standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] - -[[package]] -name = "watchdog" -version = "4.0.0" -description = "Filesystem events monitoring" -optional = false -python-versions = ">=3.8" -files = [ - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, - {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, - {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, - {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, - {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, - {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, - {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, - {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, -] - -[package.extras] -watchmedo = ["PyYAML (>=3.10)"] - -[[package]] -name = "yarl" -version = "1.9.4" -description = "Yet another URL library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, - {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, - {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, - {file = 
"yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, - {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, - {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, - {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, - {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, - {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, - {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, - {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, - {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, - {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, - {file = 
"yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, - {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, - {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, - {file = 
"yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, - {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, - {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, - {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, - {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, -] - -[package.dependencies] -idna = ">=2.0" -multidict = ">=4.0" - -[metadata] -lock-version = "2.0" -python-versions = "^3.10" -content-hash = "a086dab31a893613f89fb7ddaffc999550289dad7ea256da88217ab3670e83ed" diff --git a/templates/propositional-retrieval/.gitignore b/templates/propositional-retrieval/.gitignore deleted file mode 100644 index 86997822350..00000000000 --- a/templates/propositional-retrieval/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -docs/img_*.jpg -chroma_db_proposals -multi_vector_retriever_metadata diff --git a/templates/propositional-retrieval/LICENSE b/templates/propositional-retrieval/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/propositional-retrieval/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/propositional-retrieval/README.md b/templates/propositional-retrieval/README.md deleted file mode 100644 index 1e7f326d80c..00000000000 --- a/templates/propositional-retrieval/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Propositional retrieval - -This template demonstrates the multi-vector indexing strategy proposed by Chen, et. al.'s [Dense X Retrieval: What Retrieval Granularity Should We Use?](https://arxiv.org/abs/2312.06648). 
The prompt, which you can [try out on the hub](https://smith.langchain.com/hub/wfh/proposal-indexing), directs an LLM to generate de-contextualized "propositions" which can be vectorized to increase retrieval accuracy. You can see the full definition in `proposal_chain.py`. - -![Diagram illustrating the multi-vector indexing strategy for information retrieval, showing the process from Wikipedia data through a Proposition-izer to FactoidWiki, and the retrieval of information units for a QA model.](https://github.com/langchain-ai/langchain/raw/master/templates/propositional-retrieval/_images/retriever_diagram.png "Retriever Diagram") - -## Storage - -For this demo, we index a simple academic paper using the RecursiveUrlLoader, and store all retriever information locally (using Chroma and a byte store on the local filesystem). You can modify the storage layer in `storage.py`. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access `gpt-3.5` and the OpenAI Embeddings classes. - -## Indexing - -Create the index by running the following: - -```shell -poetry install -poetry run python propositional_retrieval/ingest.py -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package propositional-retrieval -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add propositional-retrieval -``` - -And add the following code to your `server.py` file: - -```python -from propositional_retrieval import chain - -add_routes(app, chain, path="/propositional-retrieval") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/propositional-retrieval/playground](http://127.0.0.1:8000/propositional-retrieval/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/propositional-retrieval") -``` diff --git a/templates/propositional-retrieval/_images/retriever_diagram.png b/templates/propositional-retrieval/_images/retriever_diagram.png deleted file mode 100644 index a254a448778..00000000000 Binary files a/templates/propositional-retrieval/_images/retriever_diagram.png and /dev/null differ diff --git a/templates/propositional-retrieval/propositional_retrieval.ipynb b/templates/propositional-retrieval/propositional_retrieval.ipynb deleted file mode 100644 index 98b37e29d4b..00000000000 --- a/templates/propositional-retrieval/propositional_retrieval.ipynb +++ /dev/null @@ -1,68 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "from fastapi import FastAPI\n", - "from langserve import add_routes\n", - "from propositional_retrieval import chain\n", - "\n", - "app = FastAPI(\n", - " title=\"LangChain Server\",\n", - " version=\"1.0\",\n", - " description=\"Retriever and Generator for RAG Chroma Dense Retrieval\",\n", - ")\n", - "\n", - "add_routes(app, chain, path=\"/propositional-retrieval\")\n", - "\n", - "if __name__ == \"__main__\":\n", - " import uvicorn\n", - "\n", - " uvicorn.run(app, host=\"localhost\", port=8000)\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/propositional-retrieval\")\n", - "rag_app.invoke(\"How are transformers related to convolutional neural networks?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/propositional-retrieval/propositional_retrieval/__init__.py b/templates/propositional-retrieval/propositional_retrieval/__init__.py deleted file mode 100644 index e83774b0078..00000000000 --- a/templates/propositional-retrieval/propositional_retrieval/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from propositional_retrieval.chain import chain -from propositional_retrieval.proposal_chain import proposition_chain - -__all__ = ["chain", "proposition_chain"] diff --git a/templates/propositional-retrieval/propositional_retrieval/chain.py 
b/templates/propositional-retrieval/propositional_retrieval/chain.py deleted file mode 100644 index 468e694e900..00000000000 --- a/templates/propositional-retrieval/propositional_retrieval/chain.py +++ /dev/null @@ -1,67 +0,0 @@ -from langchain_community.chat_models import ChatOpenAI -from langchain_core.load import load -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough - -from propositional_retrieval.constants import DOCSTORE_ID_KEY -from propositional_retrieval.storage import get_multi_vector_retriever - - -def format_docs(docs: list) -> str: - loaded_docs = [load(doc) for doc in docs] - return "\n".join( - [ - f"\n{doc.page_content}\n" - for i, doc in enumerate(loaded_docs) - ] - ) - - -def rag_chain(retriever): - """ - The RAG chain - - :param retriever: A function that retrieves the necessary context for the model. - :return: A chain of functions representing the multi-modal RAG process. - """ - model = ChatOpenAI(temperature=0, model="gpt-4-1106-preview", max_tokens=1024) - prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are an AI assistant. Answer based on the retrieved documents:" - "\n\n{context}\n", - ), - ("user", "{question}?"), - ] - ) - - # Define the RAG pipeline - chain = ( - { - "context": retriever | format_docs, - "question": RunnablePassthrough(), - } - | prompt - | model - | StrOutputParser() - ) - - return chain - - -# Create the multi-vector retriever -retriever = get_multi_vector_retriever(DOCSTORE_ID_KEY) - -# Create RAG chain -chain = rag_chain(retriever) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/propositional-retrieval/propositional_retrieval/constants.py b/templates/propositional-retrieval/propositional_retrieval/constants.py deleted file mode 100644 index 5447a40dcc8..00000000000 --- a/templates/propositional-retrieval/propositional_retrieval/constants.py +++ /dev/null @@ -1 +0,0 @@ -DOCSTORE_ID_KEY = "doc_id" diff --git a/templates/propositional-retrieval/propositional_retrieval/ingest.py b/templates/propositional-retrieval/propositional_retrieval/ingest.py deleted file mode 100644 index 1af11ad3f9c..00000000000 --- a/templates/propositional-retrieval/propositional_retrieval/ingest.py +++ /dev/null @@ -1,93 +0,0 @@ -import logging -import uuid -from typing import Sequence - -from bs4 import BeautifulSoup as Soup -from langchain_core.documents import Document -from langchain_core.runnables import Runnable - -from propositional_retrieval.constants import DOCSTORE_ID_KEY -from propositional_retrieval.proposal_chain import proposition_chain -from propositional_retrieval.storage import get_multi_vector_retriever - -logging.basicConfig(level=logging.INFO) - -logger = logging.getLogger(__name__) - - -def add_documents( - retriever, - propositions: Sequence[Sequence[str]], - docs: Sequence[Document], - id_key: str = DOCSTORE_ID_KEY, -): - doc_ids = [ - str(uuid.uuid5(uuid.NAMESPACE_DNS, doc.metadata["source"])) for doc in docs - ] - prop_docs = [ - Document(page_content=prop, metadata={id_key: doc_ids[i]}) - for i, props in enumerate(propositions) - for prop in props - if prop - ] - retriever.vectorstore.add_documents(prop_docs) - retriever.docstore.mset(list(zip(doc_ids, docs))) - - -def create_index( - docs: Sequence[Document], - indexer: Runnable, - docstore_id_key: str = 
DOCSTORE_ID_KEY, -): - """ - Create retriever that indexes docs and their propositions - - :param docs: Documents to index - :param indexer: Runnable creates additional propositions per doc - :param docstore_id_key: Key to use to store the docstore id - :return: Retriever - """ - logger.info("Creating multi-vector retriever") - retriever = get_multi_vector_retriever(docstore_id_key) - propositions = indexer.batch( - [{"input": doc.page_content} for doc in docs], {"max_concurrency": 10} - ) - - add_documents( - retriever, - propositions, - docs, - id_key=docstore_id_key, - ) - - return retriever - - -if __name__ == "__main__": - # For our example, we'll load docs from the web - from langchain_text_splitters import RecursiveCharacterTextSplitter # noqa - from langchain_community.document_loaders.recursive_url_loader import ( - RecursiveUrlLoader, - ) - - # The attention is all you need paper - # Could add more parsing here, as it's very raw. - loader = RecursiveUrlLoader( - "https://ar5iv.labs.arxiv.org/html/1706.03762", - max_depth=2, - extractor=lambda x: Soup(x, "html.parser").text, - ) - data = loader.load() - logger.info(f"Loaded {len(data)} documents") - - # Split - text_splitter = RecursiveCharacterTextSplitter(chunk_size=8000, chunk_overlap=0) - all_splits = text_splitter.split_documents(data) - logger.info(f"Split into {len(all_splits)} documents") - - # Create retriever - retriever_multi_vector_img = create_index( - all_splits, - proposition_chain, - DOCSTORE_ID_KEY, - ) diff --git a/templates/propositional-retrieval/propositional_retrieval/proposal_chain.py b/templates/propositional-retrieval/propositional_retrieval/proposal_chain.py deleted file mode 100644 index 99c98025d18..00000000000 --- a/templates/propositional-retrieval/propositional_retrieval/proposal_chain.py +++ /dev/null @@ -1,107 +0,0 @@ -import logging - -from langchain.output_parsers.openai_tools import JsonOutputToolsParser -from langchain_community.chat_models import ChatOpenAI -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnableLambda - -logging.basicConfig(level=logging.INFO) - -logger = logging.getLogger(__name__) - -# Modified from the paper to be more robust to benign prompt injection -# https://arxiv.org/abs/2312.06648 -# @misc{chen2023dense, -# title={Dense X Retrieval: What Retrieval Granularity Should We Use?}, -# author={Tong Chen and Hongwei Wang and Sihao Chen and Wenhao Yu and Kaixin Ma -# and Xinran Zhao and Hongming Zhang and Dong Yu}, -# year={2023}, -# eprint={2312.06648}, -# archivePrefix={arXiv}, -# primaryClass={cs.CL} -# } -PROMPT = ChatPromptTemplate.from_messages( - [ - ( - "system", - """Decompose the "Content" into clear and simple propositions, ensuring they are interpretable out of -context. -1. Split compound sentence into simple sentences. Maintain the original phrasing from the input -whenever possible. -2. For any named entity that is accompanied by additional descriptive information, separate this -information into its own distinct proposition. -3. Decontextualize the proposition by adding necessary modifier to nouns or entire sentences -and replacing pronouns (e.g., "it", "he", "she", "they", "this", "that") with the full name of the -entities they refer to. -4. Present the results as a list of strings, formatted in JSON. - -Example: - -Input: Title: ¯Eostre. Section: Theories and interpretations, Connection to Easter Hares. 
Content: -The earliest evidence for the Easter Hare (Osterhase) was recorded in south-west Germany in -1678 by the professor of medicine Georg Franck von Franckenau, but it remained unknown in -other parts of Germany until the 18th century. Scholar Richard Sermon writes that "hares were -frequently seen in gardens in spring, and thus may have served as a convenient explanation for the -origin of the colored eggs hidden there for children. Alternatively, there is a European tradition -that hares laid eggs, since a hare’s scratch or form and a lapwing’s nest look very similar, and -both occur on grassland and are first seen in the spring. In the nineteenth century the influence -of Easter cards, toys, and books was to make the Easter Hare/Rabbit popular throughout Europe. -German immigrants then exported the custom to Britain and America where it evolved into the -Easter Bunny." -Output: [ "The earliest evidence for the Easter Hare was recorded in south-west Germany in -1678 by Georg Franck von Franckenau.", "Georg Franck von Franckenau was a professor of -medicine.", "The evidence for the Easter Hare remained unknown in other parts of Germany until -the 18th century.", "Richard Sermon was a scholar.", "Richard Sermon writes a hypothesis about -the possible explanation for the connection between hares and the tradition during Easter", "Hares -were frequently seen in gardens in spring.", "Hares may have served as a convenient explanation -for the origin of the colored eggs hidden in gardens for children.", "There is a European tradition -that hares laid eggs.", "A hare’s scratch or form and a lapwing’s nest look very similar.", "Both -hares and lapwing’s nests occur on grassland and are first seen in the spring.", "In the nineteenth -century the influence of Easter cards, toys, and books was to make the Easter Hare/Rabbit popular -throughout Europe.", "German immigrants exported the custom of the Easter Hare/Rabbit to -Britain and America.", "The custom of the Easter Hare/Rabbit evolved into the Easter Bunny in -Britain and America."]""", # noqa - ), - ("user", "Decompose the following:\n{input}"), - ] -) - - -def get_propositions(tool_calls: list) -> list: - if not tool_calls: - raise ValueError("No tool calls found") - return tool_calls[0]["args"]["propositions"] - - -def empty_proposals(x): - # Model couldn't generate proposals - return [] - - -proposition_chain = ( - PROMPT - | ChatOpenAI(model="gpt-3.5-turbo-16k").bind( - tools=[ - { - "type": "function", - "function": { - "name": "decompose_content", - "description": "Return the decomposed propositions", - "parameters": { - "type": "object", - "properties": { - "propositions": { - "type": "array", - "items": {"type": "string"}, - } - }, - "required": ["propositions"], - }, - }, - } - ], - tool_choice={"type": "function", "function": {"name": "decompose_content"}}, - ) - | JsonOutputToolsParser() - | get_propositions -).with_fallbacks([RunnableLambda(empty_proposals)]) diff --git a/templates/propositional-retrieval/propositional_retrieval/storage.py b/templates/propositional-retrieval/propositional_retrieval/storage.py deleted file mode 100644 index bed32dba5d0..00000000000 --- a/templates/propositional-retrieval/propositional_retrieval/storage.py +++ /dev/null @@ -1,38 +0,0 @@ -import logging -from pathlib import Path - -from langchain.retrievers.multi_vector import MultiVectorRetriever -from langchain.storage import LocalFileStore -from langchain_chroma import Chroma -from langchain_community.embeddings import OpenAIEmbeddings - 
-logging.basicConfig(level=logging.INFO) - -logger = logging.getLogger(__name__) - - -def get_multi_vector_retriever(docstore_id_key: str): - """Create the composed retriever object.""" - vectorstore = get_vectorstore() - store = get_docstore() - return MultiVectorRetriever( - vectorstore=vectorstore, - byte_store=store, - id_key=docstore_id_key, - ) - - -def get_vectorstore(collection_name: str = "proposals"): - """Get the vectorstore used for this example.""" - return Chroma( - collection_name=collection_name, - persist_directory=str(Path(__file__).parent.parent / "chroma_db_proposals"), - embedding_function=OpenAIEmbeddings(), - ) - - -def get_docstore(): - """Get the metadata store used for this example.""" - return LocalFileStore( - str(Path(__file__).parent.parent / "multi_vector_retriever_metadata") - ) diff --git a/templates/propositional-retrieval/pyproject.toml b/templates/propositional-retrieval/pyproject.toml deleted file mode 100644 index f691bf1260d..00000000000 --- a/templates/propositional-retrieval/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[tool.poetry] -name = "propositional-retrieval" -version = "0.1.0" -description = "Dense retrieval using vectorized propositions." -authors = [ - "William Fu-Hinthorn ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -bs4 = "^0.0.1" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_chroma_multi_modal_multi_vector" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Chroma"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/propositional-retrieval/tests/__init__.py b/templates/propositional-retrieval/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/pyproject.toml b/templates/pyproject.toml deleted file mode 100644 index 64cc37434b8..00000000000 --- a/templates/pyproject.toml +++ /dev/null @@ -1,63 +0,0 @@ -[tool.poetry] -name = "templates" -version = "0.0.0" -description = "" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = "^3.10" - - -# dev, test, lint, typing -langchain = ">=0.0.353,<0.2" - -[tool.poetry.group.dev] -optional = true - -[tool.poetry.group.dev.dependencies] -poethepoet = "^0.24.1" -pytest-watch = "^4.2.0" -langchain-cli = ">=0.0.21" - -[tool.poetry.group.test] -optional = true - -[tool.poetry.group.test.dependencies] -pytest = "^7.4.3" - -[tool.poetry.group.lint] -optional = true - -[tool.poetry.group.lint.dependencies] -poethepoet = "^0.24.1" -ruff = "^0.1.3" -langchain = { path = "../libs/langchain", develop = true } -langchain-core = { path = "../libs/core", develop = true } - -[tool.poetry.group.typing] -optional = true - -[tool.poetry.group.typing.dependencies] - -[tool.ruff.lint] -select = [ - "E", # pycodestyle - "F", # pyflakes - "I", # isort -] - -[tool.poe.tasks] -test = "poetry run pytest" -watch = "poetry run ptw" -lint = ["_lint", "_check_formatting"] -format = ["_lint_fix", "_format"] - -_check_formatting = "poetry run ruff format . --diff" -_lint = "poetry run ruff ." -_format = "poetry run ruff format ." -_lint_fix = "poetry run ruff . 
--fix" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/python-lint/.gitignore b/templates/python-lint/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/python-lint/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/python-lint/LICENSE b/templates/python-lint/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/python-lint/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/python-lint/README.md b/templates/python-lint/README.md deleted file mode 100644 index 3b2dfd914da..00000000000 --- a/templates/python-lint/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# Python linting - -This agent specializes in generating high-quality `Python` code with -a focus on proper formatting and linting. It uses `black`, `ruff`, and `mypy` to ensure the code meets standard quality checks. - -This streamlines the coding process by integrating and responding to these checks, resulting in reliable and consistent code output. - -It cannot actually execute the code it writes, as code execution may introduce additional dependencies and potential security vulnerabilities. -This makes the agent both a secure and efficient solution for code generation tasks. - -You can use it to generate Python code directly, or network it with planning and execution agents. - -## Environment Setup - -- Install `black`, `ruff`, and `mypy`: `pip install -U black ruff mypy` -- Set `OPENAI_API_KEY` environment variable. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package python-lint -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add python-lint -``` - -And add the following code to your `server.py` file: -```python -from python_lint import agent_executor as python_lint_agent - -add_routes(app, python_lint_agent, path="/python-lint") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/python-lint/playground](http://127.0.0.1:8000/python-lint/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/python-lint") -``` diff --git a/templates/python-lint/pyproject.toml b/templates/python-lint/pyproject.toml deleted file mode 100644 index 3cfcbbef296..00000000000 --- a/templates/python-lint/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[tool.poetry] -name = "python-lint" -version = "0.0.1" -description = "Python code-writing agent whose work is checked by black, ruff, and mypy." -authors = ["Joshua Sundance Bailey"] -readme = "README.md" - -[tool.poetry.dependencies] -ruff = ">=0.1.8" -black = "^24.2.0" -mypy = ">=1.7.1" -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = ">=1.3.9" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "python_lint" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "code-generation" -author = "Joshua Sundance Bailey" -integrations = ["OpenAI"] -tags = ["python", "agent"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/python-lint/python_lint/__init__.py b/templates/python-lint/python_lint/__init__.py deleted file mode 100644 index ad54eac8c44..00000000000 --- a/templates/python-lint/python_lint/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from python_lint.agent_executor import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/python-lint/python_lint/agent_executor.py b/templates/python-lint/python_lint/agent_executor.py deleted file mode 100644 index fb9f143e169..00000000000 --- a/templates/python-lint/python_lint/agent_executor.py +++ /dev/null @@ -1,216 +0,0 @@ -import os -import re -import subprocess # nosec -import tempfile - -from langchain.agents import AgentType, initialize_agent -from langchain.pydantic_v1 import BaseModel, Field, ValidationError, validator -from langchain_community.chat_models import ChatOpenAI -from langchain_core.language_models import BaseLLM -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import ConfigurableField, Runnable -from langchain_core.tools import Tool - - -def strip_python_markdown_tags(text: str) -> str: - pat = re.compile(r"```python\n(.*)```", re.DOTALL) - code = pat.match(text) - if code: - return code.group(1) - else: - return text - - -def format_black(filepath: str): - """Format a file with black.""" - subprocess.run( # nosec - f"black {filepath}", - stderr=subprocess.STDOUT, - text=True, - shell=True, - timeout=3, - check=False, - ) - - -def format_ruff(filepath: str): - """Run ruff format on a file.""" - subprocess.run( # nosec - f"ruff check --fix {filepath}", - shell=True, - text=True, - timeout=3, - universal_newlines=True, - check=False, - ) - - subprocess.run( 
# nosec - f"ruff format {filepath}", - stderr=subprocess.STDOUT, - shell=True, - timeout=3, - text=True, - check=False, - ) - - -def check_ruff(filepath: str): - """Run ruff check on a file.""" - subprocess.check_output( # nosec - f"ruff check {filepath}", - stderr=subprocess.STDOUT, - shell=True, - timeout=3, - text=True, - ) - - -def check_mypy(filepath: str, strict: bool = True, follow_imports: str = "skip"): - """Run mypy on a file.""" - cmd = ( - f"mypy {'--strict' if strict else ''} " - f"--follow-imports={follow_imports} {filepath}" - ) - - subprocess.check_output( # nosec - cmd, - stderr=subprocess.STDOUT, - shell=True, - text=True, - timeout=3, - ) - - -class PythonCode(BaseModel): - code: str = Field( - description="Python code conforming to " - "ruff, black, and *strict* mypy standards.", - ) - - @validator("code") - @classmethod - def check_code(cls, v: str) -> str: - v = strip_python_markdown_tags(v).strip() - try: - with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp_file: - temp_file.write(v) - temp_file_path = temp_file.name - - try: - # format with black and ruff - format_black(temp_file_path) - format_ruff(temp_file_path) - except subprocess.CalledProcessError: - pass - - # update `v` with formatted code - with open(temp_file_path, "r") as temp_file: - v = temp_file.read() - - # check - complaints = dict(ruff=None, mypy=None) - - try: - check_ruff(temp_file_path) - except subprocess.CalledProcessError as e: - complaints["ruff"] = e.output - - try: - check_mypy(temp_file_path) - except subprocess.CalledProcessError as e: - complaints["mypy"] = e.output - - # raise ValueError if ruff or mypy had complaints - if any(complaints.values()): - code_str = f"```{temp_file_path}\n{v}```" - error_messages = [ - f"```{key}\n{value}```" - for key, value in complaints.items() - if value - ] - raise ValueError("\n\n".join([code_str] + error_messages)) - - finally: - os.remove(temp_file_path) - return v - - -def check_code(code: str) -> str: - try: - code_obj = PythonCode(code=code) - return ( - f"# LGTM\n" - f"# use the `submit` tool to submit this code:\n\n" - f"```python\n{code_obj.code}\n```" - ) - except ValidationError as e: - return e.errors()[0]["msg"] - - -prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are a world class Python coder who uses " - "black, ruff, and *strict* mypy for all of your code. " - "Provide complete, end-to-end Python code " - "to meet the user's description/requirements. " - "Always `check` your code. When you're done, " - "you must ALWAYS use the `submit` tool.", - ), - ( - "human", - ": {input}", - ), - ], -) - -check_code_tool = Tool.from_function( - check_code, - name="check-code", - description="Always check your code before submitting it!", -) - -submit_code_tool = Tool.from_function( - strip_python_markdown_tags, - name="submit-code", - description="THIS TOOL is the most important. " - "use it to submit your code to the user who requested it... 
" - "but be sure to `check` it first!", - return_direct=True, -) - -tools = [check_code_tool, submit_code_tool] - - -def get_agent_executor( - llm: BaseLLM, - agent_type: AgentType = AgentType.OPENAI_FUNCTIONS, -) -> Runnable: - _agent_executor = initialize_agent( - tools, - llm, - agent=agent_type, - verbose=True, - handle_parsing_errors=True, - prompt=prompt, - ) - return _agent_executor | (lambda output: output["output"]) - - -class Instruction(BaseModel): - __root__: str - - -agent_executor = ( - get_agent_executor(ChatOpenAI(model="gpt-4-1106-preview", temperature=0.0)) - .configurable_alternatives( - ConfigurableField("model_name"), - default_key="gpt4turbo", - gpt4=get_agent_executor(ChatOpenAI(model="gpt-4", temperature=0.0)), - gpt35t=get_agent_executor( - ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0), - ), - ) - .with_types(input_type=Instruction, output_type=str) -) diff --git a/templates/python-lint/tests/__init__.py b/templates/python-lint/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-astradb/.env.template b/templates/rag-astradb/.env.template deleted file mode 100644 index 2157fb620b3..00000000000 --- a/templates/rag-astradb/.env.template +++ /dev/null @@ -1,5 +0,0 @@ -export OPENAI_API_KEY="..." - -export ASTRA_DB_API_ENDPOINT="https://...-....apps.astra.datastax.com" -export ASTRA_DB_APPLICATION_TOKEN="AstraCS:..." -export ASTRA_DB_KEYSPACE="..." # Optional - falls back to default if not provided diff --git a/templates/rag-astradb/README.md b/templates/rag-astradb/README.md deleted file mode 100644 index 3ba5e9073be..00000000000 --- a/templates/rag-astradb/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# RAG - AstraDB - -This template will perform RAG using `AstraDB` (`AstraDB` vector store class) - -## Environment Setup - -An [Astra DB](https://astra.datastax.com) database is required; free tier is fine. - -- You need the database **API endpoint** (such as `https://0123...-us-east1.apps.astra.datastax.com`) ... -- ... and a **token** (`AstraCS:...`). - -Also, an **OpenAI API Key** is required. _Note that out-of-the-box this demo supports OpenAI only, unless you tinker with the code._ - -Provide the connection parameters and secrets through environment variables. Please refer to `.env.template` for the variable names. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U "langchain-cli[serve]" -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-astradb -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-astradb -``` - -And add the following code to your `server.py` file: -```python -from astradb_entomology_rag import chain as astradb_entomology_rag_chain - -add_routes(app, astradb_entomology_rag_chain, path="/rag-astradb") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-astradb/playground](http://127.0.0.1:8000/rag-astradb/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-astradb") -``` - -## Reference - -Stand-alone repo with LangServe chain: [here](https://github.com/hemidactylus/langserve_astradb_entomology_rag). diff --git a/templates/rag-astradb/astradb_entomology_rag/__init__.py b/templates/rag-astradb/astradb_entomology_rag/__init__.py deleted file mode 100644 index 7997d5909a5..00000000000 --- a/templates/rag-astradb/astradb_entomology_rag/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import AstraDB -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnablePassthrough - -from .populate_vector_store import populate - -# inits -llm = ChatOpenAI() -embeddings = OpenAIEmbeddings() -vector_store = AstraDB( - embedding=embeddings, - collection_name="langserve_rag_demo", - token=os.environ["ASTRA_DB_APPLICATION_TOKEN"], - api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"], - namespace=os.environ.get("ASTRA_DB_KEYSPACE"), -) -retriever = vector_store.as_retriever(search_kwargs={"k": 3}) - -# For demo reasons, let's ensure there are rows on the vector store. -# Please remove this and/or adapt to your use case! - -inserted_lines = populate(vector_store) -if inserted_lines: - print(f"Done ({inserted_lines} lines inserted).") - -entomology_template = """ -You are an expert entomologist, tasked with answering enthusiast biologists' questions. -You must answer based only on the provided context, do not make up any fact. -Your answers must be concise and to the point, but strive to provide scientific details -(such as family, order, Latin names, and so on when appropriate). -You MUST refuse to answer questions on other topics than entomology, -as well as questions whose answer is not found in the provided context. - -CONTEXT: -{context} - -QUESTION: {question} - -YOUR ANSWER:""" - -entomology_prompt = ChatPromptTemplate.from_template(entomology_template) - -chain = ( - {"context": retriever, "question": RunnablePassthrough()} - | entomology_prompt - | llm - | StrOutputParser() -) diff --git a/templates/rag-astradb/astradb_entomology_rag/populate_vector_store.py b/templates/rag-astradb/astradb_entomology_rag/populate_vector_store.py deleted file mode 100644 index e1fb9e314a5..00000000000 --- a/templates/rag-astradb/astradb_entomology_rag/populate_vector_store.py +++ /dev/null @@ -1,29 +0,0 @@ -import os - -BASE_DIR = os.path.abspath(os.path.dirname(__file__)) - - -def populate(vector_store): - # is the store empty? 
find out with a probe search - hits = vector_store.similarity_search_by_vector( - embedding=[0.001] * 1536, - k=1, - ) - # - if len(hits) == 0: - # this seems a first run: - # must populate the vector store - src_file_name = os.path.join(BASE_DIR, "..", "sources.txt") - lines = [ - line.strip() - for line in open(src_file_name).readlines() - if line.strip() - if line[0] != "#" - ] - # deterministic IDs to prevent duplicates on multiple runs - ids = ["_".join(line.split(" ")[:2]).lower().replace(":", "") for line in lines] - # - vector_store.add_texts(texts=lines, ids=ids) - return len(lines) - else: - return 0 diff --git a/templates/rag-astradb/main.py b/templates/rag-astradb/main.py deleted file mode 100644 index f80b1b6626a..00000000000 --- a/templates/rag-astradb/main.py +++ /dev/null @@ -1,5 +0,0 @@ -from astradb_entomology_rag import chain - -if __name__ == "__main__": - response = chain.invoke("Are there more coleoptera or bugs?") - print(response) diff --git a/templates/rag-astradb/pyproject.toml b/templates/rag-astradb/pyproject.toml deleted file mode 100644 index 944c6277ab8..00000000000 --- a/templates/rag-astradb/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "astradb_entomology_rag" -version = "0.0.1" -description = "RAG using AstraDB" -authors = [ - "Stefano Lottini ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -astrapy = "^0.5.3" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "astradb_entomology_rag" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Datastax" -integrations = ["AstraDB"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-astradb/sources.txt b/templates/rag-astradb/sources.txt deleted file mode 100644 index c03c144753b..00000000000 --- a/templates/rag-astradb/sources.txt +++ /dev/null @@ -1,31 +0,0 @@ -# source: https://www.thoughtco.com/a-guide-to-the-twenty-nine-insect-orders-1968419 - -Order Thysanura: The silverfish and firebrats are found in the order Thysanura. They are wingless insects often found in people's attics, and have a lifespan of several years. There are about 600 species worldwide. -Order Diplura: Diplurans are the most primitive insect species, with no eyes or wings. They have the unusual ability among insects to regenerate body parts. There are over 400 members of the order Diplura in the world. -Order Protura: Another very primitive group, the proturans have no eyes, no antennae, and no wings. They are uncommon, with perhaps less than 100 species known. -Order Collembola: The order Collembola includes the springtails, primitive insects without wings. There are approximately 2,000 species of Collembola worldwide. -Order Ephemeroptera: The mayflies of order Ephemeroptera are short-lived, and undergo incomplete metamorphosis. The larvae are aquatic, feeding on algae and other plant life. Entomologists have described about 2,100 species worldwide. -Order Odonata: The order Odonata includes dragonflies and damselflies, which undergo incomplete metamorphosis. They are predators of other insects, even in their immature stage. There are about 5,000 species in the order Odonata. -Order Plecoptera: The stoneflies of order Plecoptera are aquatic and undergo incomplete metamorphosis. The nymphs live under rocks in well flowing streams. 
Adults are usually seen on the ground along stream and river banks. There are roughly 3,000 species in this group. -Order Grylloblatodea: Sometimes referred to as "living fossils," the insects of the order Grylloblatodea have changed little from their ancient ancestors. This order is the smallest of all the insect orders, with perhaps only 25 known species living today. Grylloblatodea live at elevations above 1500 ft., and are commonly named ice bugs or rock crawlers. -Order Orthoptera: These are familiar insects (grasshoppers, locusts, katydids, and crickets) and one of the largest orders of herbivorous insects. Many species in the order Orthoptera can produce and detect sounds. Approximately 20,000 species exist in this group. -Order Phasmida: The order Phasmida are masters of camouflage, the stick and leaf insects. They undergo incomplete metamorphosis and feed on leaves. There are some 3,000 insects in this group, but only a small fraction of this number is leaf insects. Stick insects are the longest insects in the world. -Order Dermaptera: This order contains the earwigs, an easily recognized insect that often has pincers at the end of the abdomen. Many earwigs are scavengers, eating both plant and animal matter. The order Dermaptera includes less than 2,000 species. -Order Embiidina: The order Embioptera is another ancient order with few species, perhaps only 200 worldwide. The web spinners have silk glands in their front legs and weave nests under leaf litter and in tunnels where they live. Webspinners live in tropical or subtropical climates. -Order Dictyoptera: The order Dictyoptera includes roaches and mantids. Both groups have long, segmented antennae and leathery forewings held tightly against their backs. They undergo incomplete metamorphosis. Worldwide, there approximately 6,000 species in this order, most living in tropical regions. -Order Isoptera: Termites feed on wood and are important decomposers in forest ecosystems. They also feed on wood products and are thought of as pests for the destruction they cause to man-made structures. There are between 2,000 and 3,000 species in this order. -Order Zoraptera: Little is know about the angel insects, which belong to the order Zoraptera. Though they are grouped with winged insects, many are actually wingless. Members of this group are blind, small, and often found in decaying wood. There are only about 30 described species worldwide. -Order Psocoptera: Bark lice forage on algae, lichen, and fungus in moist, dark places. Booklice frequent human dwellings, where they feed on book paste and grains. They undergo incomplete metamorphosis. Entomologists have named about 3,200 species in the order Psocoptera. -Order Mallophaga: Biting lice are ectoparasites that feed on birds and some mammals. There are an estimated 3,000 species in the order Mallophaga, all of which undergo incomplete metamorphosis. -Order Siphunculata: The order Siphunculata are the sucking lice, which feed on the fresh blood of mammals. Their mouthparts are adapted for sucking or siphoning blood. There are only about 500 species of sucking lice. -Order Hemiptera: Most people use the term "bugs" to mean insects; an entomologist uses the term to refer to the order Hemiptera. The Hemiptera are the true bugs, and include cicadas, aphids, and spittlebugs, and others. This is a large group of over 70,000 species worldwide. -Order Thysanoptera: The thrips of order Thysanoptera are small insects that feed on plant tissue. 
Many are considered agricultural pests for this reason. Some thrips prey on other small insects as well. This order contains about 5,000 species. -Order Neuroptera: Commonly called the order of lacewings, this group actually includes a variety of other insects, too: dobsonflies, owlflies, mantidflies, antlions, snakeflies, and alderflies. Insects in the order Neuroptera undergo complete metamorphosis. Worldwide, there are over 5,500 species in this group. -Order Mecoptera: This order includes the scorpionflies, which live in moist, wooded habitats. Scorpionflies are omnivorous in both their larval and adult forms. The larvae are caterpillar-like. There are less than 500 described species in the order Mecoptera. -Order Siphonaptera: Pet lovers fear insects in the order Siphonaptera - the fleas. Fleas are blood-sucking ectoparasites that feed on mammals, and rarely, birds. There are well over 2,000 species of fleas in the world. -Order Coleoptera: This group, the beetles and weevils, is the largest order in the insect world, with over 300,000 distinct species known. The order Coleoptera includes well-known families: june beetles, lady beetles, click beetles, and fireflies. All have hardened forewings that fold over the abdomen to protect the delicate hindwings used for flight. -Order Strepsiptera: Insects in this group are parasites of other insects, particularly bees, grasshoppers, and the true bugs. The immature Strepsiptera lies in wait on a flower and quickly burrows into any host insect that comes along. Strepsiptera undergo complete metamorphosis and pupate within the host insect's body. -Order Diptera: Diptera is one of the largest orders, with nearly 100,000 insects named to the order. These are the true flies, mosquitoes, and gnats. Insects in this group have modified hindwings which are used for balance during flight. The forewings function as the propellers for flying. -Order Lepidoptera: The butterflies and moths of the order Lepidoptera comprise the second largest group in the class Insecta. These well-known insects have scaly wings with interesting colors and patterns. You can often identify an insect in this order just by the wing shape and color. -Order Trichoptera: Caddisflies are nocturnal as adults and aquatic when immature. The caddisfly adults have silky hairs on their wings and body, which is key to identifying a Trichoptera member. The larvae spin traps for prey with silk. They also make cases from the silk and other materials that they carry and use for protection. -Order Hymenoptera: The order Hymenoptera includes many of the most common insects - ants, bees, and wasps. The larvae of some wasps cause trees to form galls, which then provide food for the immature wasps. Other wasps are parasitic, living in caterpillars, beetles, or even aphids. This is the third-largest insect order with just over 100,000 species. diff --git a/templates/rag-aws-bedrock/LICENSE b/templates/rag-aws-bedrock/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-aws-bedrock/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-aws-bedrock/README.md b/templates/rag-aws-bedrock/README.md deleted file mode 100644 index 5d3afae72b4..00000000000 --- a/templates/rag-aws-bedrock/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# RAG - AWS Bedrock, FAISS - -This template is designed to connect with the `AWS Bedrock` service, a managed service that offers a set of foundation models. - -It primarily uses `Anthropic Claude` for text generation and `Amazon Titan` for text embedding, and utilizes FAISS as the vectorstore. - -For additional context on the RAG pipeline, refer to [these notebooks](https://github.com/aws-samples/amazon-bedrock-workshop/tree/main/02_KnowledgeBases_and_RAG). - -See [The FAISS Library](https://arxiv.org/pdf/2401.08281) paper for more details. - -## Environment Setup - -Before you can use this package, ensure that you have configured `boto3` to work with your AWS account. - -For details on how to set up and configure `boto3`, visit [this page](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html#configuration). - -In addition, you need to install the `faiss-cpu` package to work with the FAISS vector store: - -```bash -pip install faiss-cpu -``` - -You should also set the following environment variables to reflect your AWS profile and region (if you're not using the `default` AWS profile and `us-east-1` region): - -* `AWS_DEFAULT_REGION` -* `AWS_PROFILE` - -## Usage - -First, install the LangChain CLI: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package: - -```shell -langchain app new my-app --package rag-aws-bedrock -``` - -To add this package to an existing project: - -```shell -langchain app add rag-aws-bedrock -``` - -Then add the following code to your `server.py` file: -```python -from rag_aws_bedrock import chain as rag_aws_bedrock_chain - -add_routes(app, rag_aws_bedrock_chain, path="/rag-aws-bedrock") -``` - -(Optional) If you have access to LangSmith, you can configure it to trace, monitor, and debug LangChain applications. If you don't have access, you can skip this section. 
- -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000) - -You can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) and access the playground at [http://127.0.0.1:8000/rag-aws-bedrock/playground](http://127.0.0.1:8000/rag-aws-bedrock/playground). - -You can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-aws-bedrock") -``` \ No newline at end of file diff --git a/templates/rag-aws-bedrock/main.py b/templates/rag-aws-bedrock/main.py deleted file mode 100644 index d0a3c2f48c6..00000000000 --- a/templates/rag-aws-bedrock/main.py +++ /dev/null @@ -1,6 +0,0 @@ -from rag_aws_bedrock.chain import chain - -if __name__ == "__main__": - query = "What is this data about?" - - print(chain.invoke(query)) diff --git a/templates/rag-aws-bedrock/pyproject.toml b/templates/rag-aws-bedrock/pyproject.toml deleted file mode 100644 index 2fd1316cfe1..00000000000 --- a/templates/rag-aws-bedrock/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-aws-bedrock" -version = "0.1.0" -description = "RAG using AWS Bedrock" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -tiktoken = ">=0.5.1" -faiss-cpu = ">=1.7.4" -boto3 = ">=1.28.57" -awscli = ">=1.29.57" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_aws_bedrock" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "FAISS"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-aws-bedrock/rag_aws_bedrock.ipynb b/templates/rag-aws-bedrock/rag_aws_bedrock.ipynb deleted file mode 100644 index 0cb2d628fbe..00000000000 --- a/templates/rag-aws-bedrock/rag_aws_bedrock.ipynb +++ /dev/null @@ -1,51 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag_aws_bedrock\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_pinecone = RemoteRunnable(\"http://0.0.0.0:8001/rag_aws_bedrock\")\n", - "rag_app_pinecone.invoke(\"What are the different types of agent memory\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-aws-bedrock/rag_aws_bedrock/__init__.py b/templates/rag-aws-bedrock/rag_aws_bedrock/__init__.py deleted file mode 100644 index 81dd3a2b433..00000000000 --- 
a/templates/rag-aws-bedrock/rag_aws_bedrock/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_aws_bedrock.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py b/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py deleted file mode 100644 index a2ad099ab01..00000000000 --- a/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py +++ /dev/null @@ -1,55 +0,0 @@ -import os - -from langchain_community.embeddings import BedrockEmbeddings -from langchain_community.llms.bedrock import Bedrock -from langchain_community.vectorstores import FAISS -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -# Get region and profile from env -region = os.environ.get("AWS_DEFAULT_REGION", "us-east-1") -profile = os.environ.get("AWS_PROFILE", "default") - -# Set LLM and embeddings -model = Bedrock( - model_id="anthropic.claude-v2", - region_name=region, - credentials_profile_name=profile, - model_kwargs={"max_tokens_to_sample": 200}, -) -bedrock_embeddings = BedrockEmbeddings(model_id="amazon.titan-embed-text-v1") - -# Add to vectorDB -vectorstore = FAISS.from_texts( - ["harrison worked at kensho"], embedding=bedrock_embeddings -) -retriever = vectorstore.as_retriever() - -# Get retriever from vectorstore -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - - -# RAG -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-aws-bedrock/tests/__init__.py b/templates/rag-aws-bedrock/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-aws-kendra/LICENSE b/templates/rag-aws-kendra/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-aws-kendra/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
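The `rag-aws-bedrock` chain above seeds its FAISS index with a single hard-coded sentence. A minimal ingestion sketch for pointing it at your own content might look like the following; the file name, chunking parameters, and index path are illustrative assumptions rather than part of the template:

```python
# Hypothetical ingestion sketch for rag-aws-bedrock; "my_docs.txt", the chunk sizes,
# and the "faiss_index" path are assumptions, not part of the original template.
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import BedrockEmbeddings
from langchain_community.vectorstores import FAISS

bedrock_embeddings = BedrockEmbeddings(model_id="amazon.titan-embed-text-v1")

# Load and chunk a local text file instead of the single example sentence
documents = TextLoader("my_docs.txt", encoding="utf-8").load()
splits = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(
    documents
)

# Build and persist the FAISS index; the chain's retriever can then be built from
# this vectorstore instead of the FAISS.from_texts([...]) call shown in chain.py
vectorstore = FAISS.from_documents(splits, embedding=bedrock_embeddings)
vectorstore.save_local("faiss_index")
retriever = vectorstore.as_retriever()
```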
diff --git a/templates/rag-aws-kendra/README.md b/templates/rag-aws-kendra/README.md deleted file mode 100644 index e6f4aa4abfb..00000000000 --- a/templates/rag-aws-kendra/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# RAG - AWS Kendra - -This template is an application that utilizes `Amazon Kendra`, -a machine learning powered search service, -and `Anthropic Claude` for text generation. -The application retrieves documents using a Retrieval chain to answer -questions from your documents. - -It uses the `boto3` library to connect with the `Bedrock` service. - -For more context on building RAG applications with `Amazon Kendra`, check [this page](https://aws.amazon.com/blogs/machine-learning/quickly-build-high-accuracy-generative-ai-applications-on-enterprise-data-using-amazon-kendra-langchain-and-large-language-models/). - -## Environment Setup - -Please ensure to setup and configure `boto3` to work with your AWS account. - -You can follow the guide [here](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html#configuration). - -You should also have a Kendra Index set up before using this template. - -You can use [this Cloudformation template](https://github.com/aws-samples/amazon-kendra-langchain-extensions/blob/main/kendra_retriever_samples/kendra-docs-index.yaml) to create a sample index. - -This includes sample data containing AWS online documentation for Amazon Kendra, Amazon Lex, and Amazon SageMaker. Alternatively, you can use your own Amazon Kendra index if you have indexed your own dataset. - -The following environment variables need to be set: - -* `AWS_DEFAULT_REGION` - This should reflect the correct AWS region. Default is `us-east-1`. -* `AWS_PROFILE` - This should reflect your AWS profile. Default is `default`. -* `KENDRA_INDEX_ID` - This should have the Index ID of the Kendra index. Note that the Index ID is a 36 character alphanumeric value that can be found in the index detail page. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-aws-kendra -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-aws-kendra -``` - -And add the following code to your `server.py` file: -```python -from rag_aws_kendra.chain import chain as rag_aws_kendra_chain - -add_routes(app, rag_aws_kendra_chain, path="/rag-aws-kendra") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-aws-kendra/playground](http://127.0.0.1:8000/rag-aws-kendra/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-aws-kendra") -``` diff --git a/templates/rag-aws-kendra/main.py b/templates/rag-aws-kendra/main.py deleted file mode 100644 index ceb1daa7afd..00000000000 --- a/templates/rag-aws-kendra/main.py +++ /dev/null @@ -1,6 +0,0 @@ -from rag_aws_kendra.chain import chain - -if __name__ == "__main__": - query = "Does Kendra support table extraction?" - - print(chain.invoke(query)) diff --git a/templates/rag-aws-kendra/pyproject.toml b/templates/rag-aws-kendra/pyproject.toml deleted file mode 100644 index 513f0c22fff..00000000000 --- a/templates/rag-aws-kendra/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "rag-aws-kendra" -version = "0.0.1" -description = "RAG using AWS-Kendra and anthropic" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -tiktoken = ">=0.5.1" -boto3 = ">=1.28.57" -awscli = ">=1.29.57" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_aws_kendra.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["AWS"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-aws-kendra/rag_aws_kendra/__init__.py b/templates/rag-aws-kendra/rag_aws_kendra/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-aws-kendra/rag_aws_kendra/chain.py b/templates/rag-aws-kendra/rag_aws_kendra/chain.py deleted file mode 100644 index 1d623ab5dd9..00000000000 --- a/templates/rag-aws-kendra/rag_aws_kendra/chain.py +++ /dev/null @@ -1,54 +0,0 @@ -import os - -from langchain.retrievers import AmazonKendraRetriever -from langchain_community.llms.bedrock import Bedrock -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -# Get region and profile from env -region = os.environ.get("AWS_DEFAULT_REGION", "us-east-1") -profile = os.environ.get("AWS_PROFILE", "default") -kendra_index = os.environ.get("KENDRA_INDEX_ID", None) - -if not kendra_index: - raise ValueError( - "No value provided in env variable 'KENDRA_INDEX_ID'. " - "A Kendra index is required to run this application." 
- ) - -# Set LLM and embeddings -model = Bedrock( - model_id="anthropic.claude-v2", - region_name=region, - credentials_profile_name=profile, - model_kwargs={"max_tokens_to_sample": 200}, -) - -# Create Kendra retriever -retriever = AmazonKendraRetriever(index_id=kendra_index, top_k=5, region_name=region) - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - - -# RAG -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-aws-kendra/tests/__init__.py b/templates/rag-aws-kendra/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-azure-search/.gitignore b/templates/rag-azure-search/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/rag-azure-search/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/rag-azure-search/LICENSE b/templates/rag-azure-search/LICENSE deleted file mode 100644 index fc0602feecd..00000000000 --- a/templates/rag-azure-search/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2024 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-azure-search/README.md b/templates/rag-azure-search/README.md deleted file mode 100644 index e2cfd65f5c6..00000000000 --- a/templates/rag-azure-search/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# RAG - Azure AI Search - -This template performs RAG on documents using [Azure AI Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) as the vectorstore and Azure OpenAI chat and embedding models. - -For additional details on RAG with `Azure AI Search`, refer to [this notebook](https://github.com/langchain-ai/langchain/blob/master/docs/docs/integrations/vectorstores/azuresearch.ipynb). - - -## Environment Setup - -***Prerequisites:*** Existing [Azure AI Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) and [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview) resources. - -***Environment Variables:*** - -To run this template, you'll need to set the following environment variables: - -***Required:*** - -- AZURE_SEARCH_ENDPOINT - The endpoint of the Azure AI Search service. 
-- AZURE_SEARCH_KEY - The API key for the Azure AI Search service. -- AZURE_OPENAI_ENDPOINT - The endpoint of the Azure OpenAI service. -- AZURE_OPENAI_API_KEY - The API key for the Azure OpenAI service. -- AZURE_EMBEDDINGS_DEPLOYMENT - Name of the Azure OpenAI deployment to use for embeddings. -- AZURE_CHAT_DEPLOYMENT - Name of the Azure OpenAI deployment to use for chat. - -***Optional:*** - -- AZURE_SEARCH_INDEX_NAME - Name of an existing Azure AI Search index to use. If not provided, an index will be created with name "rag-azure-search". -- OPENAI_API_VERSION - Azure OpenAI API version to use. Defaults to "2023-05-15". - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-azure-search -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-azure-search -``` - -And add the following code to your `server.py` file: -```python -from rag_azure_search import chain as rag_azure_search_chain - -add_routes(app, rag_azure_search_chain, path="/rag-azure-search") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-azure-search/playground](http://127.0.0.1:8000/rag-azure-search/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-azure-search") -``` \ No newline at end of file diff --git a/templates/rag-azure-search/pyproject.toml b/templates/rag-azure-search/pyproject.toml deleted file mode 100644 index 54af32ecfb1..00000000000 --- a/templates/rag-azure-search/pyproject.toml +++ /dev/null @@ -1,25 +0,0 @@ -[tool.poetry] -name = "rag-azure-search" -version = "0.0.1" -description = "" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain-core = ">=0.1.5" -langchain-openai = ">=0.0.1" -azure-search-documents = ">=11.4.0" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.4" -fastapi = "^0.104.0" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_azure_search" -export_attr = "chain" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-azure-search/rag_azure_search/__init__.py b/templates/rag-azure-search/rag_azure_search/__init__.py deleted file mode 100644 index ee4169c8d13..00000000000 --- a/templates/rag-azure-search/rag_azure_search/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_azure_search.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-azure-search/rag_azure_search/chain.py 
b/templates/rag-azure-search/rag_azure_search/chain.py deleted file mode 100644 index 8206cf25357..00000000000 --- a/templates/rag-azure-search/rag_azure_search/chain.py +++ /dev/null @@ -1,94 +0,0 @@ -import os - -from langchain_community.vectorstores.azuresearch import AzureSearch -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_openai import AzureChatOpenAI, AzureOpenAIEmbeddings - -if not os.getenv("AZURE_OPENAI_ENDPOINT"): - raise ValueError("Please set the environment variable AZURE_OPENAI_ENDPOINT") - -if not os.getenv("AZURE_OPENAI_API_KEY"): - raise ValueError("Please set the environment variable AZURE_OPENAI_API_KEY") - -if not os.getenv("AZURE_EMBEDDINGS_DEPLOYMENT"): - raise ValueError("Please set the environment variable AZURE_EMBEDDINGS_DEPLOYMENT") - -if not os.getenv("AZURE_CHAT_DEPLOYMENT"): - raise ValueError("Please set the environment variable AZURE_CHAT_DEPLOYMENT") - -if not os.getenv("AZURE_SEARCH_ENDPOINT"): - raise ValueError("Please set the environment variable AZURE_SEARCH_ENDPOINT") - -if not os.getenv("AZURE_SEARCH_KEY"): - raise ValueError("Please set the environment variable AZURE_SEARCH_KEY") - - -api_version = os.getenv("OPENAI_API_VERSION", "2023-05-15") -index_name = os.getenv("AZURE_SEARCH_INDEX_NAME", "rag-azure-search") - -embeddings = AzureOpenAIEmbeddings( - deployment=os.environ["AZURE_EMBEDDINGS_DEPLOYMENT"], - api_version=api_version, - chunk_size=1, -) - -vector_store: AzureSearch = AzureSearch( - azure_search_endpoint=os.environ["AZURE_SEARCH_ENDPOINT"], - azure_search_key=os.environ["AZURE_SEARCH_KEY"], - index_name=index_name, - embedding_function=embeddings.embed_query, -) - -""" -(Optional) Example document - -Uncomment the following code to load the document into the vector store -or substitute with your own. 
-""" -# import pathlib -# from langchain.text_splitter import CharacterTextSplitter -# from langchain_community.document_loaders import TextLoader - -# current_file_path = pathlib.Path(__file__).resolve() -# root_directory = current_file_path.parents[3] -# target_file_path = \ -# root_directory / "docs" / "docs" / "modules" / "state_of_the_union.txt" - -# loader = TextLoader(str(target_file_path), encoding="utf-8") - -# documents = loader.load() -# text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) -# docs = text_splitter.split_documents(documents) - -# vector_store.add_documents(documents=docs) - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" - -# Perform a similarity search -retriever = vector_store.as_retriever() - -_prompt = ChatPromptTemplate.from_template(template) -_model = AzureChatOpenAI( - deployment_name=os.environ["AZURE_CHAT_DEPLOYMENT"], - api_version=api_version, -) -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | _prompt - | _model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-azure-search/tests/__init__.py b/templates/rag-azure-search/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-chroma-multi-modal-multi-vector/.gitignore b/templates/rag-chroma-multi-modal-multi-vector/.gitignore deleted file mode 100644 index 709823056cf..00000000000 --- a/templates/rag-chroma-multi-modal-multi-vector/.gitignore +++ /dev/null @@ -1 +0,0 @@ -docs/img_*.jpg diff --git a/templates/rag-chroma-multi-modal-multi-vector/LICENSE b/templates/rag-chroma-multi-modal-multi-vector/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-chroma-multi-modal-multi-vector/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-chroma-multi-modal-multi-vector/README.md b/templates/rag-chroma-multi-modal-multi-vector/README.md deleted file mode 100644 index 562c4ff3700..00000000000 --- a/templates/rag-chroma-multi-modal-multi-vector/README.md +++ /dev/null @@ -1,132 +0,0 @@ -# RAG - Chroma multi-modal multi-vector - -`Multi-modal LLMs` enable visual assistants that can perform -question-answering about images. 
- -This template creates a visual assistant for slide decks, -which often contain visuals such as graphs or figures. - -It uses `GPT-4V` to create image summaries for each slide, -embeds the summaries, and stores them in `Chroma`. - -Given a question, relevant slides are retrieved and passed -to GPT-4V for answer synthesis. - -![Diagram illustrating the multi-modal LLM process with a slide deck, captioning, storage, question input, and answer synthesis with year-over-year growth percentages.](https://github.com/langchain-ai/langchain/assets/122662504/5277ef6b-d637-43c7-8dc1-9b1567470503 "Multi-modal LLM Process Diagram") - -## Input - -Supply a slide deck as pdf in the `/docs` directory. - -By default, this template has a slide deck about Q3 earnings from DataDog, a public technology company. - -Example questions to ask can be: -``` -How many customers does Datadog have? -What is Datadog platform % Y/Y growth in FY20, FY21, and FY22? -``` - -To create an index of the slide deck, run: -``` -poetry install -python ingest.py -``` - -## Storage - -Here is the process the template will use to create an index of the slides (see [blog](https://blog.langchain.dev/multi-modal-rag-template/)): - -* Extract the slides as a collection of images -* Use GPT-4V to summarize each image -* Embed the image summaries using text embeddings with a link to the original images -* Retrieve relevant images based on similarity between the image summary and the user input question -* Pass those images to GPT-4V for answer synthesis - -By default, this will use [LocalFileStore](https://python.langchain.com/docs/integrations/stores/file_system) to store images and Chroma to store summaries. - -For production, it may be desirable to use a remote option such as Redis. - -You can set the `local_file_store` flag in `chain.py` and `ingest.py` to switch between the two options. - -For Redis, the template will use [UpstashRedisByteStore](https://python.langchain.com/docs/integrations/stores/upstash_redis). - -We will use Upstash, which offers Redis with a REST API, to store the images. - -Simply log in [here](https://upstash.com/) and create a database. - -This will give you a REST API with: - -* `UPSTASH_URL` -* `UPSTASH_TOKEN` - -Set `UPSTASH_URL` and `UPSTASH_TOKEN` as environment variables to access your database. - -We will use Chroma to store and index the image summaries, which will be created locally in the template directory. - -## LLM - -The app will retrieve images based on similarity between the text input and the image summary, and pass the images to GPT-4V. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI GPT-4V. - -Set `UPSTASH_URL` and `UPSTASH_TOKEN` as environment variables to access your database if you use `UpstashRedisByteStore`. 
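The switch between the two byte stores amounts to only a few lines. Here is a rough sketch of the idea; the environment-variable toggle is an illustrative assumption, while the template itself hard-codes a `local_file_store` flag in `chain.py` and `ingest.py`:

```python
# Sketch of the local vs. Upstash byte-store switch described above. The env-var
# toggle is illustrative; the template uses a hard-coded local_file_store flag.
import os
from pathlib import Path

from langchain.storage import LocalFileStore, UpstashRedisByteStore

if os.getenv("UPSTASH_URL") and os.getenv("UPSTASH_TOKEN"):
    # Remote option: Upstash Redis over its REST API
    store = UpstashRedisByteStore(
        url=os.environ["UPSTASH_URL"], token=os.environ["UPSTASH_TOKEN"]
    )
else:
    # Local option: keep the raw images on disk next to the template
    store = LocalFileStore(
        str(Path(__file__).parent / "multi_vector_retriever_metadata")
    )
```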
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-chroma-multi-modal-multi-vector -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-chroma-multi-modal-multi-vector -``` - -And add the following code to your `server.py` file: -```python -from rag_chroma_multi_modal_multi_vector import chain as rag_chroma_multi_modal_chain_mv - -add_routes(app, rag_chroma_multi_modal_chain_mv, path="/rag-chroma-multi-modal-multi-vector") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-chroma-multi-modal-multi-vector/playground](http://127.0.0.1:8000/rag-chroma-multi-modal-multi-vector/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-chroma-multi-modal-multi-vector") -``` diff --git a/templates/rag-chroma-multi-modal-multi-vector/docs/DDOG_Q3_earnings_deck.pdf b/templates/rag-chroma-multi-modal-multi-vector/docs/DDOG_Q3_earnings_deck.pdf deleted file mode 100644 index a4aa7d864c4..00000000000 Binary files a/templates/rag-chroma-multi-modal-multi-vector/docs/DDOG_Q3_earnings_deck.pdf and /dev/null differ diff --git a/templates/rag-chroma-multi-modal-multi-vector/ingest.py b/templates/rag-chroma-multi-modal-multi-vector/ingest.py deleted file mode 100644 index 01a81d95b18..00000000000 --- a/templates/rag-chroma-multi-modal-multi-vector/ingest.py +++ /dev/null @@ -1,209 +0,0 @@ -import base64 -import io -import os -import uuid -from io import BytesIO -from pathlib import Path - -import pypdfium2 as pdfium -from langchain.retrievers.multi_vector import MultiVectorRetriever -from langchain.storage import LocalFileStore, UpstashRedisByteStore -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.documents import Document -from langchain_core.messages import HumanMessage -from PIL import Image - - -def image_summarize(img_base64, prompt): - """ - Make image summary - - :param img_base64: Base64 encoded string for image - :param prompt: Text prompt for summarization - :return: Image summary - - """ - chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024) - - msg = chat.invoke( - [ - HumanMessage( - content=[ - {"type": "text", "text": prompt}, - { - "type": "image_url", - "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}, - }, - ] - ) - ] - ) - return msg.content - - -def generate_img_summaries(img_base64_list): - """ - Generate summaries for 
images - - :param img_base64_list: Base64 encoded images - :return: List of image summaries and processed images - """ - - # Store image summaries - image_summaries = [] - processed_images = [] - - # Prompt - prompt = """You are an assistant tasked with summarizing images for retrieval. \ - These summaries will be embedded and used to retrieve the raw image. \ - Give a concise summary of the image that is well optimized for retrieval.""" - - # Apply summarization to images - for i, base64_image in enumerate(img_base64_list): - try: - image_summaries.append(image_summarize(base64_image, prompt)) - processed_images.append(base64_image) - except Exception as e: - print(f"Error with image {i+1}: {e}") - - return image_summaries, processed_images - - -def get_images_from_pdf(pdf_path): - """ - Extract images from each page of a PDF document and save as JPEG files. - - :param pdf_path: A string representing the path to the PDF file. - """ - pdf = pdfium.PdfDocument(pdf_path) - n_pages = len(pdf) - pil_images = [] - for page_number in range(n_pages): - page = pdf.get_page(page_number) - bitmap = page.render(scale=1, rotation=0, crop=(0, 0, 0, 0)) - pil_image = bitmap.to_pil() - pil_images.append(pil_image) - return pil_images - - -def resize_base64_image(base64_string, size=(128, 128)): - """ - Resize an image encoded as a Base64 string - - :param base64_string: Base64 string - :param size: Image size - :return: Re-sized Base64 string - """ - # Decode the Base64 string - img_data = base64.b64decode(base64_string) - img = Image.open(io.BytesIO(img_data)) - - # Resize the image - resized_img = img.resize(size, Image.LANCZOS) - - # Save the resized image to a bytes buffer - buffered = io.BytesIO() - resized_img.save(buffered, format=img.format) - - # Encode the resized image to Base64 - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def convert_to_base64(pil_image): - """ - Convert PIL images to Base64 encoded strings - - :param pil_image: PIL image - :return: Re-sized Base64 string - """ - - buffered = BytesIO() - pil_image.save(buffered, format="JPEG") # You can change the format if needed - img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") - img_str = resize_base64_image(img_str, size=(960, 540)) - return img_str - - -def create_multi_vector_retriever( - vectorstore, image_summaries, images, local_file_store -): - """ - Create retriever that indexes summaries, but returns raw images or texts - - :param vectorstore: Vectorstore to store embedded image summaries - :param image_summaries: Image summaries - :param images: Base64 encoded images - :param local_file_store: Use local file storage - :return: Retriever - """ - - # File storage option - if local_file_store: - store = LocalFileStore( - str(Path(__file__).parent / "multi_vector_retriever_metadata") - ) - else: - # Initialize the storage layer for images using Redis - UPSTASH_URL = os.getenv("UPSTASH_URL") - UPSTASH_TOKEN = os.getenv("UPSTASH_TOKEN") - store = UpstashRedisByteStore(url=UPSTASH_URL, token=UPSTASH_TOKEN) - - # Doc ID - id_key = "doc_id" - - # Create the multi-vector retriever - retriever = MultiVectorRetriever( - vectorstore=vectorstore, - byte_store=store, - id_key=id_key, - ) - - # Helper function to add documents to the vectorstore and docstore - def add_documents(retriever, doc_summaries, doc_contents): - doc_ids = [str(uuid.uuid4()) for _ in doc_contents] - summary_docs = [ - Document(page_content=s, metadata={id_key: doc_ids[i]}) - for i, s in enumerate(doc_summaries) - ] - 
retriever.vectorstore.add_documents(summary_docs) - retriever.docstore.mset(list(zip(doc_ids, doc_contents))) - - add_documents(retriever, image_summaries, images) - - return retriever - - -# Load PDF -doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf" -rel_doc_path = doc_path.relative_to(Path.cwd()) -print("Extract slides as images") -pil_images = get_images_from_pdf(rel_doc_path) - -# Convert to b64 -images_base_64 = [convert_to_base64(i) for i in pil_images] - -# Image summaries -print("Generate image summaries") -image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) - -# The vectorstore to use to index the images summaries -vectorstore_mvr = Chroma( - collection_name="image_summaries", - persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"), - embedding_function=OpenAIEmbeddings(), -) - -# Create documents -images_base_64_processed_documents = [ - Document(page_content=i) for i in images_base_64_processed -] - -# Create retriever -retriever_multi_vector_img = create_multi_vector_retriever( - vectorstore_mvr, - image_summaries, - images_base_64_processed_documents, - local_file_store=True, -) diff --git a/templates/rag-chroma-multi-modal-multi-vector/pyproject.toml b/templates/rag-chroma-multi-modal-multi-vector/pyproject.toml deleted file mode 100644 index 62b233674e8..00000000000 --- a/templates/rag-chroma-multi-modal-multi-vector/pyproject.toml +++ /dev/null @@ -1,38 +0,0 @@ -[tool.poetry] -name = "rag-chroma-multi-modal-multi-vector" -version = "0.1.0" -description = "Multi-modal RAG using Chroma and multi-vector retriever" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = ">=0.0.353,<0.2" -openai = "<2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -pypdfium2 = ">=4.20.0" -langchain-experimental = ">=0.0.43" -upstash-redis = ">=1.0.0" -pillow = ">=10.1.0" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_chroma_multi_modal_multi_vector" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Chroma"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector.ipynb b/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector.ipynb deleted file mode 100644 index bfaa9d82725..00000000000 --- a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector.ipynb +++ /dev/null @@ -1,52 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/rag-chroma-multi-modal-multi-vector\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-chroma-multi-modal-multi-vector\")\n", - "rag_app.invoke(\"What is the projected TAM for observability expected for each year through 2026?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - 
"name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/__init__.py b/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/__init__.py deleted file mode 100644 index d6b09f0b836..00000000000 --- a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_chroma_multi_modal_multi_vector.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py b/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py deleted file mode 100644 index 387fc670033..00000000000 --- a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py +++ /dev/null @@ -1,143 +0,0 @@ -import base64 -import io -import os -from pathlib import Path - -from langchain.pydantic_v1 import BaseModel -from langchain.retrievers.multi_vector import MultiVectorRetriever -from langchain.storage import LocalFileStore, UpstashRedisByteStore -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.documents import Document -from langchain_core.messages import HumanMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.runnables import RunnableLambda, RunnablePassthrough -from PIL import Image - - -def resize_base64_image(base64_string, size=(128, 128)): - """ - Resize an image encoded as a Base64 string. - - :param base64_string: A Base64 encoded string of the image to be resized. - :param size: A tuple representing the new size (width, height) for the image. - :return: A Base64 encoded string of the resized image. - """ - img_data = base64.b64decode(base64_string) - img = Image.open(io.BytesIO(img_data)) - resized_img = img.resize(size, Image.LANCZOS) - buffered = io.BytesIO() - resized_img.save(buffered, format=img.format) - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def get_resized_images(docs): - """ - Resize images from base64-encoded strings. - - :param docs: A list of base64-encoded image to be resized. - :return: Dict containing a list of resized base64-encoded strings. - """ - b64_images = [] - for doc in docs: - if isinstance(doc, Document): - doc = doc.page_content - resized_image = resize_base64_image(doc, size=(1280, 720)) - b64_images.append(resized_image) - return {"images": b64_images} - - -def img_prompt_func(data_dict, num_images=2): - """ - GPT-4V prompt for image analysis. - - :param data_dict: A dict with images and a user-provided question. - :param num_images: Number of images to include in the prompt. - :return: A list containing message objects for each image and the text prompt. 
- """ - messages = [] - if data_dict["context"]["images"]: - for image in data_dict["context"]["images"][:num_images]: - image_message = { - "type": "image_url", - "image_url": {"url": f"data:image/jpeg;base64,{image}"}, - } - messages.append(image_message) - text_message = { - "type": "text", - "text": ( - "You are an analyst tasked with answering questions about visual content.\n" - "You will be give a set of image(s) from a slide deck / presentation.\n" - "Use this information to answer the user question. \n" - f"User-provided question: {data_dict['question']}\n\n" - ), - } - messages.append(text_message) - return [HumanMessage(content=messages)] - - -def multi_modal_rag_chain(retriever): - """ - Multi-modal RAG chain, - - :param retriever: A function that retrieves the necessary context for the model. - :return: A chain of functions representing the multi-modal RAG process. - """ - # Initialize the multi-modal Large Language Model with specific parameters - model = ChatOpenAI(temperature=0, model="gpt-4-vision-preview", max_tokens=1024) - - # Define the RAG pipeline - chain = ( - { - "context": retriever | RunnableLambda(get_resized_images), - "question": RunnablePassthrough(), - } - | RunnableLambda(img_prompt_func) - | model - | StrOutputParser() - ) - - return chain - - -# Flag -local_file_store = True - -# Load chroma -vectorstore_mvr = Chroma( - collection_name="image_summaries", - persist_directory=str(Path(__file__).parent.parent / "chroma_db_multi_modal"), - embedding_function=OpenAIEmbeddings(), -) - -if local_file_store: - store = LocalFileStore( - str(Path(__file__).parent.parent / "multi_vector_retriever_metadata") - ) -else: - # Load redis - UPSTASH_URL = os.getenv("UPSTASH_URL") - UPSTASH_TOKEN = os.getenv("UPSTASH_TOKEN") - store = UpstashRedisByteStore(url=UPSTASH_URL, token=UPSTASH_TOKEN) - -# -id_key = "doc_id" - -# Create the multi-vector retriever -retriever = MultiVectorRetriever( - vectorstore=vectorstore_mvr, - byte_store=store, - id_key=id_key, -) - -# Create RAG chain -chain = multi_modal_rag_chain(retriever) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-chroma-multi-modal-multi-vector/tests/__init__.py b/templates/rag-chroma-multi-modal-multi-vector/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-chroma-multi-modal/.gitignore b/templates/rag-chroma-multi-modal/.gitignore deleted file mode 100644 index 709823056cf..00000000000 --- a/templates/rag-chroma-multi-modal/.gitignore +++ /dev/null @@ -1 +0,0 @@ -docs/img_*.jpg diff --git a/templates/rag-chroma-multi-modal/LICENSE b/templates/rag-chroma-multi-modal/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-chroma-multi-modal/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-chroma-multi-modal/README.md b/templates/rag-chroma-multi-modal/README.md deleted file mode 100644 index d922304cc26..00000000000 --- a/templates/rag-chroma-multi-modal/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# RAG - Chroma multi-modal - -Multi-modal LLMs enable visual assistants that can perform question-answering about images. - -This template creates a visual assistant for slide decks, which often contain visuals such as graphs or figures. - -It uses `OpenCLIP` embeddings to embed all the slide images and stores them in `Chroma`. - -Given a question, relevant slides are retrieved and passed to `GPT-4V` for answer synthesis. - -![Diagram illustrating the workflow of a multi-modal LLM visual assistant using OpenCLIP embeddings and GPT-4V for question-answering based on slide deck images.](https://github.com/langchain-ai/langchain/assets/122662504/b3bc8406-48ae-4707-9edf-d0b3a511b200 "Workflow Diagram for Multi-modal LLM Visual Assistant") - -## Input - -Supply a slide deck as pdf in the `/docs` directory. - -By default, this template has a slide deck about Q3 earnings from DataDog, a public technology company. - -Example questions to ask can be: -``` -How many customers does Datadog have? -What is Datadog platform % Y/Y growth in FY20, FY21, and FY22? -``` - -To create an index of the slide deck, run: -``` -poetry install -python ingest.py -``` - -## Storage - -This template will use [OpenCLIP](https://github.com/mlfoundations/open_clip) multi-modal embeddings to embed the images. - -You can select different embedding model options (see results [here](https://github.com/mlfoundations/open_clip/blob/main/docs/openclip_results.csv)). - -The first time you run the app, it will automatically download the multimodal embedding model. - -By default, LangChain will use an embedding model with moderate performance but lower memory requirements, `ViT-H-14`. - -You can choose alternative `OpenCLIPEmbeddings` models in `rag_chroma_multi_modal/ingest.py`: -``` -vectorstore_mmembd = Chroma( - collection_name="multi-modal-rag", - persist_directory=str(re_vectorstore_path), - embedding_function=OpenCLIPEmbeddings( - model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k" - ), -) -``` - -## LLM - -The app will retrieve images based on similarity between the text input and the image, which are both mapped to multi-modal embedding space. It will then pass the images to GPT-4V. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI GPT-4V. 
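Retrieval in this template is a plain similarity search against the OpenCLIP-embedded images. A rough sketch of querying the index built by `ingest.py` might look like the following; the collection name, persist directory, and checkpoint mirror the template defaults, while the query string is only an example:

```python
# Sketch of text-to-image retrieval against the Chroma index built by ingest.py.
# Collection name, persist directory, and OpenCLIP checkpoint mirror the template
# defaults; the query string is only an example.
from langchain_chroma import Chroma
from langchain_experimental.open_clip import OpenCLIPEmbeddings

vectorstore_mmembd = Chroma(
    collection_name="multi-modal-rag",
    persist_directory="chroma_db_multi_modal",
    embedding_function=OpenCLIPEmbeddings(
        model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k"
    ),
)

# The text query and the stored slide images share one CLIP embedding space, so a
# similarity search returns the most relevant slides as base64-encoded documents.
docs = vectorstore_mmembd.similarity_search(
    "Datadog platform % Y/Y growth in FY20, FY21, and FY22", k=2
)
```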
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-chroma-multi-modal -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-chroma-multi-modal -``` - -And add the following code to your `server.py` file: -```python -from rag_chroma_multi_modal import chain as rag_chroma_multi_modal_chain - -add_routes(app, rag_chroma_multi_modal_chain, path="/rag-chroma-multi-modal") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-chroma-multi-modal/playground](http://127.0.0.1:8000/rag-chroma-multi-modal/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-chroma-multi-modal") -``` diff --git a/templates/rag-chroma-multi-modal/docs/DDOG_Q3_earnings_deck.pdf b/templates/rag-chroma-multi-modal/docs/DDOG_Q3_earnings_deck.pdf deleted file mode 100644 index a4aa7d864c4..00000000000 Binary files a/templates/rag-chroma-multi-modal/docs/DDOG_Q3_earnings_deck.pdf and /dev/null differ diff --git a/templates/rag-chroma-multi-modal/ingest.py b/templates/rag-chroma-multi-modal/ingest.py deleted file mode 100644 index 60fb8369bac..00000000000 --- a/templates/rag-chroma-multi-modal/ingest.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -from pathlib import Path - -import pypdfium2 as pdfium -from langchain_chroma import Chroma -from langchain_experimental.open_clip import OpenCLIPEmbeddings - - -def get_images_from_pdf(pdf_path, img_dump_path): - """ - Extract images from each page of a PDF document and save as JPEG files. - - :param pdf_path: A string representing the path to the PDF file. - :param img_dump_path: A string representing the path to dump images. 
- """ - pdf = pdfium.PdfDocument(pdf_path) - n_pages = len(pdf) - for page_number in range(n_pages): - page = pdf.get_page(page_number) - bitmap = page.render(scale=1, rotation=0, crop=(0, 0, 0, 0)) - pil_image = bitmap.to_pil() - pil_image.save(f"{img_dump_path}/img_{page_number + 1}.jpg", format="JPEG") - - -# Load PDF -doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf" -img_dump_path = Path(__file__).parent / "docs/" -rel_doc_path = doc_path.relative_to(Path.cwd()) -rel_img_dump_path = img_dump_path.relative_to(Path.cwd()) -print("pdf index") -pil_images = get_images_from_pdf(rel_doc_path, rel_img_dump_path) -print("done") -vectorstore = Path(__file__).parent / "chroma_db_multi_modal" -re_vectorstore_path = vectorstore.relative_to(Path.cwd()) - -# Load embedding function -print("Loading embedding function") -embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k") - -# Create chroma -vectorstore_mmembd = Chroma( - collection_name="multi-modal-rag", - persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"), - embedding_function=embedding, -) - -# Get image URIs -image_uris = sorted( - [ - os.path.join(rel_img_dump_path, image_name) - for image_name in os.listdir(rel_img_dump_path) - if image_name.endswith(".jpg") - ] -) - -# Add images -print("Embedding images") -vectorstore_mmembd.add_images(uris=image_uris) diff --git a/templates/rag-chroma-multi-modal/pyproject.toml b/templates/rag-chroma-multi-modal/pyproject.toml deleted file mode 100644 index 202584eb410..00000000000 --- a/templates/rag-chroma-multi-modal/pyproject.toml +++ /dev/null @@ -1,38 +0,0 @@ -[tool.poetry] -name = "rag-chroma-multi-modal" -version = "0.1.0" -description = "Multi-modal RAG using Chroma" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = ">=0.0.353,<0.2" -openai = "<2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -open-clip-torch = ">=2.23.0" -torch = ">=2.1.0" -pypdfium2 = ">=4.20.0" -langchain-experimental = ">=0.0.43" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_chroma_multi_modal" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Chroma"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal.ipynb b/templates/rag-chroma-multi-modal/rag_chroma_multi_modal.ipynb deleted file mode 100644 index 55562449550..00000000000 --- a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal.ipynb +++ /dev/null @@ -1,52 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/rag-chroma-multi-modal\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-chroma-multi-modal\")\n", - "rag_app.invoke(\"What is the projected TAM for observability expected for each year through 2026?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - 
"codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/__init__.py b/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/__init__.py deleted file mode 100644 index 2446829d4a8..00000000000 --- a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_chroma_multi_modal.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py b/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py deleted file mode 100644 index c227fbac9c7..00000000000 --- a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py +++ /dev/null @@ -1,122 +0,0 @@ -import base64 -import io -from pathlib import Path - -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOpenAI -from langchain_core.documents import Document -from langchain_core.messages import HumanMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableLambda, RunnablePassthrough -from langchain_experimental.open_clip import OpenCLIPEmbeddings -from PIL import Image - - -def resize_base64_image(base64_string, size=(128, 128)): - """ - Resize an image encoded as a Base64 string. - - :param base64_string: A Base64 encoded string of the image to be resized. - :param size: A tuple representing the new size (width, height) for the image. - :return: A Base64 encoded string of the resized image. - """ - img_data = base64.b64decode(base64_string) - img = Image.open(io.BytesIO(img_data)) - resized_img = img.resize(size, Image.LANCZOS) - buffered = io.BytesIO() - resized_img.save(buffered, format=img.format) - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def get_resized_images(docs): - """ - Resize images from base64-encoded strings. - - :param docs: A list of base64-encoded image to be resized. - :return: Dict containing a list of resized base64-encoded strings. - """ - b64_images = [] - for doc in docs: - if isinstance(doc, Document): - doc = doc.page_content - resized_image = resize_base64_image(doc, size=(1280, 720)) - b64_images.append(resized_image) - return {"images": b64_images} - - -def img_prompt_func(data_dict, num_images=2): - """ - GPT-4V prompt for image analysis. - - :param data_dict: A dict with images and a user-provided question. - :param num_images: Number of images to include in the prompt. - :return: A list containing message objects for each image and the text prompt. - """ - messages = [] - if data_dict["context"]["images"]: - for image in data_dict["context"]["images"][:num_images]: - image_message = { - "type": "image_url", - "image_url": {"url": f"data:image/jpeg;base64,{image}"}, - } - messages.append(image_message) - text_message = { - "type": "text", - "text": ( - "You are an analyst tasked with answering questions about visual content.\n" - "You will be give a set of image(s) from a slide deck / presentation.\n" - "Use this information to answer the user question. 
\n" - f"User-provided question: {data_dict['question']}\n\n" - ), - } - messages.append(text_message) - return [HumanMessage(content=messages)] - - -def multi_modal_rag_chain(retriever): - """ - Multi-modal RAG chain, - - :param retriever: A function that retrieves the necessary context for the model. - :return: A chain of functions representing the multi-modal RAG process. - """ - # Initialize the multi-modal Large Language Model with specific parameters - model = ChatOpenAI(temperature=0, model="gpt-4-vision-preview", max_tokens=1024) - - # Define the RAG pipeline - chain = ( - { - "context": retriever | RunnableLambda(get_resized_images), - "question": RunnablePassthrough(), - } - | RunnableLambda(img_prompt_func) - | model - | StrOutputParser() - ) - - return chain - - -# Load chroma -vectorstore_mmembd = Chroma( - collection_name="multi-modal-rag", - persist_directory=str(Path(__file__).parent.parent / "chroma_db_multi_modal"), - embedding_function=OpenCLIPEmbeddings( - model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k" - ), -) - -# Make retriever -retriever_mmembd = vectorstore_mmembd.as_retriever() - -# Create RAG chain -chain = multi_modal_rag_chain(retriever_mmembd) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-chroma-multi-modal/tests/__init__.py b/templates/rag-chroma-multi-modal/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-chroma-private/LICENSE b/templates/rag-chroma-private/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-chroma-private/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-chroma-private/README.md b/templates/rag-chroma-private/README.md deleted file mode 100644 index 785d06a3c08..00000000000 --- a/templates/rag-chroma-private/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# RAG - Chroma, Ollama, Gpt4all - private - -This template performs RAG with no reliance on external APIs. - -It utilizes `Ollama` the LLM, `GPT4All` for embeddings, and `Chroma` for the vectorstore. - -The vectorstore is created in `chain.py` and by default indexes a [popular blog posts on Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) for question-answering. - -## Environment Setup - -To set up the environment, you need to download Ollama. 
- -Follow the instructions [here](https://python.langchain.com/docs/integrations/chat/ollama). - -You can choose the desired LLM with Ollama. - -This template uses `llama2:7b-chat`, which can be accessed using `ollama pull llama2:7b-chat`. - -There are many other options available [here](https://ollama.ai/library). - -This package also uses [GPT4All](https://python.langchain.com/docs/integrations/text_embedding/gpt4all) embeddings. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-chroma-private -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-chroma-private -``` - -And add the following code to your `server.py` file: -```python -from rag_chroma_private import chain as rag_chroma_private_chain - -add_routes(app, rag_chroma_private_chain, path="/rag-chroma-private") -``` - -(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-chroma-private/playground](http://127.0.0.1:8000/rag-chroma-private/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-chroma-private") -``` - -The package will create and add documents to the vector database in `chain.py`. By default, it will load a popular blog post on agents. However, you can choose from a large number of document loaders [here](https://python.langchain.com/docs/integrations/document_loaders). 
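The removed README above notes that indexing happens inline in `chain.py` and that other document loaders can be swapped in. Below is a hypothetical variation that indexes the `docs/LLaVA.pdf` file bundled with this template instead of the default blog post; the loader choice and file path are illustrative, and `pypdf` is assumed to be installed even though it is not a declared dependency of the template:

```python
# Hypothetical rewrite of the ingestion block at the top of
# rag_chroma_private/chain.py: index the bundled LLaVA paper rather than the
# default agents blog post. Requires the `pypdf` package (not a template dep).
from langchain_chroma import Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

loader = PyPDFLoader("docs/LLaVA.pdf")  # path relative to the template root
data = loader.load()

# Same splitting parameters as the original chain.py
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)

# Embed locally with GPT4All and store in Chroma, as the template already does
vectorstore = Chroma.from_documents(
    documents=all_splits,
    collection_name="rag-private",
    embedding=GPT4AllEmbeddings(),
)
retriever = vectorstore.as_retriever()
```

The rest of the chain (prompt, `ChatOllama` model, and output parser) would stay unchanged.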
diff --git a/templates/rag-chroma-private/docs/LLaVA.pdf b/templates/rag-chroma-private/docs/LLaVA.pdf deleted file mode 100644 index 88da76cecd5..00000000000 Binary files a/templates/rag-chroma-private/docs/LLaVA.pdf and /dev/null differ diff --git a/templates/rag-chroma-private/pyproject.toml b/templates/rag-chroma-private/pyproject.toml deleted file mode 100644 index 36b9a270d65..00000000000 --- a/templates/rag-chroma-private/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[tool.poetry] -name = "rag-chroma-private" -version = "0.1.0" -description = "Private RAG using local LLM, embeddings, vectorstore" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -gpt4all = ">=1.0.8" -beautifulsoup4 = ">=4.12.2" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_chroma_private" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Chroma", "Gpt4all", "Ollama"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-chroma-private/rag_chroma_private.ipynb b/templates/rag-chroma-private/rag_chroma_private.ipynb deleted file mode 100644 index b63f0c5c9a8..00000000000 --- a/templates/rag-chroma-private/rag_chroma_private.ipynb +++ /dev/null @@ -1,59 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "232fd40d-cf6a-402d-bcb8-414184a8e924", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_private, path=\"/rag_chroma_private\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ce39d358-1934-4404-bd3e-3fd497974aff", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Based on the provided context, agent memory is a long-term memory module that records a comprehensive list of agents' experiences in natural language. Each element is an observation or event directly provided by the agent, and inter-agent communication can trigger new natural language statements. The agent memory is complemented by several key components, including LLM (large language model) as the agent's brain, planning, reflection, and memory mechanisms. The design of generative agents combines LLM with memory, planning, and reflection mechanisms to enable agents to behave conditioned on past experiences and interact with other agents. The agent learns to call external APIs for missing information, including current information, code execution capability, access to proprietary information sources, and more. 
In summary, the agent memory works by recording and storing observations and events in natural language, allowing the agent to retrieve and use this information to inform its behavior.\n" - ] - } - ], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://0.0.0.0:8001/rag_chroma_private/\")\n", - "rag_app.invoke(\"How does agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-chroma-private/rag_chroma_private/__init__.py b/templates/rag-chroma-private/rag_chroma_private/__init__.py deleted file mode 100644 index c06d4f844d6..00000000000 --- a/templates/rag-chroma-private/rag_chroma_private/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_chroma_private.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-chroma-private/rag_chroma_private/chain.py b/templates/rag-chroma-private/rag_chroma_private/chain.py deleted file mode 100644 index 3fd02b26922..00000000000 --- a/templates/rag-chroma-private/rag_chroma_private/chain.py +++ /dev/null @@ -1,59 +0,0 @@ -# Load -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOllama -from langchain_community.document_loaders import WebBaseLoader -from langchain_community.embeddings import GPT4AllEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_text_splitters import RecursiveCharacterTextSplitter - -loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -data = loader.load() - -# Split - -text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -all_splits = text_splitter.split_documents(data) - -# Add to vectorDB -vectorstore = Chroma.from_documents( - documents=all_splits, - collection_name="rag-private", - embedding=GPT4AllEmbeddings(), -) -retriever = vectorstore.as_retriever() - -# Prompt -# Optionally, pull from the Hub -# from langchain import hub -# prompt = hub.pull("rlm/rag-prompt") -# Or, define your own: -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# LLM -# Select the LLM that you downloaded -ollama_llm = "llama2:7b-chat" -model = ChatOllama(model=ollama_llm) - -# RAG chain -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-chroma-private/tests/__init__.py b/templates/rag-chroma-private/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-chroma/LICENSE b/templates/rag-chroma/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-chroma/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-chroma/README.md b/templates/rag-chroma/README.md deleted file mode 100644 index 46601685864..00000000000 --- a/templates/rag-chroma/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# RAG - Chroma - -This template performs RAG using `Chroma` and `OpenAI`. - -The vectorstore is created in `chain.py` and by default indexes a [popular blog posts on Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) for question-answering. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-chroma -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-chroma -``` - -And add the following code to your `server.py` file: -```python -from rag_chroma import chain as rag_chroma_chain - -add_routes(app, rag_chroma_chain, path="/rag-chroma") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-chroma/playground](http://127.0.0.1:8000/rag-chroma/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-chroma") -``` \ No newline at end of file diff --git a/templates/rag-chroma/pyproject.toml b/templates/rag-chroma/pyproject.toml deleted file mode 100644 index 012bbabace0..00000000000 --- a/templates/rag-chroma/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-chroma" -version = "0.1.0" -description = "RAG using Chroma" -authors = [ - "Erick Friis ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_chroma" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Chroma"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-chroma/rag_chroma.ipynb b/templates/rag-chroma/rag_chroma.ipynb deleted file mode 100644 index 66cec5297bc..00000000000 --- a/templates/rag-chroma/rag_chroma.ipynb +++ /dev/null @@ -1,51 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/rag-chroma\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-chroma\")\n", - "rag_app.invoke(\"Where id Harrison work\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-chroma/rag_chroma/__init__.py b/templates/rag-chroma/rag_chroma/__init__.py deleted file mode 100644 index a625fc70997..00000000000 --- a/templates/rag-chroma/rag_chroma/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_chroma.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-chroma/rag_chroma/chain.py b/templates/rag-chroma/rag_chroma/chain.py deleted file mode 100644 index acce539954c..00000000000 --- a/templates/rag-chroma/rag_chroma/chain.py +++ /dev/null @@ -1,63 +0,0 @@ -from langchain_chroma import Chroma -from 
langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -# Example for document loading (from url), splitting, and creating vectostore - -""" -# Load -from langchain_community.document_loaders import WebBaseLoader -loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -data = loader.load() - -# Split -from langchain_text_splitters import RecursiveCharacterTextSplitter -text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -all_splits = text_splitter.split_documents(data) - -# Add to vectorDB -vectorstore = Chroma.from_documents(documents=all_splits, - collection_name="rag-chroma", - embedding=OpenAIEmbeddings(), - ) -retriever = vectorstore.as_retriever() -""" - -# Embed a single document as a test -vectorstore = Chroma.from_texts( - ["harrison worked at kensho"], - collection_name="rag-chroma", - embedding=OpenAIEmbeddings(), -) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# LLM -model = ChatOpenAI() - -# RAG chain -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-chroma/tests/__init__.py b/templates/rag-chroma/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-codellama-fireworks/LICENSE b/templates/rag-codellama-fireworks/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-codellama-fireworks/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-codellama-fireworks/README.md b/templates/rag-codellama-fireworks/README.md deleted file mode 100644 index 02ec82898db..00000000000 --- a/templates/rag-codellama-fireworks/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# RAG - codellama, Fireworks - -This template performs RAG on a codebase. 
- -It uses `codellama-34b` hosted by `Fireworks` [LLM inference API](https://blog.fireworks.ai/accelerating-code-completion-with-fireworks-fast-llm-inference-f4e8b5ec534a). - -## Environment Setup - -Set the `FIREWORKS_API_KEY` environment variable to access the Fireworks models. - -You can obtain it from [here](https://app.fireworks.ai/login?callbackURL=https://app.fireworks.ai). - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-codellama-fireworks -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-codellama-fireworks -``` - -And add the following code to your `server.py` file: -```python -from rag_codellama_fireworks import chain as rag_codellama_fireworks_chain - -add_routes(app, rag_codellama_fireworks_chain, path="/rag-codellama-fireworks") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-codellama-fireworks/playground](http://127.0.0.1:8000/rag-codellama-fireworks/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-codellama-fireworks") -``` diff --git a/templates/rag-codellama-fireworks/pyproject.toml b/templates/rag-codellama-fireworks/pyproject.toml deleted file mode 100644 index 99bd6d84562..00000000000 --- a/templates/rag-codellama-fireworks/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[tool.poetry] -name = "rag-codellama-fireworks" -version = "0.1.0" -description = "RAG using OSS LLMs via Fireworks" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.9,<4.0" -langchain = "^0.1" -gpt4all = ">=1.0.8" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -fireworks-ai = ">=0.6.0" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_codellama_fireworks" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Elastic" -integrations = ["OpenAI", "Fireworks"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-codellama-fireworks/rag_codellama_fireworks.ipynb b/templates/rag-codellama-fireworks/rag_codellama_fireworks.ipynb deleted file mode 100644 index e13220d76ff..00000000000 --- a/templates/rag-codellama-fireworks/rag_codellama_fireworks.ipynb +++ /dev/null @@ -1,46 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8000/rag-codellama-fireworks\")\n", - "rag_app.invoke(\"How can I initialize a ReAct agent?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-codellama-fireworks/rag_codellama_fireworks/__init__.py b/templates/rag-codellama-fireworks/rag_codellama_fireworks/__init__.py deleted file mode 100644 index 71d183db045..00000000000 --- a/templates/rag-codellama-fireworks/rag_codellama_fireworks/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_codellama_fireworks.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py b/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py deleted file mode 100644 index 61c2c7aad5d..00000000000 --- a/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py +++ /dev/null @@ -1,71 +0,0 @@ -import os - -from git import Repo -from langchain_chroma import Chroma -from langchain_community.document_loaders.generic import GenericLoader -from langchain_community.document_loaders.parsers import LanguageParser -from langchain_community.embeddings import GPT4AllEmbeddings -from langchain_community.llms.fireworks import Fireworks -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_text_splitters import Language, RecursiveCharacterTextSplitter - -# Check API key -if os.environ.get("FIREWORKS_API_KEY", None) is None: - raise Exception("Missing `FIREWORKS_API_KEY` environment variable.") - -# Load codebase -# Set local path -repo_path = "/Users/rlm/Desktop/tmp_repo" -# Use LangChain as an example -repo = Repo.clone_from("https://github.com/langchain-ai/langchain", to_path=repo_path) -loader = GenericLoader.from_filesystem( - repo_path + "/libs/langchain/langchain", - glob="**/*", - suffixes=[".py"], - parser=LanguageParser(language=Language.PYTHON, parser_threshold=500), -) -documents = loader.load() - -# Split -python_splitter = RecursiveCharacterTextSplitter.from_language( - language=Language.PYTHON, chunk_size=2000, chunk_overlap=200 -) -texts = python_splitter.split_documents(documents) - -# Add to vectorDB -vectorstore = Chroma.from_documents( - documents=texts, - collection_name="codebase-rag", - embedding=GPT4AllEmbeddings(), -) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# Initialize a Fireworks model -model = Fireworks(model="accounts/fireworks/models/llama-v2-34b-code-instruct") - -# RAG chain -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class 
Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-codellama-fireworks/tests/__init__.py b/templates/rag-codellama-fireworks/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-conversation-zep/LICENSE b/templates/rag-conversation-zep/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-conversation-zep/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-conversation-zep/README.md b/templates/rag-conversation-zep/README.md deleted file mode 100644 index 234a9d850f5..00000000000 --- a/templates/rag-conversation-zep/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# RAG - Zep - conversation - -This template demonstrates building a RAG conversation app using `Zep`. - -Included in this template: -- Populating a [Zep Document Collection](https://docs.getzep.com/sdk/documents/) with a set of documents (a Collection is analogous to an index in other Vector Databases). -- Using Zep's [integrated embedding](https://docs.getzep.com/deployment/embeddings/) functionality to embed the documents as vectors. -- Configuring a LangChain [ZepVectorStore Retriever](https://docs.getzep.com/sdk/documents/) to retrieve documents using Zep's built, hardware accelerated in [Maximal Marginal Relevance](https://docs.getzep.com/sdk/search_query/) (MMR) re-ranking. -- Prompts, a simple chat history data structure, and other components required to build a RAG conversation app. -- The RAG conversation chain. - -## About Zep - -[Zep - Fast, scalable building blocks for LLM Apps](https://www.getzep.com/) - -Zep is an open source platform for productionizing LLM apps. Go from a prototype built in LangChain or LlamaIndex, or a custom app, to production in minutes without rewriting code. - -Key Features: - -- Fast! Zep’s async extractors operate independently of the chat loop, ensuring a snappy user experience. -- Long-term memory persistence, with access to historical messages irrespective of your summarization strategy. -- Auto-summarization of memory messages based on a configurable message window. A series of summaries are stored, providing flexibility for future summarization strategies. -- Hybrid search over memories and metadata, with messages automatically embedded on creation. -- Entity Extractor that automatically extracts named entities from messages and stores them in the message metadata. 
-- Auto-token counting of memories and summaries, allowing finer-grained control over prompt assembly. -- Python and JavaScript SDKs. - -`Zep` project: https://github.com/getzep/zep | Docs: https://docs.getzep.com/ - -## Environment Setup - -Set up a Zep service by following the [Quick Start Guide](https://docs.getzep.com/deployment/quickstart/). - -## Ingesting Documents into a Zep Collection - -Run `python ingest.py` to ingest the test documents into a Zep Collection. Review the file to modify the Collection name and document source. - - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U "langchain-cli[serve]" -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-conversation-zep -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-conversation-zep -``` - -And add the following code to your `server.py` file: -```python -from rag_conversation_zep import chain as rag_conversation_zep_chain - -add_routes(app, rag_conversation_zep_chain, path="/rag-conversation-zep") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-conversation-zep/playground](http://127.0.0.1:8000/rag-conversation-zep/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-conversation-zep") -``` \ No newline at end of file diff --git a/templates/rag-conversation-zep/ingest.py b/templates/rag-conversation-zep/ingest.py deleted file mode 100644 index dfbf239f66d..00000000000 --- a/templates/rag-conversation-zep/ingest.py +++ /dev/null @@ -1,37 +0,0 @@ -# Ingest Documents into a Zep Collection -import os - -from langchain_community.document_loaders import WebBaseLoader -from langchain_community.embeddings import FakeEmbeddings -from langchain_community.vectorstores.zep import CollectionConfig, ZepVectorStore -from langchain_text_splitters import RecursiveCharacterTextSplitter - -ZEP_API_URL = os.environ.get("ZEP_API_URL", "http://localhost:8000") -ZEP_API_KEY = os.environ.get("ZEP_API_KEY", None) -ZEP_COLLECTION_NAME = os.environ.get("ZEP_COLLECTION", "langchaintest") - -collection_config = CollectionConfig( - name=ZEP_COLLECTION_NAME, - description="Zep collection for LangChain", - metadata={}, - embedding_dimensions=1536, - is_auto_embedded=True, -) - -# Load -loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -data = loader.load() - -# Split -text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -all_splits = text_splitter.split_documents(data) - -# Add to vectorDB -vectorstore = ZepVectorStore.from_documents( - 
documents=all_splits, - collection_name=ZEP_COLLECTION_NAME, - config=collection_config, - api_url=ZEP_API_URL, - api_key=ZEP_API_KEY, - embedding=FakeEmbeddings(size=1), -) diff --git a/templates/rag-conversation-zep/pyproject.toml b/templates/rag-conversation-zep/pyproject.toml deleted file mode 100644 index ea9a08f9916..00000000000 --- a/templates/rag-conversation-zep/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag_conversation_zep" -version = "0.0.1" -description = "A RAG application built with Zep. Zep provides a VectorStore implementation to the chain." -authors = ["Daniel Chalef "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -zep-python = "^1.4.0" -tiktoken = "^0.5.1" -beautifulsoup4 = "^4.12.2" -bs4 = "^0.0.1" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_conversation_zep" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Elastic" -integrations = ["OpenAI", "Zep"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-conversation-zep/rag_conversation_zep/__init__.py b/templates/rag-conversation-zep/rag_conversation_zep/__init__.py deleted file mode 100644 index 11783d3e225..00000000000 --- a/templates/rag-conversation-zep/rag_conversation_zep/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_conversation_zep.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-conversation-zep/rag_conversation_zep/chain.py b/templates/rag-conversation-zep/rag_conversation_zep/chain.py deleted file mode 100644 index 1031e2cd453..00000000000 --- a/templates/rag-conversation-zep/rag_conversation_zep/chain.py +++ /dev/null @@ -1,147 +0,0 @@ -import os -from operator import itemgetter -from typing import List, Tuple - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.vectorstores.zep import CollectionConfig, ZepVectorStore -from langchain_core.documents import Document -from langchain_core.messages import AIMessage, BaseMessage, HumanMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ( - ChatPromptTemplate, - MessagesPlaceholder, - format_document, -) -from langchain_core.prompts.prompt import PromptTemplate -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import ( - ConfigurableField, - RunnableBranch, - RunnableLambda, - RunnableParallel, - RunnablePassthrough, -) -from langchain_core.runnables.utils import ConfigurableFieldSingleOption - -ZEP_API_URL = os.environ.get("ZEP_API_URL", "http://localhost:8000") -ZEP_API_KEY = os.environ.get("ZEP_API_KEY", None) -ZEP_COLLECTION_NAME = os.environ.get("ZEP_COLLECTION", "langchaintest") - -collection_config = CollectionConfig( - name=ZEP_COLLECTION_NAME, - description="Zep collection for LangChain", - metadata={}, - embedding_dimensions=1536, - is_auto_embedded=True, -) - -vectorstore = ZepVectorStore( - collection_name=ZEP_COLLECTION_NAME, - config=collection_config, - api_url=ZEP_API_URL, - api_key=ZEP_API_KEY, - embedding=None, -) - -# Zep offers native, hardware-accelerated MMR. Enabling this will improve -# the diversity of results, but may also reduce relevance. You can tune -# the lambda parameter to control the tradeoff between relevance and diversity. 
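# Illustrative usage (not part of the original file): because search_type and
# search_kwargs are declared as configurable fields below, a caller can tune
# retrieval per request via the standard Runnable config, for example:
#   chain.with_config(configurable={"search_kwargs": {"k": 8, "lambda_mult": 0.5}})
# The k and lambda_mult values here are hypothetical.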
-# Enabling is a good default. -retriever = vectorstore.as_retriever().configurable_fields( - search_type=ConfigurableFieldSingleOption( - id="search_type", - options={"Similarity": "similarity", "Similarity with MMR Reranking": "mmr"}, - default="mmr", - name="Search Type", - description="Type of search to perform: 'similarity' or 'mmr'", - ), - search_kwargs=ConfigurableField( - id="search_kwargs", - name="Search kwargs", - description=( - "Specify 'k' for number of results to return and 'lambda_mult' for tuning" - " MMR relevance vs diversity." - ), - ), -) - -# Condense a chat history and follow-up question into a standalone question -_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. -Chat History: -{chat_history} -Follow Up Input: {question} -Standalone question:""" # noqa: E501 -CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) - -# RAG answer synthesis prompt -template = """Answer the question based only on the following context: - -{context} -""" -ANSWER_PROMPT = ChatPromptTemplate.from_messages( - [ - ("system", template), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{question}"), - ] -) - -# Conversational Retrieval Chain -DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}") - - -def _combine_documents( - docs: List[Document], - document_prompt: PromptTemplate = DEFAULT_DOCUMENT_PROMPT, - document_separator: str = "\n\n", -): - doc_strings = [format_document(doc, document_prompt) for doc in docs] - return document_separator.join(doc_strings) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]) -> List[BaseMessage]: - buffer: List[BaseMessage] = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -_condense_chain = ( - RunnablePassthrough.assign( - chat_history=lambda x: _format_chat_history(x["chat_history"]) - ) - | CONDENSE_QUESTION_PROMPT - | ChatOpenAI(temperature=0) - | StrOutputParser() -) - -_search_query = RunnableBranch( - # If input includes chat_history, we condense it with the follow-up question - ( - RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config( - run_name="HasChatHistoryCheck" - ), - # Condense follow-up question and chat into a standalone_question - _condense_chain, - ), - # Else, we have no chat history, so just pass through the question - RunnableLambda(itemgetter("question")), -) - - -# User input -class ChatHistory(BaseModel): - chat_history: List[Tuple[str, str]] = Field(..., extra={"widget": {"type": "chat"}}) - question: str - - -_inputs = RunnableParallel( - { - "question": lambda x: x["question"], - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - "context": _search_query | retriever | _combine_documents, - } -).with_types(input_type=ChatHistory) - -chain = _inputs | ANSWER_PROMPT | ChatOpenAI() | StrOutputParser() diff --git a/templates/rag-conversation-zep/tests/__init__.py b/templates/rag-conversation-zep/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-conversation/LICENSE b/templates/rag-conversation/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-conversation/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-conversation/README.md b/templates/rag-conversation/README.md deleted file mode 100644 index fb0bcd8dace..00000000000 --- a/templates/rag-conversation/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# RAG - Pinecone - conversation - -This template is used for [conversational](https://python.langchain.com/docs/expression_language/cookbook/retrieval#conversational-retrieval-chain) [retrieval](https://python.langchain.com/docs/use_cases/question_answering/), which is one of the most popular LLM use-cases. - -It passes both a conversation history and retrieved documents into an LLM for synthesis. - -## Environment Setup - -This template uses Pinecone as a vectorstore and requires that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `PINECONE_INDEX` are set. - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-conversation -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-conversation -``` - -And add the following code to your `server.py` file: -```python -from rag_conversation import chain as rag_conversation_chain - -add_routes(app, rag_conversation_chain, path="/rag-conversation") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-conversation/playground](http://127.0.0.1:8000/rag-conversation/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-conversation") -``` diff --git a/templates/rag-conversation/pyproject.toml b/templates/rag-conversation/pyproject.toml deleted file mode 100644 index 27bc631376b..00000000000 --- a/templates/rag-conversation/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[tool.poetry] -name = "rag-conversation" -version = "0.1.0" -description = "Conversational RAG using Pinecone" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -pinecone-client = ">=2.2.4" -beautifulsoup4 = "^4.12.2" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_conversation" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Elastic" -integrations = ["OpenAI", "Pinecone"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-conversation/rag_conversation.ipynb b/templates/rag-conversation/rag_conversation.ipynb deleted file mode 100644 index f07b9dab129..00000000000 --- a/templates/rag-conversation/rag_conversation.ipynb +++ /dev/null @@ -1,124 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "424a9d8d", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/rag_conversation\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "5f521923", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://0.0.0.0:8001/rag_conversation\")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "679bd83b", - "metadata": {}, - "outputs": [], - "source": [ - "question = \"How does agent memory work?\"\n", - "answer = rag_app.invoke(\n", - " {\n", - " \"question\": question,\n", - " \"chat_history\": [],\n", - " }\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "94a05616", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Based on the given context, it is mentioned that the design of generative agents combines LLM (which stands for language, learning, and memory) with memory mechanisms. 
However, the specific workings of agent memory are not explicitly described in the given context.'" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "answer" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "ce206c8a", - "metadata": {}, - "outputs": [], - "source": [ - "chat_history = [(question, answer)]\n", - "answer = rag_app.invoke(\n", - " {\n", - " \"question\": \"What are the different types?\",\n", - " \"chat_history\": chat_history,\n", - " }\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "4626f167", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\"Based on the given context, two types of memory are mentioned: short-term memory and long-term memory. \\n\\n1. Short-term memory: It refers to the ability of the agent to retain and recall information for a short period. In the context, short-term memory is described as the in-context learning that allows the model to learn.\\n\\n2. Long-term memory: It refers to the capability of the agent to retain and recall information over extended periods. In the context, long-term memory is described as the ability to retain and recall infinite information by leveraging an external vector store and fast retrieval.\\n\\nIt's important to note that these are just the types of memory mentioned in the given context. There may be other types of memory as well, depending on the specific design and implementation of the agent.\"" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "answer" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-conversation/rag_conversation/__init__.py b/templates/rag-conversation/rag_conversation/__init__.py deleted file mode 100644 index 4535ca34e88..00000000000 --- a/templates/rag-conversation/rag_conversation/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_conversation.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-conversation/rag_conversation/chain.py b/templates/rag-conversation/rag_conversation/chain.py deleted file mode 100644 index d7e43396591..00000000000 --- a/templates/rag-conversation/rag_conversation/chain.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -from operator import itemgetter -from typing import List, Tuple - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ( - ChatPromptTemplate, - MessagesPlaceholder, - format_document, -) -from langchain_core.prompts.prompt import PromptTemplate -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import ( - RunnableBranch, - RunnableLambda, - RunnableParallel, - RunnablePassthrough, -) -from langchain_pinecone import PineconeVectorStore - -if os.environ.get("PINECONE_API_KEY", None) is None: - raise Exception("Missing `PINECONE_API_KEY` environment variable.") - -if 
os.environ.get("PINECONE_ENVIRONMENT", None) is None: - raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.") - -PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test") - -### Ingest code - you may need to run this the first time -# # Load -# from langchain_community.document_loaders import WebBaseLoader -# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -# data = loader.load() - -# # Split -# from langchain_text_splitters import RecursiveCharacterTextSplitter -# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -# all_splits = text_splitter.split_documents(data) - -# # Add to vectorDB -# vectorstore = PineconeVectorStore.from_documents( -# documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME -# ) -# retriever = vectorstore.as_retriever() - -vectorstore = PineconeVectorStore.from_existing_index( - PINECONE_INDEX_NAME, OpenAIEmbeddings() -) -retriever = vectorstore.as_retriever() - -# Condense a chat history and follow-up question into a standalone question -_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. -Chat History: -{chat_history} -Follow Up Input: {question} -Standalone question:""" # noqa: E501 -CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) - -# RAG answer synthesis prompt -template = """Answer the question based only on the following context: - -{context} -""" -ANSWER_PROMPT = ChatPromptTemplate.from_messages( - [ - ("system", template), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{question}"), - ] -) - -# Conversational Retrieval Chain -DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}") - - -def _combine_documents( - docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n" -): - doc_strings = [format_document(doc, document_prompt) for doc in docs] - return document_separator.join(doc_strings) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]) -> List: - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -# User input -class ChatHistory(BaseModel): - chat_history: List[Tuple[str, str]] = Field(..., extra={"widget": {"type": "chat"}}) - question: str - - -_search_query = RunnableBranch( - # If input includes chat_history, we condense it with the follow-up question - ( - RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config( - run_name="HasChatHistoryCheck" - ), # Condense follow-up question and chat into a standalone_question - RunnablePassthrough.assign( - chat_history=lambda x: _format_chat_history(x["chat_history"]) - ) - | CONDENSE_QUESTION_PROMPT - | ChatOpenAI(temperature=0) - | StrOutputParser(), - ), - # Else, we have no chat history, so just pass through the question - RunnableLambda(itemgetter("question")), -) - -_inputs = RunnableParallel( - { - "question": lambda x: x["question"], - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - "context": _search_query | retriever | _combine_documents, - } -).with_types(input_type=ChatHistory) - -chain = _inputs | ANSWER_PROMPT | ChatOpenAI() | StrOutputParser() diff --git a/templates/rag-conversation/tests/__init__.py b/templates/rag-conversation/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-elasticsearch/LICENSE 
b/templates/rag-elasticsearch/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-elasticsearch/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-elasticsearch/README.md b/templates/rag-elasticsearch/README.md deleted file mode 100644 index fc01a4218e3..00000000000 --- a/templates/rag-elasticsearch/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# RAG - Elasticsearch - -This template performs RAG using [Elasticsearch](https://python.langchain.com/docs/integrations/vectorstores/elasticsearch). - -It relies on the Hugging Face sentence-transformers model `all-MiniLM-L6-v2` for embedding passages and questions. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -To connect to your Elasticsearch instance, use the following environment variables: - -```bash -export ELASTIC_CLOUD_ID= -export ELASTIC_USERNAME= -export ELASTIC_PASSWORD= -``` -For local development with Docker, use: - -```bash -export ES_URL="http://localhost:9200" -``` - -And run an Elasticsearch instance in Docker with: -```bash -docker run -p 9200:9200 -e "discovery.type=single-node" -e "xpack.security.enabled=false" -e "xpack.security.http.ssl.enabled=false" docker.elastic.co/elasticsearch/elasticsearch:8.9.0 -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-elasticsearch -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-elasticsearch -``` - -And add the following code to your `server.py` file: -```python -from rag_elasticsearch import chain as rag_elasticsearch_chain - -add_routes(app, rag_elasticsearch_chain, path="/rag-elasticsearch") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-elasticsearch/playground](http://127.0.0.1:8000/rag-elasticsearch/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-elasticsearch") -``` - -To load the fictional workplace documents, run the following command from the root of this repository: - -```bash -python ingest.py -``` - -Alternatively, you can load your own documents using any of the document loaders listed [here](https://python.langchain.com/docs/integrations/document_loaders). diff --git a/templates/rag-elasticsearch/data/documents.json b/templates/rag-elasticsearch/data/documents.json deleted file mode 100644 index b81cf1647f2..00000000000 --- a/templates/rag-elasticsearch/data/documents.json +++ /dev/null @@ -1,161 +0,0 @@ -[ - { - "content": "Effective: March 2020\nPurpose\n\nThe purpose of this full-time work-from-home policy is to provide guidelines and support for employees to conduct their work remotely, ensuring the continuity and productivity of business operations during the COVID-19 pandemic and beyond.\nScope\n\nThis policy applies to all employees who are eligible for remote work as determined by their role and responsibilities. It is designed to allow employees to work from home full time while maintaining the same level of performance and collaboration as they would in the office.\nEligibility\n\nEmployees who can perform their work duties remotely and have received approval from their direct supervisor and the HR department are eligible for this work-from-home arrangement.\nEquipment and Resources\n\nThe necessary equipment and resources will be provided to employees for remote work, including a company-issued laptop, software licenses, and access to secure communication tools. Employees are responsible for maintaining and protecting the company's equipment and data.\nWorkspace\n\nEmployees working from home are responsible for creating a comfortable and safe workspace that is conducive to productivity. This includes ensuring that their home office is ergonomically designed, well-lit, and free from distractions.\nCommunication\n\nEffective communication is vital for successful remote work. Employees are expected to maintain regular communication with their supervisors, colleagues, and team members through email, phone calls, video conferences, and other approved communication tools.\nWork Hours and Availability\n\nEmployees are expected to maintain their regular work hours and be available during normal business hours, unless otherwise agreed upon with their supervisor. Any changes to work hours or availability must be communicated to the employee's supervisor and the HR department.\nPerformance Expectations\n\nEmployees working from home are expected to maintain the same level of performance and productivity as if they were working in the office. 
Supervisors and team members will collaborate to establish clear expectations and goals for remote work.\nTime Tracking and Overtime\n\nEmployees are required to accurately track their work hours using the company's time tracking system. Non-exempt employees must obtain approval from their supervisor before working overtime.\nConfidentiality and Data Security\n\nEmployees must adhere to the company's confidentiality and data security policies while working from home. This includes safeguarding sensitive information, securing personal devices and internet connections, and reporting any security breaches to the IT department.\nHealth and Well-being\n\nThe company encourages employees to prioritize their health and well-being while working from home. This includes taking regular breaks, maintaining a work-life balance, and seeking support from supervisors and colleagues when needed.\nPolicy Review and Updates\n\nThis work-from-home policy will be reviewed periodically and updated as necessary, taking into account changes in public health guidance, business needs, and employee feedback.\nQuestions and Concerns\n\nEmployees are encouraged to direct any questions or concerns about this policy to their supervisor or the HR department.\n", - "summary": "This policy outlines the guidelines for full-time remote work, including eligibility, equipment and resources, workspace requirements, communication expectations, performance expectations, time tracking and overtime, confidentiality and data security, health and well-being, and policy reviews and updates. Employees are encouraged to direct any questions or concerns", - "name": "Work From Home Policy", - "url": "./sharepoint/Work from home policy.txt", - "created_on": "2020-03-01", - "updated_at": "2020-03-01", - "category": "teams", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Starting May 2022, the company will be implementing a two-day in-office work requirement per week for all eligible employees. Please coordinate with your supervisor and HR department to schedule your in-office workdays while continuing to follow all safety protocols.\n", - "summary": "Starting May 2022, employees will need to work two days a week in the office. Coordinate with your supervisor and HR department for these days while following safety protocols.", - "name": "April Work From Home Update", - "url": "./sharepoint/April work from home update.txt", - "created_on": "2022-04-29", - "updated_at": "2022-04-29", - "category": "teams", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "As we continue to prioritize the well-being of our employees, we are making a slight adjustment to our hybrid work policy. Starting May 1, 2023, employees will be required to work from the office three days a week, with two days designated for remote work. 
Please communicate with your supervisor and HR department to establish your updated in-office workdays.\n", - "summary": "Starting May 1, 2023, our hybrid work policy will require employees to work from the office three days a week and two days remotely.", - "name": "Wfh Policy Update May 2023", - "url": "./sharepoint/WFH policy update May 2023.txt", - "created_on": "2023-05-01", - "updated_at": "2023-05-01", - "category": "teams", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Executive Summary:\nThis sales strategy document outlines the key objectives, focus areas, and action plans for our tech company's sales operations in fiscal year 2024. Our primary goal is to increase revenue, expand market share, and strengthen customer relationships in our target markets.\n\nI. Objectives for Fiscal Year 2024\n\nIncrease revenue by 20% compared to fiscal year 2023.\nExpand market share in key segments by 15%.\nRetain 95% of existing customers and increase customer satisfaction ratings.\nLaunch at least two new products or services in high-demand market segments.\n\nII. Focus Areas\nA. Target Markets:\nContinue to serve existing markets with a focus on high-growth industries.\nIdentify and penetrate new markets with high potential for our products and services.\n\nB. Customer Segmentation:\nStrengthen relationships with key accounts and strategic partners.\nPursue new customers in underserved market segments.\nDevelop tailored offerings for different customer segments based on their needs and preferences.\n\nC. Product/Service Portfolio:\nOptimize the existing product/service portfolio by focusing on high-demand solutions.\nDevelop and launch innovative products/services in emerging technology areas.\nEnhance post-sales support and customer service to improve customer satisfaction.\n\nIII. Action Plans\nA. Sales Team Development:\nExpand the sales team to cover new markets and industries.\nProvide ongoing training to sales staff on product knowledge, sales techniques, and industry trends.\nImplement a performance-based incentive system to reward top performers.\n\nB. Marketing and Promotion:\nDevelop targeted marketing campaigns for different customer segments and industries.\nLeverage digital marketing channels to increase brand visibility and lead generation.\nParticipate in industry events and trade shows to showcase our products and services.\n\nC. Partner Ecosystem:\nStrengthen existing partnerships and establish new strategic alliances to expand market reach.\nCollaborate with partners on joint marketing and sales initiatives.\nProvide partner training and support to ensure they effectively represent our products and services.\n\nD. Customer Success:\nImplement a proactive customer success program to improve customer retention and satisfaction.\nDevelop a dedicated customer support team to address customer inquiries and concerns promptly.\nCollect and analyze customer feedback to identify areas for improvement in our products, services, and processes.\n\nIV. 
Monitoring and Evaluation\nEstablish key performance indicators (KPIs) to track progress toward our objectives.\nConduct regular sales team meetings to review performance, share best practices, and address challenges.\nConduct quarterly reviews of our sales strategy to ensure alignment with market trends and adjust as needed.\n\nBy following this sales strategy for fiscal year 2024, our tech company aims to achieve significant growth and success in our target markets, while also providing exceptional value and service to our customers.\n", - "summary": "This sales strategy document outlines objectives, focus areas, and action plans for our tech company's sales operations in fiscal year 2024. Our primary goal is to increase revenue, expand market share, and strengthen customer relationships in our target markets. Focus areas include targeting new markets, segmenting customers, enhancing", - "name": "Fy2024 Company Sales Strategy", - "url": "./sharepoint/FY2024 Company Sales Strategy.txt", - "category": "teams", - "created_on": "2023-04-15", - "updated_at": "2023-04-15", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Purpose\n\nThe purpose of this vacation policy is to outline the guidelines and procedures for requesting and taking time off from work for personal and leisure purposes. This policy aims to promote a healthy work-life balance and encourage employees to take time to rest and recharge.\nScope\n\nThis policy applies to all full-time and part-time employees who have completed their probationary period.\nVacation Accrual\n\nFull-time employees accrue vacation time at a rate of [X hours] per month, equivalent to [Y days] per year. Part-time employees accrue vacation time on a pro-rata basis, calculated according to their scheduled work hours.\n\nVacation time will begin to accrue from the first day of employment, but employees are eligible to take vacation time only after completing their probationary period. Unused vacation time will be carried over to the next year, up to a maximum of [Z days]. Any additional unused vacation time will be forfeited.\nVacation Scheduling\n\nEmployees are required to submit vacation requests to their supervisor at least [A weeks] in advance, specifying the start and end dates of their vacation. Supervisors will review and approve vacation requests based on business needs, ensuring adequate coverage during the employee's absence.\n\nEmployees are encouraged to plan their vacations around the company's peak and non-peak periods to minimize disruptions. Vacation requests during peak periods may be subject to limitations and require additional advance notice.\nVacation Pay\n\nEmployees will receive their regular pay during their approved vacation time. Vacation pay will be calculated based on the employee's average earnings over the [B weeks] preceding their vacation.\nUnplanned Absences and Vacation Time\n\nIn the event of an unplanned absence due to illness or personal emergencies, employees may use their accrued vacation time, subject to supervisor approval. 
Employees must inform their supervisor as soon as possible and provide any required documentation upon their return to work.\nVacation Time and Termination of Employment\n\nIf an employee's employment is terminated, they will be paid out for any unused vacation time, calculated based on their current rate of pay.\nPolicy Review and Updates\n\nThis vacation policy will be reviewed periodically and updated as necessary, taking into account changes in labor laws, business needs, and employee feedback.\nQuestions and Concerns\n\nEmployees are encouraged to direct any questions or concerns about this policy to their supervisor or the HR department.\n", - "summary": ": This policy outlines the guidelines and procedures for requesting and taking time off from work for personal and leisure purposes. Full-time employees accrue vacation time at a rate of [X hours] per month, equivalent to [Y days] per year. Vacation requests must be submitted to supervisors at least", - "name": "Company Vacation Policy", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/ES6rw9bKZxVBobG1WUoJpikBF9Bhx1pw_GvJWbsg-Z_HNA?e=faSHVt", - "created_on": "2018-04-15", - "updated_at": "2018-04-16", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - - { - "content": "This career leveling matrix provides a framework for understanding the various roles and responsibilities of Software Engineers, as well as the skills and experience required for each level. This matrix is intended to support employee development, facilitate performance evaluations, and provide a clear career progression path.\nJunior Software Engineer\n\nResponsibilities:\nCollaborate with team members to design, develop, and maintain software applications and components.\nWrite clean, well-structured, and efficient code following established coding standards.\nParticipate in code reviews, providing and receiving constructive feedback.\nTroubleshoot and resolve software defects and issues.\nAssist with the creation of technical documentation.\nContinuously learn and stay up-to-date with new technologies and best practices.\n\nSkills & Experience:\nBachelor\u2019s degree in Computer Science or a related field, or equivalent work experience.\nBasic understanding of software development principles and methodologies.\nProficiency in at least one programming language.\nStrong problem-solving and analytical skills.\nEffective communication and collaboration skills.\nEagerness to learn and grow within the field.\nSenior Software Engineer\n\nResponsibilities:\nDesign, develop, and maintain complex software applications and components.\nLead and mentor junior team members in software development best practices and techniques.\nConduct code reviews and ensure adherence to coding standards and best practices.\nCollaborate with cross-functional teams to define, design, and deliver software solutions.\nIdentify, troubleshoot, and resolve complex software defects and issues.\nContribute to the creation and maintenance of technical documentation.\nEvaluate and recommend new technologies, tools, and practices to improve software quality and efficiency.\n\nSkills & Experience:\nBachelor\u2019s degree in Computer Science or a related field, or equivalent work experience.\n5+ years of software development experience.\nProficiency in multiple programming languages and technologies.\nDemonstrated ability to design and implement complex software solutions.\nStrong leadership, mentoring, and collaboration 
skills.\nExcellent problem-solving, analytical, and communication skills.\nPrincipal Software Engineer\n\nResponsibilities:\nLead the design, development, and maintenance of large-scale, mission-critical software applications and components.\nProvide technical leadership and mentorship to software engineering teams.\nDrive the adoption of advanced software development practices and technologies.\nCollaborate with product management, architecture, and other stakeholders to define and deliver strategic software initiatives.\nIdentify, troubleshoot, and resolve the most complex software defects and issues.\nCreate and maintain technical documentation, including architectural designs and best practice guidelines.\nRepresent [Company Name] as a thought leader in the software engineering community, including speaking at conferences, publishing articles, and contributing to open-source projects.\n\nSkills & Experience:\nBachelor\u2019s degree in Computer Science or a related field, or equivalent work experience.\n10+ years of software development experience, with a focus on large-scale, mission-critical applications.\nExpertise in multiple programming languages, technologies, and software development methodologies.\nProven ability to lead and mentor high-performing software engineering teams.\nExceptional problem-solving, analytical, and communication skills.\nStrong business acumen and ability to influence decision-making at the executive level.\n\nBy following this career leveling matrix, we aim to support the growth and development of Software Engineers, enabling them to reach their full potential and contribute meaningfully to the success of the organization.\n", - "summary": "\nThis career leveling matrix provides a framework for understanding the various roles and responsibilities of Software Engineers, as well as the skills and experience required for each level. It is intended to support employee development, facilitate performance evaluations, and provide a clear career progression path.", - "name": "Swe Career Matrix", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EVYuEyRhHh5Aqc3a39sqbGcBkqKIHRWtJBjjUjNs6snpMg?e=nv1mf4", - "created_on": "2018-04-15", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Title: Working with the Sales Team as an Engineer in a Tech Company\n\nIntroduction:\nAs an engineer in a tech company, collaboration with the sales team is essential to ensure the success of the company's products and services. This guidance document aims to provide an overview of how engineers can effectively work with the sales team, fostering a positive and productive working environment.\nUnderstanding the Sales Team's Role:\nThe sales team is responsible for promoting and selling the company's products and services to potential clients. Their role involves establishing relationships with customers, understanding their needs, and ensuring that the offered solutions align with their requirements.\n\nAs an engineer, it is important to understand the sales team's goals and objectives, as this will help you to provide them with the necessary information, tools, and support to successfully sell your company's products and services.\nCommunication:\nEffective communication is key to successfully working with the sales team. Make sure to maintain open lines of communication, and be responsive to their questions and concerns. This includes:\n\na. 
Attending sales meetings and conference calls when required.\nb. Providing regular product updates and training sessions to the sales team.\nc. Being available to answer technical questions and clarifications.\nCollaboration:\nCollaborate with the sales team in developing and refining sales materials, such as product presentations, demos, and technical documents. This will ensure that the sales team has accurate and up-to-date information to present to clients.\n\nAdditionally, work closely with the sales team on customer projects or product customizations, providing technical guidance, and ensuring that the solutions meet the customer's requirements.\nCustomer Engagement:\nAt times, engineers may be asked to join sales meetings or calls with potential clients to provide technical expertise. In these situations, it is important to:\n\na. Be prepared and understand the customer's needs and pain points.\nb. Clearly explain the technical aspects of the product or solution in a simple language that the customer can understand.\nc. Address any concerns or questions the customer may have.\nContinuous Improvement:\nActively seek feedback from the sales team regarding product performance, customer experiences, and market trends. Use this feedback to identify areas of improvement and collaborate with other engineers to enhance the product or service offerings.\nMutual Respect and Support:\nIt is essential to treat your colleagues in the sales team with respect and professionalism. Recognize and appreciate their efforts in promoting and selling the company's products and services. In turn, the sales team should also respect and appreciate the technical expertise and knowledge of the engineering team.\n\nBy working together, both the engineering and sales teams can contribute to the overall success of the company.\n\nConclusion:\nCollaboration between engineers and the sales team is crucial for a tech company's success. By understanding each other's roles, maintaining effective communication, collaborating on projects, and supporting one another, both teams can work together to achieve the company's goals and ensure customer satisfaction.\n", - "summary": ": This guide provides an overview of how engineers can effectively collaborate with the sales team to ensure the success of a tech company. It includes understanding the sales team's role, communicating and collaborating on projects, engaging customers, and providing mutual respect and support.", - "name": "Sales Engineering Collaboration", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EW21-KJnfHBFoRiF49_uJMcBfHyPKimuPOFsCcJypQWaBQ?e=mGdIqe", - "created_on": "2019-04-15", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Purpose\nThe purpose of this Intellectual Property Policy is to establish guidelines and procedures for the ownership, protection, and utilization of intellectual property generated by employees during their employment. This policy aims to encourage creativity and innovation while ensuring that the interests of both the company and its employees are protected.\n\nScope\nThis policy applies to all employees, including full-time, part-time, temporary, and contract employees.\n\nDefinitions\na. Intellectual Property (IP): Refers to creations of the mind, such as inventions, literary and artistic works, designs, symbols, and images, that are protected by copyright, trademark, patent, or other forms of legal protection.\nb. 
Company Time: Refers to the time during which an employee is actively engaged in performing their job duties.\nc. Outside Company Time: Refers to the time during which an employee is not engaged in performing their job duties.\n\nOwnership of Intellectual Property\na. Work Generated on Company Time\ni. Any intellectual property created, conceived, or developed by an employee during company time or using company resources, equipment, or facilities shall be considered the property of the Company.\nii. Employees are required to promptly disclose any such intellectual property to their supervisor or the appropriate department head.\nb. Work Generated Outside Company Time\ni. Intellectual property created, conceived, or developed by an employee outside of company time and without the use of company resources, equipment, or facilities shall generally remain the property of the employee.\nii. However, if the intellectual property is directly related to the employee's job responsibilities, or if the employee has used company resources, equipment, or facilities in its creation, it may be considered the property of the Company.\nProtection and Utilization of Intellectual Property\na. The Company shall have the right to protect, license, and commercialize any intellectual property owned by the company as it deems appropriate.\nb. Employees are expected to cooperate with the Company in obtaining any necessary legal protection for intellectual property owned by the company, including by signing any documents or providing any necessary information or assistance.\nConfidentiality\nEmployees are expected to maintain the confidentiality of any intellectual property owned by the Company and not disclose it to any third parties without the express written consent of an authorized representative of the company.\nEmployee Acknowledgment\nAll employees are required to sign an acknowledgment of this Intellectual Property Policy as a condition of their employment with [Company Name]. By signing the acknowledgment, employees agree to abide by the terms of this policy and understand that any violations may result in disciplinary action, up to and including termination of employment.\nPolicy Review\nThis Intellectual Property Policy shall be reviewed periodically and may be amended as necessary to ensure its continued effectiveness and compliance with applicable laws and regulations. Employees will be notified of any significant changes to this policy.\n", - "summary": "This Intellectual Property Policy outlines guidelines and procedures for the ownership, protection, and utilization of intellectual property generated by employees during their employment. It establishes the company's ownership of work generated on company time, while recognizing employee ownership of work generated outside of company time without the use of company resources. The policy", - "name": "Intellectual Property Policy", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EWz3cYEVdzBNsiHsYbKhms4BVYGhravyrUw3T3lzxL4pTg?e=mPIgbO", - "created_on": "2021-06-15", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - - { - "content": "Code of Conduct\nPurpose\n\nThe purpose of this code of conduct is to establish guidelines for professional and ethical behavior in the workplace. 
It outlines the principles and values that all employees are expected to uphold in their interactions with colleagues, customers, partners, and other stakeholders.\nScope\n\nThis code of conduct applies to all employees, contractors, and volunteers within the organization, regardless of their role or seniority.\nCore Values\n\nEmployees are expected to adhere to the following core values:\n\na. Integrity: Act honestly, ethically, and in the best interests of the organization at all times.\nb. Respect: Treat all individuals with dignity, courtesy, and fairness, regardless of their background, beliefs, or position.\nc. Accountability: Take responsibility for one's actions and decisions, and be willing to learn from mistakes.\nd. Collaboration: Work cooperatively with colleagues and partners to achieve shared goals and promote a positive work environment.\ne. Excellence: Strive for the highest standards of performance and continuously seek opportunities for improvement.\nCompliance with Laws and Regulations\n\nEmployees must comply with all applicable laws, regulations, and organizational policies in the course of their work. This includes, but is not limited to, employment laws, data protection regulations, and industry-specific guidelines.\nConflicts of Interest\n\nEmployees should avoid situations where their personal interests may conflict with or influence their professional judgment. If a potential conflict of interest arises, employees must disclose it to their supervisor or the appropriate authority within the organization.\nConfidentiality and Information Security\n\nEmployees are responsible for safeguarding the organization's confidential information, as well as any sensitive information entrusted to them by clients, partners, or other third parties. This includes adhering to data protection policies and using secure communication channels.\nHarassment and Discrimination\n\nThe organization is committed to providing a workplace free from harassment, discrimination, and bullying. Employees are expected to treat others with respect and report any incidents of inappropriate behavior to their supervisor or the human resources department.\nHealth and Safety\n\nEmployees must follow all health and safety guidelines and procedures to maintain a safe and healthy work environment. This includes reporting any hazards or unsafe conditions to the appropriate personnel.\nUse of Company Resources\n\nEmployees are expected to use company resources, including time, equipment, and funds, responsibly and for their intended purposes. Misuse or theft of company resources is strictly prohibited.\nReporting Violations\n\nEmployees have a responsibility to report any suspected violations of this code of conduct, as well as any illegal or unethical behavior, to their supervisor or the appropriate authority within the organization. The organization will protect the confidentiality of employees who report violations and will not tolerate retaliation against those who raise concerns.\nConsequences of Non-Compliance\n\nFailure to adhere to this code of conduct may result in disciplinary action, up to and including termination of employment. 
The organization reserves the right to take legal action against individuals who engage in illegal or unethical conduct.\nPolicy Review and Updates\n\nThis code of conduct will be reviewed periodically and updated as necessary to ensure it remains relevant and effective in promoting ethical behavior and professional standards within the organization.\nQuestions and Concerns\n\nEmployees are encouraged to seek guidance from their supervisor or the human resources department if they have questions or concerns about this code of conduct or its application to specific situations.\n", - "summary": "This code of conduct outlines the principles and values that all employees are expected to uphold in their interactions with colleagues, customers, partners, and other stakeholders. It sets out core values such as integrity, respect, accountability, collaboration and excellence. Employees must comply with all applicable laws, regulations, and organizational", - "name": "Code Of Conduct", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/ER3xmeKaZ_pAqPeJWyyNR0QBg6QmoWIGPhwfEyCABWHrPA?e=cvzrgV", - "created_on": "2018-01-12", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Content:\nThe purpose of this office pet policy is to outline the guidelines and procedures for bringing pets into the workplace. This policy aims to create a positive and inclusive work environment while ensuring the comfort, safety, and well-being of all employees, visitors, and pets.\nScope\n\nThis policy applies to all employees who wish to bring their pets to the office. Pets covered under this policy include dogs, cats, and other small, non-exotic animals, subject to approval by the HR department.\nPet Approval Process\n\nEmployees must obtain prior approval from their supervisor and the HR department before bringing their pets to the office. The approval process includes:\n\na. Submitting a written request, including a description of the pet, its breed, age, and temperament.\nb. Providing proof of up-to-date vaccinations and any required licenses or permits.\nc. Obtaining written consent from all employees who share the workspace with the pet owner.\n\nThe HR department reserves the right to deny or revoke pet approval based on the specific circumstances or concerns raised by other employees.\nPet Behavior and Supervision\n\nEmployees are responsible for the behavior and well-being of their pets while in the office. Pets must be:\n\na. Well-behaved, non-aggressive, and not disruptive to the work environment.\nb. House-trained and able to eliminate waste in designated areas outside the office.\nc. Kept on a leash or in a secure enclosure when not in the employee's immediate work area.\n\nEmployees must closely supervise their pets and promptly address any issues or concerns raised by other staff members.\nAllergies and Phobias\n\nEmployees with allergies or phobias related to pets must inform the HR department, which will work with the affected employees and pet owners to find a suitable solution. This may include adjusting workspaces, limiting the number or types of pets allowed, or implementing additional safety measures.\nCleanliness and Hygiene\n\nEmployees are responsible for maintaining a clean and hygienic work environment. This includes:\n\na. Cleaning up after their pets, both indoors and outdoors.\nb. Regularly grooming their pets to minimize shedding and odors.\nc. 
Ensuring their pets are free of pests, such as fleas and ticks.\nLiability\n\nPet owners are liable for any damage or injury caused by their pets. Employees are encouraged to obtain pet liability insurance to cover potential incidents.\nRestricted Areas\n\nPets are not allowed in certain areas of the office, including meeting rooms, restrooms, kitchen and dining areas, and any other designated spaces. Signage will be posted to indicate these restricted areas.\nPolicy Review and Updates\n\nThis office pet policy will be reviewed periodically and updated as necessary, taking into account employee feedback, changes in legislation, and best practices for maintaining a safe and inclusive work environment.\nQuestions and Concerns\n\nEmployees are encouraged to direct any questions or concerns about this policy to their supervisor or the HR department.\n", - "summary": "This policy outlines the guidelines and procedures for bringing pets into the workplace. It covers approval process, pet behavior and supervision, allergies and phobias, cleanliness and hygiene, liability, restricted areas, and policy review. Employees must obtain prior approval from their supervisor and the HR department before bringing their", - "name": "Office Pet Policy", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/ETf-69wBeaZJpAn3CY7ExRABQWvav-p24VOnB6C0A4l2pQ?e=X72WuK", - "created_on": "2018-01-12", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Performance Management Policy\nPurpose and Scope\nThe purpose of this Performance Management Policy is to establish a consistent and transparent process for evaluating, recognizing, and rewarding employee performance. This policy applies to all employees and aims to foster a culture of continuous improvement, professional growth, and open communication between employees and management.\nPerformance Planning and Goal Setting\nAt the beginning of each performance cycle, employees and their supervisors will collaborate to set clear, achievable, and measurable performance goals. These goals should align with the company\u2019s strategic objectives and take into account the employee\u2019s job responsibilities, professional development, and career aspirations.\nOngoing Feedback and Communication\nThroughout the performance cycle, employees and supervisors are encouraged to engage in regular, constructive feedback and open communication. This includes discussing progress towards goals, addressing challenges, and identifying opportunities for improvement or additional support. Regular check-ins and updates help ensure that employees stay on track and receive the guidance they need to succeed.\nPerformance Evaluation\nAt the end of each performance cycle, employees will participate in a formal performance evaluation with their supervisor. This evaluation will assess the employee\u2019s overall performance, including their achievements, areas for improvement, and progress towards goals. Both the employee and supervisor should come prepared to discuss specific examples, accomplishments, and challenges from the performance period.\nPerformance Ratings\nBased on the performance evaluation, employees will receive a performance rating that reflects their overall performance during the cycle. The rating system should be clearly defined and consistently applied across the organization. 
Performance ratings will be used to inform decisions regarding promotions, salary increases, and other rewards or recognition.\nPromotions and Advancements\nHigh-performing employees who consistently demonstrate strong performance, leadership, and a commitment to the company\u2019s values may be considered for promotions or other advancement opportunities. Promotions will be based on factors such as performance ratings, skills, experience, and the needs of the organization. Employees interested in pursuing a promotion should discuss their career goals and development plans with their supervisor.\nPerformance Improvement Plans\nEmployees who receive a low performance rating or are struggling to meet their performance goals may be placed on a Performance Improvement Plan (PIP). A PIP is a structured plan designed to help the employee address specific areas of concern, set achievable improvement goals, and receive additional support or resources as needed. Employees on a PIP will be closely monitored and re-evaluated at the end of the improvement period to determine if satisfactory progress has been made.\nRecognition and Rewards\nOur company believes in recognizing and rewarding employees for their hard work and dedication. In addition to promotions and salary increases, employees may be eligible for other forms of recognition or rewards based on their performance. This may include bonuses, awards, or other incentives designed to motivate and celebrate employee achievements. The specific criteria and eligibility for these rewards will be communicated by the HR department or management.\n", - "summary": "This Performance Management Policy outlines a consistent and transparent process for evaluating, recognizing, and rewarding employees. It includes goal setting, ongoing feedback, performance evaluations, ratings, promotions, and rewards. The policy applies to all employees and encourages open communication and professional growth.", - "name": "Performance Management Policy", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/ERsxt9p1uehJqeJu4JlxkakBavbKwcldrYv_hpv3xHikAw?e=pf5R2C", - "created_on": "2018-01-12", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - - { - "content": "Our sales organization is structured to effectively serve our customers and achieve our business objectives across multiple regions. The organization is divided into the following main regions:\n\nThe Americas: This region includes the United States, Canada, Mexico, as well as Central and South America. The North America South America region (NASA) has two Area Vice-Presidents: Laura Martinez is the Area Vice-President of North America, and Gary Johnson is the Area Vice-President of South America.\n\nEurope: Our European sales team covers the entire continent, including the United Kingdom, Germany, France, Spain, Italy, and other countries. The team is responsible for understanding the unique market dynamics and cultural nuances, enabling them to effectively target and engage with customers across the region. The Area Vice-President for Europe is Rajesh Patel.\nAsia-Pacific: This region encompasses countries such as China, Japan, South Korea, India, Australia, and New Zealand. Our sales team in the Asia-Pacific region works diligently to capitalize on growth opportunities and address the diverse needs of customers in this vast and rapidly evolving market. 
The Area Vice-President for Asia-Pacific is Mei Li.\nMiddle East & Africa: This region comprises countries across the Middle East and Africa, such as the United Arab Emirates, Saudi Arabia, South Africa, and Nigeria. Our sales team in this region is responsible for navigating the unique market challenges and identifying opportunities to expand our presence and better serve our customers. The Area Vice-President for Middle East & Africa is Jamal Abdi.\n\nEach regional sales team consists of dedicated account managers, sales representatives, and support staff, led by their respective Area Vice-Presidents. They are responsible for identifying and pursuing new business opportunities, nurturing existing client relationships, and ensuring customer satisfaction. The teams collaborate closely with other departments, such as marketing, product development, and customer support, to ensure we consistently deliver high-quality products and services to our clients.\n", - "summary": "\nOur sales organization is divided into four regions: The Americas, Europe, Asia-Pacific, and Middle East & Africa. Each region is led by an Area Vice-President and consists of dedicated account managers, sales representatives, and support staff. They collaborate with other departments to ensure the delivery of high", - "name": "Sales Organization Overview", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EYsr1eqgn9hMslMJFLR-k54BBX-O3iC26bK7xNEBtYIBkg?e=xeAjiT", - "created_on": "2018-01-15", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Introduction:\nThis document outlines the compensation bands strategy for the various teams within our IT company. The goal is to establish a fair and competitive compensation structure that aligns with industry standards, rewards performance, and attracts top talent. By implementing this strategy, we aim to foster employee satisfaction and retention while ensuring the company's overall success.\n\nPurpose:\nThe purpose of this compensation bands strategy is to:\na. Define clear guidelines for salary ranges based on job levels and market benchmarks.\nb. Support equitable compensation practices across different teams.\nc. Encourage employee growth and performance.\nd. Enable effective budgeting and resource allocation.\n\nJob Levels:\nTo establish a comprehensive compensation structure, we have defined distinct job levels within each team. These levels reflect varying degrees of skills, experience, and responsibilities. The levels include:\na. Entry-Level: Employees with limited experience or early career professionals.\nb. Intermediate-Level: Employees with moderate experience and demonstrated competence.\nc. Senior-Level: Experienced employees with advanced skills and leadership capabilities.\nd. Leadership-Level: Managers and team leaders responsible for strategic decision-making.\n\nCompensation Bands:\nBased on the job levels, the following compensation bands have been established:\na. Entry-Level Band: This band encompasses salary ranges for employees in entry-level positions. It aims to provide competitive compensation for individuals starting their careers within the company.\n\nb. Intermediate-Level Band: This band covers salary ranges for employees who have gained moderate experience and expertise in their respective roles. It rewards employees for their growing skill set and contributions.\n\nc. 
Senior-Level Band: The senior-level band includes salary ranges for experienced employees who have attained advanced skills and have a proven track record of delivering results. It reflects the increased responsibilities and expectations placed upon these individuals.\n\nd. Leadership-Level Band: This band comprises salary ranges for managers and team leaders responsible for guiding and overseeing their respective teams. It considers their leadership abilities, strategic thinking, and the impact they have on the company's success.\n\nMarket Benchmarking:\nTo ensure our compensation remains competitive, regular market benchmarking will be conducted. This involves analyzing industry salary trends, regional compensation data, and market demand for specific roles. The findings will inform periodic adjustments to our compensation bands to maintain alignment with the market.\n\nPerformance-Based Compensation:\nIn addition to the defined compensation bands, we emphasize a performance-based compensation model. Performance evaluations will be conducted regularly, and employees exceeding performance expectations will be eligible for bonuses, incentives, and salary increases. This approach rewards high achievers and motivates employees to excel in their roles.\n\nConclusion:\nBy implementing this compensation bands strategy, our IT company aims to establish fair and competitive compensation practices that align with market standards and foster employee satisfaction. Regular evaluations and market benchmarking will enable us to adapt and refine the strategy to meet the evolving needs of our organization.", - "summary": "This document outlines a compensation framework for IT teams. It includes job levels, compensation bands, and performance-based incentives to ensure fair and competitive wages. Regular market benchmarking will be conducted to adjust the bands according to industry trends.", - "name": "Compensation Framework For It Teams", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EaAFec6004tAg21g4i67rfgBBRqCm1yY7AZLLQyyaMtsEQ?e=wTMb4z", - "created_on": "2018-01-12", - "category": "sharepoint", - "restricted": true, - "_run_ml_inference": true, - "rolePermissions": ["manager"] - }, - { - "content": "As an employee in Canada, it's essential to understand how to update your tax elections forms to ensure accurate tax deductions from your pay. This guide will help you navigate the process of updating your TD1 Personal Tax Credits Return form.\n\nStep 1: Access the TD1 form\nThe TD1 form is available on the Canada Revenue Agency (CRA) website. Your employer might provide you with a paper copy or a link to the online form. You can access the form directly through the following link: https://www.canada.ca/en/revenue-agency/services/forms-publications/td1-personal-tax-credits-returns.html\n\nStep 2: Choose the correct form version\nYou'll need to fill out the federal TD1 form and, if applicable, the provincial or territorial TD1 form. Select the appropriate version based on your province or territory of residence.\n\nStep 3: Download and open the form\nFor the best experience, download and open the TD1 form in Adobe Reader. If you have visual impairments, consider using the large print version available on the CRA website.\n\nStep 4: Complete the form\nFill out the form by entering your personal information, such as your name, Social Insurance Number (SIN), and address. Then, go through each section to claim any personal tax credits that apply to you. 
These credits may include:\nBasic personal amount\nAmount for an eligible dependant\nAmount for infirm dependants age 18 or older\nCaregiver amount\nDisability amount\nTuition and education amounts\n\nRead the instructions carefully for each section to ensure you claim the correct amounts.\n\nStep 5: Sign and date the form\nOnce you've completed the form, sign and date it at the bottom.\n\nStep 6: Submit the form to your employer\nSubmit the completed and signed TD1 form to your employer. You can either scan and send it electronically, or provide a printed copy. Your employer will use the information on your TD1 form to calculate the correct amount of tax to be deducted from your pay.\n\nStep 7: Update your TD1 form as needed\nIt's essential to update your TD1 form whenever your personal circumstances change, such as getting married, having a child, or becoming eligible for a new tax credit. Inform your employer of these changes and submit an updated TD1 form to ensure accurate tax deductions.\n\nUpdating your tax elections forms is a crucial step in ensuring the correct tax deductions from your pay as a new employee in Canada. Follow this guide and keep your TD1 form up to date to avoid any discrepancies in your tax filings.\n", - "summary": ": This guide gives a step-by-step explanation of how to update your TD1 Personal Tax Credits Return form. Access the form from the CRA website and choose the correct version based on your province or territory of residence. Download and open the form in Adobe Reader, fill out the form by entering", - "name": "Updating Your Tax Elections Forms", - "url": "./github/Updating Your Tax Elections Forms.txt", - "created_on": "2022-12-20", - "category": "github", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Welcome to our team! We are excited to have you on board and look forward to your valuable contributions. This onboarding guide is designed to help you get started by providing essential information about our policies, procedures, and resources. Please read through this guide carefully and reach out to the HR department if you have any questions.\nIntroduction to Our Company Culture and Values\nOur company is committed to creating a diverse, inclusive, and supportive work environment. We believe that our employees are our most valuable asset and strive to foster a culture of collaboration, innovation, and continuous learning. Our core values include:\nIntegrity: We act ethically and honestly in all our interactions.\nTeamwork: We work together to achieve common goals and support each other's growth.\nExcellence: We strive for the highest quality in our products, services, and relationships.\nInnovation: We encourage creativity and embrace change to stay ahead in the market.\nRespect: We treat each other with dignity and value the unique perspectives of all our colleagues.\nKey Onboarding Steps\nTo ensure a smooth onboarding process, please complete the following steps within your first week:\nAttend orientation: You will be invited to an orientation session to meet your colleagues and learn more about our company's history, mission, and values.\nReview policies and procedures: Familiarize yourself with our employee handbook, which contains important information about our policies and procedures. Please read it thoroughly and adhere to the guidelines.\nComplete required training: You may be required to complete mandatory training sessions, such as safety training or anti-harassment training. 
Ensure that you attend and complete these sessions as soon as possible.\nUpdating Tax Elections and Documents\nIt is crucial to ensure your tax information is accurate and up-to-date, regardless of the country you work in. Please follow these steps to update your tax elections and documents:\nComplete tax forms: Fill out the necessary tax forms for your country or region, which determine the amount of income tax withheld from your paycheck. You should complete new tax forms if your personal or financial situation changes, such as marriage, divorce, or a change in the number of dependents.\nSubmit regional tax forms: Depending on your location, you may be required to complete additional regional or local tax forms. Check with the HR department to determine which forms are necessary.\nUpdate your address: If you move, make sure to update your address with the HR department to ensure accurate tax reporting.\nBenefits Enrollment\nAs a new employee, you are eligible for various benefits, including health insurance, retirement plans, and paid time off. You will receive detailed information about our benefits package during orientation. To enroll in the benefits, please follow these steps:\nReview benefits options: Carefully review the benefits package and choose the options that best meet your needs.\nComplete enrollment forms: Fill out the necessary forms to enroll in your chosen benefits. Submit these forms to the HR department within 30 days of your start date.\nDesignate beneficiaries: If applicable, designate beneficiaries for your life insurance and retirement plans.\nGetting Settled in Your Workspace\nTo help you feel comfortable and productive in your new workspace, take the following steps:\nSet up your workstation: Organize your desk, chair, and computer according to your preferences. If you require any additional equipment or accommodations, please contact the HR department.\nObtain necessary supplies: Request any necessary office supplies, such as pens, notepads, or folders, from the designated supply area or by contacting the appropriate department.\nFamiliarize yourself with office resources: Locate common areas, such as break rooms, restrooms, and meeting rooms. 
Familiarize yourself with office equipment, including printers, scanners, and telephones.\n", - "summary": "\nThis onboarding guide provides essential information to new employees on our company culture and values, key onboarding steps, tax elections and documents, benefits enrollment, and setting up their workspace.", - "name": "New Employee Onboarding Guide", - "url": "./github/New Employee Onboarding guide.txt", - "created_on": "2018-01-12", - "category": "github", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - } -] diff --git a/templates/rag-elasticsearch/ingest.py b/templates/rag-elasticsearch/ingest.py deleted file mode 100644 index 9a570951495..00000000000 --- a/templates/rag-elasticsearch/ingest.py +++ /dev/null @@ -1,53 +0,0 @@ -import os - -from langchain_community.document_loaders import JSONLoader -from langchain_community.embeddings import HuggingFaceEmbeddings -from langchain_elasticsearch import ElasticsearchStore -from langchain_text_splitters import RecursiveCharacterTextSplitter - -ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID") -ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic") -ELASTIC_PASSWORD = os.getenv("ELASTIC_PASSWORD") -ES_URL = os.getenv("ES_URL", "http://localhost:9200") - -if ELASTIC_CLOUD_ID and ELASTIC_USERNAME and ELASTIC_PASSWORD: - es_connection_details = { - "es_cloud_id": ELASTIC_CLOUD_ID, - "es_user": ELASTIC_USERNAME, - "es_password": ELASTIC_PASSWORD, - } -else: - es_connection_details = {"es_url": ES_URL} - - -# Metadata extraction function -def metadata_func(record: dict, metadata: dict) -> dict: - metadata["name"] = record.get("name") - metadata["summary"] = record.get("summary") - metadata["url"] = record.get("url") - metadata["category"] = record.get("category") - metadata["updated_at"] = record.get("updated_at") - - return metadata - - -## Load Data -loader = JSONLoader( - file_path="./data/documents.json", - jq_schema=".[]", - content_key="content", - metadata_func=metadata_func, -) - -text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=250) -all_splits = text_splitter.split_documents(loader.load()) - -# Add to vectorDB -vectorstore = ElasticsearchStore.from_documents( - documents=all_splits, - embedding=HuggingFaceEmbeddings( - model_name="all-MiniLM-L6-v2", model_kwargs={"device": "cpu"} - ), - **es_connection_details, - index_name="workplace-search-example", -) diff --git a/templates/rag-elasticsearch/main.py b/templates/rag-elasticsearch/main.py deleted file mode 100644 index 4034ab08f26..00000000000 --- a/templates/rag-elasticsearch/main.py +++ /dev/null @@ -1,33 +0,0 @@ -from rag_elasticsearch import chain - -if __name__ == "__main__": - questions = [ - "What is the nasa sales team?", - "What is our work from home policy?", - "Does the company own my personal project?", - "How does compensation work?", - ] - - response = chain.invoke( - { - "question": questions[0], - "chat_history": [], - } - ) - print(response) - - follow_up_question = "What are their objectives?" - - response = chain.invoke( - { - "question": follow_up_question, - "chat_history": [ - "What is the nasa sales team?", - "The sales team of NASA consists of Laura Martinez, the Area " - "Vice-President of North America, and Gary Johnson, the Area " - "Vice-President of South America. 
(Sales Organization Overview)", - ], - } - ) - - print(response) diff --git a/templates/rag-elasticsearch/pyproject.toml b/templates/rag-elasticsearch/pyproject.toml deleted file mode 100644 index 306d224c9f2..00000000000 --- a/templates/rag-elasticsearch/pyproject.toml +++ /dev/null @@ -1,37 +0,0 @@ -[tool.poetry] -name = "rag-elasticsearch" -version = "0.0.1" -description = "RAG using Elasticsearch" -authors = [ - "Joe McElroy ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.9,<3.13" -langchain = "^0.1" -langchain-elasticsearch = "^0.1.0" -openai = "<2" -sentence-transformers = "^2.2.2" -jq = "^1.6.0" -tiktoken = "^0.5.1" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_elasticsearch" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Elastic" -integrations = ["OpenAI", "Elasticsearch"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-elasticsearch/rag_elasticsearch/__init__.py b/templates/rag-elasticsearch/rag_elasticsearch/__init__.py deleted file mode 100644 index a30a18d8eb1..00000000000 --- a/templates/rag-elasticsearch/rag_elasticsearch/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_elasticsearch.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-elasticsearch/rag_elasticsearch/chain.py b/templates/rag-elasticsearch/rag_elasticsearch/chain.py deleted file mode 100644 index abbb79796e0..00000000000 --- a/templates/rag-elasticsearch/rag_elasticsearch/chain.py +++ /dev/null @@ -1,69 +0,0 @@ -from operator import itemgetter -from typing import List, Optional, Tuple - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import HuggingFaceEmbeddings -from langchain_core.messages import BaseMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import format_document -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_elasticsearch import ElasticsearchStore - -from .connection import es_connection_details -from .prompts import CONDENSE_QUESTION_PROMPT, DOCUMENT_PROMPT, LLM_CONTEXT_PROMPT - -# Setup connecting to Elasticsearch -vectorstore = ElasticsearchStore( - **es_connection_details, - embedding=HuggingFaceEmbeddings( - model_name="all-MiniLM-L6-v2", model_kwargs={"device": "cpu"} - ), - index_name="workplace-search-example", -) -retriever = vectorstore.as_retriever() - -# Set up LLM to user -llm = ChatOpenAI(temperature=0) - - -def _combine_documents( - docs, document_prompt=DOCUMENT_PROMPT, document_separator="\n\n" -): - doc_strings = [format_document(doc, document_prompt) for doc in docs] - return document_separator.join(doc_strings) - - -def _format_chat_history(chat_history: List[Tuple]) -> str: - buffer = "" - for dialogue_turn in chat_history: - human = "Human: " + dialogue_turn[0] - ai = "Assistant: " + dialogue_turn[1] - buffer += "\n" + "\n".join([human, ai]) - return buffer - - -class ChainInput(BaseModel): - chat_history: Optional[List[BaseMessage]] = Field( - description="Previous chat messages." 
- ) - question: str = Field(..., description="The question to answer.") - - -_inputs = RunnableParallel( - standalone_question=RunnablePassthrough.assign( - chat_history=lambda x: _format_chat_history(x["chat_history"]) - ) - | CONDENSE_QUESTION_PROMPT - | llm - | StrOutputParser(), -) - -_context = { - "context": itemgetter("standalone_question") | retriever | _combine_documents, - "question": lambda x: x["standalone_question"], -} - -chain = _inputs | _context | LLM_CONTEXT_PROMPT | llm | StrOutputParser() - -chain = chain.with_types(input_type=ChainInput) diff --git a/templates/rag-elasticsearch/rag_elasticsearch/connection.py b/templates/rag-elasticsearch/rag_elasticsearch/connection.py deleted file mode 100644 index 2e2ebd18148..00000000000 --- a/templates/rag-elasticsearch/rag_elasticsearch/connection.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID") -ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic") -ELASTIC_PASSWORD = os.getenv("ELASTIC_PASSWORD") -ES_URL = os.getenv("ES_URL", "http://localhost:9200") - -if ELASTIC_CLOUD_ID and ELASTIC_USERNAME and ELASTIC_PASSWORD: - es_connection_details = { - "es_cloud_id": ELASTIC_CLOUD_ID, - "es_user": ELASTIC_USERNAME, - "es_password": ELASTIC_PASSWORD, - } -else: - es_connection_details = {"es_url": ES_URL} diff --git a/templates/rag-elasticsearch/rag_elasticsearch/prompts.py b/templates/rag-elasticsearch/rag_elasticsearch/prompts.py deleted file mode 100644 index af45209dd72..00000000000 --- a/templates/rag-elasticsearch/rag_elasticsearch/prompts.py +++ /dev/null @@ -1,39 +0,0 @@ -from langchain_core.prompts import ChatPromptTemplate, PromptTemplate - -# Used to condense a question and chat history into a single question -condense_question_prompt_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. If there is no chat history, just rephrase the question to be a standalone question. - -Chat History: -{chat_history} -Follow Up Input: {question} -""" # noqa: E501 -CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template( - condense_question_prompt_template -) - -# RAG Prompt to provide the context and question for LLM to answer -# We also ask the LLM to cite the source of the passage it is answering from -llm_context_prompt_template = """ -Use the following passages to answer the user's question. -Each passage has a SOURCE which is the title of the document. When answering, cite source name of the passages you are answering from below the answer in a unique bullet point list. - -If you don't know the answer, just say that you don't know, don't try to make up an answer. - ----- -{context} ----- -Question: {question} -""" # noqa: E501 - -LLM_CONTEXT_PROMPT = ChatPromptTemplate.from_template(llm_context_prompt_template) - -# Used to build a context window from passages retrieved -document_prompt_template = """ ---- -NAME: {name} -PASSAGE: -{page_content} ---- -""" - -DOCUMENT_PROMPT = PromptTemplate.from_template(document_prompt_template) diff --git a/templates/rag-fusion/README.md b/templates/rag-fusion/README.md deleted file mode 100644 index e7f8a2391c8..00000000000 --- a/templates/rag-fusion/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# RAG - Pinecone - fusion - -This template enables `RAG fusion` using a re-implementation of -the project found [here](https://github.com/Raudaschl/rag-fusion). 
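The re-ranking step is Reciprocal Rank Fusion, described just below and implemented in this template's `chain.py`. A minimal, self-contained sketch of the scoring idea — the document IDs are invented purely for illustration:

```python
# Illustrative sketch of Reciprocal Rank Fusion (RRF): a document's fused score
# is the sum of 1 / (rank + k) over every ranked list it appears in. The doc
# IDs below are made up; the template applies the same idea to retrieved
# LangChain Documents.
def reciprocal_rank_fusion(results: list[list[str]], k: int = 60) -> list[tuple[str, float]]:
    fused_scores: dict[str, float] = {}
    for docs in results:
        for rank, doc in enumerate(docs):  # docs assumed sorted by relevance
            fused_scores[doc] = fused_scores.get(doc, 0.0) + 1 / (rank + k)
    return sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)


# Two query variants ranked the same corpus differently; RRF merges the lists.
print(reciprocal_rank_fusion([["doc2", "doc1", "doc3"], ["doc1", "doc3", "doc2"]]))
```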
- -It performs multiple query generation and `Reciprocal Rank Fusion` -to re-rank search results. - -It uses the `Pinecone` vectorstore and the `OpenAI` chat and embedding models. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-fusion -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-fusion -``` - -And add the following code to your `server.py` file: -```python -from rag_fusion.chain import chain as rag_fusion_chain - -add_routes(app, rag_fusion_chain, path="/rag-fusion") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-fusion/playground](http://127.0.0.1:8000/rag-fusion/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-fusion") -``` \ No newline at end of file diff --git a/templates/rag-fusion/ingest.py b/templates/rag-fusion/ingest.py deleted file mode 100644 index 227d0382081..00000000000 --- a/templates/rag-fusion/ingest.py +++ /dev/null @@ -1,19 +0,0 @@ -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_pinecone import PineconeVectorStore - -all_documents = { - "doc1": "Climate change and economic impact.", - "doc2": "Public health concerns due to climate change.", - "doc3": "Climate change: A social perspective.", - "doc4": "Technological solutions to climate change.", - "doc5": "Policy changes needed to combat climate change.", - "doc6": "Climate change and its impact on biodiversity.", - "doc7": "Climate change: The science and models.", - "doc8": "Global warming: A subset of climate change.", - "doc9": "How climate change affects daily weather.", - "doc10": "The history of climate change activism.", -} - -PineconeVectorStore.from_texts( - list(all_documents.values()), OpenAIEmbeddings(), index_name="rag-fusion" -) diff --git a/templates/rag-fusion/main.py b/templates/rag-fusion/main.py deleted file mode 100644 index ed32889561a..00000000000 --- a/templates/rag-fusion/main.py +++ /dev/null @@ -1,5 +0,0 @@ -from rag_fusion.chain import chain - -if __name__ == "__main__": - original_query = "impact of climate change" - print(chain.invoke(original_query)) diff --git a/templates/rag-fusion/pyproject.toml b/templates/rag-fusion/pyproject.toml deleted file mode 100644 index a8cba69c4ca..00000000000 --- a/templates/rag-fusion/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "rag-fusion" -version = "0.0.1" -description = 
"RAG using RAG fusion approach" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -pinecone-client = "^2.2.4" -langchainhub = "^0.1.13" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_fusion.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Pinecone"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-fusion/rag_fusion/__init__.py b/templates/rag-fusion/rag_fusion/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-fusion/rag_fusion/chain.py b/templates/rag-fusion/rag_fusion/chain.py deleted file mode 100644 index 75ac0ed41bb..00000000000 --- a/templates/rag-fusion/rag_fusion/chain.py +++ /dev/null @@ -1,50 +0,0 @@ -from langchain import hub -from langchain.load import dumps, loads -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.pydantic_v1 import BaseModel -from langchain_pinecone import PineconeVectorStore - - -def reciprocal_rank_fusion(results: list[list], k=60): - fused_scores = {} - for docs in results: - # Assumes the docs are returned in sorted order of relevance - for rank, doc in enumerate(docs): - doc_str = dumps(doc) - if doc_str not in fused_scores: - fused_scores[doc_str] = 0 - fused_scores[doc_str] += 1 / (rank + k) - - reranked_results = [ - (loads(doc), score) - for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True) - ] - return reranked_results - - -prompt = hub.pull("langchain-ai/rag-fusion-query-generation") - -generate_queries = ( - prompt | ChatOpenAI(temperature=0) | StrOutputParser() | (lambda x: x.split("\n")) -) - -vectorstore = PineconeVectorStore.from_existing_index("rag-fusion", OpenAIEmbeddings()) -retriever = vectorstore.as_retriever() - -chain = ( - {"original_query": lambda x: x} - | generate_queries - | retriever.map() - | reciprocal_rank_fusion -) - -# Add typed inputs to chain for playground - - -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-fusion/tests/__init__.py b/templates/rag-fusion/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-gemini-multi-modal/.gitignore b/templates/rag-gemini-multi-modal/.gitignore deleted file mode 100644 index 709823056cf..00000000000 --- a/templates/rag-gemini-multi-modal/.gitignore +++ /dev/null @@ -1 +0,0 @@ -docs/img_*.jpg diff --git a/templates/rag-gemini-multi-modal/LICENSE b/templates/rag-gemini-multi-modal/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-gemini-multi-modal/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-gemini-multi-modal/README.md b/templates/rag-gemini-multi-modal/README.md deleted file mode 100644 index f0cd4516295..00000000000 --- a/templates/rag-gemini-multi-modal/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# RAG - Gemini multi-modal - -Multi-modal LLMs enable visual assistants that can perform question-answering about images. - -This template create a visual assistant for slide decks, which often contain visuals such as graphs or figures. - -It uses `OpenCLIP` embeddings to embed all the slide images and stores them in Chroma. - -Given a question, relevant slides are retrieved and passed to [Google Gemini](https://deepmind.google/technologies/gemini/#introduction) for answer synthesis. - -![Diagram illustrating the process of a visual assistant using multi-modal LLM, from slide deck images to OpenCLIP embedding, retrieval, and synthesis with Google Gemini, resulting in an answer.](https://github.com/langchain-ai/langchain/assets/122662504/b9e69bef-d687-4ecf-a599-937e559d5184) "Workflow Diagram for Visual Assistant Using Multi-modal LLM" - -## Input - -Supply a slide deck as pdf in the `/docs` directory. - -By default, this template has a slide deck about Q3 earnings from DataDog, a public technology company. - -Example questions to ask can be: -``` -How many customers does Datadog have? -What is Datadog platform % Y/Y growth in FY20, FY21, and FY22? -``` - -To create an index of the slide deck, run: -``` -poetry install -python ingest.py -``` - -## Storage - -This template will use [OpenCLIP](https://github.com/mlfoundations/open_clip) multi-modal embeddings to embed the images. - -You can select different embedding model options (see results [here](https://github.com/mlfoundations/open_clip/blob/main/docs/openclip_results.csv)). - -The first time you run the app, it will automatically download the multimodal embedding model. - -By default, LangChain will use an embedding model with moderate performance but lower memory requirements, `ViT-H-14`. - -You can choose alternative `OpenCLIPEmbeddings` models in `rag_chroma_multi_modal/ingest.py`: -``` -vectorstore_mmembd = Chroma( - collection_name="multi-modal-rag", - persist_directory=str(re_vectorstore_path), - embedding_function=OpenCLIPEmbeddings( - model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k" - ), -) -``` - -## LLM - -The app will retrieve images using multi-modal embeddings, and pass them to Google Gemini. 
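Concretely, the chain packages each retrieved slide image as a base64 `image_url` part of a single multi-modal message for Gemini (see `chain.py` below). A minimal sketch, assuming you already have a base64-encoded JPEG of a slide:

```python
# Minimal sketch of sending a retrieved slide image plus a question to Gemini,
# mirroring the message format used in this template's chain.py. The base64
# string is a placeholder; in the template it comes from the Chroma retriever.
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

model = ChatGoogleGenerativeAI(model="gemini-pro-vision")

b64_image = "<base64-encoded JPEG of a slide>"
message = HumanMessage(
    content=[
        {
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{b64_image}"},
        },
        {"type": "text", "text": "How many customers does Datadog have?"},
    ]
)
print(model.invoke([message]).content)
```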
- -## Environment Setup - -Set your `GOOGLE_API_KEY` environment variable in order to access Gemini. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-gemini-multi-modal -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-gemini-multi-modal -``` - -And add the following code to your `server.py` file: -```python -from rag_gemini_multi_modal import chain as rag_gemini_multi_modal_chain - -add_routes(app, rag_gemini_multi_modal_chain, path="/rag-gemini-multi-modal") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-gemini-multi-modal/playground](http://127.0.0.1:8000/rag-gemini-multi-modal/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-gemini-multi-modal") -``` diff --git a/templates/rag-gemini-multi-modal/docs/DDOG_Q3_earnings_deck.pdf b/templates/rag-gemini-multi-modal/docs/DDOG_Q3_earnings_deck.pdf deleted file mode 100644 index a4aa7d864c4..00000000000 Binary files a/templates/rag-gemini-multi-modal/docs/DDOG_Q3_earnings_deck.pdf and /dev/null differ diff --git a/templates/rag-gemini-multi-modal/ingest.py b/templates/rag-gemini-multi-modal/ingest.py deleted file mode 100644 index 60fb8369bac..00000000000 --- a/templates/rag-gemini-multi-modal/ingest.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -from pathlib import Path - -import pypdfium2 as pdfium -from langchain_chroma import Chroma -from langchain_experimental.open_clip import OpenCLIPEmbeddings - - -def get_images_from_pdf(pdf_path, img_dump_path): - """ - Extract images from each page of a PDF document and save as JPEG files. - - :param pdf_path: A string representing the path to the PDF file. - :param img_dump_path: A string representing the path to dummp images. 
- """ - pdf = pdfium.PdfDocument(pdf_path) - n_pages = len(pdf) - for page_number in range(n_pages): - page = pdf.get_page(page_number) - bitmap = page.render(scale=1, rotation=0, crop=(0, 0, 0, 0)) - pil_image = bitmap.to_pil() - pil_image.save(f"{img_dump_path}/img_{page_number + 1}.jpg", format="JPEG") - - -# Load PDF -doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf" -img_dump_path = Path(__file__).parent / "docs/" -rel_doc_path = doc_path.relative_to(Path.cwd()) -rel_img_dump_path = img_dump_path.relative_to(Path.cwd()) -print("pdf index") -pil_images = get_images_from_pdf(rel_doc_path, rel_img_dump_path) -print("done") -vectorstore = Path(__file__).parent / "chroma_db_multi_modal" -re_vectorstore_path = vectorstore.relative_to(Path.cwd()) - -# Load embedding function -print("Loading embedding function") -embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k") - -# Create chroma -vectorstore_mmembd = Chroma( - collection_name="multi-modal-rag", - persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"), - embedding_function=embedding, -) - -# Get image URIs -image_uris = sorted( - [ - os.path.join(rel_img_dump_path, image_name) - for image_name in os.listdir(rel_img_dump_path) - if image_name.endswith(".jpg") - ] -) - -# Add images -print("Embedding images") -vectorstore_mmembd.add_images(uris=image_uris) diff --git a/templates/rag-gemini-multi-modal/pyproject.toml b/templates/rag-gemini-multi-modal/pyproject.toml deleted file mode 100644 index 96275806d39..00000000000 --- a/templates/rag-gemini-multi-modal/pyproject.toml +++ /dev/null @@ -1,39 +0,0 @@ -[tool.poetry] -name = "rag-gemini-multi-modal" -version = "0.1.0" -description = "Multi-modal RAG using Gemini and OpenCLIP embeddings" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.9,<4.0" -langchain = ">=0.0.353,<0.2" -openai = "<2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -open-clip-torch = ">=2.23.0" -torch = ">=2.1.0" -pypdfium2 = ">=4.20.0" -langchain-experimental = ">=0.0.43" -langchain-google-genai = ">=0.0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_gemini_multi_modal" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Chroma"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal.ipynb b/templates/rag-gemini-multi-modal/rag_gemini_multi_modal.ipynb deleted file mode 100644 index fc610050d35..00000000000 --- a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal.ipynb +++ /dev/null @@ -1,52 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/rag-gemini-multi-modal\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-gemini-multi-modal\")\n", - "rag_app.invoke(\"What is the projected TAM for observability expected for each year through 2026?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": 
"python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/__init__.py b/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/__init__.py deleted file mode 100644 index 47b8759ae71..00000000000 --- a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_gemini_multi_modal.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py b/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py deleted file mode 100644 index cf731429578..00000000000 --- a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py +++ /dev/null @@ -1,122 +0,0 @@ -import base64 -import io -from pathlib import Path - -from langchain_chroma import Chroma -from langchain_core.documents import Document -from langchain_core.messages import HumanMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableLambda, RunnablePassthrough -from langchain_experimental.open_clip import OpenCLIPEmbeddings -from langchain_google_genai import ChatGoogleGenerativeAI -from PIL import Image - - -def resize_base64_image(base64_string, size=(128, 128)): - """ - Resize an image encoded as a Base64 string. - - :param base64_string: A Base64 encoded string of the image to be resized. - :param size: A tuple representing the new size (width, height) for the image. - :return: A Base64 encoded string of the resized image. - """ - img_data = base64.b64decode(base64_string) - img = Image.open(io.BytesIO(img_data)) - resized_img = img.resize(size, Image.LANCZOS) - buffered = io.BytesIO() - resized_img.save(buffered, format=img.format) - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def get_resized_images(docs): - """ - Resize images from base64-encoded strings. - - :param docs: A list of base64-encoded image to be resized. - :return: Dict containing a list of resized base64-encoded strings. - """ - b64_images = [] - for doc in docs: - if isinstance(doc, Document): - doc = doc.page_content - resized_image = resize_base64_image(doc, size=(1280, 720)) - b64_images.append(resized_image) - return {"images": b64_images} - - -def img_prompt_func(data_dict, num_images=2): - """ - Gemini prompt for image analysis. - - :param data_dict: A dict with images and a user-provided question. - :param num_images: Number of images to include in the prompt. - :return: A list containing message objects for each image and the text prompt. - """ - messages = [] - if data_dict["context"]["images"]: - for image in data_dict["context"]["images"][:num_images]: - image_message = { - "type": "image_url", - "image_url": {"url": f"data:image/jpeg;base64,{image}"}, - } - messages.append(image_message) - text_message = { - "type": "text", - "text": ( - "You are an analyst tasked with answering questions about visual content.\n" - "You will be give a set of image(s) from a slide deck / presentation.\n" - "Use this information to answer the user question. 
\n" - f"User-provided question: {data_dict['question']}\n\n" - ), - } - messages.append(text_message) - return [HumanMessage(content=messages)] - - -def multi_modal_rag_chain(retriever): - """ - Multi-modal RAG chain, - - :param retriever: A function that retrieves the necessary context for the model. - :return: A chain of functions representing the multi-modal RAG process. - """ - # Initialize the multi-modal Large Language Model with specific parameters - model = ChatGoogleGenerativeAI(model="gemini-pro-vision") - - # Define the RAG pipeline - chain = ( - { - "context": retriever | RunnableLambda(get_resized_images), - "question": RunnablePassthrough(), - } - | RunnableLambda(img_prompt_func) - | model - | StrOutputParser() - ) - - return chain - - -# Load chroma -vectorstore_mmembd = Chroma( - collection_name="multi-modal-rag", - persist_directory=str(Path(__file__).parent.parent / "chroma_db_multi_modal"), - embedding_function=OpenCLIPEmbeddings( - model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k" - ), -) - -# Make retriever -retriever_mmembd = vectorstore_mmembd.as_retriever() - -# Create RAG chain -chain = multi_modal_rag_chain(retriever_mmembd) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-gemini-multi-modal/tests/__init__.py b/templates/rag-gemini-multi-modal/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-google-cloud-sensitive-data-protection/LICENSE b/templates/rag-google-cloud-sensitive-data-protection/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-google-cloud-sensitive-data-protection/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-google-cloud-sensitive-data-protection/README.md b/templates/rag-google-cloud-sensitive-data-protection/README.md deleted file mode 100644 index 9e6aa9f48ea..00000000000 --- a/templates/rag-google-cloud-sensitive-data-protection/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# RAG - Google Cloud Sensitive Data Protection - -This template is an application that utilizes `Google Vertex AI Search`, a machine learning powered search service, and -PaLM 2 for Chat (chat-bison). The application uses a Retrieval chain to answer questions based on your documents. 
- -This template is an application that utilizes `Google Sensitive Data Protection`, a service for detecting and redacting -sensitive data in text, and PaLM 2 for Chat (chat-bison), although you can use any model. - -For more context on using Sensitive Data Protection, -check [here](https://cloud.google.com/dlp/docs/sensitive-data-protection-overview). - -## Environment Setup - -Before using this template, please ensure that you enable the [DLP API](https://console.cloud.google.com/marketplace/product/google/dlp.googleapis.com) -and [Vertex AI API](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) in your Google Cloud -project. - -For some common environment troubleshooting steps related to Google Cloud, see the bottom -of this readme. - -Set the following environment variables: - -* `GOOGLE_CLOUD_PROJECT_ID` - Your Google Cloud project ID. -* `MODEL_TYPE` - The model type for Vertex AI Search (e.g. `chat-bison`) - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-google-cloud-sensitive-data-protection -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-google-cloud-sensitive-data-protection -``` - -And add the following code to your `server.py` file: - -```python -from rag_google_cloud_sensitive_data_protection.chain import chain as rag_google_cloud_sensitive_data_protection_chain - -add_routes(app, rag_google_cloud_sensitive_data_protection_chain, path="/rag-google-cloud-sensitive-data-protection") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground -at [http://127.0.0.1:8000/rag-google-cloud-vertexai-search/playground](http://127.0.0.1:8000/rag-google-cloud-vertexai-search/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-google-cloud-sensitive-data-protection") -``` -``` - -# Troubleshooting Google Cloud - -You can set your `gcloud` credentials with their CLI using `gcloud auth application-default login` - -You can set your `gcloud` project with the following commands -```bash -gcloud config set project -gcloud auth application-default set-quota-project -export GOOGLE_CLOUD_PROJECT_ID= -``` diff --git a/templates/rag-google-cloud-sensitive-data-protection/main.py b/templates/rag-google-cloud-sensitive-data-protection/main.py deleted file mode 100644 index 30c6fa53c3f..00000000000 --- a/templates/rag-google-cloud-sensitive-data-protection/main.py +++ /dev/null @@ -1,9 +0,0 @@ -from rag_google_cloud_sensitive_data_protection.chain import chain - -if __name__ == "__main__": - query = { - "question": "Good morning. My name is Captain Blackbeard. My phone number " - "is 555-555-5555. And my email is lovely.pirate@gmail.com. Have a nice day.", - "chat_history": [], - } - print(chain.invoke(query)) diff --git a/templates/rag-google-cloud-sensitive-data-protection/pyproject.toml b/templates/rag-google-cloud-sensitive-data-protection/pyproject.toml deleted file mode 100644 index 51d706cb6fc..00000000000 --- a/templates/rag-google-cloud-sensitive-data-protection/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "rag-google-cloud-sensitive-data-protection" -version = "0.0.1" -description = "RAG using sensitive data protection" -authors = ["Juan Calvo "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -google-cloud-aiplatform = ">=1.35.0" -google-cloud-dlp = "^3.13.0" - - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_google_cloud_sensitive_data_protection" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Datatonic" -integrations = ["OpenAI", "Google Cloud"] -tags = ["data"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/__init__.py b/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/__init__.py deleted file mode 100644 index d85ac089b19..00000000000 --- a/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_google_cloud_sensitive_data_protection.chain import chain - -__all__ = ["chain"] diff --git 
a/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/chain.py b/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/chain.py deleted file mode 100644 index 87d047db4db..00000000000 --- a/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/chain.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -from typing import List, Tuple - -from google.cloud import dlp_v2 -from langchain_community.chat_models import ChatVertexAI -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import RunnableLambda, RunnableParallel - - -# Formatting for chat history -def _format_chat_history(chat_history: List[Tuple[str, str]]): - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -def _deidentify_with_replace( - input_str: str, - info_types: List[str], - project: str, -) -> str: - """Uses the Data Loss Prevention API to deidentify sensitive data in a - string by replacing matched input values with the info type. - Args: - project: The Google Cloud project id to use as a parent resource. - input_str: The string to deidentify (will be treated as text). - info_types: A list of strings representing info types to look for. - Returns: - str: The input string after it has been deidentified. - """ - - # Instantiate a client - dlp = dlp_v2.DlpServiceClient() - - # Convert the project id into a full resource id. - parent = f"projects/{project}/locations/global" - - if info_types is None: - info_types = ["PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD_NUMBER"] - # Construct inspect configuration dictionary - inspect_config = {"info_types": [{"name": info_type} for info_type in info_types]} - - # Construct deidentify configuration dictionary - deidentify_config = { - "info_type_transformations": { - "transformations": [ - {"primitive_transformation": {"replace_with_info_type_config": {}}} - ] - } - } - - # Construct item - item = {"value": input_str} - - # Call the API - response = dlp.deidentify_content( - request={ - "parent": parent, - "deidentify_config": deidentify_config, - "inspect_config": inspect_config, - "item": item, - } - ) - - # Print out the results. 
- return response.item.value - - -# Prompt we will use -prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are a helpful assistant who translates to pirate", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{question}"), - ] -) - -# Create Vertex AI retriever -project_id = os.environ.get("GOOGLE_CLOUD_PROJECT_ID") -model_type = os.environ.get("MODEL_TYPE") - -# Set LLM and embeddings -model = ChatVertexAI(model_name=model_type, temperature=0.0) - - -class ChatHistory(BaseModel): - question: str - chat_history: List[Tuple[str, str]] = Field(..., extra={"widget": {"type": "chat"}}) - - -_inputs = RunnableParallel( - { - "question": RunnableLambda( - lambda x: _deidentify_with_replace( - input_str=x["question"], - info_types=["PERSON_NAME", "PHONE_NUMBER", "EMAIL_ADDRESS"], - project=project_id, - ) - ).with_config(run_name=" _deidentify_with_replace"), - "chat_history": RunnableLambda( - lambda x: _format_chat_history(x["chat_history"]) - ).with_config(run_name=" _format_chat_history"), - } -) - -# RAG -chain = _inputs | prompt | model | StrOutputParser() - -chain = chain.with_types(input_type=ChatHistory).with_config(run_name="Inputs") diff --git a/templates/rag-google-cloud-sensitive-data-protection/tests/__init__.py b/templates/rag-google-cloud-sensitive-data-protection/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-google-cloud-vertexai-search/LICENSE b/templates/rag-google-cloud-vertexai-search/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-google-cloud-vertexai-search/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-google-cloud-vertexai-search/README.md b/templates/rag-google-cloud-vertexai-search/README.md deleted file mode 100644 index 668a226534d..00000000000 --- a/templates/rag-google-cloud-vertexai-search/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# RAG - Google Cloud Vertex AI Search - -This template is an application that utilizes `Google Vertex AI Search`, -a machine learning powered search service, and -PaLM 2 for Chat (chat-bison). The application uses a Retrieval chain to answer questions based on your documents. - -For more context on building RAG applications with `Vertex AI Search`, -check [here](https://cloud.google.com/generative-ai-app-builder/docs/enterprise-search-introduction). 
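At its core, the template wires a `GoogleVertexAISearchRetriever` into a standard retrieval chain; a condensed sketch follows (the complete version, with env-var validation and input typing, is in `chain.py` below), assuming the environment variables from the next section are already set:

```python
# Condensed sketch of this template's retrieval chain: Vertex AI Search
# retrieves context, and a Vertex AI chat model answers from that context only.
import os

from langchain.retrievers import GoogleVertexAISearchRetriever
from langchain_community.chat_models import ChatVertexAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

retriever = GoogleVertexAISearchRetriever(
    project_id=os.environ["GOOGLE_CLOUD_PROJECT_ID"],
    search_engine_id=os.environ["DATA_STORE_ID"],
)
model = ChatVertexAI(model_name=os.environ["MODEL_TYPE"], temperature=0.0)
prompt = ChatPromptTemplate.from_template(
    "Answer the question based only on the following context:\n"
    "{context}\nQuestion: {question}\n"
)

chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)
print(chain.invoke("Who is the CEO of Google Cloud?"))
```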
- -## Environment Setup - -Before using this template, please ensure that you are authenticated with Vertex AI Search. See the authentication -guide: [here](https://cloud.google.com/generative-ai-app-builder/docs/authentication). - -You will also need to create: - -- A search application [here](https://cloud.google.com/generative-ai-app-builder/docs/create-engine-es) -- A data store [here](https://cloud.google.com/generative-ai-app-builder/docs/create-data-store-es) - -A suitable dataset to test this template with is the Alphabet Earnings Reports, which you can -find [here](https://abc.xyz/investor/). The data is also available -at `gs://cloud-samples-data/gen-app-builder/search/alphabet-investor-pdfs`. - -Set the following environment variables: - -* `GOOGLE_CLOUD_PROJECT_ID` - Your Google Cloud project ID. -* `DATA_STORE_ID` - The ID of the data store in Vertex AI Search, which is a 36-character alphanumeric value found on - the data store details page. -* `MODEL_TYPE` - The model type for Vertex AI Search. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-google-cloud-vertexai-search -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-google-cloud-vertexai-search -``` - -And add the following code to your `server.py` file: - -```python -from rag_google_cloud_vertexai_search.chain import chain as rag_google_cloud_vertexai_search_chain - -add_routes(app, rag_google_cloud_vertexai_search_chain, path="/rag-google-cloud-vertexai-search") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground -at [http://127.0.0.1:8000/rag-google-cloud-vertexai-search/playground](http://127.0.0.1:8000/rag-google-cloud-vertexai-search/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-google-cloud-vertexai-search") -``` diff --git a/templates/rag-google-cloud-vertexai-search/main.py b/templates/rag-google-cloud-vertexai-search/main.py deleted file mode 100644 index a96c83c8bb4..00000000000 --- a/templates/rag-google-cloud-vertexai-search/main.py +++ /dev/null @@ -1,5 +0,0 @@ -from rag_google_cloud_vertexai_search.chain import chain - -if __name__ == "__main__": - query = "Who is the CEO of Google Cloud?" 
- print(chain.invoke(query)) diff --git a/templates/rag-google-cloud-vertexai-search/pyproject.toml b/templates/rag-google-cloud-vertexai-search/pyproject.toml deleted file mode 100644 index 95512d1f71e..00000000000 --- a/templates/rag-google-cloud-vertexai-search/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "rag-google-cloud-vertexai-search" -version = "0.0.1" -description = "RAG using Google Vertex AI Search" -authors = ["Juan Calvo "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -google-cloud-aiplatform = ">=1.35.0" - - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_google_cloud_vertexai_search" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Datatonic" -integrations = ["OpenAI", "Google"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/__init__.py b/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/__init__.py deleted file mode 100644 index 7b31128b433..00000000000 --- a/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_google_cloud_vertexai_search.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/chain.py b/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/chain.py deleted file mode 100644 index a5eda1b3e00..00000000000 --- a/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/chain.py +++ /dev/null @@ -1,50 +0,0 @@ -import os - -from langchain.retrievers import GoogleVertexAISearchRetriever -from langchain_community.chat_models import ChatVertexAI -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -# Get project, data store, and model type from env variables -project_id = os.environ.get("GOOGLE_CLOUD_PROJECT_ID") -data_store_id = os.environ.get("DATA_STORE_ID") -model_type = os.environ.get("MODEL_TYPE") - -if not data_store_id: - raise ValueError( - "No value provided in env variable 'DATA_STORE_ID'. " - "A data store is required to run this application." 
- ) - -# Set LLM and embeddings -model = ChatVertexAI(model_name=model_type, temperature=0.0) - -# Create Vertex AI retriever -retriever = GoogleVertexAISearchRetriever( - project_id=project_id, search_engine_id=data_store_id -) - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-google-cloud-vertexai-search/tests/__init__.py b/templates/rag-google-cloud-vertexai-search/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-gpt-crawler/LICENSE b/templates/rag-gpt-crawler/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-gpt-crawler/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-gpt-crawler/README.md b/templates/rag-gpt-crawler/README.md deleted file mode 100644 index 1a1eae87330..00000000000 --- a/templates/rag-gpt-crawler/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# RAG - GPT-crawler - -`GPT-crawler` crawls websites to produce files for use in custom GPTs or other apps (RAG). - -This template uses [gpt-crawler](https://github.com/BuilderIO/gpt-crawler) to build a RAG app - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Crawling - -Run GPT-crawler to extract content from a set of urls, using the config file in GPT-crawler repo. - -Here is example config for LangChain use-case docs: - -``` -export const config: Config = { - url: "https://python.langchain.com/docs/use_cases/", - match: "https://python.langchain.com/docs/use_cases/**", - selector: ".docMainContainer_gTbr", - maxPagesToCrawl: 10, - outputFileName: "output.json", -}; -``` - -Then, run this as described in the [gpt-crawler](https://github.com/BuilderIO/gpt-crawler) README: - -``` -npm start -``` - -And copy the `output.json` file into the folder containing this README. 
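The chain then turns the crawler output into LangChain documents; a condensed sketch of that loading step is below (the full pipeline, including splitting and indexing into Chroma, is in `chain.py` later in this template):

```python
# Condensed sketch of how this template consumes the gpt-crawler output: each
# crawled page becomes a Document whose text is the page HTML and whose
# metadata keeps the title and URL. Splitting and embedding happen in chain.py.
import json
from pathlib import Path

from langchain_core.documents import Document

data = json.loads(Path("output.json").read_text())
docs = [
    Document(
        page_content=record["html"],
        metadata={"title": record["title"], "url": record["url"]},
    )
    for record in data
]
print(f"Loaded {len(docs)} crawled pages")
```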
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-gpt-crawler -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-gpt-crawler -``` - -And add the following code to your `server.py` file: -```python -from rag_chroma import chain as rag_gpt_crawler - -add_routes(app, rag_gpt_crawler, path="/rag-gpt-crawler") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-gpt-crawler/playground](http://127.0.0.1:8000/rag-gpt-crawler/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-gpt-crawler") -``` \ No newline at end of file diff --git a/templates/rag-gpt-crawler/pyproject.toml b/templates/rag-gpt-crawler/pyproject.toml deleted file mode 100644 index 4c64bc03d4f..00000000000 --- a/templates/rag-gpt-crawler/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-gpt-crawler" -version = "0.1.0" -description = "Use gpt-crawler to build a chat app for any website" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_gpt_crawler" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Google" -integrations = ["OpenAI", "Chromadb"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-gpt-crawler/rag_gpt_crawler.ipynb b/templates/rag-gpt-crawler/rag_gpt_crawler.ipynb deleted file mode 100644 index 31ae8847721..00000000000 --- a/templates/rag-gpt-crawler/rag_gpt_crawler.ipynb +++ /dev/null @@ -1,51 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/rag-gpt-crawler\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-gpt-crawler\")\n", - "rag_app.invoke(\"How does summarization work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - 
"name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-gpt-crawler/rag_gpt_crawler/__init__.py b/templates/rag-gpt-crawler/rag_gpt_crawler/__init__.py deleted file mode 100644 index 2273e51a20d..00000000000 --- a/templates/rag-gpt-crawler/rag_gpt_crawler/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_gpt_crawler.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py b/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py deleted file mode 100644 index b5934cfb522..00000000000 --- a/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py +++ /dev/null @@ -1,62 +0,0 @@ -import json -from pathlib import Path - -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.documents import Document -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_text_splitters import RecursiveCharacterTextSplitter - -# Load output from gpt crawler -path_to_gptcrawler = Path(__file__).parent.parent / "output.json" -data = json.loads(Path(path_to_gptcrawler).read_text()) -docs = [ - Document( - page_content=dict_["html"], - metadata={"title": dict_["title"], "url": dict_["url"]}, - ) - for dict_ in data -] - -# Split -text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100) -all_splits = text_splitter.split_documents(docs) - -# Add to vectorDB -vectorstore = Chroma.from_documents( - documents=all_splits, - collection_name="rag-gpt-builder", - embedding=OpenAIEmbeddings(), -) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# LLM -model = ChatOpenAI() - -# RAG chain -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-gpt-crawler/tests/__init__.py b/templates/rag-gpt-crawler/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-jaguardb/LICENSE b/templates/rag-jaguardb/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-jaguardb/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-jaguardb/README.md b/templates/rag-jaguardb/README.md deleted file mode 100644 index 41743aeaa25..00000000000 --- a/templates/rag-jaguardb/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# RAG - JaguarDB - -This template performs RAG using `JaguarDB` and OpenAI. - -## Environment Setup - -You should export two environment variables, one being your Jaguar API key, the other being your OpenAI API key. -If you do not have JaguarDB set up, see the `JaguarDB Setup` section at the bottom for instructions on how to do so. - -```shell -export JAGUAR_API_KEY=... -export OPENAI_API_KEY=... -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-jaguardb -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-jaguardb -``` - -And add the following code to your `server.py` file: -```python -from rag_jaguardb import chain as rag_jaguardb_chain - -add_routes(app, rag_jaguardb_chain, path="/rag-jaguardb") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-jaguardb/playground](http://127.0.0.1:8000/rag-jaguardb/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-jaguardb") -``` - -## JaguarDB Setup - -To utilize JaguarDB, you can use docker pull and docker run commands to quickly set up JaguarDB.
- -```shell -docker pull jaguardb/jaguardb -docker run -d -p 8888:8888 --name jaguardb jaguardb/jaguardb -``` - -To launch the JaguarDB client terminal to interact with JaguarDB server: - -```shell -docker exec -it jaguardb /home/jaguar/jaguar/bin/jag -``` - -Another option is to download an already-built binary package of JaguarDB on Linux, and deploy the database on a single node or in a cluster of nodes. The streamlined process enables you to quickly start using JaguarDB and leverage its powerful features and functionalities. [here](http://www.jaguardb.com/download.html). \ No newline at end of file diff --git a/templates/rag-jaguardb/pyproject.toml b/templates/rag-jaguardb/pyproject.toml deleted file mode 100644 index f4c1d32c815..00000000000 --- a/templates/rag-jaguardb/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "rag-jaguardb" -version = "0.1.0" -description = "RAG with JaguarDB" -authors = [ - "Daniel Ung ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -jaguar = ">=3.4" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.15" - -[tool.langserve] -export_module = "rag_jaguardb" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["JaguarDB", "OpenAI"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-jaguardb/rag_jaguardb.ipynb b/templates/rag-jaguardb/rag_jaguardb.ipynb deleted file mode 100644 index 34782c40df6..00000000000 --- a/templates/rag-jaguardb/rag_jaguardb.ipynb +++ /dev/null @@ -1,51 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, rag_jaguardb_chain, path=\"/rag-jaguardb\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-jaguardb\")\n", - "rag_app.invoke(\"hello!\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-jaguardb/rag_jaguardb/__init__.py b/templates/rag-jaguardb/rag_jaguardb/__init__.py deleted file mode 100644 index 81c930be748..00000000000 --- a/templates/rag-jaguardb/rag_jaguardb/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_jaguardb import chain - -__all__ = ["chain"] diff --git a/templates/rag-jaguardb/rag_jaguardb/chain.py b/templates/rag-jaguardb/rag_jaguardb/chain.py deleted file mode 100644 index 5a90def0b39..00000000000 --- a/templates/rag-jaguardb/rag_jaguardb/chain.py +++ /dev/null @@ -1,64 +0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores.jaguar import Jaguar -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from 
langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import ( - RunnableParallel, - RunnablePassthrough, -) - -if os.environ.get("JAGUAR_API_KEY", None) is None: - raise Exception("Missing `JAGUAR_API_KEY` environment variable.") -JAGUAR_API_KEY = os.environ["JAGUAR_API_KEY"] - -url = "http://192.168.3.88:8080/fwww/" -pod = "vdb" -store = "langchain_test_store" -vector_index = "v" -vector_type = "cosine_fraction_float" -vector_dimension = 1536 -embeddings = OpenAIEmbeddings() -vectorstore = Jaguar( - pod, store, vector_index, vector_type, vector_dimension, url, embeddings -) - -retriever = vectorstore.as_retriever() - -vectorstore.login() -""" -Create vector store on the JaguarDB database server. -This should be done only once. -""" - -metadata = "category char(16)" -text_size = 4096 -vectorstore.create(metadata, text_size) - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - - -# RAG -model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-jaguardb/tests/__init__.py b/templates/rag-jaguardb/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-lancedb/LICENSE b/templates/rag-lancedb/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-lancedb/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-lancedb/README.md b/templates/rag-lancedb/README.md deleted file mode 100644 index 92decd9ff16..00000000000 --- a/templates/rag-lancedb/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# RAG - LanceDB - -This template performs RAG using `LanceDB` and `OpenAI`. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. 
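As a quick sanity check of the environment, a minimal sketch along the lines of this template's `chain.py` embeds a single text into LanceDB and queries it back (the sample text and query are illustrative only):

```python
# Quick sanity check: embed one text into LanceDB and run a similarity search.
# Requires OPENAI_API_KEY to be set; sample text and query are illustrative.
from langchain_community.vectorstores import LanceDB
from langchain_openai import OpenAIEmbeddings

vectorstore = LanceDB.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()
print(retriever.invoke("where did harrison work?"))
```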
- - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-lancedb -``` - -If you want to add this to as existing project, you can just run: - -```shell -langchain app add rag-lancedb -``` - -And add the following code to your `server.py` file: -```python -from rag_lancedb import chain as rag_lancedb_chain - -add_routes(app, rag_lancedb_chain, path="/rag-lancedb") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-lancedb/playground](http://127.0.0.1:8000/rag-lancedb/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-lancedb") -``` \ No newline at end of file diff --git a/templates/rag-lancedb/pyproject.toml b/templates/rag-lancedb/pyproject.toml deleted file mode 100644 index f2316e81021..00000000000 --- a/templates/rag-lancedb/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "rag-lancedb" -version = "0.0.1" -description = "RAG using LanceDB" -authors = ['akash desai'] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain-core = ">=0.1.5" -langchain-openai = ">=0.0.1" -lancedb = ">=0.5.5,<1" -openai = "<2" -tiktoken = "^0.5.2" - - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_lancedb" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "LanceDB"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-lancedb/rag_lancedb/__init__.py b/templates/rag-lancedb/rag_lancedb/__init__.py deleted file mode 100644 index e917c2b6f22..00000000000 --- a/templates/rag-lancedb/rag_lancedb/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_lancedb.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-lancedb/rag_lancedb/chain.py b/templates/rag-lancedb/rag_lancedb/chain.py deleted file mode 100644 index 856d65f0f22..00000000000 --- a/templates/rag-lancedb/rag_lancedb/chain.py +++ /dev/null @@ -1,58 +0,0 @@ -from langchain_community.vectorstores import LanceDB -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_openai import ChatOpenAI, OpenAIEmbeddings - -# Example for document loading (from url), splitting, and creating 
vectostore - -""" -# Load -from langchain_community.document_loaders import WebBaseLoader -loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -data = loader.load() - -# Split -from langchain.text_splitter import RecursiveCharacterTextSplitter -text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -all_splits = text_splitter.split_documents(data) - -# Add to vectorDB -vectorstore = LanceDB.from_documents(documents=all_splits, - embedding=OpenAIEmbeddings()) -retriever = vectorstore.as_retriever() -""" - -# Embed a single document for test -vectorstore = LanceDB.from_texts( - ["harrison worked at kensho"], embedding=OpenAIEmbeddings() -) - -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# LLM -model = ChatOpenAI() - -# RAG chain -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-lancedb/tests/__init__.py b/templates/rag-lancedb/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-lantern/.gitignore b/templates/rag-lantern/.gitignore deleted file mode 100644 index 4c49bd78f1d..00000000000 --- a/templates/rag-lantern/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.env diff --git a/templates/rag-lantern/README.md b/templates/rag-lantern/README.md deleted file mode 100644 index 8023b54e80f..00000000000 --- a/templates/rag-lantern/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# RAG - Lantern - -This template performs RAG with `Lantern`. - -[Lantern](https://lantern.dev) is an open-source vector database built on top of [PostgreSQL](https://en.wikipedia.org/wiki/PostgreSQL). It enables vector search and embedding generation inside your database. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -To get your `OPENAI_API_KEY`, navigate to [API keys](https://platform.openai.com/account/api-keys) on your OpenAI account and create a new secret key. - -To find your `LANTERN_URL` and `LANTERN_SERVICE_KEY`, head to your Lantern project's [API settings](https://lantern.dev/dashboard/project/_/settings/api). - -- `LANTERN_URL` corresponds to the Project URL -- `LANTERN_SERVICE_KEY` corresponds to the `service_role` API key - - -```shell -export LANTERN_URL= -export LANTERN_SERVICE_KEY= -export OPENAI_API_KEY= -``` - -## Setup Lantern Database - -Use these steps to setup your Lantern database if you haven't already. - -1. Head to [https://lantern.dev](https://lantern.dev) to create your Lantern database. -2. 
In your favorite SQL client, jump to the SQL editor and run the following script to setup your database as a vector store: - - ```sql - -- Create a table to store your documents - create table - documents ( - id uuid primary key, - content text, -- corresponds to Document.pageContent - metadata jsonb, -- corresponds to Document.metadata - embedding REAL[1536] -- 1536 works for OpenAI embeddings, change as needed - ); - - -- Create a function to search for documents - create function match_documents ( - query_embedding REAL[1536], - filter jsonb default '{}' - ) returns table ( - id uuid, - content text, - metadata jsonb, - similarity float - ) language plpgsql as $$ - #variable_conflict use_column - begin - return query - select - id, - content, - metadata, - 1 - (documents.embedding <=> query_embedding) as similarity - from documents - where metadata @> filter - order by documents.embedding <=> query_embedding; - end; - $$; - ``` - -## Setup Environment Variables - -Since we are using [`Lantern`](https://python.langchain.com/docs/integrations/vectorstores/lantern) and [`OpenAIEmbeddings`](https://python.langchain.com/docs/integrations/text_embedding/openai), we need to load their API keys. - -## Usage - -First, install the LangChain CLI: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-lantern -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-lantern -``` - -And add the following code to your `server.py` file: - -```python -from rag_lantern.chain import chain as rag_lantern_chain - -add_routes(app, rag_lantern_chain, path="/rag-lantern") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-lantern/playground](http://127.0.0.1:8000/rag-lantern/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-lantern") -``` diff --git a/templates/rag-lantern/pyproject.toml b/templates/rag-lantern/pyproject.toml deleted file mode 100644 index 7c10e82cde3..00000000000 --- a/templates/rag-lantern/pyproject.toml +++ /dev/null @@ -1,39 +0,0 @@ -[tool.poetry] -name = "rag-lantern" -version = "0.1.0" -description = "RAG using Lantern retriver" -authors = [ - "Gustavo Reyes ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -rag-lantern = {path = "packages/rag-lantern", develop = true} - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.15" -[tool.poetry.group.dev.dependencies.python-dotenv] -extras = [ - "cli", -] -version = "^1.0.0" - -[tool.langserve] -export_module = "rag_lantern.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Lantern" -integrations = ["OpenAI", "Lantern"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-lantern/rag_lantern/__init__.py b/templates/rag-lantern/rag_lantern/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-lantern/rag_lantern/chain.py b/templates/rag-lantern/rag_lantern/chain.py deleted file mode 100644 index b7dc9a0bf33..00000000000 --- a/templates/rag-lantern/rag_lantern/chain.py +++ /dev/null @@ -1,47 +0,0 @@ -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Lantern -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -CONNECTION_STRING = "postgresql://postgres:postgres@localhost:5432" -COLLECTION_NAME = "documents" -DB_NAME = "postgres" - -embeddings = OpenAIEmbeddings() - -vectorstore = Lantern( - collection_name=COLLECTION_NAME, - connection_string=CONNECTION_STRING, - embedding_function=embeddings, -) - -retriever = vectorstore.as_retriever() - - -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" - -prompt = ChatPromptTemplate.from_template(template) - -model = ChatOpenAI() - -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-lantern/tests/__init__.py b/templates/rag-lantern/tests/__init__.py deleted 
file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-matching-engine/LICENSE b/templates/rag-matching-engine/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-matching-engine/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-matching-engine/README.md b/templates/rag-matching-engine/README.md deleted file mode 100644 index d6755da9ff9..00000000000 --- a/templates/rag-matching-engine/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# RAG - Google Cloud Matching Engine - -This template performs RAG using [Google Cloud Vertex Matching Engine](https://cloud.google.com/blog/products/ai-machine-learning/vertex-matching-engine-blazing-fast-and-massively-scalable-nearest-neighbor-search). - -It utilizes a previously created index to retrieve relevant documents or contexts based on user-provided questions. - -## Environment Setup - -An index should be created before running the code. - -The process to create this index can be found [here](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/language/use-cases/document-qa/question_answering_documents_langchain_matching_engine.ipynb). - -Environment variables for Vertex should be set: -``` -PROJECT_ID -ME_REGION -GCS_BUCKET -ME_INDEX_ID -ME_ENDPOINT_ID -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-matching-engine -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-matching-engine -``` - -And add the following code to your `server.py` file: -```python -from rag_matching_engine import chain as rag_matching_engine_chain - -add_routes(app, rag_matching_engine_chain, path="/rag-matching-engine") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-matching-engine/playground](http://127.0.0.1:8000/rag-matching-engine/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-matching-engine") -``` - -For more details on how to connect to the template, refer to the Jupyter notebook `rag_matching_engine`. \ No newline at end of file diff --git a/templates/rag-matching-engine/pyproject.toml b/templates/rag-matching-engine/pyproject.toml deleted file mode 100644 index 384e61921b4..00000000000 --- a/templates/rag-matching-engine/pyproject.toml +++ /dev/null @@ -1,30 +0,0 @@ -[tool.poetry] -name = "rag-matching-engine" -version = "0.0.1" -description = "RAG using Google Cloud Platform's Vertex AI Matching Engine" -authors = ["Leonid Kuligin"] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -google-cloud-aiplatform = "^1.35.0" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_matching_engine" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Google" -integrations = ["OpenAI", "Google"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-matching-engine/rag_matching_engine/__init__.py b/templates/rag-matching-engine/rag_matching_engine/__init__.py deleted file mode 100644 index 7431c5a1285..00000000000 --- a/templates/rag-matching-engine/rag_matching_engine/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_matching_engine.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-matching-engine/rag_matching_engine/chain.py b/templates/rag-matching-engine/rag_matching_engine/chain.py deleted file mode 100644 index d47b67fcd45..00000000000 --- a/templates/rag-matching-engine/rag_matching_engine/chain.py +++ /dev/null @@ -1,78 +0,0 @@ -import os - -from langchain_community.embeddings import VertexAIEmbeddings -from langchain_community.llms import VertexAI -from langchain_community.vectorstores import MatchingEngine -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import PromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -# you need to preate the index first, for example, as described here: -# https://github.com/GoogleCloudPlatform/generative-ai/blob/main/language/use-cases/document-qa/question_answering_documents_langchain_matching_engine.ipynb -expected_variables = [ - "project_id", - "me_region", - "gcs_bucket", - "me_index_id", - "me_endpoint_id", -] -variables = [] -for variable_name in expected_variables: - variable = os.environ.get(variable_name.upper()) - if not variable: - raise Exception(f"Missing `{variable_name}` environment 
variable.") - variables.append(variable) - -project_id, me_region, gcs_bucket, me_index_id, me_endpoint_id = variables - - -vectorstore = MatchingEngine.from_components( - project_id=project_id, - region=me_region, - gcs_bucket_name=gcs_bucket, - embedding=VertexAIEmbeddings(), - index_id=me_index_id, - endpoint_id=me_endpoint_id, -) - -model = VertexAI() - -template = ( - "SYSTEM: You are an intelligent assistant helping the users with their questions" - "on research papers.\n\n" - "Question: {question}\n\n" - "Strictly Use ONLY the following pieces of context to answer the question at the " - "end. Think step-by-step and then answer.\n\n" - "Do not try to make up an answer:\n" - "- If the answer to the question cannot be determined from the context alone, " - 'say \n"I cannot determine the answer to that."\n' - '- If the context is empty, just say "I do not know the answer to that."\n\n' - "=============\n{context}\n=============\n\n" - "Question: {question}\nHelpful Answer: " -) - -prompt = PromptTemplate.from_template(template) - -retriever = vectorstore.as_retriever( - search_type="similarity", - search_kwargs={ - "k": 10, - "search_distance": 0.6, - }, -) - -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-matching-engine/tests/__init__.py b/templates/rag-matching-engine/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-milvus/.gitignore b/templates/rag-milvus/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/rag-milvus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/rag-milvus/LICENSE b/templates/rag-milvus/LICENSE deleted file mode 100644 index fc0602feecd..00000000000 --- a/templates/rag-milvus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2024 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-milvus/README.md b/templates/rag-milvus/README.md deleted file mode 100644 index ec125dd24fb..00000000000 --- a/templates/rag-milvus/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# RAG - Milvus - -This template performs RAG using `Milvus` and `OpenAI`. - -## Environment Setup - -Start the milvus server instance, and get the host ip and port. - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. 
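If you only want to smoke-test the template, its `chain.py` also supports Milvus Lite, which stores everything in a local file instead of requiring a separate server; a minimal sketch of that pattern (sample text and query are illustrative):

```python
# Minimal sketch using Milvus Lite: a local file URI replaces a Milvus server.
# For a real deployment, point the URI at your server, e.g. http://localhost:19530.
from langchain_milvus.vectorstores import Milvus
from langchain_openai import OpenAIEmbeddings

vectorstore = Milvus.from_texts(
    ["harrison worked at kensho"],
    collection_name="rag_milvus",
    embedding=OpenAIEmbeddings(),
    drop_old=True,
    connection_args={"uri": "./milvus.db"},
)
print(vectorstore.as_retriever().invoke("where did harrison work?"))
```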
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-milvus -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-milvus -``` - -And add the following code to your `server.py` file: -```python -from rag_milvus import chain as rag_milvus_chain - -add_routes(app, rag_milvus_chain, path="/rag-milvus") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-milvus/playground](http://127.0.0.1:8000/rag-milvus/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-milvus") -``` diff --git a/templates/rag-milvus/pyproject.toml b/templates/rag-milvus/pyproject.toml deleted file mode 100644 index 4060fba7859..00000000000 --- a/templates/rag-milvus/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "rag-milvus" -version = "0.1.1" -description = "RAG using Milvus" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -langchain-core = "^0.1" -langchain-openai = "^0.1" -langchain-community = "^0.0.30" -pymilvus = "^2.4.3" -scipy = "^1.9" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.4" -fastapi = "^0.104.0" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_milvus" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Milvus"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-milvus/rag_milvus/__init__.py b/templates/rag-milvus/rag_milvus/__init__.py deleted file mode 100644 index cf9e1eac267..00000000000 --- a/templates/rag-milvus/rag_milvus/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_milvus.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-milvus/rag_milvus/chain.py b/templates/rag-milvus/rag_milvus/chain.py deleted file mode 100644 index 57c53006945..00000000000 --- a/templates/rag-milvus/rag_milvus/chain.py +++ /dev/null @@ -1,79 +0,0 @@ -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_milvus.vectorstores import Milvus -from langchain_openai import ChatOpenAI, OpenAIEmbeddings - -# Example for document loading (from url), splitting, and creating vectorstore - -# Setting the URI as a local file, e.g.`./milvus.db`, 
is the most convenient method, -# as it automatically utilizes Milvus Lite to store all data in this file. -# -# If you have large scale of data such as more than a million docs, -# we recommend setting up a more performant Milvus server on docker or kubernetes. -# (https://milvus.io/docs/quickstart.md) -# When using this setup, please use the server URI, -# e.g.`http://localhost:19530`, as your URI. - -URI = "./milvus.db" - -""" -# Load -from langchain_community.document_loaders import WebBaseLoader - -loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -data = loader.load() - -# Split -from langchain_text_splitters import RecursiveCharacterTextSplitter - -text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -all_splits = text_splitter.split_documents(data) - -# Add to vectorDB -vectorstore = Milvus.from_documents(documents=all_splits, - collection_name="rag_milvus", - embedding=OpenAIEmbeddings(), - drop_old=True, - connection_args={"uri": URI}, - ) -retriever = vectorstore.as_retriever() -""" - -# Embed a single document as a test -vectorstore = Milvus.from_texts( - ["harrison worked at kensho"], - collection_name="rag_milvus", - embedding=OpenAIEmbeddings(), - drop_old=True, - connection_args={"uri": URI}, -) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# LLM -model = ChatOpenAI() - -# RAG chain -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-milvus/tests/__init__.py b/templates/rag-milvus/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-momento-vector-index/LICENSE b/templates/rag-momento-vector-index/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-momento-vector-index/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/templates/rag-momento-vector-index/README.md b/templates/rag-momento-vector-index/README.md deleted file mode 100644 index b4d0b9a55cf..00000000000 --- a/templates/rag-momento-vector-index/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# RAG - Momento Vector Index - -This template performs RAG using `Momento Vector Index` (`MVI`) and `OpenAI`. - -> MVI: the most productive, easiest to use, serverless vector index for your data. To get started with MVI, simply sign up for an account. There's no need to handle infrastructure, manage servers, or be concerned about scaling. MVI is a service that scales automatically to meet your needs. Combine with other Momento services such as Momento Cache to cache prompts and as a session store or Momento Topics as a pub/sub system to broadcast events to your application. - -To sign up and access MVI, visit the [Momento Console](https://console.gomomento.com/). - -## Environment Setup - -This template uses `Momento Vector Index` as a vectorstore and requires that `MOMENTO_API_KEY`, and `MOMENTO_INDEX_NAME` are set. - -Go to the [console](https://console.gomomento.com/) to get an API key. - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-momento-vector-index -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-momento-vector-index -``` - -And add the following code to your `server.py` file: - -```python -from rag_momento_vector_index import chain as rag_momento_vector_index_chain - -add_routes(app, rag_momento_vector_index_chain, path="/rag-momento-vector-index") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-momento-vector-index/playground](http://127.0.0.1:8000/rag-momento-vector-index/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-momento-vector-index") -``` - -## Indexing Data - -We have included a sample module to index data. That is available at `rag_momento_vector_index/ingest.py`. You will see a commented out line in `chain.py` that invokes this. Uncomment to use. 
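Concretely, enabling that commented-out block amounts to a one-time seeding call like the sketch below; it assumes `MOMENTO_API_KEY` is exported and falls back to the default index name used by `chain.py`.

```python
# One-time seeding of the Momento Vector Index, as wired up in chain.py.
# Assumes MOMENTO_API_KEY is exported; index name defaults to "langchain-test".
import os

from rag_momento_vector_index import ingest

ingest.load("MOMENTO_API_KEY", os.environ.get("MOMENTO_INDEX_NAME", "langchain-test"))
```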
diff --git a/templates/rag-momento-vector-index/pyproject.toml b/templates/rag-momento-vector-index/pyproject.toml deleted file mode 100644 index 8646736893f..00000000000 --- a/templates/rag-momento-vector-index/pyproject.toml +++ /dev/null @@ -1,39 +0,0 @@ -[tool.poetry] -name = "rag-momento-vector-index" -version = "0.0.1" -description = "RAG on momento vectorDB" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -momento = "^1.12.0" -openai = "<2" -tiktoken = "^0.5.1" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.poetry.group.index.dependencies] -bs4 = "^0.0.1" - -[tool.poetry.group.test.dependencies] -langserve = "^0.0.21" - -[tool.langserve] -export_module = "rag_momento_vector_index" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Momento"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-momento-vector-index/rag_momento_vector_index/__init__.py b/templates/rag-momento-vector-index/rag_momento_vector_index/__init__.py deleted file mode 100644 index 36705521e99..00000000000 --- a/templates/rag-momento-vector-index/rag_momento_vector_index/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_momento_vector_index.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py b/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py deleted file mode 100644 index 71d6adc2273..00000000000 --- a/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py +++ /dev/null @@ -1,62 +0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import MomentoVectorIndex -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough -from momento import ( - CredentialProvider, - PreviewVectorIndexClient, - VectorIndexConfigurations, -) - -API_KEY_ENV_VAR_NAME = "MOMENTO_API_KEY" -if os.environ.get(API_KEY_ENV_VAR_NAME, None) is None: - raise Exception(f"Missing `{API_KEY_ENV_VAR_NAME}` environment variable.") - -MOMENTO_INDEX_NAME = os.environ.get("MOMENTO_INDEX_NAME", "langchain-test") - -### Sample Ingest Code - this populates the vector index with data -### Run this on the first time to seed with data -# from rag_momento_vector_index import ingest -# ingest.load(API_KEY_ENV_VAR_NAME, MOMENTO_INDEX_NAME) - - -vectorstore = MomentoVectorIndex( - embedding=OpenAIEmbeddings(), - client=PreviewVectorIndexClient( - configuration=VectorIndexConfigurations.Default.latest(), - credential_provider=CredentialProvider.from_environment_variable( - API_KEY_ENV_VAR_NAME - ), - ), - index_name=MOMENTO_INDEX_NAME, -) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - {"context": retriever, "question": RunnablePassthrough()} - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class 
Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py b/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py deleted file mode 100644 index 91d044dba23..00000000000 --- a/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py +++ /dev/null @@ -1,38 +0,0 @@ -### Ingest code - you may need to run this the first time -import os - -from langchain_community.document_loaders import WebBaseLoader -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import MomentoVectorIndex -from langchain_text_splitters import RecursiveCharacterTextSplitter -from momento import ( - CredentialProvider, - PreviewVectorIndexClient, - VectorIndexConfigurations, -) - - -def load(API_KEY_ENV_VAR_NAME: str, index_name: str) -> None: - if os.environ.get(API_KEY_ENV_VAR_NAME, None) is None: - raise Exception(f"Missing `{API_KEY_ENV_VAR_NAME}` environment variable.") - - # Load - loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") - data = loader.load() - - # Split - text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) - all_splits = text_splitter.split_documents(data) - - # Add to vectorDB - MomentoVectorIndex.from_documents( - all_splits, - embedding=OpenAIEmbeddings(), - client=PreviewVectorIndexClient( - configuration=VectorIndexConfigurations.Default.latest(), - credential_provider=CredentialProvider.from_environment_variable( - API_KEY_ENV_VAR_NAME - ), - ), - index_name=index_name, - ) diff --git a/templates/rag-momento-vector-index/tests/__init__.py b/templates/rag-momento-vector-index/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-mongo/LICENSE b/templates/rag-mongo/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-mongo/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-mongo/README.md b/templates/rag-mongo/README.md deleted file mode 100644 index 20717161269..00000000000 --- a/templates/rag-mongo/README.md +++ /dev/null @@ -1,169 +0,0 @@ -# RAG - MongoDB - -This template performs RAG using `MongoDB` and `OpenAI`. - -## Environment Setup - -You should export two environment variables, one being your `MongoDB` URI, the other being your OpenAI API KEY. 
-If you do not have a MongoDB URI, see the `Setup Mongo` section at the bottom for instructions on how to do so. - -```shell -export MONGO_URI=... -export OPENAI_API_KEY=... -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-mongo -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-mongo -``` - -And add the following code to your `server.py` file: -```python -from rag_mongo import chain as rag_mongo_chain - -add_routes(app, rag_mongo_chain, path="/rag-mongo") -``` - -If you want to set up an ingestion pipeline, you can add the following code to your `server.py` file: -```python -from rag_mongo import ingest as rag_mongo_ingest - -add_routes(app, rag_mongo_ingest, path="/rag-mongo-ingest") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you DO NOT already have a Mongo Search Index you want to connect to, see `MongoDB Setup` section below before proceeding. - -If you DO have a MongoDB Search index you want to connect to, edit the connection details in `rag_mongo/chain.py` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-mongo/playground](http://127.0.0.1:8000/rag-mongo/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-mongo") -``` - -For additional context, please refer to [this notebook](https://colab.research.google.com/drive/1cr2HBAHyBmwKUerJq2if0JaNhy-hIq7I#scrollTo=TZp7_CBfxTOB). - - -## MongoDB Setup - -Use this step if you need to setup your MongoDB account and ingest data. -We will first follow the standard MongoDB Atlas setup instructions [here](https://www.mongodb.com/docs/atlas/getting-started/). - -1. Create an account (if not already done) -2. Create a new project (if not already done) -3. Locate your MongoDB URI. - -This can be done by going to the deployment overview page and connecting to you database - -![Screenshot highlighting the 'Connect' button in MongoDB Atlas.](_images/connect.png) "MongoDB Atlas Connect Button" - -We then look at the drivers available - -![Screenshot showing the MongoDB Atlas drivers section for connecting to the database.](_images/driver.png) "MongoDB Atlas Drivers Section" - -Among which we will see our URI listed - -![Screenshot displaying an example of a MongoDB URI in the connection instructions.](_images/uri.png) "MongoDB URI Example" - -Let's then set that as an environment variable locally: - -```shell -export MONGO_URI=... -``` - -4. Let's also set an environment variable for OpenAI (which we will use as an LLM) - -```shell -export OPENAI_API_KEY=... -``` - -5. 
Let's now ingest some data! We can do that by moving into this directory and running the code in `ingest.py`, eg: - -```shell -python ingest.py -``` - -Note that you can (and should!) change this to ingest data of your choice - -6. We now need to set up a vector index on our data. - -We can first connect to the cluster where our database lives - -![Screenshot of the MongoDB Atlas interface showing the cluster overview with a 'Connect' button.](_images/cluster.png) "MongoDB Atlas Cluster Overview" - -We can then navigate to where all our collections are listed - -![Screenshot of the MongoDB Atlas interface showing the collections overview within a database.](_images/collections.png) "MongoDB Atlas Collections Overview" - -We can then find the collection we want and look at the search indexes for that collection - -![Screenshot showing the search indexes section in MongoDB Atlas for a specific collection.](_images/search-indexes.png) "MongoDB Atlas Search Indexes" - -That should likely be empty, and we want to create a new one: - -![Screenshot highlighting the 'Create Index' button in MongoDB Atlas.](_images/create.png) "MongoDB Atlas Create Index Button" - -We will use the JSON editor to create it - -![Screenshot showing the JSON Editor option for creating a search index in MongoDB Atlas.](_images/json_editor.png) "MongoDB Atlas JSON Editor Option" - -And we will paste the following JSON in: - -```text - { - "mappings": { - "dynamic": true, - "fields": { - "embedding": { - "dimensions": 1536, - "similarity": "cosine", - "type": "knnVector" - } - } - } - } -``` -![Screenshot of the JSON configuration for a search index in MongoDB Atlas.](_images/json.png) "MongoDB Atlas Search Index JSON Configuration" - -From there, hit "Next" and then "Create Search Index". It will take a little bit but you should then have an index over your data! 
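Once the index is active, you can verify retrieval end to end with a small sketch that mirrors the connection details in `rag_mongo/chain.py` (database `langchain-test-2`, collection `test`, index `default`); the query is illustrative:

```python
# Quick end-to-end check that the Atlas vector index is queryable after ingestion.
# Connection details mirror rag_mongo/chain.py; MONGO_URI and OPENAI_API_KEY must be set.
import os

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from pymongo import MongoClient

client = MongoClient(os.environ["MONGO_URI"])
collection = client["langchain-test-2"]["test"]

vectorstore = MongoDBAtlasVectorSearch(
    collection=collection,
    embedding=OpenAIEmbeddings(disallowed_special=()),
    index_name="default",
)
for doc in vectorstore.as_retriever().invoke("How does GPT-4 perform on exams?"):
    print(doc.page_content[:80])
```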
\ No newline at end of file diff --git a/templates/rag-mongo/_images/cluster.png b/templates/rag-mongo/_images/cluster.png deleted file mode 100644 index e94a4a1b772..00000000000 Binary files a/templates/rag-mongo/_images/cluster.png and /dev/null differ diff --git a/templates/rag-mongo/_images/collections.png b/templates/rag-mongo/_images/collections.png deleted file mode 100644 index 40f6cd2cdef..00000000000 Binary files a/templates/rag-mongo/_images/collections.png and /dev/null differ diff --git a/templates/rag-mongo/_images/connect.png b/templates/rag-mongo/_images/connect.png deleted file mode 100644 index 6460d9863e1..00000000000 Binary files a/templates/rag-mongo/_images/connect.png and /dev/null differ diff --git a/templates/rag-mongo/_images/create.png b/templates/rag-mongo/_images/create.png deleted file mode 100644 index 29b501de07f..00000000000 Binary files a/templates/rag-mongo/_images/create.png and /dev/null differ diff --git a/templates/rag-mongo/_images/driver.png b/templates/rag-mongo/_images/driver.png deleted file mode 100644 index 98f9276bed8..00000000000 Binary files a/templates/rag-mongo/_images/driver.png and /dev/null differ diff --git a/templates/rag-mongo/_images/json.png b/templates/rag-mongo/_images/json.png deleted file mode 100644 index 635a2e0c8f3..00000000000 Binary files a/templates/rag-mongo/_images/json.png and /dev/null differ diff --git a/templates/rag-mongo/_images/json_editor.png b/templates/rag-mongo/_images/json_editor.png deleted file mode 100644 index 47f69c57d41..00000000000 Binary files a/templates/rag-mongo/_images/json_editor.png and /dev/null differ diff --git a/templates/rag-mongo/_images/search-indexes.png b/templates/rag-mongo/_images/search-indexes.png deleted file mode 100644 index aba1b86682d..00000000000 Binary files a/templates/rag-mongo/_images/search-indexes.png and /dev/null differ diff --git a/templates/rag-mongo/_images/uri.png b/templates/rag-mongo/_images/uri.png deleted file mode 100644 index 958db8d8ca5..00000000000 Binary files a/templates/rag-mongo/_images/uri.png and /dev/null differ diff --git a/templates/rag-mongo/ingest.py b/templates/rag-mongo/ingest.py deleted file mode 100644 index 5c018a93836..00000000000 --- a/templates/rag-mongo/ingest.py +++ /dev/null @@ -1,35 +0,0 @@ -import os - -from langchain_community.document_loaders import PyPDFLoader -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import MongoDBAtlasVectorSearch -from langchain_text_splitters import RecursiveCharacterTextSplitter -from pymongo import MongoClient - -MONGO_URI = os.environ["MONGO_URI"] - -# Note that if you change this, you also need to change it in `rag_mongo/chain.py` -DB_NAME = "langchain-test-2" -COLLECTION_NAME = "test" -ATLAS_VECTOR_SEARCH_INDEX_NAME = "default" -EMBEDDING_FIELD_NAME = "embedding" -client = MongoClient(MONGO_URI) -db = client[DB_NAME] -MONGODB_COLLECTION = db[COLLECTION_NAME] - -if __name__ == "__main__": - # Load docs - loader = PyPDFLoader("https://arxiv.org/pdf/2303.08774.pdf") - data = loader.load() - - # Split docs - text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) - docs = text_splitter.split_documents(data) - - # Insert the documents in MongoDB Atlas Vector Search - _ = MongoDBAtlasVectorSearch.from_documents( - documents=docs, - embedding=OpenAIEmbeddings(disallowed_special=()), - collection=MONGODB_COLLECTION, - index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME, - ) diff --git a/templates/rag-mongo/pyproject.toml 
b/templates/rag-mongo/pyproject.toml deleted file mode 100644 index e7c3d764330..00000000000 --- a/templates/rag-mongo/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-mongo" -version = "0.1.0" -description = "RAG on MongDB" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -pymongo = ">=4.5.0" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_mongo" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["MongoDB", "OpenAI"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-mongo/rag_mongo.ipynb b/templates/rag-mongo/rag_mongo.ipynb deleted file mode 100644 index d609e1db08d..00000000000 --- a/templates/rag-mongo/rag_mongo.ipynb +++ /dev/null @@ -1,52 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag_mongo\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_pinecone = RemoteRunnable(\"http://0.0.0.0:8001/rag_mongo\")\n", - "rag_app_pinecone.invoke(\"How does agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-mongo/rag_mongo/__init__.py b/templates/rag-mongo/rag_mongo/__init__.py deleted file mode 100644 index 34839579610..00000000000 --- a/templates/rag-mongo/rag_mongo/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_mongo.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-mongo/rag_mongo/chain.py b/templates/rag-mongo/rag_mongo/chain.py deleted file mode 100644 index 39ca44160bb..00000000000 --- a/templates/rag-mongo/rag_mongo/chain.py +++ /dev/null @@ -1,83 +0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.document_loaders import PyPDFLoader -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import MongoDBAtlasVectorSearch -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import ( - RunnableLambda, - RunnableParallel, - RunnablePassthrough, -) -from langchain_text_splitters import RecursiveCharacterTextSplitter -from pymongo import MongoClient - -# Set DB -if os.environ.get("MONGO_URI", None) is None: - raise Exception("Missing `MONGO_URI` environment variable.") -MONGO_URI = os.environ["MONGO_URI"] - -DB_NAME = "langchain-test-2" -COLLECTION_NAME = "test" -ATLAS_VECTOR_SEARCH_INDEX_NAME = "default" - -client = MongoClient(MONGO_URI) -db = client[DB_NAME] 
-MONGODB_COLLECTION = db[COLLECTION_NAME] - -# Read from MongoDB Atlas Vector Search -vectorstore = MongoDBAtlasVectorSearch.from_connection_string( - MONGO_URI, - DB_NAME + "." + COLLECTION_NAME, - OpenAIEmbeddings(disallowed_special=()), - index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME, -) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) - - -def _ingest(url: str) -> dict: - loader = PyPDFLoader(url) - data = loader.load() - - # Split docs - text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) - docs = text_splitter.split_documents(data) - - # Insert the documents in MongoDB Atlas Vector Search - _ = MongoDBAtlasVectorSearch.from_documents( - documents=docs, - embedding=OpenAIEmbeddings(disallowed_special=()), - collection=MONGODB_COLLECTION, - index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME, - ) - return {} - - -ingest = RunnableLambda(_ingest) diff --git a/templates/rag-mongo/tests/__init__.py b/templates/rag-mongo/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-multi-index-fusion/.gitignore b/templates/rag-multi-index-fusion/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/rag-multi-index-fusion/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/rag-multi-index-fusion/LICENSE b/templates/rag-multi-index-fusion/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-multi-index-fusion/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-multi-index-fusion/README.md b/templates/rag-multi-index-fusion/README.md deleted file mode 100644 index 2f19df0a403..00000000000 --- a/templates/rag-multi-index-fusion/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# RAG - multiple indexes (Fusion) - -A QA application that queries multiple domain-specific retrievers and selects the most relevant documents from across all retrieved results. 
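In sketch form, the selection step embeds the question and every retrieved document, then keeps the documents whose embeddings are closest to the question. The snippet below is only a hedged illustration of that step (the template's `chain.py` contains the full implementation); the helper name is illustrative.

```python
import numpy as np
from langchain.utils.math import cosine_similarity
from langchain_community.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()


def select_top_docs(question: str, docs_by_source: dict, k: int = 5):
    """Rank documents pooled from all retrievers by similarity to the question."""
    names, docs = zip(
        *(
            (name, doc)
            for name, source_docs in docs_by_source.items()
            for doc in source_docs
        )
    )
    query_emb = embeddings.embed_query(question)
    doc_embs = embeddings.embed_documents([doc.page_content for doc in docs])
    scores = cosine_similarity([query_emb], doc_embs)[0]
    top = np.argsort(scores)[::-1][:k]  # highest similarity first
    return [(names[i], docs[i]) for i in top]
```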
- -## Environment Setup - -This application queries PubMed, ArXiv, Wikipedia, and [Kay AI](https://www.kay.ai) (for SEC filings). - -You will need to create a free Kay AI account and [get your API key here](https://www.kay.ai). -Then set environment variable: - -```bash -export KAY_API_KEY="" -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-multi-index-fusion -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-multi-index-fusion -``` - -And add the following code to your `server.py` file: -```python -from rag_multi_index_fusion import chain as rag_multi_index_fusion_chain - -add_routes(app, rag_multi_index_fusion_chain, path="/rag-multi-index-fusion") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-multi-index-fusion/playground](http://127.0.0.1:8000/rag-multi-index-fusion/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-multi-index-fusion") -``` diff --git a/templates/rag-multi-index-fusion/pyproject.toml b/templates/rag-multi-index-fusion/pyproject.toml deleted file mode 100644 index 4c698dfc398..00000000000 --- a/templates/rag-multi-index-fusion/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-multi-index-fusion" -version = "0.0.1" -description = "RAG with routing and fusion between different domain-specific retrievers" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -xmltodict = "^0.13.0" -kay = "^0.1.2" -wikipedia = "^1.4.0" -arxiv = "^2.0.0" -tiktoken = "^0.5.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_multi_index_fusion" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Wikipedia", "Arxiv"] -tags = ["vectordbs", "routing"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-multi-index-fusion/rag_multi_index_fusion/__init__.py b/templates/rag-multi-index-fusion/rag_multi_index_fusion/__init__.py deleted file mode 100644 index 18142984130..00000000000 --- a/templates/rag-multi-index-fusion/rag_multi_index_fusion/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_multi_index_fusion.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-multi-index-fusion/rag_multi_index_fusion/chain.py 
b/templates/rag-multi-index-fusion/rag_multi_index_fusion/chain.py deleted file mode 100644 index 48a42d088ac..00000000000 --- a/templates/rag-multi-index-fusion/rag_multi_index_fusion/chain.py +++ /dev/null @@ -1,102 +0,0 @@ -from operator import itemgetter - -import numpy as np -from langchain.retrievers import ( - ArxivRetriever, - KayAiRetriever, - PubMedRetriever, - WikipediaRetriever, -) -from langchain.utils.math import cosine_similarity -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import ( - RunnableParallel, - RunnablePassthrough, -) - -pubmed = PubMedRetriever(top_k_results=5).with_config(run_name="pubmed") -arxiv = ArxivRetriever(top_k_results=5).with_config(run_name="arxiv") -sec = KayAiRetriever.create( - dataset_id="company", data_types=["10-K"], num_contexts=5 -).with_config(run_name="sec_filings") -wiki = WikipediaRetriever(top_k_results=5, doc_content_chars_max=2000).with_config( - run_name="wiki" -) - -embeddings = OpenAIEmbeddings() - - -def fuse_retrieved_docs(input): - results_map = input["sources"] - query = input["question"] - embedded_query = embeddings.embed_query(query) - names, docs = zip( - *((name, doc) for name, docs in results_map.items() for doc in docs) - ) - embedded_docs = embeddings.embed_documents([doc.page_content for doc in docs]) - similarity = cosine_similarity( - [embedded_query], - embedded_docs, - ) - most_similar = np.flip(np.argsort(similarity[0]))[:5] - return [ - ( - names[i], - docs[i], - ) - for i in most_similar - ] - - -def format_named_docs(named_docs): - return "\n\n".join( - f"Source: {source}\n\n{doc.page_content}" for source, doc in named_docs - ) - - -system = """Answer the user question. Use the following sources to help \ -answer the question. If you don't know the answer say "I'm not sure, I couldn't \ -find information on {{topic}}." 
- -Sources: - -{sources}""" -prompt = ChatPromptTemplate.from_messages([("system", system), ("human", "{question}")]) - -retrieve_all = RunnableParallel( - {"ArXiv": arxiv, "Wikipedia": wiki, "PubMed": pubmed, "SEC 10-K Forms": sec} -).with_config(run_name="retrieve_all") - - -class Question(BaseModel): - __root__: str - - -answer_chain = ( - { - "question": itemgetter("question"), - "sources": lambda x: format_named_docs(x["sources"]), - } - | prompt - | ChatOpenAI(model="gpt-3.5-turbo-1106") - | StrOutputParser() -).with_config(run_name="answer") -chain = ( - ( - RunnableParallel( - {"question": RunnablePassthrough(), "sources": retrieve_all} - ).with_config(run_name="add_sources") - | RunnablePassthrough.assign(sources=fuse_retrieved_docs).with_config( - run_name="fuse" - ) - | RunnablePassthrough.assign(answer=answer_chain).with_config( - run_name="add_answer" - ) - ) - .with_config(run_name="QA with fused results") - .with_types(input_type=Question) -) diff --git a/templates/rag-multi-index-fusion/tests/__init__.py b/templates/rag-multi-index-fusion/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-multi-index-router/.gitignore b/templates/rag-multi-index-router/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/rag-multi-index-router/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/rag-multi-index-router/LICENSE b/templates/rag-multi-index-router/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-multi-index-router/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-multi-index-router/README.md b/templates/rag-multi-index-router/README.md deleted file mode 100644 index 524417a0927..00000000000 --- a/templates/rag-multi-index-router/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# RAG - multiple indexes (Routing) - -A QA application that routes between different domain-specific retrievers given a user question. - -## Environment Setup - -This application queries PubMed, ArXiv, Wikipedia, and [Kay AI](https://www.kay.ai) (for SEC filings). - -You will need to create a free Kay AI account and [get your API key here](https://www.kay.ai). 
-Then set environment variable: - -```bash -export KAY_API_KEY="" -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-multi-index-router -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-multi-index-router -``` - -And add the following code to your `server.py` file: -```python -from rag_multi_index_router import chain as rag_multi_index_router_chain - -add_routes(app, rag_multi_index_router_chain, path="/rag-multi-index-router") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-multi-index-router/playground](http://127.0.0.1:8000/rag-multi-index-router/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-multi-index-router") -``` \ No newline at end of file diff --git a/templates/rag-multi-index-router/pyproject.toml b/templates/rag-multi-index-router/pyproject.toml deleted file mode 100644 index 370da008fa4..00000000000 --- a/templates/rag-multi-index-router/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-multi-index-router" -version = "0.0.1" -description = "RAG with routing between different domain-specific retrievers" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -xmltodict = "^0.13.0" -kay = "^0.1.2" -wikipedia = "^1.4.0" -arxiv = "^2.0.0" -tiktoken = "^0.5.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_multi_index_router" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Wikipedia", "Arxiv"] -tags = ["vectordbs", "routing"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-multi-index-router/rag_multi_index_router/__init__.py b/templates/rag-multi-index-router/rag_multi_index_router/__init__.py deleted file mode 100644 index 6feb801d49e..00000000000 --- a/templates/rag-multi-index-router/rag_multi_index_router/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_multi_index_router.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-multi-index-router/rag_multi_index_router/chain.py b/templates/rag-multi-index-router/rag_multi_index_router/chain.py deleted file mode 100644 index 93d33c2ade6..00000000000 --- a/templates/rag-multi-index-router/rag_multi_index_router/chain.py +++ /dev/null @@ -1,115 +0,0 
@@ -from operator import itemgetter -from typing import Literal - -from langchain.retrievers import ( - ArxivRetriever, - KayAiRetriever, - PubMedRetriever, - WikipediaRetriever, -) -from langchain.utils.openai_functions import convert_pydantic_to_openai_function -from langchain_community.chat_models import ChatOpenAI -from langchain_core.output_parsers import StrOutputParser -from langchain_core.output_parsers.openai_functions import ( - PydanticAttrOutputFunctionsParser, -) -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import ( - RouterRunnable, - RunnableParallel, - RunnablePassthrough, -) - -pubmed = PubMedRetriever(top_k_results=5).with_config(run_name="pubmed") -arxiv = ArxivRetriever(top_k_results=5).with_config(run_name="arxiv") -sec = KayAiRetriever.create( - dataset_id="company", data_types=["10-K"], num_contexts=5 -).with_config(run_name="sec_filings") -wiki = WikipediaRetriever(top_k_results=5, doc_content_chars_max=2000).with_config( - run_name="wiki" -) - -llm = ChatOpenAI(model="gpt-3.5-turbo") - - -class Search(BaseModel): - """Search for relevant documents by question topic.""" - - question_resource: Literal[ - "medical paper", "scientific paper", "public company finances report", "general" - ] = Field( - ..., - description=( - "The type of resource that would best help answer the user's question. " - "If none of the types are relevant return 'general'." - ), - ) - - -retriever_name = { - "medical paper": "PubMed", - "scientific paper": "ArXiv", - "public company finances report": "SEC filings (Kay AI)", - "general": "Wikipedia", -} - -classifier = ( - llm.bind( - functions=[convert_pydantic_to_openai_function(Search)], - function_call={"name": "Search"}, - ) - | PydanticAttrOutputFunctionsParser( - pydantic_schema=Search, attr_name="question_resource" - ) - | retriever_name.get -) - -retriever_map = { - "PubMed": pubmed, - "ArXiv": arxiv, - "SEC filings (Kay AI)": sec, - "Wikipedia": wiki, -} -router_retriever = RouterRunnable(runnables=retriever_map) - - -def format_docs(docs): - return "\n\n".join(f"Source {i}:\n{doc.page_content}" for i, doc in enumerate(docs)) - - -system = """Answer the user question. Use the following sources to help \ -answer the question. If you don't know the answer say "I'm not sure, I couldn't \ -find information on {{topic}}." 
- -Sources: - -{sources}""" -prompt = ChatPromptTemplate.from_messages([("system", system), ("human", "{question}")]) - - -class Question(BaseModel): - __root__: str - - -retriever_chain = ( - {"input": itemgetter("question"), "key": itemgetter("retriever_choice")} - | router_retriever - | format_docs -).with_config(run_name="retrieve") -answer_chain = ( - {"sources": retriever_chain, "question": itemgetter("question")} - | prompt - | llm - | StrOutputParser() -) -chain = ( - ( - RunnableParallel( - question=RunnablePassthrough(), retriever_choice=classifier - ).with_config(run_name="classify") - | RunnablePassthrough.assign(answer=answer_chain).with_config(run_name="answer") - ) - .with_config(run_name="QA with router") - .with_types(input_type=Question) -) diff --git a/templates/rag-multi-index-router/tests/__init__.py b/templates/rag-multi-index-router/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-multi-modal-local/.gitignore b/templates/rag-multi-modal-local/.gitignore deleted file mode 100644 index f95995b3698..00000000000 --- a/templates/rag-multi-modal-local/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -docs/img_*.jpg -chroma_db_multi_modal \ No newline at end of file diff --git a/templates/rag-multi-modal-local/LICENSE b/templates/rag-multi-modal-local/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-multi-modal-local/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-multi-modal-local/README.md b/templates/rag-multi-modal-local/README.md deleted file mode 100644 index ed3fccda702..00000000000 --- a/templates/rag-multi-modal-local/README.md +++ /dev/null @@ -1,126 +0,0 @@ -# RAG - Ollama, Nomic, Chroma - multi-modal, local - -Visual search is a familiar application to many with iPhones or Android devices. It allows user to search photos using natural language. - -With the release of open source, multi-modal LLMs it's possible to build this kind of application for yourself for your own private photo collection. - -This template demonstrates how to perform private visual search and question-answering over a collection of your photos. - -It uses [`nomic-embed-vision-v1`](https://huggingface.co/nomic-ai/nomic-embed-vision-v1) multi-modal embeddings to embed the images and `Ollama` for question-answering. 
-
-Given a question, relevant photos are retrieved and passed to an open source multi-modal LLM of your choice for answer synthesis.
-
-![Diagram illustrating the visual search process with nomic-embed-vision-v1 embeddings and multi-modal LLM for question-answering, featuring example food pictures and a matcha soft serve answer trace.](https://github.com/langchain-ai/langchain/assets/122662504/da543b21-052c-4c43-939e-d4f882a45d75) "Visual Search Process Diagram"
-
-## Input
-
-Supply a set of photos in the `/docs` directory.
-
-By default, this template has a toy collection of 3 food pictures.
-
-Example questions to ask can be:
-```
-What kind of soft serve did I have?
-```
-
-In practice, a larger corpus of images can be tested.
-
-To create an index of the images, run:
-```
-poetry install
-python ingest.py
-```
-
-## Storage
-
-This template will use [nomic-embed-vision-v1](https://huggingface.co/nomic-ai/nomic-embed-vision-v1) multi-modal embeddings to embed the images.
-
-The first time you run the app, it will automatically download the multimodal embedding model.
-
-
-You can choose alternative models in `rag_chroma_multi_modal/ingest.py`, such as `OpenCLIPEmbeddings`.
-```
-from langchain_experimental.open_clip import OpenCLIPEmbeddings
-
-embedding_function = OpenCLIPEmbeddings(
-    model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k"
-)
-
-vectorstore_mmembd = Chroma(
-    collection_name="multi-modal-rag",
-    persist_directory=str(re_vectorstore_path),
-    embedding_function=embedding_function,
-)
-```
-
-## LLM
-
-This template will use [Ollama](https://python.langchain.com/docs/integrations/chat/ollama#multi-modal).
-
-Download the latest version of Ollama: https://ollama.ai/
-
-Pull an open source multi-modal LLM: e.g., https://ollama.ai/library/bakllava
-
-```
-ollama pull bakllava
-```
-
-The app is by default configured for `bakllava`. But you can change this in `chain.py` and `ingest.py` for different downloaded models.
-
-## Usage
-
-To use this package, you should first have the LangChain CLI installed:
-
-```shell
-pip install -U langchain-cli
-```
-
-To create a new LangChain project and install this as the only package, you can do:
-
-```shell
-langchain app new my-app --package rag-chroma-multi-modal
-```
-
-If you want to add this to an existing project, you can just run:
-
-```shell
-langchain app add rag-chroma-multi-modal
-```
-
-And add the following code to your `server.py` file:
-```python
-from rag_chroma_multi_modal import chain as rag_chroma_multi_modal_chain
-
-add_routes(app, rag_chroma_multi_modal_chain, path="/rag-chroma-multi-modal")
-```
-
-(Optional) Let's now configure LangSmith.
-LangSmith will help us trace, monitor and debug LangChain applications.
-You can sign up for LangSmith [here](https://smith.langchain.com/).
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-chroma-multi-modal/playground](http://127.0.0.1:8000/rag-chroma-multi-modal/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-chroma-multi-modal") -``` diff --git a/templates/rag-multi-modal-local/docs/bread_bowl.jpg b/templates/rag-multi-modal-local/docs/bread_bowl.jpg deleted file mode 100644 index f5056727981..00000000000 Binary files a/templates/rag-multi-modal-local/docs/bread_bowl.jpg and /dev/null differ diff --git a/templates/rag-multi-modal-local/docs/in_and_out.jpg b/templates/rag-multi-modal-local/docs/in_and_out.jpg deleted file mode 100644 index fbf4d85cf62..00000000000 Binary files a/templates/rag-multi-modal-local/docs/in_and_out.jpg and /dev/null differ diff --git a/templates/rag-multi-modal-local/docs/matcha.jpg b/templates/rag-multi-modal-local/docs/matcha.jpg deleted file mode 100644 index 16dd935f624..00000000000 Binary files a/templates/rag-multi-modal-local/docs/matcha.jpg and /dev/null differ diff --git a/templates/rag-multi-modal-local/ingest.py b/templates/rag-multi-modal-local/ingest.py deleted file mode 100644 index c1e6ba88589..00000000000 --- a/templates/rag-multi-modal-local/ingest.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -from pathlib import Path - -from langchain_chroma import Chroma -from langchain_nomic import NomicMultimodalEmbeddings - -# Load images -img_dump_path = Path(__file__).parent / "docs/" -rel_img_dump_path = img_dump_path.relative_to(Path.cwd()) -image_uris = sorted( - [ - os.path.join(rel_img_dump_path, image_name) - for image_name in os.listdir(rel_img_dump_path) - if image_name.endswith(".jpg") - ] -) - -# Index -vectorstore = Path(__file__).parent / "chroma_db_multi_modal" -re_vectorstore_path = vectorstore.relative_to(Path.cwd()) - -# Load embedding function -print("Loading embedding function") -embedding = NomicMultimodalEmbeddings( - vision_model="nomic-embed-vision-v1", text_model="nomic-embed-text-v1" -) - -# Create chroma -vectorstore_mmembd = Chroma( - collection_name="multi-modal-rag", - persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"), - embedding_function=embedding, -) - -# Add images -print("Embedding images") -vectorstore_mmembd.add_images(uris=image_uris) diff --git a/templates/rag-multi-modal-local/pyproject.toml b/templates/rag-multi-modal-local/pyproject.toml deleted file mode 100644 index a9f214d210a..00000000000 --- a/templates/rag-multi-modal-local/pyproject.toml +++ /dev/null @@ -1,38 +0,0 @@ -[tool.poetry] -name = "rag-multi-modal-local" -version = "0.1.0" -description = "Multi-modal RAG using Chroma" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = ">=0.0.353,<0.2" -openai = "<2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -open-clip-torch = ">=2.23.0" -torch = ">=2.1.0" -langchain-experimental = ">=0.0.43" 
-langchain-community = ">=0.0.4" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_multi_modal_local" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["Ollama", "Chroma"] -tags = ["multi-modal"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-multi-modal-local/rag_multi_modal_local.ipynb b/templates/rag-multi-modal-local/rag_multi_modal_local.ipynb deleted file mode 100644 index 2b1245441a8..00000000000 --- a/templates/rag-multi-modal-local/rag_multi_modal_local.ipynb +++ /dev/null @@ -1,52 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/rag-multi-modal-local\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-multi-modal-local\")\n", - "rag_app.invoke(\" < keywords here > \")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-multi-modal-local/rag_multi_modal_local/__init__.py b/templates/rag-multi-modal-local/rag_multi_modal_local/__init__.py deleted file mode 100644 index 0cb30ad6b1c..00000000000 --- a/templates/rag-multi-modal-local/rag_multi_modal_local/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_multi_modal_local.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py b/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py deleted file mode 100644 index 215a85cdd7f..00000000000 --- a/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py +++ /dev/null @@ -1,122 +0,0 @@ -import base64 -import io -from pathlib import Path - -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOllama -from langchain_core.documents import Document -from langchain_core.messages import HumanMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableLambda, RunnablePassthrough -from langchain_nomic import NomicMultimodalEmbeddings -from PIL import Image - - -def resize_base64_image(base64_string, size=(128, 128)): - """ - Resize an image encoded as a Base64 string. - - :param base64_string: A Base64 encoded string of the image to be resized. - :param size: A tuple representing the new size (width, height) for the image. - :return: A Base64 encoded string of the resized image. 
- """ - img_data = base64.b64decode(base64_string) - img = Image.open(io.BytesIO(img_data)) - resized_img = img.resize(size, Image.LANCZOS) - buffered = io.BytesIO() - resized_img.save(buffered, format=img.format) - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def get_resized_images(docs): - """ - Resize images from base64-encoded strings. - - :param docs: A list of base64-encoded image to be resized. - :return: Dict containing a list of resized base64-encoded strings. - """ - b64_images = [] - for doc in docs: - if isinstance(doc, Document): - doc = doc.page_content - # Optional: re-size image - # resized_image = resize_base64_image(doc, size=(1280, 720)) - b64_images.append(doc) - return {"images": b64_images} - - -def img_prompt_func(data_dict, num_images=1): - """ - GPT-4V prompt for image analysis. - - :param data_dict: A dict with images and a user-provided question. - :param num_images: Number of images to include in the prompt. - :return: A list containing message objects for each image and the text prompt. - """ - messages = [] - if data_dict["context"]["images"]: - for image in data_dict["context"]["images"][:num_images]: - image_message = { - "type": "image_url", - "image_url": f"data:image/jpeg;base64,{image}", - } - messages.append(image_message) - text_message = { - "type": "text", - "text": ( - "You are a helpful assistant that gives a description of food pictures.\n" - "Give a detailed summary of the image.\n" - "Give reccomendations for similar foods to try.\n" - ), - } - messages.append(text_message) - return [HumanMessage(content=messages)] - - -def multi_modal_rag_chain(retriever): - """ - Multi-modal RAG chain, - - :param retriever: A function that retrieves the necessary context for the model. - :return: A chain of functions representing the multi-modal RAG process. 
- """ - # Initialize the multi-modal Large Language Model with specific parameters - model = ChatOllama(model="bakllava", temperature=0) - - # Define the RAG pipeline - chain = ( - { - "context": retriever | RunnableLambda(get_resized_images), - "question": RunnablePassthrough(), - } - | RunnableLambda(img_prompt_func) - | model - | StrOutputParser() - ) - - return chain - - -# Load chroma -vectorstore_mmembd = Chroma( - collection_name="multi-modal-rag", - persist_directory=str(Path(__file__).parent.parent / "chroma_db_multi_modal"), - embedding_function=NomicMultimodalEmbeddings( - vision_model="nomic-embed-vision-v1", text_model="nomic-embed-text-v1" - ), -) - -# Make retriever -retriever_mmembd = vectorstore_mmembd.as_retriever() - -# Create RAG chain -chain = multi_modal_rag_chain(retriever_mmembd) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-multi-modal-local/tests/__init__.py b/templates/rag-multi-modal-local/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-multi-modal-mv-local/.gitignore b/templates/rag-multi-modal-mv-local/.gitignore deleted file mode 100644 index 0854886a476..00000000000 --- a/templates/rag-multi-modal-mv-local/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -docs/img_*.jpg -chroma_db_multi_modal -multi_vector_retriever_metadata \ No newline at end of file diff --git a/templates/rag-multi-modal-mv-local/LICENSE b/templates/rag-multi-modal-mv-local/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-multi-modal-mv-local/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-multi-modal-mv-local/README.md b/templates/rag-multi-modal-mv-local/README.md deleted file mode 100644 index 0f8bd32b138..00000000000 --- a/templates/rag-multi-modal-mv-local/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# RAG - Ollama, Chroma - multi-modal, multi-vector, local - -Visual search is a familiar application to many with iPhones or Android devices. It allows user to search photos using natural language. - -With the release of open source, multi-modal LLMs it's possible to build this kind of application for yourself for your own private photo collection. - -This template demonstrates how to perform private visual search and question-answering over a collection of your photos. 
- -It uses an open source multi-modal LLM of your choice to create image summaries for each photos, embeds the summaries, and stores them in Chroma. - -Given a question, relevant photos are retrieved and passed to the multi-modal LLM for answer synthesis. - -![Diagram illustrating the visual search process with food pictures, captioning, a database, a question input, and the synthesis of an answer using a multi-modal LLM.](https://github.com/langchain-ai/langchain/assets/122662504/cd9b3d82-9b06-4a39-8490-7482466baf43) "Visual Search Process Diagram" - -## Input - -Supply a set of photos in the `/docs` directory. - -By default, this template has a toy collection of 3 food pictures. - -The app will look up and summarize photos based upon provided keywords or questions: -``` -What kind of ice cream did I have? -``` - -In practice, a larger corpus of images can be tested. - -To create an index of the images, run: -``` -poetry install -python ingest.py -``` - -## Storage - -Here is the process the template will use to create an index of the slides (see [blog](https://blog.langchain.dev/multi-modal-rag-template/)): - -* Given a set of images -* It uses a local multi-modal LLM ([bakllava](https://ollama.ai/library/bakllava)) to summarize each image -* Embeds the image summaries with a link to the original images -* Given a user question, it will relevant image(s) based on similarity between the image summary and user input (using Ollama embeddings) -* It will pass those images to bakllava for answer synthesis - -By default, this will use [LocalFileStore](https://python.langchain.com/docs/integrations/stores/file_system) to store images and Chroma to store summaries. - -## LLM and Embedding Models - -We will use [Ollama](https://python.langchain.com/docs/integrations/chat/ollama#multi-modal) for generating image summaries, embeddings, and the final image QA. - -Download the latest version of Ollama: https://ollama.ai/ - -Pull an open source multi-modal LLM: e.g., https://ollama.ai/library/bakllava - -Pull an open source embedding model: e.g., https://ollama.ai/library/llama2:7b - -``` -ollama pull bakllava -ollama pull llama2:7b -``` - -The app is by default configured for `bakllava`. But you can change this in `chain.py` and `ingest.py` for different downloaded models. - -The app will retrieve images based on similarity between the text input and the image summary, and pass the images to `bakllava`. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-multi-modal-mv-local -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-multi-modal-mv-local -``` - -And add the following code to your `server.py` file: -```python -from rag_multi_modal_mv_local import chain as rag_multi_modal_mv_local_chain - -add_routes(app, rag_multi_modal_mv_local_chain, path="/rag-multi-modal-mv-local") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-multi-modal-mv-local/playground](http://127.0.0.1:8000/rag-multi-modal-mv-local/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-multi-modal-mv-local") -``` diff --git a/templates/rag-multi-modal-mv-local/docs/bread_bowl.jpg b/templates/rag-multi-modal-mv-local/docs/bread_bowl.jpg deleted file mode 100644 index f5056727981..00000000000 Binary files a/templates/rag-multi-modal-mv-local/docs/bread_bowl.jpg and /dev/null differ diff --git a/templates/rag-multi-modal-mv-local/docs/in_and_out.jpg b/templates/rag-multi-modal-mv-local/docs/in_and_out.jpg deleted file mode 100644 index fbf4d85cf62..00000000000 Binary files a/templates/rag-multi-modal-mv-local/docs/in_and_out.jpg and /dev/null differ diff --git a/templates/rag-multi-modal-mv-local/docs/matcha.jpg b/templates/rag-multi-modal-mv-local/docs/matcha.jpg deleted file mode 100644 index 16dd935f624..00000000000 Binary files a/templates/rag-multi-modal-mv-local/docs/matcha.jpg and /dev/null differ diff --git a/templates/rag-multi-modal-mv-local/ingest.py b/templates/rag-multi-modal-mv-local/ingest.py deleted file mode 100644 index a8e6fe87880..00000000000 --- a/templates/rag-multi-modal-mv-local/ingest.py +++ /dev/null @@ -1,192 +0,0 @@ -import base64 -import io -import os -import uuid -from io import BytesIO -from pathlib import Path - -from langchain.retrievers.multi_vector import MultiVectorRetriever -from langchain.storage import LocalFileStore -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOllama -from langchain_community.embeddings import OllamaEmbeddings -from langchain_core.documents import Document -from langchain_core.messages import HumanMessage -from PIL import Image - - -def image_summarize(img_base64, prompt): - """ - Make image summary - - :param img_base64: Base64 encoded string for image - :param prompt: Text prompt for summarizatiomn - :return: Image summarization prompt - - """ - chat = ChatOllama(model="bakllava", temperature=0) - - msg = chat.invoke( - [ - HumanMessage( - content=[ - {"type": "text", "text": prompt}, - { - "type": "image_url", - "image_url": f"data:image/jpeg;base64,{img_base64}", - }, - ] - ) - ] - ) - return msg.content - - -def generate_img_summaries(img_base64_list): - """ - Generate summaries for images - - :param img_base64_list: Base64 encoded images - :return: List of image summaries and processed images - """ - - # Store image summaries - image_summaries = [] - processed_images = [] - - # Prompt - prompt = """Give a detailed summary of the image.""" - - # Apply summarization to images - for i, base64_image in enumerate(img_base64_list): - try: - image_summaries.append(image_summarize(base64_image, prompt)) - processed_images.append(base64_image) - except Exception as e: - print(f"Error with image {i+1}: {e}") - - return image_summaries, processed_images 
- - -def get_images(img_path): - """ - Extract images. - - :param img_path: A string representing the path to the images. - """ - # Get image URIs - pil_images = [ - Image.open(os.path.join(img_path, image_name)) - for image_name in os.listdir(img_path) - if image_name.endswith(".jpg") - ] - return pil_images - - -def resize_base64_image(base64_string, size=(128, 128)): - """ - Resize an image encoded as a Base64 string - - :param base64_string: Base64 string - :param size: Image size - :return: Re-sized Base64 string - """ - # Decode the Base64 string - img_data = base64.b64decode(base64_string) - img = Image.open(io.BytesIO(img_data)) - - # Resize the image - resized_img = img.resize(size, Image.LANCZOS) - - # Save the resized image to a bytes buffer - buffered = io.BytesIO() - resized_img.save(buffered, format=img.format) - - # Encode the resized image to Base64 - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def convert_to_base64(pil_image): - """ - Convert PIL images to Base64 encoded strings - - :param pil_image: PIL image - :return: Re-sized Base64 string - """ - - buffered = BytesIO() - pil_image.save(buffered, format="JPEG") # You can change the format if needed - img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") - # img_str = resize_base64_image(img_str, size=(831,623)) - return img_str - - -def create_multi_vector_retriever(vectorstore, image_summaries, images): - """ - Create retriever that indexes summaries, but returns raw images or texts - - :param vectorstore: Vectorstore to store embedded image sumamries - :param image_summaries: Image summaries - :param images: Base64 encoded images - :return: Retriever - """ - - # Initialize the storage layer for images - store = LocalFileStore( - str(Path(__file__).parent / "multi_vector_retriever_metadata") - ) - id_key = "doc_id" - - # Create the multi-vector retriever - retriever = MultiVectorRetriever( - vectorstore=vectorstore, - byte_store=store, - id_key=id_key, - ) - - # Helper function to add documents to the vectorstore and docstore - def add_documents(retriever, doc_summaries, doc_contents): - doc_ids = [str(uuid.uuid4()) for _ in doc_contents] - summary_docs = [ - Document(page_content=s, metadata={id_key: doc_ids[i]}) - for i, s in enumerate(doc_summaries) - ] - retriever.vectorstore.add_documents(summary_docs) - retriever.docstore.mset(list(zip(doc_ids, doc_contents))) - - add_documents(retriever, image_summaries, images) - - return retriever - - -# Load images -doc_path = Path(__file__).parent / "docs/" -rel_doc_path = doc_path.relative_to(Path.cwd()) -print("Read images") -pil_images = get_images(rel_doc_path) - -# Convert to b64 -images_base_64 = [convert_to_base64(i) for i in pil_images] - -# Image summaries -print("Generate image summaries") -image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) - -# The vectorstore to use to index the images summaries -vectorstore_mvr = Chroma( - collection_name="image_summaries", - persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"), - embedding_function=OllamaEmbeddings(model="llama2:7b"), -) - -# Create documents -images_base_64_processed_documents = [ - Document(page_content=i) for i in images_base_64_processed -] - -# Create retriever -retriever_multi_vector_img = create_multi_vector_retriever( - vectorstore_mvr, - image_summaries, - images_base_64_processed_documents, -) diff --git a/templates/rag-multi-modal-mv-local/pyproject.toml b/templates/rag-multi-modal-mv-local/pyproject.toml deleted file 
mode 100644 index 737316d4227..00000000000 --- a/templates/rag-multi-modal-mv-local/pyproject.toml +++ /dev/null @@ -1,38 +0,0 @@ -[tool.poetry] -name = "rag-multi-modal-mv-local" -version = "0.1.0" -description = "Multi-modal RAG using Chroma and multi-vector retriever" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = ">=0.0.353,<0.2" -openai = "<2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -pypdfium2 = ">=4.20.0" -langchain-experimental = ">=0.0.43" -pillow = ">=10.1.0" -langchain-community = ">=0.0.4" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_multi_modal_mv_local" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["Ollama", "Chroma"] -tags = ["multi-modal"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-multi-modal-mv-local/rag-multi-modal-mv-local.ipynb b/templates/rag-multi-modal-mv-local/rag-multi-modal-mv-local.ipynb deleted file mode 100644 index 37e42643fb6..00000000000 --- a/templates/rag-multi-modal-mv-local/rag-multi-modal-mv-local.ipynb +++ /dev/null @@ -1,52 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/rag-multi-modal-mv-local\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-multi-modal-mv-local\")\n", - "rag_app.invoke(\" < keywords here > \")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/__init__.py b/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/__init__.py deleted file mode 100644 index 396ac4e7ac5..00000000000 --- a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_multi_modal_mv_local.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py b/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py deleted file mode 100644 index 5e027b712bd..00000000000 --- a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py +++ /dev/null @@ -1,131 +0,0 @@ -import base64 -import io -from pathlib import Path - -from langchain.pydantic_v1 import BaseModel -from langchain.retrievers.multi_vector import MultiVectorRetriever -from langchain.storage import LocalFileStore -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOllama -from langchain_community.embeddings import OllamaEmbeddings -from langchain_core.documents import Document -from langchain_core.messages import HumanMessage -from langchain_core.output_parsers import StrOutputParser -from 
langchain_core.runnables import RunnableLambda, RunnablePassthrough -from PIL import Image - - -def resize_base64_image(base64_string, size=(128, 128)): - """ - Resize an image encoded as a Base64 string. - - :param base64_string: A Base64 encoded string of the image to be resized. - :param size: A tuple representing the new size (width, height) for the image. - :return: A Base64 encoded string of the resized image. - """ - img_data = base64.b64decode(base64_string) - img = Image.open(io.BytesIO(img_data)) - resized_img = img.resize(size, Image.LANCZOS) - buffered = io.BytesIO() - resized_img.save(buffered, format=img.format) - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def get_resized_images(docs): - """ - Resize images from base64-encoded strings. - - :param docs: A list of base64-encoded image to be resized. - :return: Dict containing a list of resized base64-encoded strings. - """ - b64_images = [] - for doc in docs: - if isinstance(doc, Document): - doc = doc.page_content - # Optional: re-size image - # resized_image = resize_base64_image(doc, size=(1280, 720)) - b64_images.append(doc) - return {"images": b64_images} - - -def img_prompt_func(data_dict, num_images=1): - """ - Ollama prompt for image analysis. - - :param data_dict: A dict with images and a user-provided question. - :param num_images: Number of images to include in the prompt. - :return: A list containing message objects for each image and the text prompt. - """ - messages = [] - if data_dict["context"]["images"]: - for image in data_dict["context"]["images"][:num_images]: - image_message = { - "type": "image_url", - "image_url": f"data:image/jpeg;base64,{image}", - } - messages.append(image_message) - text_message = { - "type": "text", - "text": ( - "You are a helpful assistant that gives a description of food pictures.\n" - "Give a detailed summary of the image.\n" - ), - } - messages.append(text_message) - return [HumanMessage(content=messages)] - - -def multi_modal_rag_chain(retriever): - """ - Multi-modal RAG chain, - - :param retriever: A function that retrieves the necessary context for the model. - :return: A chain of functions representing the multi-modal RAG process. 
- """ - # Initialize the multi-modal Large Language Model with specific parameters - model = ChatOllama(model="bakllava", temperature=0) - - # Define the RAG pipeline - chain = ( - { - "context": retriever | RunnableLambda(get_resized_images), - "question": RunnablePassthrough(), - } - | RunnableLambda(img_prompt_func) - | model - | StrOutputParser() - ) - - return chain - - -# Load chroma -vectorstore_mvr = Chroma( - collection_name="image_summaries", - persist_directory=str(Path(__file__).parent.parent / "chroma_db_multi_modal"), - embedding_function=OllamaEmbeddings(model="llama2:7b"), -) - -# Load file store -store = LocalFileStore( - str(Path(__file__).parent.parent / "multi_vector_retriever_metadata") -) -id_key = "doc_id" - -# Create the multi-vector retriever -retriever = MultiVectorRetriever( - vectorstore=vectorstore_mvr, - byte_store=store, - id_key=id_key, -) - -# Create RAG chain -chain = multi_modal_rag_chain(retriever) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-multi-modal-mv-local/tests/__init__.py b/templates/rag-multi-modal-mv-local/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-ollama-multi-query/LICENSE b/templates/rag-ollama-multi-query/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-ollama-multi-query/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-ollama-multi-query/README.md b/templates/rag-ollama-multi-query/README.md deleted file mode 100644 index ce0a412d4bc..00000000000 --- a/templates/rag-ollama-multi-query/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# RAG - Ollama - multi-query - -This template performs RAG using `Ollama` and `OpenAI` with a multi-query retriever. - -The `multi-query retriever` is an example of query transformation, generating multiple queries from different perspectives based on the user's input query. - -For each query, it retrieves a set of relevant documents and takes the unique union across all queries for answer synthesis. - -We use a private, local LLM for the narrow task of query generation to avoid excessive calls to a larger LLM API. - -See an example trace for Ollama LLM performing the query expansion [here](https://smith.langchain.com/public/8017d04d-2045-4089-b47f-f2d66393a999/r). 
- -But we use OpenAI for the more challenging task of answer synthesis (full trace example [here](https://smith.langchain.com/public/ec75793b-645b-498d-b855-e8d85e1f6738/r)). - -## Environment Setup - -To set up the environment, you need to download Ollama. - -Follow the instructions [here](https://python.langchain.com/docs/integrations/chat/ollama). - -You can choose the desired LLM with Ollama. - -This template uses `zephyr`, which can be accessed using `ollama pull zephyr`. - -There are many other options available [here](https://ollama.ai/library). - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first install the LangChain CLI: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this package, do: - -```shell -langchain app new my-app --package rag-ollama-multi-query -``` - -To add this package to an existing project, run: - -```shell -langchain app add rag-ollama-multi-query -``` - -And add the following code to your `server.py` file: - -```python -from rag_ollama_multi_query import chain as rag_ollama_multi_query_chain - -add_routes(app, rag_ollama_multi_query_chain, path="/rag-ollama-multi-query") -``` - -(Optional) Now, let's configure LangSmith. LangSmith will help us trace, monitor, and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000) - -You can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -You can access the playground at [http://127.0.0.1:8000/rag-ollama-multi-query/playground](http://127.0.0.1:8000/rag-ollama-multi-query/playground) - -To access the template from code, use: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-ollama-multi-query") -``` \ No newline at end of file diff --git a/templates/rag-ollama-multi-query/pyproject.toml b/templates/rag-ollama-multi-query/pyproject.toml deleted file mode 100644 index f7e9a07d718..00000000000 --- a/templates/rag-ollama-multi-query/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-ollama-multi-query" -version = "0.1.0" -description = "RAG with multi-query retriever using Ollama" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_ollama_multi_query" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Ollama"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-ollama-multi-query/rag_ollama_multi_query.ipynb b/templates/rag-ollama-multi-query/rag_ollama_multi_query.ipynb deleted file mode 100644 index bacae6d921c..00000000000 --- 
a/templates/rag-ollama-multi-query/rag_ollama_multi_query.ipynb +++ /dev/null @@ -1,63 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag_ollama_multi_query\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "8d61a866-f91f-41ec-a840-270b0c9c895c", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'The various types of agent memory mentioned in the context are:\\n\\n1. Explicit / declarative memory: This refers to memory of facts and events, including episodic memory (events and experiences) and semantic memory (facts and concepts).\\n\\n2. Implicit / procedural memory: This type of memory is unconscious and involves skills and routines that are performed automatically, like riding a bike or typing on a keyboard.\\n\\n3. Short-term memory: This is the in-context learning utilized by the model to learn.\\n\\n4. Long-term memory: This provides the agent with the capability to retain and recall information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n5. Sensory memory: This is the earliest stage of memory that retains impressions of sensory information (visual, auditory, etc) after the original stimuli have ended. It includes subcategories like iconic memory (visual), echoic memory (auditory), and haptic memory (touch).'" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_ollama = RemoteRunnable(\"http://0.0.0.0:8001/rag_ollama_multi_query\")\n", - "rag_app_ollama.invoke(\"What are the different types of agent memory?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-ollama-multi-query/rag_ollama_multi_query/__init__.py b/templates/rag-ollama-multi-query/rag_ollama_multi_query/__init__.py deleted file mode 100644 index a882c6d2acc..00000000000 --- a/templates/rag-ollama-multi-query/rag_ollama_multi_query/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_ollama_multi_query.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py b/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py deleted file mode 100644 index b06596c2653..00000000000 --- a/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py +++ /dev/null @@ -1,69 +0,0 @@ -from langchain.retrievers.multi_query import MultiQueryRetriever -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOllama, ChatOpenAI -from langchain_community.document_loaders import WebBaseLoader -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate, PromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from 
langchain_text_splitters import RecursiveCharacterTextSplitter - -# Load -loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -data = loader.load() - -# Split -text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -all_splits = text_splitter.split_documents(data) - -# Add to vectorDB -vectorstore = Chroma.from_documents( - documents=all_splits, - collection_name="rag-private", - embedding=OpenAIEmbeddings(), -) - - -QUERY_PROMPT = PromptTemplate( - input_variables=["question"], - template="""You are an AI language model assistant. Your task is to generate five - different versions of the given user question to retrieve relevant documents from - a vector database. By generating multiple perspectives on the user question, your - goal is to help the user overcome some of the limitations of the distance-based - similarity search. Provide these alternative questions separated by newlines. - Original question: {question}""", -) - -# Add the LLM downloaded from Ollama -ollama_llm = "zephyr" -llm = ChatOllama(model=ollama_llm) - -# Run -retriever = MultiQueryRetriever.from_llm( - vectorstore.as_retriever(), llm, prompt=QUERY_PROMPT -) # "lines" is the key (attribute name) of the parsed output - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-ollama-multi-query/tests/__init__.py b/templates/rag-ollama-multi-query/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-opensearch/.gitignore b/templates/rag-opensearch/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/rag-opensearch/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/rag-opensearch/LICENSE b/templates/rag-opensearch/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-opensearch/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
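A recurring detail in the chain modules above and below is the closing `with_types(input_type=Question)` call: a pydantic model with a single `__root__: str` field tells LangServe that the served endpoint accepts a bare string rather than a dict. A minimal, self-contained sketch of that pattern (the prompt and model here are placeholders, not taken from any template):

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnablePassthrough

chain = (
    {"question": RunnablePassthrough()}  # a plain string feeds the prompt variable
    | ChatPromptTemplate.from_template("Answer briefly: {question}")
    | ChatOpenAI()
    | StrOutputParser()
)


class Question(BaseModel):
    __root__: str  # LangServe then exposes a single string input in the playground


chain = chain.with_types(input_type=Question)
# chain.invoke("What does a multi-query retriever do?")
```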
diff --git a/templates/rag-opensearch/README.md b/templates/rag-opensearch/README.md deleted file mode 100644 index d7cb7a1a1d8..00000000000 --- a/templates/rag-opensearch/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# RAG - OpenSearch - -This template performs RAG using [OpenSearch](https://python.langchain.com/docs/integrations/vectorstores/opensearch). - -## Environment Setup - -Set the following environment variables. - -- `OPENAI_API_KEY` - To access OpenAI Embeddings and Models. - -And optionally set the OpenSearch ones if not using defaults: - -- `OPENSEARCH_URL` - URL of the hosted OpenSearch Instance -- `OPENSEARCH_USERNAME` - User name for the OpenSearch instance -- `OPENSEARCH_PASSWORD` - Password for the OpenSearch instance -- `OPENSEARCH_INDEX_NAME` - Name of the index - -To run the default OpenSearch instance in docker, you can use the command -```shell -docker run -p 9200:9200 -p 9600:9600 -e "discovery.type=single-node" --name opensearch-node -d opensearchproject/opensearch:latest -``` - -Note: To load dummy index named `langchain-test` with dummy documents, run `python dummy_index_setup.py` in the package - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-opensearch -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-opensearch -``` - -And add the following code to your `server.py` file: -```python -from rag_opensearch import chain as rag_opensearch_chain - -add_routes(app, rag_opensearch_chain, path="/rag-opensearch") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-opensearch/playground](http://127.0.0.1:8000/rag-opensearch/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-opensearch") -``` diff --git a/templates/rag-opensearch/dummy_data.txt b/templates/rag-opensearch/dummy_data.txt deleted file mode 100644 index 45539c4de01..00000000000 --- a/templates/rag-opensearch/dummy_data.txt +++ /dev/null @@ -1,19 +0,0 @@ -[INFO] Initializing machine learning training job. Model: Convolutional Neural Network Dataset: MNIST Hyperparameters: ; - Learning Rate: 0.001; - Batch Size: 64 -[INFO] Loading training data. Training data loaded successfully. Number of training samples: 60,000 -[INFO] Loading validation data. Validation data loaded successfully. Number of validation samples: 10,000 -[INFO] Training started. Epoch 1/10; - Loss: 0.532; - Accuracy: 0.812 Epoch 2/10; - Loss: 0.398; - Accuracy: 0.874 Epoch 3/10; - Loss: 0.325; - Accuracy: 0.901 ... 
(training progress) Training completed. -[INFO] Validation started. Validation loss: 0.287 Validation accuracy: 0.915 Model performance meets validation criteria. Saving the model. -[INFO] Testing the trained model. Test loss: 0.298 Test accuracy: 0.910 -[INFO] Deploying the trained model to production. Model deployment successful. API endpoint: http://your-api-endpoint/predict -[INFO] Monitoring system initialized. Monitoring metrics:; - CPU Usage: 25%; - Memory Usage: 40%; - GPU Usage: 80% -[ALERT] High GPU Usage Detected! Scaling resources to handle increased load. -[INFO] Machine learning training job completed successfully. Total training time: 3 hours and 45 minutes. -[INFO] Cleaning up resources. Job artifacts removed. Training environment closed. -[INFO] Image processing web server started. Listening on port 8080. -[INFO] Received image processing request from client at IP address 192.168.1.100. Preprocessing image: resizing to 800x600 pixels. Image preprocessing completed successfully. -[INFO] Applying filters to enhance image details. Filters applied: sharpening, contrast adjustment. Image enhancement completed. -[INFO] Generating thumbnail for the processed image. Thumbnail generated successfully. -[INFO] Uploading processed image to the user's gallery. Image successfully added to the gallery. Image ID: 123456. -[INFO] Sending notification to the user: Image processing complete. Notification sent successfully. -[ERROR] Failed to process image due to corrupted file format. Informing the client about the issue. Client notified about the image processing failure. -[INFO] Image processing web server shutting down. Cleaning up resources. Server shutdown complete. \ No newline at end of file diff --git a/templates/rag-opensearch/dummy_index_setup.py b/templates/rag-opensearch/dummy_index_setup.py deleted file mode 100644 index bb658adf51e..00000000000 --- a/templates/rag-opensearch/dummy_index_setup.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -from openai import OpenAI -from opensearchpy import OpenSearch - -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") -OPENSEARCH_URL = os.getenv("OPENSEARCH_URL", "https://localhost:9200") -OPENSEARCH_USERNAME = os.getenv("OPENSEARCH_USERNAME", "admin") -OPENSEARCH_PASSWORD = os.getenv("OPENSEARCH_PASSWORD", "admin") -OPENSEARCH_INDEX_NAME = os.getenv("OPENSEARCH_INDEX_NAME", "langchain-test") - -with open("dummy_data.txt") as f: - docs = [line.strip() for line in f.readlines()] - - -client_oai = OpenAI(api_key=OPENAI_API_KEY) - - -client = OpenSearch( - hosts=[OPENSEARCH_URL], - http_auth=(OPENSEARCH_USERNAME, OPENSEARCH_PASSWORD), - use_ssl=True, - verify_certs=False, -) - -# Define the index settings and mappings -index_settings = { - "settings": { - "index": {"knn": True, "number_of_shards": 1, "number_of_replicas": 0} - }, - "mappings": { - "properties": { - "vector_field": { - "type": "knn_vector", - "dimension": 1536, - "method": {"name": "hnsw", "space_type": "l2", "engine": "faiss"}, - } - } - }, -} - -response = client.indices.create(index=OPENSEARCH_INDEX_NAME, body=index_settings) - -print(response) - - -# Insert docs - - -for each in docs: - res = client_oai.embeddings.create(input=each, model="text-embedding-ada-002") - - document = { - "vector_field": res.data[0].embedding, - "text": each, - } - - response = client.index(index=OPENSEARCH_INDEX_NAME, body=document, refresh=True) - - print(response) diff --git a/templates/rag-opensearch/pyproject.toml b/templates/rag-opensearch/pyproject.toml deleted file mode 100644 index 
28aec754a3a..00000000000 --- a/templates/rag-opensearch/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[tool.poetry] -name = "rag-opensearch" -version = "0.0.1" -description = "RAG template for OpenSearch" -authors = ["Kalyan Reddy "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "^0.28.1" -opensearch-py = "^2.0.0" -tiktoken = "^0.5.1" - - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_opensearch" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "OpenSearch" -integrations = ["OpenAI", "OpenSearch"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-opensearch/rag_opensearch.ipynb b/templates/rag-opensearch/rag_opensearch.ipynb deleted file mode 100644 index 4d913e6e279..00000000000 --- a/templates/rag-opensearch/rag_opensearch.ipynb +++ /dev/null @@ -1,35 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Connect to template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag_opensearch\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-opensearch\")\n", - "rag_app.invoke(\"What is the ip address used in the image processing logs\")" - ] - } - ], - "metadata": { - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/templates/rag-opensearch/rag_opensearch/__init__.py b/templates/rag-opensearch/rag_opensearch/__init__.py deleted file mode 100644 index fff93c96169..00000000000 --- a/templates/rag-opensearch/rag_opensearch/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_opensearch.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-opensearch/rag_opensearch/chain.py b/templates/rag-opensearch/rag_opensearch/chain.py deleted file mode 100644 index 992b35372e6..00000000000 --- a/templates/rag-opensearch/rag_opensearch/chain.py +++ /dev/null @@ -1,62 +0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores.opensearch_vector_search import ( - OpenSearchVectorSearch, -) -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") -OPENSEARCH_URL = os.getenv("OPENSEARCH_URL", "https://localhost:9200") -OPENSEARCH_USERNAME = os.getenv("OPENSEARCH_USERNAME", "admin") -OPENSEARCH_PASSWORD = os.getenv("OPENSEARCH_PASSWORD", "admin") -OPENSEARCH_INDEX_NAME = os.getenv("OPENSEARCH_INDEX_NAME", "langchain-test") - - -embedding_function = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY) - -vector_store = OpenSearchVectorSearch( - opensearch_url=OPENSEARCH_URL, - http_auth=(OPENSEARCH_USERNAME, OPENSEARCH_PASSWORD), - index_name=OPENSEARCH_INDEX_NAME, - embedding_function=embedding_function, - verify_certs=False, -) - - -retriever = vector_store.as_retriever() - - -def format_docs(docs): - return "\n\n".join([d.page_content for d in docs]) - - -# 
RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI(openai_api_key=OPENAI_API_KEY) -chain = ( - RunnableParallel( - {"context": retriever | format_docs, "question": RunnablePassthrough()} - ) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-opensearch/tests/__init__.py b/templates/rag-opensearch/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-pinecone-multi-query/LICENSE b/templates/rag-pinecone-multi-query/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-pinecone-multi-query/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-pinecone-multi-query/README.md b/templates/rag-pinecone-multi-query/README.md deleted file mode 100644 index 98758937ac8..00000000000 --- a/templates/rag-pinecone-multi-query/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# RAG - Pinecone - multi-query - -This template performs RAG using `Pinecone` and `OpenAI` with a multi-query retriever. - -It uses an LLM to generate multiple queries from different perspectives based on the user's input query. - -For each query, it retrieves a set of relevant documents and takes the unique union across all queries for answer synthesis. - -## Environment Setup - -This template uses Pinecone as a vectorstore and requires that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `PINECONE_INDEX` are set. - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first install the LangChain CLI: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this package, do: - -```shell -langchain app new my-app --package rag-pinecone-multi-query -``` - -To add this package to an existing project, run: - -```shell -langchain app add rag-pinecone-multi-query -``` - -And add the following code to your `server.py` file: - -```python -from rag_pinecone_multi_query import chain as rag_pinecone_multi_query_chain - -add_routes(app, rag_pinecone_multi_query_chain, path="/rag-pinecone-multi-query") -``` - -(Optional) Now, let's configure LangSmith. 
LangSmith will help us trace, monitor, and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000) - -You can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -You can access the playground at [http://127.0.0.1:8000/rag-pinecone-multi-query/playground](http://127.0.0.1:8000/rag-pinecone-multi-query/playground) - -To access the template from code, use: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-pinecone-multi-query") -``` \ No newline at end of file diff --git a/templates/rag-pinecone-multi-query/pyproject.toml b/templates/rag-pinecone-multi-query/pyproject.toml deleted file mode 100644 index b7e41a82909..00000000000 --- a/templates/rag-pinecone-multi-query/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-pinecone-multi-query" -version = "0.1.0" -description = "RAG with multi-query retriever using Pinecone" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -pinecone-client = ">=2.2.4" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_pinecone_multi_query" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Pinecone"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query.ipynb b/templates/rag-pinecone-multi-query/rag_pinecone_multi_query.ipynb deleted file mode 100644 index b1474820ad8..00000000000 --- a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query.ipynb +++ /dev/null @@ -1,63 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag_pinecone_multi_query\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "d774be2a", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'The different types of agent memory mentioned in the context are short-term memory, long-term memory, explicit/declarative memory, and implicit/procedural memory.'" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_pinecone = RemoteRunnable(\"http://0.0.0.0:8001/rag_pinecone_multi_query\")\n", - "rag_app_pinecone.invoke(\"What are the different types of agent memory\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": 
"text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/__init__.py b/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/__init__.py deleted file mode 100644 index 242620732ff..00000000000 --- a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_pinecone_multi_query.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py b/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py deleted file mode 100644 index 818fc3ec681..00000000000 --- a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py +++ /dev/null @@ -1,68 +0,0 @@ -import os - -from langchain.retrievers.multi_query import MultiQueryRetriever -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_pinecone import PineconeVectorStore - -if os.environ.get("PINECONE_API_KEY", None) is None: - raise Exception("Missing `PINECONE_API_KEY` environment variable.") - -if os.environ.get("PINECONE_ENVIRONMENT", None) is None: - raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.") - -PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test") - -### Ingest code - you may need to run this the first time -# Load -# from langchain_community.document_loaders import WebBaseLoader -# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -# data = loader.load() - -# # Split -# from langchain_text_splitters import RecursiveCharacterTextSplitter -# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -# all_splits = text_splitter.split_documents(data) - -# # Add to vectorDB -# vectorstore = PineconeVectorStore.from_documents( -# documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME -# ) -# retriever = vectorstore.as_retriever() - -# Set up index with multi query retriever -vectorstore = PineconeVectorStore.from_existing_index( - PINECONE_INDEX_NAME, OpenAIEmbeddings() -) -model = ChatOpenAI(temperature=0) -retriever = MultiQueryRetriever.from_llm( - retriever=vectorstore.as_retriever(), llm=model -) - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-pinecone-multi-query/tests/__init__.py b/templates/rag-pinecone-multi-query/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-pinecone-rerank/LICENSE b/templates/rag-pinecone-rerank/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-pinecone-rerank/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 
2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-pinecone-rerank/README.md b/templates/rag-pinecone-rerank/README.md deleted file mode 100644 index 0a8941cae32..00000000000 --- a/templates/rag-pinecone-rerank/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# RAG - Pinecone - rerank - -This template performs RAG using `Pinecone` and `OpenAI` along with [Cohere to perform re-ranking](https://txt.cohere.com/rerank/) on returned documents. - -`Re-ranking` provides a way to rank retrieved documents using specified filters or criteria. - -## Environment Setup - -This template uses Pinecone as a vectorstore and requires that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `PINECONE_INDEX` are set. - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -Set the `COHERE_API_KEY` environment variable to access the Cohere ReRank. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-pinecone-rerank -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-pinecone-rerank -``` - -And add the following code to your `server.py` file: -```python -from rag_pinecone_rerank import chain as rag_pinecone_rerank_chain - -add_routes(app, rag_pinecone_rerank_chain, path="/rag-pinecone-rerank") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-pinecone-rerank/playground](http://127.0.0.1:8000/rag-pinecone-rerank/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-pinecone-rerank") -``` diff --git a/templates/rag-pinecone-rerank/pyproject.toml b/templates/rag-pinecone-rerank/pyproject.toml deleted file mode 100644 index f4ce94727b9..00000000000 --- a/templates/rag-pinecone-rerank/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[tool.poetry] -name = "rag-pinecone-rerank" -version = "0.1.0" -description = "" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -pinecone-client = ">=2.2.4" -cohere = ">=4.32" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_pinecone_rerank" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Pinecone", "Cohere"] -tags = ["vectordbs","post-processing"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-pinecone-rerank/rag_pinecone_rerank.ipynb b/templates/rag-pinecone-rerank/rag_pinecone_rerank.ipynb deleted file mode 100644 index 3ddccd24eaa..00000000000 --- a/templates/rag-pinecone-rerank/rag_pinecone_rerank.ipynb +++ /dev/null @@ -1,58 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to template" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d774be2a", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'The agent memory consists of two components: short-term memory and long-term memory. The short-term memory is used for in-context learning and allows the model to learn from its experiences. 
The long-term memory enables the agent to retain and recall an infinite amount of information over extended periods by leveraging an external vector store and fast retrieval.'" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_pinecone = RemoteRunnable(\"http://localhost:8001/rag_pinecone_rerank\")\n", - "rag_app_pinecone.invoke(\"How does agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-pinecone-rerank/rag_pinecone_rerank/__init__.py b/templates/rag-pinecone-rerank/rag_pinecone_rerank/__init__.py deleted file mode 100644 index eb291ce56a7..00000000000 --- a/templates/rag-pinecone-rerank/rag_pinecone_rerank/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_pinecone_rerank.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py b/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py deleted file mode 100644 index e073fa71174..00000000000 --- a/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py +++ /dev/null @@ -1,75 +0,0 @@ -import os - -from langchain.retrievers import ContextualCompressionRetriever -from langchain.retrievers.document_compressors import CohereRerank -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_pinecone import PineconeVectorStore - -if os.environ.get("PINECONE_API_KEY", None) is None: - raise Exception("Missing `PINECONE_API_KEY` environment variable.") - -if os.environ.get("PINECONE_ENVIRONMENT", None) is None: - raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.") - -PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test") - -### Ingest code - you may need to run this the first time -# # Load -# from langchain_community.document_loaders import WebBaseLoader -# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -# data = loader.load() - -# # Split -# from langchain_text_splitters import RecursiveCharacterTextSplitter -# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -# all_splits = text_splitter.split_documents(data) - -# # Add to vectorDB -# vectorstore = PineconeVectorStore.from_documents( -# documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME -# ) -# retriever = vectorstore.as_retriever() - -vectorstore = PineconeVectorStore.from_existing_index( - PINECONE_INDEX_NAME, OpenAIEmbeddings() -) - -# Get k=10 docs -retriever = vectorstore.as_retriever(search_kwargs={"k": 10}) - -# Re-rank -compressor = CohereRerank() -compression_retriever = ContextualCompressionRetriever( - base_compressor=compressor, base_retriever=retriever -) - -# RAG prompt -template = """Answer the question based only on the 
following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - RunnableParallel( - {"context": compression_retriever, "question": RunnablePassthrough()} - ) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-pinecone-rerank/tests/__init__.py b/templates/rag-pinecone-rerank/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-pinecone/LICENSE b/templates/rag-pinecone/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-pinecone/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-pinecone/README.md b/templates/rag-pinecone/README.md deleted file mode 100644 index 787e0e6c969..00000000000 --- a/templates/rag-pinecone/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# RAG - Pinecone - -This template performs RAG using `Pinecone` and `OpenAI`. - -## Environment Setup - -This template uses Pinecone as a vectorstore and requires that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `PINECONE_INDEX` are set. - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-pinecone -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-pinecone -``` - -And add the following code to your `server.py` file: -```python -from rag_pinecone import chain as rag_pinecone_chain - -# Be careful with this, in the console, when you create the project add_routes(app, rag_pinecone_chain, path="\rag-pinecone") appears with a backslash, the correct route is "/rag-pinecone" -add_routes(app, rag_pinecone_chain, path="/rag-pinecone") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-pinecone/playground](http://127.0.0.1:8000/rag-pinecone/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-pinecone") -``` diff --git a/templates/rag-pinecone/pyproject.toml b/templates/rag-pinecone/pyproject.toml deleted file mode 100644 index 7790193755d..00000000000 --- a/templates/rag-pinecone/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-pinecone" -version = "0.1.0" -description = "RAG using Pinecone" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -pinecone-client = ">=2.2.4" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_pinecone" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Pinecone"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-pinecone/rag_pinecone.ipynb b/templates/rag-pinecone/rag_pinecone.ipynb deleted file mode 100644 index 3361f28bad5..00000000000 --- a/templates/rag-pinecone/rag_pinecone.ipynb +++ /dev/null @@ -1,51 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag_pinecone\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_pinecone = RemoteRunnable(\"http://0.0.0.0:8001/rag_pinecone\")\n", - "rag_app_pinecone.invoke(\"How does agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-pinecone/rag_pinecone/__init__.py b/templates/rag-pinecone/rag_pinecone/__init__.py deleted file mode 100644 index 17d7773f46e..00000000000 --- a/templates/rag-pinecone/rag_pinecone/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_pinecone.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-pinecone/rag_pinecone/chain.py b/templates/rag-pinecone/rag_pinecone/chain.py deleted file mode 100644 index 437228ed42a..00000000000 --- a/templates/rag-pinecone/rag_pinecone/chain.py +++ /dev/null @@ -1,63 
+0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_pinecone import PineconeVectorStore - -if os.environ.get("PINECONE_API_KEY", None) is None: - raise Exception("Missing `PINECONE_API_KEY` environment variable.") - -if os.environ.get("PINECONE_ENVIRONMENT", None) is None: - raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.") - -PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test") - -### Ingest code - you may need to run this the first time -# Load -# from langchain_community.document_loaders import WebBaseLoader -# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -# data = loader.load() - -# # Split -# from langchain_text_splitters import RecursiveCharacterTextSplitter -# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -# all_splits = text_splitter.split_documents(data) - -# # Add to vectorDB -# vectorstore = PineconeVectorStore.from_documents( -# documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME -# ) -# retriever = vectorstore.as_retriever() - -vectorstore = PineconeVectorStore.from_existing_index( - PINECONE_INDEX_NAME, OpenAIEmbeddings() -) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-pinecone/tests/__init__.py b/templates/rag-pinecone/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-redis-multi-modal-multi-vector/.gitignore b/templates/rag-redis-multi-modal-multi-vector/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/rag-redis-multi-modal-multi-vector/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/rag-redis-multi-modal-multi-vector/LICENSE b/templates/rag-redis-multi-modal-multi-vector/LICENSE deleted file mode 100644 index fc0602feecd..00000000000 --- a/templates/rag-redis-multi-modal-multi-vector/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2024 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-redis-multi-modal-multi-vector/RAG-architecture.png b/templates/rag-redis-multi-modal-multi-vector/RAG-architecture.png deleted file mode 100644 index 3a759675c84..00000000000 Binary files a/templates/rag-redis-multi-modal-multi-vector/RAG-architecture.png and /dev/null differ diff --git a/templates/rag-redis-multi-modal-multi-vector/README.md b/templates/rag-redis-multi-modal-multi-vector/README.md deleted file mode 100644 index e4b4c657ba4..00000000000 --- a/templates/rag-redis-multi-modal-multi-vector/README.md +++ /dev/null @@ -1,118 +0,0 @@ -# RAG - Redis - multi-modal, multi-vector - -`Multi-modal` LLMs enable visual assistants that can perform question-answering about images. - -This template create a visual assistant for slide decks, which often contain visuals such as graphs or figures. - -It uses `GPT-4V` to create image summaries for each slide, embeds the summaries, and stores them in `Redis`. - -Given a question, relevant slides are retrieved and passed to GPT-4V for answer synthesis. - -![](RAG-architecture.png) - -## Input - -Supply a slide deck as PDF in the `/docs` directory. - -By default, this template has a slide deck about recent earnings from NVIDIA. - -Example questions to ask can be: -``` -1/ how much can H100 TensorRT improve LLama2 inference performance? -2/ what is the % change in GPU accelerated applications from 2020 to 2023? -``` - -To create an index of the slide deck, run: -``` -poetry install -poetry shell -python ingest.py -``` - -## Storage - -Here is the process the template will use to create an index of the slides (see [blog](https://blog.langchain.dev/multi-modal-rag-template/)): - -* Extract the slides as a collection of images -* Use GPT-4V to summarize each image -* Embed the image summaries using text embeddings with a link to the original images -* Retrieve relevant image based on similarity between the image summary and the user input question -* Pass those images to GPT-4V for answer synthesis - -### Redis -This template uses [Redis](https://redis.com) to power the [MultiVectorRetriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector) including: -- Redis as the [VectorStore](https://python.langchain.com/docs/integrations/vectorstores/redis) (to store + index image summary embeddings) -- Redis as the [ByteStore](https://python.langchain.com/docs/integrations/stores/redis) (to store images) - -Make sure to deploy a Redis instance either in the [cloud](https://redis.com/try-free) (free) or locally with [docker](https://redis.io/docs/install/install-stack/docker/). - -This will give you an accessible Redis endpoint that you can use as a URL. If deploying locally, simply use `redis://localhost:6379`. - - -## LLM - -The app will retrieve images based on similarity between the text input and the image summary (text), and pass the images to GPT-4V for answer synthesis. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI GPT-4V. 
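For orientation, the Redis wiring described in the storage section above amounts to pairing a Redis vector index (for the summary embeddings) with a Redis byte store (for the raw images) inside a `MultiVectorRetriever`. A rough sketch, with the URL, index name, sample summary, and `doc_id` values as placeholder assumptions rather than values from the template:

```python
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.storage import RedisStore
from langchain_community.vectorstores.redis import Redis

REDIS_URL = "redis://localhost:6379"  # placeholder; point this at your own instance

# Embed and index the per-slide text summaries in Redis...
vectorstore = Redis.from_texts(
    texts=["Slide 3: data center revenue grew sharply quarter over quarter."],
    metadatas=[{"doc_id": "slide-3"}],
    embedding=OpenAIEmbeddings(),
    index_name="image_summaries",
    redis_url=REDIS_URL,
)

# ...while the raw base64 images live in a Redis byte store under matching ids,
# so a hit on a summary hands back the original slide image for GPT-4V.
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    byte_store=RedisStore(redis_url=REDIS_URL),
    id_key="doc_id",
)
```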
- -Set the `REDIS_URL` environment variable to access your Redis database. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-redis-multi-modal-multi-vector -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-redis-multi-modal-multi-vector -``` - -And add the following code to your `server.py` file: -```python -from rag_redis_multi_modal_multi_vector import chain as rag_redis_multi_modal_chain_mv - -add_routes(app, rag_redis_multi_modal_chain_mv, path="/rag-redis-multi-modal-multi-vector") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section. - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with the server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-redis-multi-modal-multi-vector/playground](http://127.0.0.1:8000/rag-redis-multi-modal-multi-vector/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-redis-multi-modal-multi-vector") -``` diff --git a/templates/rag-redis-multi-modal-multi-vector/docs/nvda-f3q24-investor-presentation-final.pdf b/templates/rag-redis-multi-modal-multi-vector/docs/nvda-f3q24-investor-presentation-final.pdf deleted file mode 100644 index 05ab983b0a5..00000000000 Binary files a/templates/rag-redis-multi-modal-multi-vector/docs/nvda-f3q24-investor-presentation-final.pdf and /dev/null differ diff --git a/templates/rag-redis-multi-modal-multi-vector/ingest.py b/templates/rag-redis-multi-modal-multi-vector/ingest.py deleted file mode 100644 index e916dd52fa7..00000000000 --- a/templates/rag-redis-multi-modal-multi-vector/ingest.py +++ /dev/null @@ -1,170 +0,0 @@ -import base64 -import io -import uuid -from io import BytesIO -from pathlib import Path - -import pypdfium2 as pdfium -from langchain_core.documents import Document -from langchain_core.messages import HumanMessage -from langchain_openai.chat_models import ChatOpenAI -from PIL import Image -from rag_redis_multi_modal_multi_vector.utils import ID_KEY, make_mv_retriever - - -def image_summarize(img_base64, prompt): - """ - Make image summary - - :param img_base64: Base64 encoded string for image - :param prompt: Text prompt for summarization - :return: Image summary - - """ - chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024) - - msg = chat.invoke( - [ - HumanMessage( - content=[ - {"type": "text", "text": prompt}, - { - "type": "image_url", - "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}, - }, - ] - ) - ] - ) - return msg.content - - -def generate_img_summaries(img_base64_list): - """ - Generate summaries for images - - :param img_base64_list: Base64 encoded images -
:return: List of image summaries and processed images - """ - - # Store image summaries - image_summaries = [] - processed_images = [] - - # Prompt - prompt = """You are an assistant tasked with summarizing images for retrieval. \ - These summaries will be embedded and used to retrieve the raw image. \ - Give a concise summary of the image that is well optimized for retrieval.""" - - # Apply summarization to images - for i, base64_image in enumerate(img_base64_list): - try: - image_summaries.append(image_summarize(base64_image, prompt)) - processed_images.append(base64_image) - except Exception as e: - print(f"Error with image {i+1}: {e}") - - return image_summaries, processed_images - - -def get_images_from_pdf(pdf_path): - """ - Extract images from each page of a PDF document and save as JPEG files. - - :param pdf_path: A string representing the path to the PDF file. - """ - pdf = pdfium.PdfDocument(pdf_path) - n_pages = len(pdf) - pil_images = [] - for page_number in range(n_pages): - page = pdf.get_page(page_number) - bitmap = page.render(scale=1, rotation=0, crop=(0, 0, 0, 0)) - pil_image = bitmap.to_pil() - pil_images.append(pil_image) - return pil_images - - -def resize_base64_image(base64_string, size=(128, 128)): - """ - Resize an image encoded as a Base64 string - - :param base64_string: Base64 string - :param size: Image size - :return: Re-sized Base64 string - """ - # Decode the Base64 string - img_data = base64.b64decode(base64_string) - img = Image.open(io.BytesIO(img_data)) - - # Resize the image - resized_img = img.resize(size, Image.LANCZOS) - - # Save the resized image to a bytes buffer - buffered = io.BytesIO() - resized_img.save(buffered, format=img.format) - - # Encode the resized image to Base64 - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def convert_to_base64(pil_image): - """ - Convert PIL images to Base64 encoded strings - - :param pil_image: PIL image - :return: Re-sized Base64 string - """ - - buffered = BytesIO() - pil_image.save(buffered, format="JPEG") # You can change the format if needed - img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") - img_str = resize_base64_image(img_str, size=(960, 540)) - return img_str - - -def load_images(image_summaries, images): - """ - Index image summaries in the db. 
- - :param image_summaries: Image summaries - :param images: Base64 encoded images - - :return: Retriever - """ - - retriever = make_mv_retriever() - - # Helper function to add documents to the vectorstore and docstore - def add_documents(retriever, doc_summaries, doc_contents): - doc_ids = [str(uuid.uuid4()) for _ in doc_contents] - summary_docs = [ - Document(page_content=s, metadata={ID_KEY: doc_ids[i]}) - for i, s in enumerate(doc_summaries) - ] - retriever.vectorstore.add_documents(summary_docs) - retriever.docstore.mset(list(zip(doc_ids, doc_contents))) - - add_documents(retriever, image_summaries, images) - - -if __name__ == "__main__": - doc_path = Path(__file__).parent / "docs/nvda-f3q24-investor-presentation-final.pdf" - rel_doc_path = doc_path.relative_to(Path.cwd()) - - print("Extract slides as images") - pil_images = get_images_from_pdf(rel_doc_path) - - # Convert to b64 - images_base_64 = [convert_to_base64(i) for i in pil_images] - - # Generate image summaries - print("Generate image summaries") - image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) - - # Create documents - images_base_64_processed_documents = [ - Document(page_content=i) for i in images_base_64_processed - ] - - # Create retriever and load images - load_images(image_summaries, images_base_64_processed_documents) diff --git a/templates/rag-redis-multi-modal-multi-vector/pyproject.toml b/templates/rag-redis-multi-modal-multi-vector/pyproject.toml deleted file mode 100644 index 3e3f69de31f..00000000000 --- a/templates/rag-redis-multi-modal-multi-vector/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-redis-multi-modal-multi-vector" -version = "0.0.1" -description = "Multi-modal RAG using Redis as the vectorstore and docstore" -authors = ["Tyler Hutcherson "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain-core = ">=0.1.5" -langchain-openai = ">=0.0.1" -redis = "^5.0.1" -openai = "<=2.0.0" -pypdfium2 = "^4.27.0" -pillow = "^10.2.0" - - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_redis_multi_modal_multi_vector" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Redis" -integrations = ["OpenAI", "Redis"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/__init__.py b/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/__init__.py deleted file mode 100644 index 185f0eefc8f..00000000000 --- a/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_redis_multi_modal_multi_vector.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/chain.py b/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/chain.py deleted file mode 100644 index 067a62db266..00000000000 --- a/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/chain.py +++ /dev/null @@ -1,109 +0,0 @@ -import base64 -import io - -from langchain.pydantic_v1 import BaseModel -from langchain_core.documents import Document -from langchain_core.messages import HumanMessage -from langchain_core.output_parsers import StrOutputParser -from 
langchain_core.runnables import RunnableLambda, RunnablePassthrough -from langchain_openai import ChatOpenAI -from PIL import Image - -from rag_redis_multi_modal_multi_vector.utils import make_mv_retriever - - -def resize_base64_image(base64_string, size=(128, 128)): - """ - Resize an image encoded as a Base64 string. - - :param base64_string: A Base64 encoded string of the image to be resized. - :param size: A tuple representing the new size (width, height) for the image. - :return: A Base64 encoded string of the resized image. - """ - img_data = base64.b64decode(base64_string) - img = Image.open(io.BytesIO(img_data)) - resized_img = img.resize(size, Image.LANCZOS) - buffered = io.BytesIO() - resized_img.save(buffered, format=img.format) - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def get_resized_images(docs): - """ - Resize images from base64-encoded strings. - - :param docs: A list of base64-encoded images to be resized. - :return: Dict containing a list of resized base64-encoded strings. - """ - b64_images = [] - for doc in docs: - if isinstance(doc, Document): - doc = doc.page_content - resized_image = resize_base64_image(doc, size=(1280, 720)) - b64_images.append(resized_image) - return {"images": b64_images} - - -def img_prompt_func(data_dict, num_images=2): - """ - GPT-4V prompt for image analysis. - - :param data_dict: A dict with images and a user-provided question. - :param num_images: Number of images to include in the prompt. - :return: A list containing message objects for each image and the text prompt. - """ - messages = [] - if data_dict["context"]["images"]: - for image in data_dict["context"]["images"][:num_images]: - messages.append( - { - "type": "image_url", - "image_url": {"url": f"data:image/jpeg;base64,{image}"}, - } - ) - - text_message = { - "type": "text", - "text": ( - "You are an analyst tasked with answering questions about visual content.\n" - "You will be given a set of image(s) from a slide deck / presentation.\n" - "Use this information to answer the user question. \n" - f"User-provided question: {data_dict['question']}\n\n" - ), - } - messages.append(text_message) - return [HumanMessage(content=messages)] - - -def multi_modal_rag_chain(): - """ - Multi-modal RAG chain. - - :return: A chain of functions representing the multi-modal RAG process.
- """ - # Initialize the multi-modal Large Language Model with specific parameters - model = ChatOpenAI(temperature=0, model="gpt-4-vision-preview", max_tokens=1024) - # Initialize the retriever - retriever = make_mv_retriever() - # Define the RAG pipeline - return ( - { - "context": retriever | RunnableLambda(get_resized_images), - "question": RunnablePassthrough(), - } - | RunnableLambda(img_prompt_func) - | model - | StrOutputParser() - ) - - -# Create RAG chain -chain = multi_modal_rag_chain() - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/schema.yml b/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/schema.yml deleted file mode 100644 index 21e94f930f2..00000000000 --- a/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/schema.yml +++ /dev/null @@ -1,10 +0,0 @@ -text: -- name: content -tag: -- name: doc_id -vector: -- name: content_vector - algorithm: FLAT - datatype: FLOAT32 - dims: 1536 - distance_metric: COSINE \ No newline at end of file diff --git a/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/utils.py b/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/utils.py deleted file mode 100644 index 3aaa3bd6f3a..00000000000 --- a/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/utils.py +++ /dev/null @@ -1,88 +0,0 @@ -import os - -from langchain.retrievers.multi_vector import MultiVectorRetriever -from langchain_community.storage import RedisStore -from langchain_community.vectorstores import Redis as RedisVectorDB -from langchain_openai.embeddings import OpenAIEmbeddings - -ID_KEY = "doc_id" - - -def get_boolean_env_var(var_name, default_value=False): - """Retrieve the boolean value of an environment variable. - - Args: - var_name (str): The name of the environment variable to retrieve. - default_value (bool): The default value to return if the variable - is not found. - - Returns: - bool: The value of the environment variable, interpreted as a boolean. 
- """ - true_values = {"true", "1", "t", "y", "yes"} - false_values = {"false", "0", "f", "n", "no"} - - # Retrieve the environment variable's value - value = os.getenv(var_name, "").lower() - - # Decide the boolean value based on the content of the string - if value in true_values: - return True - elif value in false_values: - return False - else: - return default_value - - -# Check for openai API key -if "OPENAI_API_KEY" not in os.environ: - raise Exception("Must provide an OPENAI_API_KEY as an env var.") - - -def format_redis_conn_from_env() -> str: - redis_url = os.getenv("REDIS_URL", None) - if redis_url: - return redis_url - else: - using_ssl = get_boolean_env_var("REDIS_SSL", False) - start = "rediss://" if using_ssl else "redis://" - - # if using RBAC - password = os.getenv("REDIS_PASSWORD", None) - username = os.getenv("REDIS_USERNAME", "default") - if password is not None: - start += f"{username}:{password}@" - - host = os.getenv("REDIS_HOST", "localhost") - port = int(os.getenv("REDIS_PORT", 6379)) - - return start + f"{host}:{port}" - - -REDIS_URL = format_redis_conn_from_env() - -current_file_path = os.path.abspath(__file__) -parent_dir = os.path.dirname(current_file_path) -schema_path = os.path.join(parent_dir, "schema.yml") -INDEX_SCHEMA = schema_path - - -def make_mv_retriever(): - """Create the multi-vector retriever""" - # Load Redis - REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379") - vectorstore = RedisVectorDB( - redis_url=REDIS_URL, - index_name="image_summaries", - key_prefix="summary", - index_schema=INDEX_SCHEMA, - embedding=OpenAIEmbeddings(), - ) - store = RedisStore(redis_url=REDIS_URL, namespace="image") - - # Create the multi-vector retriever - return MultiVectorRetriever( - vectorstore=vectorstore, - byte_store=store, - id_key=ID_KEY, - ) diff --git a/templates/rag-redis-multi-modal-multi-vector/tests/__init__.py b/templates/rag-redis-multi-modal-multi-vector/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-redis/LICENSE b/templates/rag-redis/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-redis/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/templates/rag-redis/README.md b/templates/rag-redis/README.md deleted file mode 100644 index 4afbbf3d71b..00000000000 --- a/templates/rag-redis/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# RAG - Redis - -This template performs RAG using `Redis` (vector database) and `OpenAI` (LLM) on financial 10k filings docs for Nike. - -It relies on the sentence transformer `all-MiniLM-L6-v2` for embedding chunks of the pdf and user questions. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the [OpenAI](https://platform.openai.com) models: - -```bash -export OPENAI_API_KEY= -``` - -Set the following [Redis](https://redis.com/try-free) environment variables: - -```bash -export REDIS_HOST = -export REDIS_PORT = -export REDIS_USER = -export REDIS_PASSWORD = -``` - -## Supported Settings -We use a variety of environment variables to configure this application - -| Environment Variable | Description | Default Value | -|----------------------|-----------------------------------|---------------| -| `DEBUG` | Enable or disable Langchain debugging logs | True | -| `REDIS_HOST` | Hostname for the Redis server | "localhost" | -| `REDIS_PORT` | Port for the Redis server | 6379 | -| `REDIS_USER` | User for the Redis server | "" | -| `REDIS_PASSWORD` | Password for the Redis server | "" | -| `REDIS_URL` | Full URL for connecting to Redis | `None`, Constructed from user, password, host, and port if not provided | -| `INDEX_NAME` | Name of the vector index | "rag-redis" | - -## Usage - -To use this package, you should first have the LangChain CLI and Pydantic installed in a Python virtual environment: - -```shell -pip install -U langchain-cli pydantic==1.10.13 -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-redis -``` - -If you want to add this to an existing project, you can just run: -```shell -langchain app add rag-redis -``` - -And add the following code snippet to your `app/server.py` file: -```python -from rag_redis.chain import chain as rag_redis_chain - -add_routes(app, rag_redis_chain, path="/rag-redis") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-redis/playground](http://127.0.0.1:8000/rag-redis/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-redis") -``` \ No newline at end of file diff --git a/templates/rag-redis/data/nke-10k-2023.pdf b/templates/rag-redis/data/nke-10k-2023.pdf deleted file mode 100644 index 6ade8863e80..00000000000 Binary files a/templates/rag-redis/data/nke-10k-2023.pdf and /dev/null differ diff --git a/templates/rag-redis/ingest.py b/templates/rag-redis/ingest.py deleted file mode 100644 index db6413daa1a..00000000000 --- a/templates/rag-redis/ingest.py +++ /dev/null @@ -1,45 +0,0 @@ -import os - -from langchain_community.document_loaders import UnstructuredFileLoader -from langchain_community.embeddings import HuggingFaceEmbeddings -from langchain_community.vectorstores import Redis -from langchain_text_splitters import RecursiveCharacterTextSplitter -from rag_redis.config import EMBED_MODEL, INDEX_NAME, INDEX_SCHEMA, REDIS_URL - - -def ingest_documents(): - """ - Ingest PDF to Redis from the data/ directory that - contains Edgar 10k filings data for Nike. - """ - # Load list of pdfs - company_name = "Nike" - data_path = "data/" - doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0] - - print("Parsing 10k filing doc for NIKE", doc) - - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=1500, chunk_overlap=100, add_start_index=True - ) - loader = UnstructuredFileLoader(doc, mode="single", strategy="fast") - chunks = loader.load_and_split(text_splitter) - - print("Done preprocessing. Created", len(chunks), "chunks of the original pdf") - # Create vectorstore - embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL) - - _ = Redis.from_texts( - # appending this little bit can sometimes help with semantic retrieval - # especially with multiple companies - texts=[f"Company: {company_name}. 
" + chunk.page_content for chunk in chunks], - metadatas=[chunk.metadata for chunk in chunks], - embedding=embedder, - index_name=INDEX_NAME, - index_schema=INDEX_SCHEMA, - redis_url=REDIS_URL, - ) - - -if __name__ == "__main__": - ingest_documents() diff --git a/templates/rag-redis/pyproject.toml b/templates/rag-redis/pyproject.toml deleted file mode 100644 index a8b83992b46..00000000000 --- a/templates/rag-redis/pyproject.toml +++ /dev/null @@ -1,50 +0,0 @@ -[tool.poetry] -name = "rag-redis" -version = "0.0.1" -description = "Run a RAG app backed by OpenAI, HuggingFace, and Redis as a vector database" -authors = [ - "Tyler Hutcherson ", - "Sam Partee ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.9,<3.13" -langchain = "^0.1" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" -openai = "<2" -sentence-transformers = "2.2.2" -redis = "5.0.1" -tiktoken = "0.5.1" -pdf2image = "1.16.3" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.dependencies.unstructured] -version = "^0.10.27" -extras = ["pdf"] - -[tool.poetry.group.dev.dependencies] -poethepoet = "^0.24.1" -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_redis.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Redis" -integrations = ["OpenAI", "Redis", "HuggingFace"] -tags = ["vectordbs"] - -[tool.poe.tasks.start] -cmd = "uvicorn langchain_cli.dev_scripts:create_demo_server --reload --port $port --host $host" -args = [ - { name = "port", help = "port to run on", default = "8000" }, - { name = "host", help = "host to run on", default = "127.0.0.1" }, -] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-redis/rag_redis.ipynb b/templates/rag-redis/rag_redis.ipynb deleted file mode 100644 index bb3f87a8c62..00000000000 --- a/templates/rag-redis/rag_redis.ipynb +++ /dev/null @@ -1,88 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to RAG App\n", - "\n", - "Assuming you are already running this server:\n", - "```bash\n", - "langserve start\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "id": "d774be2a", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Nike's revenue in 2023 was $51.2 billion. \n", - "\n", - "Source: 'data/nke-10k-2023.pdf', Start Index: '146100'\n" - ] - } - ], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_redis = RemoteRunnable(\"http://localhost:8000/rag-redis\")\n", - "\n", - "print(rag_redis.invoke(\"What was Nike's revenue in 2023?\"))" - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "id": "07ae0005", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "As of May 31, 2023, Nike had approximately 83,700 employees worldwide. This information can be found in the first piece of context provided. 
(source: data/nke-10k-2023.pdf, start_index: 32532)\n" - ] - } - ], - "source": [ - "print(rag_redis.invoke(\"How many employees work at Nike?\"))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4a6b9f00", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.6" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-redis/rag_redis/__init__.py b/templates/rag-redis/rag_redis/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-redis/rag_redis/chain.py b/templates/rag-redis/rag_redis/chain.py deleted file mode 100644 index 2327a08b575..00000000000 --- a/templates/rag-redis/rag_redis/chain.py +++ /dev/null @@ -1,63 +0,0 @@ -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import HuggingFaceEmbeddings -from langchain_community.vectorstores import Redis -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -from rag_redis.config import ( - EMBED_MODEL, - INDEX_NAME, - INDEX_SCHEMA, - REDIS_URL, -) - - -# Make this look better in the docs. -class Question(BaseModel): - __root__: str - - -# Init Embeddings -embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL) - -# Connect to pre-loaded vectorstore -# run the ingest.py script to populate this -vectorstore = Redis.from_existing_index( - embedding=embedder, index_name=INDEX_NAME, schema=INDEX_SCHEMA, redis_url=REDIS_URL -) -# TODO allow user to change parameters -retriever = vectorstore.as_retriever(search_type="mmr") - - -# Define our prompt -template = """ -Use the following pieces of context from Nike's financial 10k filings -dataset to answer the question. Do not make up an answer if there is no -context provided to help answer it. Include the 'source' and 'start_index' -from the metadata included in the context you used to answer the question - -Context: ---------- -{context} - ---------- -Question: {question} ---------- - -Answer: -""" - - -prompt = ChatPromptTemplate.from_template(template) - - -# RAG Chain -model = ChatOpenAI(model="gpt-3.5-turbo-16k") -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -).with_types(input_type=Question) diff --git a/templates/rag-redis/rag_redis/config.py b/templates/rag-redis/rag_redis/config.py deleted file mode 100644 index 306448b817e..00000000000 --- a/templates/rag-redis/rag_redis/config.py +++ /dev/null @@ -1,78 +0,0 @@ -import os - - -def get_boolean_env_var(var_name, default_value=False): - """Retrieve the boolean value of an environment variable. - - Args: - var_name (str): The name of the environment variable to retrieve. - default_value (bool): The default value to return if the variable - is not found. - - Returns: - bool: The value of the environment variable, interpreted as a boolean. 
- """ - true_values = {"true", "1", "t", "y", "yes"} - false_values = {"false", "0", "f", "n", "no"} - - # Retrieve the environment variable's value - value = os.getenv(var_name, "").lower() - - # Decide the boolean value based on the content of the string - if value in true_values: - return True - elif value in false_values: - return False - else: - return default_value - - -# Check for openai API key -if "OPENAI_API_KEY" not in os.environ: - raise Exception("Must provide an OPENAI_API_KEY as an env var.") - - -# Whether or not to enable langchain debugging -DEBUG = get_boolean_env_var("DEBUG", False) -# Set DEBUG env var to "true" if you wish to enable LC debugging module -if DEBUG: - import langchain - - langchain.debug = True - - -# Embedding model -EMBED_MODEL = os.getenv("EMBED_MODEL", "sentence-transformers/all-MiniLM-L6-v2") - -# Redis Connection Information -REDIS_HOST = os.getenv("REDIS_HOST", "localhost") -REDIS_PORT = int(os.getenv("REDIS_PORT", 6379)) - - -def format_redis_conn_from_env(): - redis_url = os.getenv("REDIS_URL", None) - if redis_url: - return redis_url - else: - using_ssl = get_boolean_env_var("REDIS_SSL", False) - start = "rediss://" if using_ssl else "redis://" - - # if using RBAC - password = os.getenv("REDIS_PASSWORD", None) - username = os.getenv("REDIS_USERNAME", "default") - if password is not None: - start += f"{username}:{password}@" - - return start + f"{REDIS_HOST}:{REDIS_PORT}" - - -REDIS_URL = format_redis_conn_from_env() - -# Vector Index Configuration -INDEX_NAME = os.getenv("INDEX_NAME", "rag-redis") - - -current_file_path = os.path.abspath(__file__) -parent_dir = os.path.dirname(current_file_path) -schema_path = os.path.join(parent_dir, "schema.yml") -INDEX_SCHEMA = schema_path diff --git a/templates/rag-redis/rag_redis/schema.yml b/templates/rag-redis/rag_redis/schema.yml deleted file mode 100644 index 6d396323f35..00000000000 --- a/templates/rag-redis/rag_redis/schema.yml +++ /dev/null @@ -1,11 +0,0 @@ -text: -- name: content -- name: source -numeric: -- name: start_index -vector: -- name: content_vector - algorithm: HNSW - datatype: FLOAT32 - dims: 384 - distance_metric: COSINE diff --git a/templates/rag-redis/tests/__init__.py b/templates/rag-redis/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-self-query/LICENSE b/templates/rag-self-query/LICENSE deleted file mode 100644 index 578fc414ad4..00000000000 --- a/templates/rag-self-query/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 pingpong-templates - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-self-query/README.md b/templates/rag-self-query/README.md deleted file mode 100644 index bb3b0192824..00000000000 --- a/templates/rag-self-query/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# RAG - Elasticsearch - Self-query - -This template performs RAG using the `self-query` retrieval technique. -The main idea is to let an LLM convert unstructured queries into -structured queries. See the [docs for more on how this works](https://python.langchain.com/docs/modules/data_connection/retrievers/self_query). - -## Environment Setup - -In this template we'll use `OpenAI` models and an `Elasticsearch` vector store, but the approach generalizes to all LLMs/ChatModels and [a number of vector stores](https://python.langchain.com/docs/integrations/retrievers/self_query/). - -Set the `OPENAI_API_KEY` environment variable to access the `OpenAI` models. - -To connect to your `Elasticsearch` instance, use the following environment variables: - -```bash -export ELASTIC_CLOUD_ID = -export ELASTIC_USERNAME = -export ELASTIC_PASSWORD = -``` -For local development with Docker, use: - -```bash -export ES_URL = "http://localhost:9200" -docker run -p 9200:9200 -e "discovery.type=single-node" -e "xpack.security.enabled=false" -e "xpack.security.http.ssl.enabled=false" docker.elastic.co/elasticsearch/elasticsearch:8.9.0 -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U "langchain-cli[serve]" -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-self-query -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-self-query -``` - -And add the following code to your `server.py` file: -```python -from rag_self_query import chain - -add_routes(app, chain, path="/rag-elasticsearch") -``` - -To populate the vector store with the sample data, from the root of the directory run: -```bash -python ingest.py -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-elasticsearch/playground](http://127.0.0.1:8000/rag-elasticsearch/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-self-query") -``` diff --git a/templates/rag-self-query/data/documents.json b/templates/rag-self-query/data/documents.json deleted file mode 100644 index b81cf1647f2..00000000000 --- a/templates/rag-self-query/data/documents.json +++ /dev/null @@ -1,161 +0,0 @@ -[ - { - "content": "Effective: March 2020\nPurpose\n\nThe purpose of this full-time work-from-home policy is to provide guidelines and support for employees to conduct their work remotely, ensuring the continuity and productivity of business operations during the COVID-19 pandemic and beyond.\nScope\n\nThis policy applies to all employees who are eligible for remote work as determined by their role and responsibilities. It is designed to allow employees to work from home full time while maintaining the same level of performance and collaboration as they would in the office.\nEligibility\n\nEmployees who can perform their work duties remotely and have received approval from their direct supervisor and the HR department are eligible for this work-from-home arrangement.\nEquipment and Resources\n\nThe necessary equipment and resources will be provided to employees for remote work, including a company-issued laptop, software licenses, and access to secure communication tools. Employees are responsible for maintaining and protecting the company's equipment and data.\nWorkspace\n\nEmployees working from home are responsible for creating a comfortable and safe workspace that is conducive to productivity. This includes ensuring that their home office is ergonomically designed, well-lit, and free from distractions.\nCommunication\n\nEffective communication is vital for successful remote work. Employees are expected to maintain regular communication with their supervisors, colleagues, and team members through email, phone calls, video conferences, and other approved communication tools.\nWork Hours and Availability\n\nEmployees are expected to maintain their regular work hours and be available during normal business hours, unless otherwise agreed upon with their supervisor. Any changes to work hours or availability must be communicated to the employee's supervisor and the HR department.\nPerformance Expectations\n\nEmployees working from home are expected to maintain the same level of performance and productivity as if they were working in the office. Supervisors and team members will collaborate to establish clear expectations and goals for remote work.\nTime Tracking and Overtime\n\nEmployees are required to accurately track their work hours using the company's time tracking system. 
Non-exempt employees must obtain approval from their supervisor before working overtime.\nConfidentiality and Data Security\n\nEmployees must adhere to the company's confidentiality and data security policies while working from home. This includes safeguarding sensitive information, securing personal devices and internet connections, and reporting any security breaches to the IT department.\nHealth and Well-being\n\nThe company encourages employees to prioritize their health and well-being while working from home. This includes taking regular breaks, maintaining a work-life balance, and seeking support from supervisors and colleagues when needed.\nPolicy Review and Updates\n\nThis work-from-home policy will be reviewed periodically and updated as necessary, taking into account changes in public health guidance, business needs, and employee feedback.\nQuestions and Concerns\n\nEmployees are encouraged to direct any questions or concerns about this policy to their supervisor or the HR department.\n", - "summary": "This policy outlines the guidelines for full-time remote work, including eligibility, equipment and resources, workspace requirements, communication expectations, performance expectations, time tracking and overtime, confidentiality and data security, health and well-being, and policy reviews and updates. Employees are encouraged to direct any questions or concerns", - "name": "Work From Home Policy", - "url": "./sharepoint/Work from home policy.txt", - "created_on": "2020-03-01", - "updated_at": "2020-03-01", - "category": "teams", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Starting May 2022, the company will be implementing a two-day in-office work requirement per week for all eligible employees. Please coordinate with your supervisor and HR department to schedule your in-office workdays while continuing to follow all safety protocols.\n", - "summary": "Starting May 2022, employees will need to work two days a week in the office. Coordinate with your supervisor and HR department for these days while following safety protocols.", - "name": "April Work From Home Update", - "url": "./sharepoint/April work from home update.txt", - "created_on": "2022-04-29", - "updated_at": "2022-04-29", - "category": "teams", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "As we continue to prioritize the well-being of our employees, we are making a slight adjustment to our hybrid work policy. Starting May 1, 2023, employees will be required to work from the office three days a week, with two days designated for remote work. Please communicate with your supervisor and HR department to establish your updated in-office workdays.\n", - "summary": "Starting May 1, 2023, our hybrid work policy will require employees to work from the office three days a week and two days remotely.", - "name": "Wfh Policy Update May 2023", - "url": "./sharepoint/WFH policy update May 2023.txt", - "created_on": "2023-05-01", - "updated_at": "2023-05-01", - "category": "teams", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Executive Summary:\nThis sales strategy document outlines the key objectives, focus areas, and action plans for our tech company's sales operations in fiscal year 2024. Our primary goal is to increase revenue, expand market share, and strengthen customer relationships in our target markets.\n\nI. 
Objectives for Fiscal Year 2024\n\nIncrease revenue by 20% compared to fiscal year 2023.\nExpand market share in key segments by 15%.\nRetain 95% of existing customers and increase customer satisfaction ratings.\nLaunch at least two new products or services in high-demand market segments.\n\nII. Focus Areas\nA. Target Markets:\nContinue to serve existing markets with a focus on high-growth industries.\nIdentify and penetrate new markets with high potential for our products and services.\n\nB. Customer Segmentation:\nStrengthen relationships with key accounts and strategic partners.\nPursue new customers in underserved market segments.\nDevelop tailored offerings for different customer segments based on their needs and preferences.\n\nC. Product/Service Portfolio:\nOptimize the existing product/service portfolio by focusing on high-demand solutions.\nDevelop and launch innovative products/services in emerging technology areas.\nEnhance post-sales support and customer service to improve customer satisfaction.\n\nIII. Action Plans\nA. Sales Team Development:\nExpand the sales team to cover new markets and industries.\nProvide ongoing training to sales staff on product knowledge, sales techniques, and industry trends.\nImplement a performance-based incentive system to reward top performers.\n\nB. Marketing and Promotion:\nDevelop targeted marketing campaigns for different customer segments and industries.\nLeverage digital marketing channels to increase brand visibility and lead generation.\nParticipate in industry events and trade shows to showcase our products and services.\n\nC. Partner Ecosystem:\nStrengthen existing partnerships and establish new strategic alliances to expand market reach.\nCollaborate with partners on joint marketing and sales initiatives.\nProvide partner training and support to ensure they effectively represent our products and services.\n\nD. Customer Success:\nImplement a proactive customer success program to improve customer retention and satisfaction.\nDevelop a dedicated customer support team to address customer inquiries and concerns promptly.\nCollect and analyze customer feedback to identify areas for improvement in our products, services, and processes.\n\nIV. Monitoring and Evaluation\nEstablish key performance indicators (KPIs) to track progress toward our objectives.\nConduct regular sales team meetings to review performance, share best practices, and address challenges.\nConduct quarterly reviews of our sales strategy to ensure alignment with market trends and adjust as needed.\n\nBy following this sales strategy for fiscal year 2024, our tech company aims to achieve significant growth and success in our target markets, while also providing exceptional value and service to our customers.\n", - "summary": "This sales strategy document outlines objectives, focus areas, and action plans for our tech company's sales operations in fiscal year 2024. Our primary goal is to increase revenue, expand market share, and strengthen customer relationships in our target markets. 
Focus areas include targeting new markets, segmenting customers, enhancing", - "name": "Fy2024 Company Sales Strategy", - "url": "./sharepoint/FY2024 Company Sales Strategy.txt", - "category": "teams", - "created_on": "2023-04-15", - "updated_at": "2023-04-15", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Purpose\n\nThe purpose of this vacation policy is to outline the guidelines and procedures for requesting and taking time off from work for personal and leisure purposes. This policy aims to promote a healthy work-life balance and encourage employees to take time to rest and recharge.\nScope\n\nThis policy applies to all full-time and part-time employees who have completed their probationary period.\nVacation Accrual\n\nFull-time employees accrue vacation time at a rate of [X hours] per month, equivalent to [Y days] per year. Part-time employees accrue vacation time on a pro-rata basis, calculated according to their scheduled work hours.\n\nVacation time will begin to accrue from the first day of employment, but employees are eligible to take vacation time only after completing their probationary period. Unused vacation time will be carried over to the next year, up to a maximum of [Z days]. Any additional unused vacation time will be forfeited.\nVacation Scheduling\n\nEmployees are required to submit vacation requests to their supervisor at least [A weeks] in advance, specifying the start and end dates of their vacation. Supervisors will review and approve vacation requests based on business needs, ensuring adequate coverage during the employee's absence.\n\nEmployees are encouraged to plan their vacations around the company's peak and non-peak periods to minimize disruptions. Vacation requests during peak periods may be subject to limitations and require additional advance notice.\nVacation Pay\n\nEmployees will receive their regular pay during their approved vacation time. Vacation pay will be calculated based on the employee's average earnings over the [B weeks] preceding their vacation.\nUnplanned Absences and Vacation Time\n\nIn the event of an unplanned absence due to illness or personal emergencies, employees may use their accrued vacation time, subject to supervisor approval. Employees must inform their supervisor as soon as possible and provide any required documentation upon their return to work.\nVacation Time and Termination of Employment\n\nIf an employee's employment is terminated, they will be paid out for any unused vacation time, calculated based on their current rate of pay.\nPolicy Review and Updates\n\nThis vacation policy will be reviewed periodically and updated as necessary, taking into account changes in labor laws, business needs, and employee feedback.\nQuestions and Concerns\n\nEmployees are encouraged to direct any questions or concerns about this policy to their supervisor or the HR department.\n", - "summary": ": This policy outlines the guidelines and procedures for requesting and taking time off from work for personal and leisure purposes. Full-time employees accrue vacation time at a rate of [X hours] per month, equivalent to [Y days] per year. 
Vacation requests must be submitted to supervisors at least", - "name": "Company Vacation Policy", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/ES6rw9bKZxVBobG1WUoJpikBF9Bhx1pw_GvJWbsg-Z_HNA?e=faSHVt", - "created_on": "2018-04-15", - "updated_at": "2018-04-16", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - - { - "content": "This career leveling matrix provides a framework for understanding the various roles and responsibilities of Software Engineers, as well as the skills and experience required for each level. This matrix is intended to support employee development, facilitate performance evaluations, and provide a clear career progression path.\nJunior Software Engineer\n\nResponsibilities:\nCollaborate with team members to design, develop, and maintain software applications and components.\nWrite clean, well-structured, and efficient code following established coding standards.\nParticipate in code reviews, providing and receiving constructive feedback.\nTroubleshoot and resolve software defects and issues.\nAssist with the creation of technical documentation.\nContinuously learn and stay up-to-date with new technologies and best practices.\n\nSkills & Experience:\nBachelor\u2019s degree in Computer Science or a related field, or equivalent work experience.\nBasic understanding of software development principles and methodologies.\nProficiency in at least one programming language.\nStrong problem-solving and analytical skills.\nEffective communication and collaboration skills.\nEagerness to learn and grow within the field.\nSenior Software Engineer\n\nResponsibilities:\nDesign, develop, and maintain complex software applications and components.\nLead and mentor junior team members in software development best practices and techniques.\nConduct code reviews and ensure adherence to coding standards and best practices.\nCollaborate with cross-functional teams to define, design, and deliver software solutions.\nIdentify, troubleshoot, and resolve complex software defects and issues.\nContribute to the creation and maintenance of technical documentation.\nEvaluate and recommend new technologies, tools, and practices to improve software quality and efficiency.\n\nSkills & Experience:\nBachelor\u2019s degree in Computer Science or a related field, or equivalent work experience.\n5+ years of software development experience.\nProficiency in multiple programming languages and technologies.\nDemonstrated ability to design and implement complex software solutions.\nStrong leadership, mentoring, and collaboration skills.\nExcellent problem-solving, analytical, and communication skills.\nPrincipal Software Engineer\n\nResponsibilities:\nLead the design, development, and maintenance of large-scale, mission-critical software applications and components.\nProvide technical leadership and mentorship to software engineering teams.\nDrive the adoption of advanced software development practices and technologies.\nCollaborate with product management, architecture, and other stakeholders to define and deliver strategic software initiatives.\nIdentify, troubleshoot, and resolve the most complex software defects and issues.\nCreate and maintain technical documentation, including architectural designs and best practice guidelines.\nRepresent [Company Name] as a thought leader in the software engineering community, including speaking at conferences, publishing articles, and contributing to open-source projects.\n\nSkills & 
Experience:\nBachelor\u2019s degree in Computer Science or a related field, or equivalent work experience.\n10+ years of software development experience, with a focus on large-scale, mission-critical applications.\nExpertise in multiple programming languages, technologies, and software development methodologies.\nProven ability to lead and mentor high-performing software engineering teams.\nExceptional problem-solving, analytical, and communication skills.\nStrong business acumen and ability to influence decision-making at the executive level.\n\nBy following this career leveling matrix, we aim to support the growth and development of Software Engineers, enabling them to reach their full potential and contribute meaningfully to the success of the organization.\n", - "summary": "\nThis career leveling matrix provides a framework for understanding the various roles and responsibilities of Software Engineers, as well as the skills and experience required for each level. It is intended to support employee development, facilitate performance evaluations, and provide a clear career progression path.", - "name": "Swe Career Matrix", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EVYuEyRhHh5Aqc3a39sqbGcBkqKIHRWtJBjjUjNs6snpMg?e=nv1mf4", - "created_on": "2018-04-15", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Title: Working with the Sales Team as an Engineer in a Tech Company\n\nIntroduction:\nAs an engineer in a tech company, collaboration with the sales team is essential to ensure the success of the company's products and services. This guidance document aims to provide an overview of how engineers can effectively work with the sales team, fostering a positive and productive working environment.\nUnderstanding the Sales Team's Role:\nThe sales team is responsible for promoting and selling the company's products and services to potential clients. Their role involves establishing relationships with customers, understanding their needs, and ensuring that the offered solutions align with their requirements.\n\nAs an engineer, it is important to understand the sales team's goals and objectives, as this will help you to provide them with the necessary information, tools, and support to successfully sell your company's products and services.\nCommunication:\nEffective communication is key to successfully working with the sales team. Make sure to maintain open lines of communication, and be responsive to their questions and concerns. This includes:\n\na. Attending sales meetings and conference calls when required.\nb. Providing regular product updates and training sessions to the sales team.\nc. Being available to answer technical questions and clarifications.\nCollaboration:\nCollaborate with the sales team in developing and refining sales materials, such as product presentations, demos, and technical documents. This will ensure that the sales team has accurate and up-to-date information to present to clients.\n\nAdditionally, work closely with the sales team on customer projects or product customizations, providing technical guidance, and ensuring that the solutions meet the customer's requirements.\nCustomer Engagement:\nAt times, engineers may be asked to join sales meetings or calls with potential clients to provide technical expertise. In these situations, it is important to:\n\na. Be prepared and understand the customer's needs and pain points.\nb. 
Clearly explain the technical aspects of the product or solution in a simple language that the customer can understand.\nc. Address any concerns or questions the customer may have.\nContinuous Improvement:\nActively seek feedback from the sales team regarding product performance, customer experiences, and market trends. Use this feedback to identify areas of improvement and collaborate with other engineers to enhance the product or service offerings.\nMutual Respect and Support:\nIt is essential to treat your colleagues in the sales team with respect and professionalism. Recognize and appreciate their efforts in promoting and selling the company's products and services. In turn, the sales team should also respect and appreciate the technical expertise and knowledge of the engineering team.\n\nBy working together, both the engineering and sales teams can contribute to the overall success of the company.\n\nConclusion:\nCollaboration between engineers and the sales team is crucial for a tech company's success. By understanding each other's roles, maintaining effective communication, collaborating on projects, and supporting one another, both teams can work together to achieve the company's goals and ensure customer satisfaction.\n", - "summary": ": This guide provides an overview of how engineers can effectively collaborate with the sales team to ensure the success of a tech company. It includes understanding the sales team's role, communicating and collaborating on projects, engaging customers, and providing mutual respect and support.", - "name": "Sales Engineering Collaboration", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EW21-KJnfHBFoRiF49_uJMcBfHyPKimuPOFsCcJypQWaBQ?e=mGdIqe", - "created_on": "2019-04-15", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Purpose\nThe purpose of this Intellectual Property Policy is to establish guidelines and procedures for the ownership, protection, and utilization of intellectual property generated by employees during their employment. This policy aims to encourage creativity and innovation while ensuring that the interests of both the company and its employees are protected.\n\nScope\nThis policy applies to all employees, including full-time, part-time, temporary, and contract employees.\n\nDefinitions\na. Intellectual Property (IP): Refers to creations of the mind, such as inventions, literary and artistic works, designs, symbols, and images, that are protected by copyright, trademark, patent, or other forms of legal protection.\nb. Company Time: Refers to the time during which an employee is actively engaged in performing their job duties.\nc. Outside Company Time: Refers to the time during which an employee is not engaged in performing their job duties.\n\nOwnership of Intellectual Property\na. Work Generated on Company Time\ni. Any intellectual property created, conceived, or developed by an employee during company time or using company resources, equipment, or facilities shall be considered the property of the Company.\nii. Employees are required to promptly disclose any such intellectual property to their supervisor or the appropriate department head.\nb. Work Generated Outside Company Time\ni. Intellectual property created, conceived, or developed by an employee outside of company time and without the use of company resources, equipment, or facilities shall generally remain the property of the employee.\nii. 
However, if the intellectual property is directly related to the employee's job responsibilities, or if the employee has used company resources, equipment, or facilities in its creation, it may be considered the property of the Company.\nProtection and Utilization of Intellectual Property\na. The Company shall have the right to protect, license, and commercialize any intellectual property owned by the company as it deems appropriate.\nb. Employees are expected to cooperate with the Company in obtaining any necessary legal protection for intellectual property owned by the company, including by signing any documents or providing any necessary information or assistance.\nConfidentiality\nEmployees are expected to maintain the confidentiality of any intellectual property owned by the Company and not disclose it to any third parties without the express written consent of an authorized representative of the company.\nEmployee Acknowledgment\nAll employees are required to sign an acknowledgment of this Intellectual Property Policy as a condition of their employment with [Company Name]. By signing the acknowledgment, employees agree to abide by the terms of this policy and understand that any violations may result in disciplinary action, up to and including termination of employment.\nPolicy Review\nThis Intellectual Property Policy shall be reviewed periodically and may be amended as necessary to ensure its continued effectiveness and compliance with applicable laws and regulations. Employees will be notified of any significant changes to this policy.\n", - "summary": "This Intellectual Property Policy outlines guidelines and procedures for the ownership, protection, and utilization of intellectual property generated by employees during their employment. It establishes the company's ownership of work generated on company time, while recognizing employee ownership of work generated outside of company time without the use of company resources. The policy", - "name": "Intellectual Property Policy", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EWz3cYEVdzBNsiHsYbKhms4BVYGhravyrUw3T3lzxL4pTg?e=mPIgbO", - "created_on": "2021-06-15", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - - { - "content": "Code of Conduct\nPurpose\n\nThe purpose of this code of conduct is to establish guidelines for professional and ethical behavior in the workplace. It outlines the principles and values that all employees are expected to uphold in their interactions with colleagues, customers, partners, and other stakeholders.\nScope\n\nThis code of conduct applies to all employees, contractors, and volunteers within the organization, regardless of their role or seniority.\nCore Values\n\nEmployees are expected to adhere to the following core values:\n\na. Integrity: Act honestly, ethically, and in the best interests of the organization at all times.\nb. Respect: Treat all individuals with dignity, courtesy, and fairness, regardless of their background, beliefs, or position.\nc. Accountability: Take responsibility for one's actions and decisions, and be willing to learn from mistakes.\nd. Collaboration: Work cooperatively with colleagues and partners to achieve shared goals and promote a positive work environment.\ne. 
Excellence: Strive for the highest standards of performance and continuously seek opportunities for improvement.\nCompliance with Laws and Regulations\n\nEmployees must comply with all applicable laws, regulations, and organizational policies in the course of their work. This includes, but is not limited to, employment laws, data protection regulations, and industry-specific guidelines.\nConflicts of Interest\n\nEmployees should avoid situations where their personal interests may conflict with or influence their professional judgment. If a potential conflict of interest arises, employees must disclose it to their supervisor or the appropriate authority within the organization.\nConfidentiality and Information Security\n\nEmployees are responsible for safeguarding the organization's confidential information, as well as any sensitive information entrusted to them by clients, partners, or other third parties. This includes adhering to data protection policies and using secure communication channels.\nHarassment and Discrimination\n\nThe organization is committed to providing a workplace free from harassment, discrimination, and bullying. Employees are expected to treat others with respect and report any incidents of inappropriate behavior to their supervisor or the human resources department.\nHealth and Safety\n\nEmployees must follow all health and safety guidelines and procedures to maintain a safe and healthy work environment. This includes reporting any hazards or unsafe conditions to the appropriate personnel.\nUse of Company Resources\n\nEmployees are expected to use company resources, including time, equipment, and funds, responsibly and for their intended purposes. Misuse or theft of company resources is strictly prohibited.\nReporting Violations\n\nEmployees have a responsibility to report any suspected violations of this code of conduct, as well as any illegal or unethical behavior, to their supervisor or the appropriate authority within the organization. The organization will protect the confidentiality of employees who report violations and will not tolerate retaliation against those who raise concerns.\nConsequences of Non-Compliance\n\nFailure to adhere to this code of conduct may result in disciplinary action, up to and including termination of employment. The organization reserves the right to take legal action against individuals who engage in illegal or unethical conduct.\nPolicy Review and Updates\n\nThis code of conduct will be reviewed periodically and updated as necessary to ensure it remains relevant and effective in promoting ethical behavior and professional standards within the organization.\nQuestions and Concerns\n\nEmployees are encouraged to seek guidance from their supervisor or the human resources department if they have questions or concerns about this code of conduct or its application to specific situations.\n", - "summary": "This code of conduct outlines the principles and values that all employees are expected to uphold in their interactions with colleagues, customers, partners, and other stakeholders. It sets out core values such as integrity, respect, accountability, collaboration and excellence. 
Employees must comply with all applicable laws, regulations, and organizational", - "name": "Code Of Conduct", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/ER3xmeKaZ_pAqPeJWyyNR0QBg6QmoWIGPhwfEyCABWHrPA?e=cvzrgV", - "created_on": "2018-01-12", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Content:\nThe purpose of this office pet policy is to outline the guidelines and procedures for bringing pets into the workplace. This policy aims to create a positive and inclusive work environment while ensuring the comfort, safety, and well-being of all employees, visitors, and pets.\nScope\n\nThis policy applies to all employees who wish to bring their pets to the office. Pets covered under this policy include dogs, cats, and other small, non-exotic animals, subject to approval by the HR department.\nPet Approval Process\n\nEmployees must obtain prior approval from their supervisor and the HR department before bringing their pets to the office. The approval process includes:\n\na. Submitting a written request, including a description of the pet, its breed, age, and temperament.\nb. Providing proof of up-to-date vaccinations and any required licenses or permits.\nc. Obtaining written consent from all employees who share the workspace with the pet owner.\n\nThe HR department reserves the right to deny or revoke pet approval based on the specific circumstances or concerns raised by other employees.\nPet Behavior and Supervision\n\nEmployees are responsible for the behavior and well-being of their pets while in the office. Pets must be:\n\na. Well-behaved, non-aggressive, and not disruptive to the work environment.\nb. House-trained and able to eliminate waste in designated areas outside the office.\nc. Kept on a leash or in a secure enclosure when not in the employee's immediate work area.\n\nEmployees must closely supervise their pets and promptly address any issues or concerns raised by other staff members.\nAllergies and Phobias\n\nEmployees with allergies or phobias related to pets must inform the HR department, which will work with the affected employees and pet owners to find a suitable solution. This may include adjusting workspaces, limiting the number or types of pets allowed, or implementing additional safety measures.\nCleanliness and Hygiene\n\nEmployees are responsible for maintaining a clean and hygienic work environment. This includes:\n\na. Cleaning up after their pets, both indoors and outdoors.\nb. Regularly grooming their pets to minimize shedding and odors.\nc. Ensuring their pets are free of pests, such as fleas and ticks.\nLiability\n\nPet owners are liable for any damage or injury caused by their pets. Employees are encouraged to obtain pet liability insurance to cover potential incidents.\nRestricted Areas\n\nPets are not allowed in certain areas of the office, including meeting rooms, restrooms, kitchen and dining areas, and any other designated spaces. 
Signage will be posted to indicate these restricted areas.\nPolicy Review and Updates\n\nThis office pet policy will be reviewed periodically and updated as necessary, taking into account employee feedback, changes in legislation, and best practices for maintaining a safe and inclusive work environment.\nQuestions and Concerns\n\nEmployees are encouraged to direct any questions or concerns about this policy to their supervisor or the HR department.\n", - "summary": "This policy outlines the guidelines and procedures for bringing pets into the workplace. It covers approval process, pet behavior and supervision, allergies and phobias, cleanliness and hygiene, liability, restricted areas, and policy review. Employees must obtain prior approval from their supervisor and the HR department before bringing their", - "name": "Office Pet Policy", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/ETf-69wBeaZJpAn3CY7ExRABQWvav-p24VOnB6C0A4l2pQ?e=X72WuK", - "created_on": "2018-01-12", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Performance Management Policy\nPurpose and Scope\nThe purpose of this Performance Management Policy is to establish a consistent and transparent process for evaluating, recognizing, and rewarding employee performance. This policy applies to all employees and aims to foster a culture of continuous improvement, professional growth, and open communication between employees and management.\nPerformance Planning and Goal Setting\nAt the beginning of each performance cycle, employees and their supervisors will collaborate to set clear, achievable, and measurable performance goals. These goals should align with the company\u2019s strategic objectives and take into account the employee\u2019s job responsibilities, professional development, and career aspirations.\nOngoing Feedback and Communication\nThroughout the performance cycle, employees and supervisors are encouraged to engage in regular, constructive feedback and open communication. This includes discussing progress towards goals, addressing challenges, and identifying opportunities for improvement or additional support. Regular check-ins and updates help ensure that employees stay on track and receive the guidance they need to succeed.\nPerformance Evaluation\nAt the end of each performance cycle, employees will participate in a formal performance evaluation with their supervisor. This evaluation will assess the employee\u2019s overall performance, including their achievements, areas for improvement, and progress towards goals. Both the employee and supervisor should come prepared to discuss specific examples, accomplishments, and challenges from the performance period.\nPerformance Ratings\nBased on the performance evaluation, employees will receive a performance rating that reflects their overall performance during the cycle. The rating system should be clearly defined and consistently applied across the organization. Performance ratings will be used to inform decisions regarding promotions, salary increases, and other rewards or recognition.\nPromotions and Advancements\nHigh-performing employees who consistently demonstrate strong performance, leadership, and a commitment to the company\u2019s values may be considered for promotions or other advancement opportunities. Promotions will be based on factors such as performance ratings, skills, experience, and the needs of the organization. 
Employees interested in pursuing a promotion should discuss their career goals and development plans with their supervisor.\nPerformance Improvement Plans\nEmployees who receive a low performance rating or are struggling to meet their performance goals may be placed on a Performance Improvement Plan (PIP). A PIP is a structured plan designed to help the employee address specific areas of concern, set achievable improvement goals, and receive additional support or resources as needed. Employees on a PIP will be closely monitored and re-evaluated at the end of the improvement period to determine if satisfactory progress has been made.\nRecognition and Rewards\nOur company believes in recognizing and rewarding employees for their hard work and dedication. In addition to promotions and salary increases, employees may be eligible for other forms of recognition or rewards based on their performance. This may include bonuses, awards, or other incentives designed to motivate and celebrate employee achievements. The specific criteria and eligibility for these rewards will be communicated by the HR department or management.\n", - "summary": "This Performance Management Policy outlines a consistent and transparent process for evaluating, recognizing, and rewarding employees. It includes goal setting, ongoing feedback, performance evaluations, ratings, promotions, and rewards. The policy applies to all employees and encourages open communication and professional growth.", - "name": "Performance Management Policy", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/ERsxt9p1uehJqeJu4JlxkakBavbKwcldrYv_hpv3xHikAw?e=pf5R2C", - "created_on": "2018-01-12", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - - { - "content": "Our sales organization is structured to effectively serve our customers and achieve our business objectives across multiple regions. The organization is divided into the following main regions:\n\nThe Americas: This region includes the United States, Canada, Mexico, as well as Central and South America. The North America South America region (NASA) has two Area Vice-Presidents: Laura Martinez is the Area Vice-President of North America, and Gary Johnson is the Area Vice-President of South America.\n\nEurope: Our European sales team covers the entire continent, including the United Kingdom, Germany, France, Spain, Italy, and other countries. The team is responsible for understanding the unique market dynamics and cultural nuances, enabling them to effectively target and engage with customers across the region. The Area Vice-President for Europe is Rajesh Patel.\nAsia-Pacific: This region encompasses countries such as China, Japan, South Korea, India, Australia, and New Zealand. Our sales team in the Asia-Pacific region works diligently to capitalize on growth opportunities and address the diverse needs of customers in this vast and rapidly evolving market. The Area Vice-President for Asia-Pacific is Mei Li.\nMiddle East & Africa: This region comprises countries across the Middle East and Africa, such as the United Arab Emirates, Saudi Arabia, South Africa, and Nigeria. Our sales team in this region is responsible for navigating the unique market challenges and identifying opportunities to expand our presence and better serve our customers. 
The Area Vice-President for Middle East & Africa is Jamal Abdi.\n\nEach regional sales team consists of dedicated account managers, sales representatives, and support staff, led by their respective Area Vice-Presidents. They are responsible for identifying and pursuing new business opportunities, nurturing existing client relationships, and ensuring customer satisfaction. The teams collaborate closely with other departments, such as marketing, product development, and customer support, to ensure we consistently deliver high-quality products and services to our clients.\n", - "summary": "\nOur sales organization is divided into four regions: The Americas, Europe, Asia-Pacific, and Middle East & Africa. Each region is led by an Area Vice-President and consists of dedicated account managers, sales representatives, and support staff. They collaborate with other departments to ensure the delivery of high", - "name": "Sales Organization Overview", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EYsr1eqgn9hMslMJFLR-k54BBX-O3iC26bK7xNEBtYIBkg?e=xeAjiT", - "created_on": "2018-01-15", - "category": "sharepoint", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Introduction:\nThis document outlines the compensation bands strategy for the various teams within our IT company. The goal is to establish a fair and competitive compensation structure that aligns with industry standards, rewards performance, and attracts top talent. By implementing this strategy, we aim to foster employee satisfaction and retention while ensuring the company's overall success.\n\nPurpose:\nThe purpose of this compensation bands strategy is to:\na. Define clear guidelines for salary ranges based on job levels and market benchmarks.\nb. Support equitable compensation practices across different teams.\nc. Encourage employee growth and performance.\nd. Enable effective budgeting and resource allocation.\n\nJob Levels:\nTo establish a comprehensive compensation structure, we have defined distinct job levels within each team. These levels reflect varying degrees of skills, experience, and responsibilities. The levels include:\na. Entry-Level: Employees with limited experience or early career professionals.\nb. Intermediate-Level: Employees with moderate experience and demonstrated competence.\nc. Senior-Level: Experienced employees with advanced skills and leadership capabilities.\nd. Leadership-Level: Managers and team leaders responsible for strategic decision-making.\n\nCompensation Bands:\nBased on the job levels, the following compensation bands have been established:\na. Entry-Level Band: This band encompasses salary ranges for employees in entry-level positions. It aims to provide competitive compensation for individuals starting their careers within the company.\n\nb. Intermediate-Level Band: This band covers salary ranges for employees who have gained moderate experience and expertise in their respective roles. It rewards employees for their growing skill set and contributions.\n\nc. Senior-Level Band: The senior-level band includes salary ranges for experienced employees who have attained advanced skills and have a proven track record of delivering results. It reflects the increased responsibilities and expectations placed upon these individuals.\n\nd. Leadership-Level Band: This band comprises salary ranges for managers and team leaders responsible for guiding and overseeing their respective teams. 
It considers their leadership abilities, strategic thinking, and the impact they have on the company's success.\n\nMarket Benchmarking:\nTo ensure our compensation remains competitive, regular market benchmarking will be conducted. This involves analyzing industry salary trends, regional compensation data, and market demand for specific roles. The findings will inform periodic adjustments to our compensation bands to maintain alignment with the market.\n\nPerformance-Based Compensation:\nIn addition to the defined compensation bands, we emphasize a performance-based compensation model. Performance evaluations will be conducted regularly, and employees exceeding performance expectations will be eligible for bonuses, incentives, and salary increases. This approach rewards high achievers and motivates employees to excel in their roles.\n\nConclusion:\nBy implementing this compensation bands strategy, our IT company aims to establish fair and competitive compensation practices that align with market standards and foster employee satisfaction. Regular evaluations and market benchmarking will enable us to adapt and refine the strategy to meet the evolving needs of our organization.", - "summary": "This document outlines a compensation framework for IT teams. It includes job levels, compensation bands, and performance-based incentives to ensure fair and competitive wages. Regular market benchmarking will be conducted to adjust the bands according to industry trends.", - "name": "Compensation Framework For It Teams", - "url": "https://enterprisesearch.sharepoint.com/:t:/s/MSBuilddemo/EaAFec6004tAg21g4i67rfgBBRqCm1yY7AZLLQyyaMtsEQ?e=wTMb4z", - "created_on": "2018-01-12", - "category": "sharepoint", - "restricted": true, - "_run_ml_inference": true, - "rolePermissions": ["manager"] - }, - { - "content": "As an employee in Canada, it's essential to understand how to update your tax elections forms to ensure accurate tax deductions from your pay. This guide will help you navigate the process of updating your TD1 Personal Tax Credits Return form.\n\nStep 1: Access the TD1 form\nThe TD1 form is available on the Canada Revenue Agency (CRA) website. Your employer might provide you with a paper copy or a link to the online form. You can access the form directly through the following link: https://www.canada.ca/en/revenue-agency/services/forms-publications/td1-personal-tax-credits-returns.html\n\nStep 2: Choose the correct form version\nYou'll need to fill out the federal TD1 form and, if applicable, the provincial or territorial TD1 form. Select the appropriate version based on your province or territory of residence.\n\nStep 3: Download and open the form\nFor the best experience, download and open the TD1 form in Adobe Reader. If you have visual impairments, consider using the large print version available on the CRA website.\n\nStep 4: Complete the form\nFill out the form by entering your personal information, such as your name, Social Insurance Number (SIN), and address. Then, go through each section to claim any personal tax credits that apply to you. 
These credits may include:\nBasic personal amount\nAmount for an eligible dependant\nAmount for infirm dependants age 18 or older\nCaregiver amount\nDisability amount\nTuition and education amounts\n\nRead the instructions carefully for each section to ensure you claim the correct amounts.\n\nStep 5: Sign and date the form\nOnce you've completed the form, sign and date it at the bottom.\n\nStep 6: Submit the form to your employer\nSubmit the completed and signed TD1 form to your employer. You can either scan and send it electronically, or provide a printed copy. Your employer will use the information on your TD1 form to calculate the correct amount of tax to be deducted from your pay.\n\nStep 7: Update your TD1 form as needed\nIt's essential to update your TD1 form whenever your personal circumstances change, such as getting married, having a child, or becoming eligible for a new tax credit. Inform your employer of these changes and submit an updated TD1 form to ensure accurate tax deductions.\n\nUpdating your tax elections forms is a crucial step in ensuring the correct tax deductions from your pay as a new employee in Canada. Follow this guide and keep your TD1 form up to date to avoid any discrepancies in your tax filings.\n", - "summary": ": This guide gives a step-by-step explanation of how to update your TD1 Personal Tax Credits Return form. Access the form from the CRA website and choose the correct version based on your province or territory of residence. Download and open the form in Adobe Reader, fill out the form by entering", - "name": "Updating Your Tax Elections Forms", - "url": "./github/Updating Your Tax Elections Forms.txt", - "created_on": "2022-12-20", - "category": "github", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - }, - { - "content": "Welcome to our team! We are excited to have you on board and look forward to your valuable contributions. This onboarding guide is designed to help you get started by providing essential information about our policies, procedures, and resources. Please read through this guide carefully and reach out to the HR department if you have any questions.\nIntroduction to Our Company Culture and Values\nOur company is committed to creating a diverse, inclusive, and supportive work environment. We believe that our employees are our most valuable asset and strive to foster a culture of collaboration, innovation, and continuous learning. Our core values include:\nIntegrity: We act ethically and honestly in all our interactions.\nTeamwork: We work together to achieve common goals and support each other's growth.\nExcellence: We strive for the highest quality in our products, services, and relationships.\nInnovation: We encourage creativity and embrace change to stay ahead in the market.\nRespect: We treat each other with dignity and value the unique perspectives of all our colleagues.\nKey Onboarding Steps\nTo ensure a smooth onboarding process, please complete the following steps within your first week:\nAttend orientation: You will be invited to an orientation session to meet your colleagues and learn more about our company's history, mission, and values.\nReview policies and procedures: Familiarize yourself with our employee handbook, which contains important information about our policies and procedures. Please read it thoroughly and adhere to the guidelines.\nComplete required training: You may be required to complete mandatory training sessions, such as safety training or anti-harassment training. 
Ensure that you attend and complete these sessions as soon as possible.\nUpdating Tax Elections and Documents\nIt is crucial to ensure your tax information is accurate and up-to-date, regardless of the country you work in. Please follow these steps to update your tax elections and documents:\nComplete tax forms: Fill out the necessary tax forms for your country or region, which determine the amount of income tax withheld from your paycheck. You should complete new tax forms if your personal or financial situation changes, such as marriage, divorce, or a change in the number of dependents.\nSubmit regional tax forms: Depending on your location, you may be required to complete additional regional or local tax forms. Check with the HR department to determine which forms are necessary.\nUpdate your address: If you move, make sure to update your address with the HR department to ensure accurate tax reporting.\nBenefits Enrollment\nAs a new employee, you are eligible for various benefits, including health insurance, retirement plans, and paid time off. You will receive detailed information about our benefits package during orientation. To enroll in the benefits, please follow these steps:\nReview benefits options: Carefully review the benefits package and choose the options that best meet your needs.\nComplete enrollment forms: Fill out the necessary forms to enroll in your chosen benefits. Submit these forms to the HR department within 30 days of your start date.\nDesignate beneficiaries: If applicable, designate beneficiaries for your life insurance and retirement plans.\nGetting Settled in Your Workspace\nTo help you feel comfortable and productive in your new workspace, take the following steps:\nSet up your workstation: Organize your desk, chair, and computer according to your preferences. If you require any additional equipment or accommodations, please contact the HR department.\nObtain necessary supplies: Request any necessary office supplies, such as pens, notepads, or folders, from the designated supply area or by contacting the appropriate department.\nFamiliarize yourself with office resources: Locate common areas, such as break rooms, restrooms, and meeting rooms. 
Familiarize yourself with office equipment, including printers, scanners, and telephones.\n", - "summary": "\nThis onboarding guide provides essential information to new employees on our company culture and values, key onboarding steps, tax elections and documents, benefits enrollment, and setting up their workspace.", - "name": "New Employee Onboarding Guide", - "url": "./github/New Employee Onboarding guide.txt", - "created_on": "2018-01-12", - "category": "github", - "_run_ml_inference": true, - "rolePermissions": ["demo", "manager"] - } -] diff --git a/templates/rag-self-query/ingest.py b/templates/rag-self-query/ingest.py deleted file mode 100644 index be43fe97de0..00000000000 --- a/templates/rag-self-query/ingest.py +++ /dev/null @@ -1,51 +0,0 @@ -import os - -from langchain_community.document_loaders import JSONLoader -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_elasticsearch import ElasticsearchStore -from langchain_text_splitters import RecursiveCharacterTextSplitter - -ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID") -ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic") -ELASTIC_PASSWORD = os.getenv("ELASTIC_PASSWORD") -ES_URL = os.getenv("ES_URL", "http://localhost:9200") -ELASTIC_INDEX_NAME = os.getenv("ELASTIC_INDEX_NAME", "workspace-search-example") - - -def _metadata_func(record: dict, metadata: dict) -> dict: - metadata["name"] = record.get("name") - metadata["summary"] = record.get("summary") - metadata["url"] = record.get("url") - # give more descriptive name for metadata filtering. - metadata["location"] = record.get("category") - metadata["updated_at"] = record.get("updated_at") - metadata["created_on"] = record.get("created_on") - - return metadata - - -loader = JSONLoader( - file_path="./data/documents.json", - jq_schema=".[]", - content_key="content", - metadata_func=_metadata_func, -) - -text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=250) -documents = text_splitter.split_documents(loader.load()) - -if ELASTIC_CLOUD_ID and ELASTIC_USERNAME and ELASTIC_PASSWORD: - es_connection_details = { - "es_cloud_id": ELASTIC_CLOUD_ID, - "es_user": ELASTIC_USERNAME, - "es_password": ELASTIC_PASSWORD, - } -else: - es_connection_details = {"es_url": ES_URL} - -vecstore = ElasticsearchStore( - ELASTIC_INDEX_NAME, - embedding=OpenAIEmbeddings(), - **es_connection_details, -) -vecstore.add_documents(documents) diff --git a/templates/rag-self-query/main.py b/templates/rag-self-query/main.py deleted file mode 100644 index 83a1dc4c6cb..00000000000 --- a/templates/rag-self-query/main.py +++ /dev/null @@ -1,33 +0,0 @@ -from rag_self_query import chain - -if __name__ == "__main__": - questions = [ - "What is the nasa sales team?", - "What is our work from home policy?", - "Does the company own my personal project?", - "How does compensation work?", - ] - - response = chain.invoke( - { - "question": questions[0], - "chat_history": [], - } - ) - print(response) - - follow_up_question = "What are their objectives?" - - response = chain.invoke( - { - "question": follow_up_question, - "chat_history": [ - "What is the nasa sales team?", - "The sales team of NASA consists of Laura Martinez, the Area " - "Vice-President of North America, and Gary Johnson, the Area " - "Vice-President of South America. 
(Sales Organization Overview)", - ], - } - ) - - print(response) diff --git a/templates/rag-self-query/pyproject.toml b/templates/rag-self-query/pyproject.toml deleted file mode 100644 index cb34a8da277..00000000000 --- a/templates/rag-self-query/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[tool.poetry] -name = "rag-self-query" -version = "0.0.1" -description = "RAG with metadata filtering using self-query retriever on Elasticsearch vector store" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -langchain-elasticsearch = "^0.1.0" -openai = "<2" -sentence-transformers = "^2.2.2" -jq = "^1.6.0" -tiktoken = "^0.5.1" -lark = "^1.1.8" -pydantic = "^2.4.2" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_self_query" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Elasticsearch"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core",] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-self-query/rag_self_query/__init__.py b/templates/rag-self-query/rag_self_query/__init__.py deleted file mode 100644 index 89f658595f4..00000000000 --- a/templates/rag-self-query/rag_self_query/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_self_query.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-self-query/rag_self_query/chain.py b/templates/rag-self-query/rag_self_query/chain.py deleted file mode 100644 index 5ba002a0e0f..00000000000 --- a/templates/rag-self-query/rag_self_query/chain.py +++ /dev/null @@ -1,101 +0,0 @@ -import os -from operator import itemgetter -from typing import List, Tuple - -from langchain.retrievers import SelfQueryRetriever -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import format_document -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_elasticsearch.vectorstores import ElasticsearchStore - -from .prompts import CONDENSE_QUESTION_PROMPT, DOCUMENT_PROMPT, LLM_CONTEXT_PROMPT - -ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID") -ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic") -ELASTIC_PASSWORD = os.getenv("ELASTIC_PASSWORD") -ES_URL = os.getenv("ES_URL", "http://localhost:9200") -ELASTIC_INDEX_NAME = os.getenv("ELASTIC_INDEX_NAME", "workspace-search-example") - -if ELASTIC_CLOUD_ID and ELASTIC_USERNAME and ELASTIC_PASSWORD: - es_connection_details = { - "es_cloud_id": ELASTIC_CLOUD_ID, - "es_user": ELASTIC_USERNAME, - "es_password": ELASTIC_PASSWORD, - } -else: - es_connection_details = {"es_url": ES_URL} - -vecstore = ElasticsearchStore( - ELASTIC_INDEX_NAME, - embedding=OpenAIEmbeddings(), - **es_connection_details, -) - -document_contents = "The purpose and specifications of a workplace policy." 
-metadata_field_info = [ - {"name": "name", "type": "string", "description": "Name of the workplace policy."}, - { - "name": "created_on", - "type": "date", - "description": "The date the policy was created in ISO 8601 date format (YYYY-MM-DD).", # noqa: E501 - }, - { - "name": "updated_at", - "type": "date", - "description": "The date the policy was last updated in ISO 8601 date format (YYYY-MM-DD).", # noqa: E501 - }, - { - "name": "location", - "type": "string", - "description": "Where the policy text is stored. The only valid values are ['github', 'sharepoint'].", # noqa: E501 - }, -] -llm = ChatOpenAI(temperature=0) -retriever = SelfQueryRetriever.from_llm( - llm, vecstore, document_contents, metadata_field_info -) - - -def _combine_documents(docs: List) -> str: - return "\n\n".join(format_document(doc, prompt=DOCUMENT_PROMPT) for doc in docs) - - -def _format_chat_history(chat_history: List[Tuple]) -> str: - return "\n".join(f"Human: {human}\nAssistant: {ai}" for human, ai in chat_history) - - -class InputType(BaseModel): - question: str - chat_history: List[Tuple[str, str]] = Field(default_factory=list) - - -standalone_question = ( - { - "question": itemgetter("question"), - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - } - | CONDENSE_QUESTION_PROMPT - | llm - | StrOutputParser() -) - - -def route_question(input): - if input.get("chat_history"): - return standalone_question - else: - return RunnablePassthrough() - - -_context = RunnableParallel( - context=retriever | _combine_documents, - question=RunnablePassthrough(), -) - - -chain = ( - standalone_question | _context | LLM_CONTEXT_PROMPT | llm | StrOutputParser() -).with_types(input_type=InputType) diff --git a/templates/rag-self-query/rag_self_query/prompts.py b/templates/rag-self-query/rag_self_query/prompts.py deleted file mode 100644 index af45209dd72..00000000000 --- a/templates/rag-self-query/rag_self_query/prompts.py +++ /dev/null @@ -1,39 +0,0 @@ -from langchain_core.prompts import ChatPromptTemplate, PromptTemplate - -# Used to condense a question and chat history into a single question -condense_question_prompt_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. If there is no chat history, just rephrase the question to be a standalone question. - -Chat History: -{chat_history} -Follow Up Input: {question} -""" # noqa: E501 -CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template( - condense_question_prompt_template -) - -# RAG Prompt to provide the context and question for LLM to answer -# We also ask the LLM to cite the source of the passage it is answering from -llm_context_prompt_template = """ -Use the following passages to answer the user's question. -Each passage has a SOURCE which is the title of the document. When answering, cite source name of the passages you are answering from below the answer in a unique bullet point list. - -If you don't know the answer, just say that you don't know, don't try to make up an answer. 
- ----- -{context} ----- -Question: {question} -""" # noqa: E501 - -LLM_CONTEXT_PROMPT = ChatPromptTemplate.from_template(llm_context_prompt_template) - -# Used to build a context window from passages retrieved -document_prompt_template = """ ---- -NAME: {name} -PASSAGE: -{page_content} ---- -""" - -DOCUMENT_PROMPT = PromptTemplate.from_template(document_prompt_template) diff --git a/templates/rag-semi-structured/LICENSE b/templates/rag-semi-structured/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-semi-structured/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-semi-structured/README.md b/templates/rag-semi-structured/README.md deleted file mode 100644 index 2dd4a82d57d..00000000000 --- a/templates/rag-semi-structured/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# RAG - Unstructured - semi-structured - -This template performs RAG on `semi-structured data`, such as a PDF with text and tables. - -It uses the `unstructured` parser to extract the text and tables from the PDF and then uses the LLM to generate queries based on the user input. - -See [this cookbook](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb) as a reference. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -This uses [Unstructured](https://unstructured-io.github.io/unstructured/) for PDF parsing, which requires some system-level package installations. - -On Mac, you can install the necessary packages with the following: - -```shell -brew install tesseract poppler -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-semi-structured -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-semi-structured -``` - -And add the following code to your `server.py` file: -```python -from rag_semi_structured import chain as rag_semi_structured_chain - -add_routes(app, rag_semi_structured_chain, path="/rag-semi-structured") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
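The `rag-self-query` chain removed above builds a `SelfQueryRetriever` over an Elasticsearch index, describing the filterable metadata fields (`name`, `created_on`, `updated_at`, `location`) as plain dicts. For reference while the template lives only on the `v0.2` branch, here is a minimal, illustrative sketch of the same retrieval step expressed with the documented `AttributeInfo` helper rather than the plain dicts used in the removed chain. It assumes a local Elasticsearch at `http://localhost:9200`, an already-ingested `workspace-search-example` index, and `OPENAI_API_KEY` in the environment; the question shown is only an example.

```python
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers import SelfQueryRetriever
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_elasticsearch import ElasticsearchStore

# Same index and field schema as the removed template.
vectorstore = ElasticsearchStore(
    "workspace-search-example",
    embedding=OpenAIEmbeddings(),
    es_url="http://localhost:9200",
)
metadata_field_info = [
    AttributeInfo(
        name="name", type="string", description="Name of the workplace policy."
    ),
    AttributeInfo(
        name="created_on",
        type="date",
        description="Date the policy was created (YYYY-MM-DD).",
    ),
    AttributeInfo(
        name="location",
        type="string",
        description="Where the policy is stored; one of ['github', 'sharepoint'].",
    ),
]
retriever = SelfQueryRetriever.from_llm(
    ChatOpenAI(temperature=0),
    vectorstore,
    "The purpose and specifications of a workplace policy.",
    metadata_field_info,
)

# The LLM rewrites the question into a search string plus a structured metadata
# filter (roughly location == "sharepoint" and a created_on range) before the
# vector search runs; `lark`, pinned in the removed pyproject.toml, is required.
docs = retriever.invoke("Which sharepoint policies were created in 2018?")
```

The removed `chain.py` wires this retriever into a condense-question RAG pipeline; the sketch above covers only the retrieval step.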
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-semi-structured/playground](http://127.0.0.1:8000/rag-semi-structured/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-semi-structured") -``` - -For more details on how to connect to the template, refer to the Jupyter notebook `rag_semi_structured`. \ No newline at end of file diff --git a/templates/rag-semi-structured/docs/LLaVA.pdf b/templates/rag-semi-structured/docs/LLaVA.pdf deleted file mode 100644 index 88da76cecd5..00000000000 Binary files a/templates/rag-semi-structured/docs/LLaVA.pdf and /dev/null differ diff --git a/templates/rag-semi-structured/pyproject.toml b/templates/rag-semi-structured/pyproject.toml deleted file mode 100644 index c2d21a5f1f0..00000000000 --- a/templates/rag-semi-structured/pyproject.toml +++ /dev/null @@ -1,43 +0,0 @@ -[tool.poetry] -name = "rag-semi-structured" -version = "0.1.0" -description = "RAG on semi-structured " -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.9,<4" -langchain = "^0.1" -tiktoken = ">=0.5.1" -langchain-chroma = "^0.1.2" -openai = "<2" -unstructured = ">=0.10.19" -pdf2image = ">=1.16.3" -pdfminer = "^20191125" -opencv-python = "^4.8.1.78" -pandas = "^2.1.4" -pytesseract = "^0.3.10" -pdfminer-six = "^20221105" -unstructured-pytesseract = "^0.3.12" -unstructured-inference = "^0.7.18" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.15" - -[tool.langserve] -export_module = "rag_semi_structured" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI", "Chroma", "Unstructured"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-semi-structured/rag_semi_structured.ipynb b/templates/rag-semi-structured/rag_semi_structured.ipynb deleted file mode 100644 index 12ff726d8bb..00000000000 --- a/templates/rag-semi-structured/rag_semi_structured.ipynb +++ /dev/null @@ -1,51 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "30fc2c27", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/rag-semi-structured\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "65f5b560", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://localhost:8001/rag-semi-structured\")\n", - "rag_app.invoke(\"How does agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": 
"python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-semi-structured/rag_semi_structured/__init__.py b/templates/rag-semi-structured/rag_semi_structured/__init__.py deleted file mode 100644 index 55f823dde08..00000000000 --- a/templates/rag-semi-structured/rag_semi_structured/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_semi_structured.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-semi-structured/rag_semi_structured/chain.py b/templates/rag-semi-structured/rag_semi_structured/chain.py deleted file mode 100644 index e5e630c4871..00000000000 --- a/templates/rag-semi-structured/rag_semi_structured/chain.py +++ /dev/null @@ -1,120 +0,0 @@ -# Load -import uuid - -from langchain.retrievers.multi_vector import MultiVectorRetriever -from langchain.storage import InMemoryStore -from langchain_chroma import Chroma -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.documents import Document -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough -from unstructured.partition.pdf import partition_pdf - -# Path to docs -path = "docs" -raw_pdf_elements = partition_pdf( - filename=path + "/LLaVA.pdf", - # Unstructured first finds embedded image blocks - extract_images_in_pdf=False, - # Use layout model (YOLOX) to get bounding boxes (for tables) and find titles - # Titles are any sub-section of the document - infer_table_structure=True, - # Post processing to aggregate text once we have the title - chunking_strategy="by_title", - # Chunking params to aggregate text blocks - # Attempt to create a new chunk 3800 chars - # Attempt to keep chunks > 2000 chars - max_characters=4000, - new_after_n_chars=3800, - combine_text_under_n_chars=2000, - image_output_dir_path=path, -) - -# Categorize by type -tables = [] -texts = [] -for element in raw_pdf_elements: - if "unstructured.documents.elements.Table" in str(type(element)): - tables.append(str(element)) - elif "unstructured.documents.elements.CompositeElement" in str(type(element)): - texts.append(str(element)) - -# Summarize - -prompt_text = """You are an assistant tasked with summarizing tables and text. \ -Give a concise summary of the table or text. 
Table or text chunk: {element} """ -prompt = ChatPromptTemplate.from_template(prompt_text) -model = ChatOpenAI(temperature=0, model="gpt-4") -summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() - -# Apply -table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) -# To save time / cost, only do text summaries if chunk sizes are large -# text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5}) -# We can just assign text_summaries to the raw texts -text_summaries = texts - -# Use multi vector retriever - -# The vectorstore to use to index the child chunks -vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings()) - -# The storage layer for the parent documents -store = InMemoryStore() -id_key = "doc_id" - -# The retriever (empty to start) -retriever = MultiVectorRetriever( - vectorstore=vectorstore, - docstore=store, - id_key=id_key, -) - -# Add texts -doc_ids = [str(uuid.uuid4()) for _ in texts] -summary_texts = [ - Document(page_content=s, metadata={id_key: doc_ids[i]}) - for i, s in enumerate(text_summaries) -] -retriever.vectorstore.add_documents(summary_texts) -retriever.docstore.mset(list(zip(doc_ids, texts))) - -# Add tables -table_ids = [str(uuid.uuid4()) for _ in tables] -summary_tables = [ - Document(page_content=s, metadata={id_key: table_ids[i]}) - for i, s in enumerate(table_summaries) -] -retriever.vectorstore.add_documents(summary_tables) -retriever.docstore.mset(list(zip(table_ids, tables))) - -# RAG - -# Prompt template -template = """Answer the question based only on the following context, which can include text and tables: -{context} -Question: {question} -""" # noqa: E501 -prompt = ChatPromptTemplate.from_template(template) - -# LLM -model = ChatOpenAI(temperature=0, model="gpt-4") - -# RAG pipeline -chain = ( - {"context": retriever, "question": RunnablePassthrough()} - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-semi-structured/tests/__init__.py b/templates/rag-semi-structured/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-singlestoredb/LICENSE b/templates/rag-singlestoredb/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-singlestoredb/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
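The `rag-semi-structured` chain deleted above is a multi-vector setup: LLM-written summaries of each table and text chunk are embedded in Chroma, while the raw chunks sit in an `InMemoryStore` keyed by the same `doc_id`. As a rough illustration of what that means at query time, a sketch that assumes the removed package is still installed (for example from the `v0.2` branch) with its `docs/LLaVA.pdf` in place; the question is only an example.

```python
# Importing the module runs the partitioning and summarizing code at the top of
# chain.py, so the retriever below comes back already populated.
from rag_semi_structured.chain import retriever

# The similarity search runs over the *summary* embeddings in Chroma, but the
# values returned are looked up in the docstore by doc_id: the raw text and
# table chunks (plain strings in this template), not the summaries.
chunks = retriever.invoke("Which benchmarks does LLaVA report results on?")
for chunk in chunks[:2]:
    print(str(chunk)[:300])
```

That indirection is the point of `MultiVectorRetriever`: short, clean summaries make better embeddings, while the full chunks, including whole tables, are what the RAG prompt actually sees.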
diff --git a/templates/rag-singlestoredb/README.md b/templates/rag-singlestoredb/README.md deleted file mode 100644 index 2f27e583d1f..00000000000 --- a/templates/rag-singlestoredb/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# RAG - SingleStoreDB - -This template performs RAG using `SingleStoreDB` and OpenAI. - -## Environment Setup - -This template uses `SingleStoreDB` as a vectorstore and requires that `SINGLESTOREDB_URL` is set. It should take the form `admin:password@svc-xxx.svc.singlestore.com:port/db_name` - -Set the `OPENAI_API_KEY` environment variable to access the `OpenAI` models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-singlestoredb -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-singlestoredb -``` - -And add the following code to your `server.py` file: -```python -from rag_singlestoredb import chain as rag_singlestoredb_chain - -add_routes(app, rag_singlestoredb_chain, path="/rag-singlestoredb") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-singlestoredb/playground](http://127.0.0.1:8000/rag-singlestoredb/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-singlestoredb") -``` diff --git a/templates/rag-singlestoredb/pyproject.toml b/templates/rag-singlestoredb/pyproject.toml deleted file mode 100644 index 10e7cb91469..00000000000 --- a/templates/rag-singlestoredb/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[tool.poetry] -name = "rag-singlestoredb" -version = "0.0.1" -description = "RAG using SingleStoreDB" -authors = ["Alex Peng "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -singlestoredb = ">=0.8.1" -tiktoken = "^0.5.1" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "rag_singlestoredb" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "SingleStoreDB" -integrations = ["OpenAI", "SingleStoreDB"] -tags = ["vectordbs"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-singlestoredb/rag_singlestoredb.ipynb b/templates/rag-singlestoredb/rag_singlestoredb.ipynb deleted file mode 100644 index 03c2fcf158e..00000000000 --- a/templates/rag-singlestoredb/rag_singlestoredb.ipynb +++ /dev/null @@ -1,51 +0,0 @@ -{ - "cells": [ - { - "cell_type": 
"markdown", - "id": "681a5d1e", - "metadata": {}, - "source": [ - "## Connect to template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag_singlestore\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d774be2a", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_singlestore = RemoteRunnable(\"http://0.0.0.0:8001/rag_singlestore\")\n", - "rag_app_singlestore.invoke(\"How does agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-singlestoredb/rag_singlestoredb/__init__.py b/templates/rag-singlestoredb/rag_singlestoredb/__init__.py deleted file mode 100644 index 26f56974dfe..00000000000 --- a/templates/rag-singlestoredb/rag_singlestoredb/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_singlestoredb.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-singlestoredb/rag_singlestoredb/chain.py b/templates/rag-singlestoredb/rag_singlestoredb/chain.py deleted file mode 100644 index 4aca83be1ab..00000000000 --- a/templates/rag-singlestoredb/rag_singlestoredb/chain.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import SingleStoreDB -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -if os.environ.get("SINGLESTOREDB_URL", None) is None: - raise Exception("Missing `SINGLESTOREDB_URL` environment variable.") - -# SINGLESTOREDB_URL takes the form of: "admin:password@host:port/db_name" - -## Ingest code - you may need to run this the first time -# # Load -# from langchain_community.document_loaders import WebBaseLoader - -# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -# data = loader.load() - -# # Split -# from langchain_text_splitters import RecursiveCharacterTextSplitter - -# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -# all_splits = text_splitter.split_documents(data) - -# # Add to vectorDB -# vectorstore = SingleStoreDB.from_documents( -# documents=all_splits, embedding=OpenAIEmbeddings() -# ) -# retriever = vectorstore.as_retriever() - -vectorstore = SingleStoreDB(embedding=OpenAIEmbeddings()) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-singlestoredb/tests/__init__.py 
b/templates/rag-singlestoredb/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-supabase/.gitignore b/templates/rag-supabase/.gitignore deleted file mode 100644 index 4c49bd78f1d..00000000000 --- a/templates/rag-supabase/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.env diff --git a/templates/rag-supabase/README.md b/templates/rag-supabase/README.md deleted file mode 100644 index c0ade9483da..00000000000 --- a/templates/rag-supabase/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# RAG - Supabase - -This template performs RAG with `Supabase`. - -[Supabase](https://supabase.com/docs) is an open-source `Firebase` alternative. It is built on top of [PostgreSQL](https://en.wikipedia.org/wiki/PostgreSQL), a free and open-source relational database management system (RDBMS) and uses [pgvector](https://github.com/pgvector/pgvector) to store embeddings within your tables. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -To get your `OPENAI_API_KEY`, navigate to [API keys](https://platform.openai.com/account/api-keys) on your OpenAI account and create a new secret key. - -To find your `SUPABASE_URL` and `SUPABASE_SERVICE_KEY`, head to your Supabase project's [API settings](https://supabase.com/dashboard/project/_/settings/api). - -- `SUPABASE_URL` corresponds to the Project URL -- `SUPABASE_SERVICE_KEY` corresponds to the `service_role` API key - - -```shell -export SUPABASE_URL= -export SUPABASE_SERVICE_KEY= -export OPENAI_API_KEY= -``` - -## Setup Supabase Database - -Use these steps to setup your Supabase database if you haven't already. - -1. Head over to https://database.new to provision your Supabase database. -2. In the studio, jump to the [SQL editor](https://supabase.com/dashboard/project/_/sql/new) and run the following script to enable `pgvector` and setup your database as a vector store: - - ```sql - -- Enable the pgvector extension to work with embedding vectors - create extension if not exists vector; - - -- Create a table to store your documents - create table - documents ( - id uuid primary key, - content text, -- corresponds to Document.pageContent - metadata jsonb, -- corresponds to Document.metadata - embedding vector (1536) -- 1536 works for OpenAI embeddings, change as needed - ); - - -- Create a function to search for documents - create function match_documents ( - query_embedding vector (1536), - filter jsonb default '{}' - ) returns table ( - id uuid, - content text, - metadata jsonb, - similarity float - ) language plpgsql as $$ - #variable_conflict use_column - begin - return query - select - id, - content, - metadata, - 1 - (documents.embedding <=> query_embedding) as similarity - from documents - where metadata @> filter - order by documents.embedding <=> query_embedding; - end; - $$; - ``` - -## Setup Environment Variables - -Since we are using [`SupabaseVectorStore`](https://python.langchain.com/docs/integrations/vectorstores/supabase) and [`OpenAIEmbeddings`](https://python.langchain.com/docs/integrations/text_embedding/openai), we need to load their API keys. 
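For reference, here is a condensed sketch of how the exported variables and the `documents` table above fit together; it mirrors the template's own `chain.py` (removed further down in this diff) and introduces nothing new:

```python
import os

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores.supabase import SupabaseVectorStore
from supabase.client import create_client

# Uses the SUPABASE_URL and SUPABASE_SERVICE_KEY exported above
supabase = create_client(
    os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"]
)

# `documents` and `match_documents` are the table and function created
# by the SQL setup script above
vectorstore = SupabaseVectorStore(
    client=supabase,
    embedding=OpenAIEmbeddings(),  # reads OPENAI_API_KEY
    table_name="documents",
    query_name="match_documents",
)
retriever = vectorstore.as_retriever()
```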
- -## Usage - -First, install the LangChain CLI: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-supabase -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-supabase -``` - -And add the following code to your `server.py` file: - -```python -from rag_supabase.chain import chain as rag_supabase_chain - -add_routes(app, rag_supabase_chain, path="/rag-supabase") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-supabase/playground](http://127.0.0.1:8000/rag-supabase/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-supabase") -``` - -TODO: Add details about setting up the Supabase database \ No newline at end of file diff --git a/templates/rag-supabase/pyproject.toml b/templates/rag-supabase/pyproject.toml deleted file mode 100644 index b2ca4a942a9..00000000000 --- a/templates/rag-supabase/pyproject.toml +++ /dev/null @@ -1,39 +0,0 @@ -[tool.poetry] -name = "rag-supabase" -version = "0.1.0" -description = "RAG using Supabase retriver" -authors = [ - "Greg Richardson ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -supabase = "^1.2.0" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -[tool.poetry.group.dev.dependencies.python-dotenv] -extras = [ - "cli", -] -version = "^1.0.0" - -[tool.langserve] -export_module = "rag_supabase.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Supabase" -integrations = ["OpenAI", "Supabase"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-supabase/rag_supabase/__init__.py b/templates/rag-supabase/rag_supabase/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-supabase/rag_supabase/chain.py b/templates/rag-supabase/rag_supabase/chain.py deleted file mode 100644 index 9ad9c45da2b..00000000000 --- a/templates/rag-supabase/rag_supabase/chain.py +++ /dev/null @@ -1,50 +0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores.supabase import SupabaseVectorStore -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from supabase.client import create_client - 
-supabase_url = os.environ.get("SUPABASE_URL") -supabase_key = os.environ.get("SUPABASE_SERVICE_KEY") -supabase = create_client(supabase_url, supabase_key) - -embeddings = OpenAIEmbeddings() - -vectorstore = SupabaseVectorStore( - client=supabase, - embedding=embeddings, - table_name="documents", - query_name="match_documents", -) - -retriever = vectorstore.as_retriever() - -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" - -prompt = ChatPromptTemplate.from_template(template) - -model = ChatOpenAI() - -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-supabase/tests/__init__.py b/templates/rag-supabase/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-timescale-conversation/LICENSE b/templates/rag-timescale-conversation/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-timescale-conversation/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-timescale-conversation/README.md b/templates/rag-timescale-conversation/README.md deleted file mode 100644 index e73c12a6e14..00000000000 --- a/templates/rag-timescale-conversation/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# RAG - Timescale - conversation - -This template is used for [conversational](https://python.langchain.com/docs/expression_language/cookbook/retrieval#conversational-retrieval-chain) [retrieval](https://python.langchain.com/docs/use_cases/question_answering/), which is one of the most popular LLM use-cases. - -It passes both a conversation history and retrieved documents into an LLM for synthesis. - -## Environment Setup - -This template uses `Timescale Vector` as a vectorstore and requires that `TIMESCALES_SERVICE_URL`. Signup for a 90-day trial [here](https://console.cloud.timescale.com/signup?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral) if you don't yet have an account. - -To load the sample dataset, set `LOAD_SAMPLE_DATA=1`. To load your own dataset see the section below. - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. 
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U "langchain-cli[serve]" -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-timescale-conversation -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-timescale-conversation -``` - -And add the following code to your `server.py` file: -```python -from rag_timescale_conversation import chain as rag_timescale_conversation_chain - -add_routes(app, rag_timescale_conversation_chain, path="/rag-timescale_conversation") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-timescale-conversation/playground](http://127.0.0.1:8000/rag-timescale-conversation/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-timescale-conversation") -``` - -See the `rag_conversation.ipynb` notebook for example usage. - -## Loading your own dataset - -To load your own dataset you will have to create a `load_dataset` function. You can see an example, in the -`load_ts_git_dataset` function defined in the `load_sample_dataset.py` file. You can then run this as a -standalone function (e.g. in a bash script) or add it to chain.py (but then you should run it just once). 
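As a rough illustration, a custom loader only needs to finish by writing documents into the collection the chain reads from. This is a minimal sketch modeled on `load_ts_git_dataset`; the loader class, file path, and chunking settings are illustrative assumptions, not part of the template:

```python
from datetime import timedelta

from langchain_community.document_loaders import TextLoader  # any loader works; illustrative choice
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores.timescalevector import TimescaleVector
from langchain_text_splitters.character import CharacterTextSplitter


def load_my_dataset(service_url: str, collection_name: str = "timescale_commits") -> None:
    # Load and chunk your own documents (the path is a placeholder).
    documents = TextLoader("my_data.txt").load()
    docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(documents)

    # Write into the same collection and partition interval that chain.py uses.
    # Note: the sample loader also assigns time-based UUIDs as document ids so
    # that time filtering works; see load_sample_dataset.py for that detail.
    TimescaleVector.from_documents(
        embedding=OpenAIEmbeddings(),
        documents=docs,
        collection_name=collection_name,
        service_url=service_url,
        time_partition_interval=timedelta(days=7),
    )
```

You can call this once from a standalone script, or from `chain.py` guarded so it only runs on first startup.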
\ No newline at end of file diff --git a/templates/rag-timescale-conversation/pyproject.toml b/templates/rag-timescale-conversation/pyproject.toml deleted file mode 100644 index 3ddf8808401..00000000000 --- a/templates/rag-timescale-conversation/pyproject.toml +++ /dev/null @@ -1,38 +0,0 @@ -[tool.poetry] -name = "rag-timescale-conversation" -version = "0.1.0" -description = "Conversational RAG using timescale-vector DB as retriver" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = ">=0.5.1" -pinecone-client = ">=2.2.4" -beautifulsoup4 = "^4.12.2" -python-dotenv = "^1.0.0" -timescale-vector = "^0.0.3" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_timescale_conversation" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Timescale" -integrations = ["OpenAI", "Timescale"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-timescale-conversation/rag_conversation.ipynb b/templates/rag-timescale-conversation/rag_conversation.ipynb deleted file mode 100644 index 4203689a153..00000000000 --- a/templates/rag-timescale-conversation/rag_conversation.ipynb +++ /dev/null @@ -1,238 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "424a9d8d", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_timescale_conv, path=\"/rag_timescale_conversation\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "5f521923", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app = RemoteRunnable(\"http://0.0.0.0:8000/rag_timescale_conversation\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "563a58dd", - "metadata": {}, - "source": [ - "First, setup the history" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "14541994", - "metadata": {}, - "outputs": [], - "source": [ - "question = \"My name is Sven Klemm\"\n", - "answer = rag_app.invoke(\n", - " {\n", - " \"question\": question,\n", - " \"chat_history\": [],\n", - " }\n", - ")\n", - "chat_history = [(question, answer)]" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "63e76c4d", - "metadata": {}, - "source": [ - "Next, use the history for a question" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "b2d8f735", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'The person named Sven Klemm made the following commits:\\n\\n1. Commit \"a31c9b9f8cdfe8643499b710dc983e5c5d6457e4\" on \"Mon May 22 11:34:06 2023 +0200\" with the change summary \"Increase number of sqlsmith loops in nightly CI\". The change details are \"To improve coverage with sqlsmith we run it for longer in the scheduled nightly run.\"\\n\\n2. 
Commit \"e4ba2bcf560568ae68f3775c058f0a8d7f7c0501\" on \"Wed Nov 9 09:29:36 2022 +0100\" with the change summary \"Remove debian 9 from packages tests.\" The change details are \"Debian 9 is EOL since July 2022 so we won\\'t build packages for it anymore and can remove it from CI.\"'" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "\n", - "answer = rag_app.invoke(\n", - " {\n", - " \"question\": \"What commits did the person with my name make?\",\n", - " \"chat_history\": chat_history,\n", - " }\n", - ")\n", - "answer" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "bd62df23", - "metadata": {}, - "source": [ - "## Filter by time\n", - "\n", - "You can also use timed filters. For example, the sample dataset doesn't include any commits before 2010, so this should return no matches." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "b0a598b7", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'The context does not provide any information about any commits made by a person named Sven Klemm.'" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "answer = rag_app.invoke(\n", - " {\n", - " \"question\": \"What commits did the person with my name make?\",\n", - " \"chat_history\": chat_history,\n", - " \"end_date\": \"2016-01-01 00:00:00\",\n", - " }\n", - ")\n", - "answer\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "25851869", - "metadata": {}, - "source": [ - "However, there is data from 2022, which can be used" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "4aef5219", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'The person named Sven Klemm made the following commits:\\n\\n1. \"e4ba2bcf560568ae68f3775c058f0a8d7f7c0501\" with the change summary \"Remove debian 9 from packages tests.\" The details of this change are that \"Debian 9 is EOL since July 2022 so we won\\'t build packages for it anymore and can remove it from CI.\"\\n\\n2. \"2f237e6e57e5ac66c126233d66969a1f674ffaa4\" with the change summary \"Add Enterprise Linux 9 packages to RPM package test\". The change details for this commit are not provided.'" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "answer = rag_app.invoke(\n", - " {\n", - " \"question\": \"What commits did the person with my name make?\",\n", - " \"chat_history\": chat_history,\n", - " \"start_date\": \"2020-01-01 00:00:00\",\n", - " \"end_date\": \"2023-01-01 00:00:00\",\n", - " }\n", - ")\n", - "answer" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "6ad86fbd", - "metadata": {}, - "source": [ - "## Filter by metadata\n", - "\n", - "You can also filter by metadata using this chain" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "7ac9365f", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'The person named Sven Klemm made a commit with the ID \"5cd2c038796fb302190b080c90e5acddbef4b8d1\". 
The change summary for this commit is \"Simplify windows-build-and-test-ignored.yaml\" and the change details are \"Remove code not needed for the skip workflow of the windows test.\" The commit was made on \"Sat Mar 4 10:18:34 2023 +0100\".'" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "answer = rag_app.invoke(\n", - " {\n", - " \"question\": \"What commits did the person with my name make?\",\n", - " \"chat_history\": chat_history,\n", - " \"metadata_filter\": {\"commit_hash\": \" 5cd2c038796fb302190b080c90e5acddbef4b8d1\"},\n", - " }\n", - ")\n", - "answer" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1cde5da5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.4" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-timescale-conversation/rag_timescale_conversation/__init__.py b/templates/rag-timescale-conversation/rag_timescale_conversation/__init__.py deleted file mode 100644 index 1638a54ce2d..00000000000 --- a/templates/rag-timescale-conversation/rag_timescale_conversation/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_timescale_conversation.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py b/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py deleted file mode 100644 index 1456e67302c..00000000000 --- a/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py +++ /dev/null @@ -1,172 +0,0 @@ -import os -from datetime import datetime, timedelta -from operator import itemgetter -from typing import List, Optional, Tuple - -from dotenv import find_dotenv, load_dotenv -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores.timescalevector import TimescaleVector -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ( - ChatPromptTemplate, - MessagesPlaceholder, - format_document, -) -from langchain_core.prompts.prompt import PromptTemplate -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.runnables import ( - RunnableBranch, - RunnableLambda, - RunnableParallel, - RunnablePassthrough, -) - -from .load_sample_dataset import load_ts_git_dataset - -load_dotenv(find_dotenv()) - -if os.environ.get("TIMESCALE_SERVICE_URL", None) is None: - raise Exception("Missing `TIMESCALE_SERVICE_URL` environment variable.") - -SERVICE_URL = os.environ["TIMESCALE_SERVICE_URL"] -LOAD_SAMPLE_DATA = os.environ.get("LOAD_SAMPLE_DATA", False) -COLLECTION_NAME = os.environ.get("COLLECTION_NAME", "timescale_commits") -OPENAI_MODEL = os.environ.get("OPENAI_MODEL", "gpt-4") - -partition_interval = timedelta(days=7) -if LOAD_SAMPLE_DATA: - load_ts_git_dataset( - SERVICE_URL, - collection_name=COLLECTION_NAME, - num_records=500, - partition_interval=partition_interval, - ) - -embeddings = OpenAIEmbeddings() -vectorstore = TimescaleVector( - embedding=embeddings, - 
collection_name=COLLECTION_NAME, - service_url=SERVICE_URL, - time_partition_interval=partition_interval, -) -retriever = vectorstore.as_retriever() - -# Condense a chat history and follow-up question into a standalone question -_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. -Chat History: -{chat_history} -Follow Up Input: {question} -Standalone question:""" # noqa: E501 -CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) - -# RAG answer synthesis prompt -template = """Answer the question based only on the following context: - -{context} -""" -ANSWER_PROMPT = ChatPromptTemplate.from_messages( - [ - ("system", template), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{question}"), - ] -) - -# Conversational Retrieval Chain -DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}") - - -def _combine_documents( - docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n" -): - doc_strings = [format_document(doc, document_prompt) for doc in docs] - return document_separator.join(doc_strings) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]) -> List: - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -# User input -class ChatHistory(BaseModel): - chat_history: List[Tuple[str, str]] = Field(..., extra={"widget": {"type": "chat"}}) - question: str - start_date: Optional[datetime] - end_date: Optional[datetime] - metadata_filter: Optional[dict] - - -_search_query = RunnableBranch( - # If input includes chat_history, we condense it with the follow-up question - ( - RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config( - run_name="HasChatHistoryCheck" - ), # Condense follow-up question and chat into a standalone_question - RunnablePassthrough.assign( - retriever_query=RunnablePassthrough.assign( - chat_history=lambda x: _format_chat_history(x["chat_history"]) - ) - | CONDENSE_QUESTION_PROMPT - | ChatOpenAI(temperature=0, model=OPENAI_MODEL) - | StrOutputParser() - ), - ), - # Else, we have no chat history, so just pass through the question - RunnablePassthrough.assign(retriever_query=lambda x: x["question"]), -) - - -def get_retriever_with_metadata(x): - start_dt = x.get("start_date", None) - end_dt = x.get("end_date", None) - metadata_filter = x.get("metadata_filter", None) - opt = {} - - if start_dt is not None: - opt["start_date"] = start_dt - if end_dt is not None: - opt["end_date"] = end_dt - if metadata_filter is not None: - opt["filter"] = metadata_filter - v = vectorstore.as_retriever(search_kwargs=opt) - return RunnableLambda(itemgetter("retriever_query")) | v - - -_retriever = RunnableLambda(get_retriever_with_metadata) - -_inputs = RunnableParallel( - { - "question": lambda x: x["question"], - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - "start_date": lambda x: x.get("start_date", None), - "end_date": lambda x: x.get("end_date", None), - "context": _search_query | _retriever | _combine_documents, - } -) - -_datetime_to_string = RunnablePassthrough.assign( - start_date=lambda x: ( - x.get("start_date", None).isoformat() - if x.get("start_date", None) is not None - else None - ), - end_date=lambda x: ( - x.get("end_date", None).isoformat() - if x.get("end_date", None) is not None - else None - ), -).with_types(input_type=ChatHistory) - -chain = ( - 
_datetime_to_string - | _inputs - | ANSWER_PROMPT - | ChatOpenAI(model=OPENAI_MODEL) - | StrOutputParser() -) diff --git a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py b/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py deleted file mode 100644 index 111d08b59d7..00000000000 --- a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py +++ /dev/null @@ -1,84 +0,0 @@ -import os -import tempfile -from datetime import datetime, timedelta - -import requests -from langchain_community.document_loaders import JSONLoader -from langchain_community.embeddings.openai import OpenAIEmbeddings -from langchain_community.vectorstores.timescalevector import TimescaleVector -from langchain_text_splitters.character import CharacterTextSplitter -from timescale_vector import client - - -def parse_date(date_string: str) -> datetime: - if date_string is None: - return None - time_format = "%a %b %d %H:%M:%S %Y %z" - return datetime.strptime(date_string, time_format) - - -def extract_metadata(record: dict, metadata: dict) -> dict: - dt = parse_date(record["date"]) - metadata["id"] = str(client.uuid_from_time(dt)) - if dt is not None: - metadata["date"] = dt.isoformat() - else: - metadata["date"] = None - metadata["author"] = record["author"] - metadata["commit_hash"] = record["commit"] - return metadata - - -def load_ts_git_dataset( - service_url, - collection_name="timescale_commits", - num_records: int = 500, - partition_interval=timedelta(days=7), -): - json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json" - tmp_file = "ts_git_log.json" - - temp_dir = tempfile.gettempdir() - json_file_path = os.path.join(temp_dir, tmp_file) - - if not os.path.exists(json_file_path): - response = requests.get(json_url) - if response.status_code == 200: - with open(json_file_path, "w") as json_file: - json_file.write(response.text) - else: - print(f"Failed to download JSON file. Status code: {response.status_code}") - - loader = JSONLoader( - file_path=json_file_path, - jq_schema=".commit_history[]", - text_content=False, - metadata_func=extract_metadata, - ) - - documents = loader.load() - - # Remove documents with None dates - documents = [doc for doc in documents if doc.metadata["date"] is not None] - - if num_records > 0: - documents = documents[:num_records] - - # Split the documents into chunks for embedding - text_splitter = CharacterTextSplitter( - chunk_size=1000, - chunk_overlap=200, - ) - docs = text_splitter.split_documents(documents) - - embeddings = OpenAIEmbeddings() - - # Create a Timescale Vector instance from the collection of documents - TimescaleVector.from_documents( - embedding=embeddings, - ids=[doc.metadata["id"] for doc in docs], - documents=docs, - collection_name=collection_name, - service_url=service_url, - time_partition_interval=partition_interval, - ) diff --git a/templates/rag-timescale-conversation/tests/__init__.py b/templates/rag-timescale-conversation/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-timescale-hybrid-search-time/LICENSE b/templates/rag-timescale-hybrid-search-time/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/rag-timescale-hybrid-search-time/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/rag-timescale-hybrid-search-time/README.md b/templates/rag-timescale-hybrid-search-time/README.md deleted file mode 100644 index 4c69b117da6..00000000000 --- a/templates/rag-timescale-hybrid-search-time/README.md +++ /dev/null @@ -1,110 +0,0 @@ -# RAG - Timescale - hybrid search - -This template shows how to use `Timescale Vector` with the self-query retriever to perform hybrid search on similarity and time. - -This is useful any time your data has a strong time-based component. Some examples of such data are: -- News articles (politics, business, etc) -- Blog posts, documentation or other published material (public or private). -- Social media posts -- Changelogs of any kind -- Messages - -Such items are often searched by both similarity and time. For example: Show me all news about Toyota trucks from 2022. - -[Timescale Vector](https://www.timescale.com/ai?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral) provides superior performance when searching for embeddings within a particular timeframe by leveraging automatic table partitioning to isolate data for particular time-ranges. - -Langchain's self-query retriever allows deducing time-ranges (as well as other search criteria) from the text of user queries. - -## What is Timescale Vector? - -**[Timescale Vector](https://www.timescale.com/ai?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral) is PostgreSQL++ for AI applications.** - -Timescale Vector enables you to efficiently store and query billions of vector embeddings in `PostgreSQL`. -- Enhances `pgvector` with faster and more accurate similarity search on 1B+ vectors via DiskANN inspired indexing algorithm. -- Enables fast time-based vector search via automatic time-based partitioning and indexing. -- Provides a familiar SQL interface for querying vector embeddings and relational data. - -Timescale Vector is cloud PostgreSQL for AI that scales with you from POC to production: -- Simplifies operations by enabling you to store relational metadata, vector embeddings, and time-series data in a single database. -- Benefits from rock-solid PostgreSQL foundation with enterprise-grade feature liked streaming backups and replication, high-availability and row-level security. -- Enables a worry-free experience with enterprise-grade security and compliance. 
- -### How to access Timescale Vector -Timescale Vector is available on [Timescale](https://www.timescale.com/products?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral), the cloud PostgreSQL platform. (There is no self-hosted version at this time.) - -- LangChain users get a 90-day free trial for Timescale Vector. -- To get started, [signup](https://console.cloud.timescale.com/signup?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral) to Timescale, create a new database and follow this notebook! -- See the [installation instructions](https://github.com/timescale/python-vector) for more details on using Timescale Vector in python. - -## Environment Setup - -This template uses Timescale Vector as a vectorstore and requires that `TIMESCALES_SERVICE_URL`. Signup for a 90-day trial [here](https://console.cloud.timescale.com/signup?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral) if you don't yet have an account. - -To load the sample dataset, set `LOAD_SAMPLE_DATA=1`. To load your own dataset see the section below. - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-timescale-hybrid-search-time -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-timescale-hybrid-search-time -``` - -And add the following code to your `server.py` file: -```python -from rag_timescale_hybrid_search.chain import chain as rag_timescale_hybrid_search_chain - -add_routes(app, rag_timescale_hybrid_search_chain, path="/rag-timescale-hybrid-search") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-timescale-hybrid-search/playground](http://127.0.0.1:8000/rag-timescale-hybrid-search/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-timescale-hybrid-search") -``` - -## Loading your own dataset - -To load your own dataset you will have to modify the code in the `DATASET SPECIFIC CODE` section of `chain.py`. -This code defines the name of the collection, how to load the data, and the human-language description of both the -contents of the collection and all of the metadata. The human-language descriptions are used by the self-query retriever -to help the LLM convert the question into filters on the metadata when searching the data in Timescale-vector. 
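Concretely, the dataset-specific block boils down to a plain-language description of the documents plus one `AttributeInfo` entry per metadata field the LLM may filter on. A sketch using made-up news-article metadata (the field names below are illustrative, not from the template):

```python
from langchain.chains.query_constructor.base import AttributeInfo

# What each stored document contains, in plain language.
document_content_description = (
    "A news article with its headline, body text, publication date and author"
)

# The metadata fields the self-query retriever is allowed to filter on.
metadata_field_info = [
    AttributeInfo(
        # Special attribute representing the timestamp encoded in the document's UUID.
        name="__uuid_timestamp",
        description="The publication time. Specify in YYYY-MM-DDTHH:MM:SSZ format",
        type="datetime.datetime",
    ),
    AttributeInfo(name="author", description="The name of the article's author", type="string"),
    AttributeInfo(name="section", description="The news section, e.g. business or politics", type="string"),
]
```

These two values are passed unchanged to `SelfQueryRetriever.from_llm`, together with the vectorstore and an LLM, exactly as in the existing `chain.py`.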
\ No newline at end of file diff --git a/templates/rag-timescale-hybrid-search-time/pyproject.toml b/templates/rag-timescale-hybrid-search-time/pyproject.toml deleted file mode 100644 index 366bd226b6b..00000000000 --- a/templates/rag-timescale-hybrid-search-time/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "rag-timescale-hybrid-search-time" -version = "0.0.1" -description = "RAG using timescale-vector DB with the self-query retriver for metadata filtering on time" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" -timescale-vector = "^0.0.3" -lark = "^1.1.8" -tiktoken = "^0.5.1" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rag_timescale_hybrid_search_time.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Timescale" -integrations = ["OpenAI", "Timescale"] -tags = ["vectordbs", "hybrid-search", "timeseries"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/__init__.py b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/__init__.py deleted file mode 100644 index 3acf9f6da24..00000000000 --- a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_timescale_hybrid_search_time import chain - -__all__ = ["chain"] diff --git a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py deleted file mode 100644 index 8a8af2765eb..00000000000 --- a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py +++ /dev/null @@ -1,112 +0,0 @@ -# ruff: noqa: E501 - -import os -from datetime import timedelta - -from langchain.chains.query_constructor.base import AttributeInfo -from langchain.retrievers.self_query.base import SelfQueryRetriever -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings.openai import OpenAIEmbeddings -from langchain_community.llms import OpenAI -from langchain_community.vectorstores.timescalevector import TimescaleVector -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough - -from .load_sample_dataset import load_ts_git_dataset - -# to enable debug uncomment the following lines: -# from langchain.globals import set_debug -# set_debug(True) - -# from dotenv import find_dotenv, load_dotenv -# _ = load_dotenv(find_dotenv()) - -if os.environ.get("TIMESCALE_SERVICE_URL", None) is None: - raise Exception("Missing `TIMESCALE_SERVICE_URL` environment variable.") - -SERVICE_URL = os.environ["TIMESCALE_SERVICE_URL"] -LOAD_SAMPLE_DATA = os.environ.get("LOAD_SAMPLE_DATA", False) - - -# DATASET SPECIFIC CODE -# Load the sample dataset. You will have to change this to load your own dataset. 
-collection_name = "timescale_commits" -partition_interval = timedelta(days=7) -if LOAD_SAMPLE_DATA: - load_ts_git_dataset( - SERVICE_URL, - collection_name=collection_name, - num_records=500, - partition_interval=partition_interval, - ) - -# This will change depending on the metadata stored in your dataset. -document_content_description = "The git log commit summary containing the commit hash, author, date of commit, change summary and change details" -metadata_field_info = [ - AttributeInfo( - name="id", - description="A UUID v1 generated from the date of the commit", - type="uuid", - ), - AttributeInfo( - # This is a special attribute represent the timestamp of the uuid. - name="__uuid_timestamp", - description="The timestamp of the commit. Specify in YYYY-MM-DDTHH::MM:SSZ format", - type="datetime.datetime", - ), - AttributeInfo( - name="author_name", - description="The name of the author of the commit", - type="string", - ), - AttributeInfo( - name="author_email", - description="The email address of the author of the commit", - type="string", - ), -] -# END DATASET SPECIFIC CODE - -embeddings = OpenAIEmbeddings() -vectorstore = TimescaleVector( - embedding=embeddings, - collection_name=collection_name, - service_url=SERVICE_URL, - time_partition_interval=partition_interval, -) - -llm = OpenAI(temperature=0) -retriever = SelfQueryRetriever.from_llm( - llm, - vectorstore, - document_content_description, - metadata_field_info, - enable_limit=True, - verbose=True, -) - -template = """Answer the question based only on the following context: -{context} - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -model = ChatOpenAI(temperature=0, model="gpt-4") - -# RAG chain -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py deleted file mode 100644 index 111d08b59d7..00000000000 --- a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py +++ /dev/null @@ -1,84 +0,0 @@ -import os -import tempfile -from datetime import datetime, timedelta - -import requests -from langchain_community.document_loaders import JSONLoader -from langchain_community.embeddings.openai import OpenAIEmbeddings -from langchain_community.vectorstores.timescalevector import TimescaleVector -from langchain_text_splitters.character import CharacterTextSplitter -from timescale_vector import client - - -def parse_date(date_string: str) -> datetime: - if date_string is None: - return None - time_format = "%a %b %d %H:%M:%S %Y %z" - return datetime.strptime(date_string, time_format) - - -def extract_metadata(record: dict, metadata: dict) -> dict: - dt = parse_date(record["date"]) - metadata["id"] = str(client.uuid_from_time(dt)) - if dt is not None: - metadata["date"] = dt.isoformat() - else: - metadata["date"] = None - metadata["author"] = record["author"] - metadata["commit_hash"] = record["commit"] - return metadata - - -def load_ts_git_dataset( - service_url, - collection_name="timescale_commits", - num_records: int = 500, - partition_interval=timedelta(days=7), -): - json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json" - tmp_file = 
"ts_git_log.json" - - temp_dir = tempfile.gettempdir() - json_file_path = os.path.join(temp_dir, tmp_file) - - if not os.path.exists(json_file_path): - response = requests.get(json_url) - if response.status_code == 200: - with open(json_file_path, "w") as json_file: - json_file.write(response.text) - else: - print(f"Failed to download JSON file. Status code: {response.status_code}") - - loader = JSONLoader( - file_path=json_file_path, - jq_schema=".commit_history[]", - text_content=False, - metadata_func=extract_metadata, - ) - - documents = loader.load() - - # Remove documents with None dates - documents = [doc for doc in documents if doc.metadata["date"] is not None] - - if num_records > 0: - documents = documents[:num_records] - - # Split the documents into chunks for embedding - text_splitter = CharacterTextSplitter( - chunk_size=1000, - chunk_overlap=200, - ) - docs = text_splitter.split_documents(documents) - - embeddings = OpenAIEmbeddings() - - # Create a Timescale Vector instance from the collection of documents - TimescaleVector.from_documents( - embedding=embeddings, - ids=[doc.metadata["id"] for doc in docs], - documents=docs, - collection_name=collection_name, - service_url=service_url, - time_partition_interval=partition_interval, - ) diff --git a/templates/rag-timescale-hybrid-search-time/tests/__init__.py b/templates/rag-timescale-hybrid-search-time/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-vectara-multiquery/LICENSE b/templates/rag-vectara-multiquery/LICENSE deleted file mode 100644 index d0af411b99a..00000000000 --- a/templates/rag-vectara-multiquery/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/templates/rag-vectara-multiquery/README.md b/templates/rag-vectara-multiquery/README.md deleted file mode 100644 index 3fc8c88c902..00000000000 --- a/templates/rag-vectara-multiquery/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# RAG - Vectara - multi-query - -This template performs multiquery RAG with `Vectara` vectorstore. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models for the multi-query processing. 
- -Also, ensure the following environment variables are set: -* `VECTARA_CUSTOMER_ID` -* `VECTARA_CORPUS_ID` -* `VECTARA_API_KEY` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-vectara-multiquery -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-vectara-multiquery -``` - -And add the following code to your `server.py` file: -```python -from rag_vectara import chain as rag_vectara_chain - -add_routes(app, rag_vectara_chain, path="/rag-vectara-multiquery") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "vectara-demo" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-vectara-multiquery/playground](http://127.0.0.1:8000/rag-vectara-multiquery/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-vectara-multiquery") -``` diff --git a/templates/rag-vectara-multiquery/pyproject.toml b/templates/rag-vectara-multiquery/pyproject.toml deleted file mode 100644 index d288785f944..00000000000 --- a/templates/rag-vectara-multiquery/pyproject.toml +++ /dev/null @@ -1,38 +0,0 @@ -[tool.poetry] -name = "rag-vectara-multiquery" -version = "0.2.0" -description = "RAG using vectara with multiquery retriever" -authors = [ - "Ofer Mendelevitch ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -[tool.poetry.group.dev.dependencies.python-dotenv] -extras = [ - "cli", -] -version = "^1.0.0" - -[tool.langserve] -export_module = "rag_vectara_multiquery" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Vectara" -integrations = ["OpenAI", "Vectara"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-vectara-multiquery/rag_vectara_multiquery.ipynb b/templates/rag-vectara-multiquery/rag_vectara_multiquery.ipynb deleted file mode 100644 index 2db2ebe4bd5..00000000000 --- a/templates/rag-vectara-multiquery/rag_vectara_multiquery.ipynb +++ /dev/null @@ -1,57 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "8692a430", - "metadata": {}, - "source": [ - "# Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag-vectara-multiquery\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41db5e30", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import 
RemoteRunnable\n", - "\n", - "rag_app_vectara = RemoteRunnable(\"http://localhost:8000/rag-vectara-multiquery\")\n", - "rag_app_vectara.invoke(\"How does agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.11.6 64-bit", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.6" - }, - "vscode": { - "interpreter": { - "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-vectara-multiquery/rag_vectara_multiquery/__init__.py b/templates/rag-vectara-multiquery/rag_vectara_multiquery/__init__.py deleted file mode 100644 index 0255121da4a..00000000000 --- a/templates/rag-vectara-multiquery/rag_vectara_multiquery/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_vectara_multiquery.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py b/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py deleted file mode 100644 index 3d477360757..00000000000 --- a/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py +++ /dev/null @@ -1,47 +0,0 @@ -import os - -from langchain.retrievers.multi_query import MultiQueryRetriever -from langchain_community.vectorstores import Vectara -from langchain_community.vectorstores.vectara import SummaryConfig, VectaraQueryConfig -from langchain_core.output_parsers import StrOutputParser -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_openai.chat_models import ChatOpenAI - -if os.environ.get("VECTARA_CUSTOMER_ID", None) is None: - raise Exception("Missing `VECTARA_CUSTOMER_ID` environment variable.") -if os.environ.get("VECTARA_CORPUS_ID", None) is None: - raise Exception("Missing `VECTARA_CORPUS_ID` environment variable.") -if os.environ.get("VECTARA_API_KEY", None) is None: - raise Exception("Missing `VECTARA_API_KEY` environment variable.") - - -# Setup the Vectara retriever with your Corpus ID and API Key -vectara = Vectara() - -# Define the query configuration: -summary_config = SummaryConfig(is_enabled=True, max_results=5, response_lang="eng") -config = VectaraQueryConfig(k=10, lambda_val=0.005, summary_config=summary_config) - -# Setup the Multi-query retriever -llm = ChatOpenAI(temperature=0) -retriever = MultiQueryRetriever.from_llm( - retriever=vectara.as_retriever(config=config), llm=llm -) - -# Setup RAG pipeline with multi-query. -# We extract the summary from the RAG output, which is the last document in the list. 
-# Note that if you want to extract the citation information, you can use res[:-1]] -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | (lambda res: res[-1]) - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-vectara-multiquery/tests/__init__.py b/templates/rag-vectara-multiquery/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-vectara/LICENSE b/templates/rag-vectara/LICENSE deleted file mode 100644 index d0af411b99a..00000000000 --- a/templates/rag-vectara/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/templates/rag-vectara/README.md b/templates/rag-vectara/README.md deleted file mode 100644 index 482afe6b44d..00000000000 --- a/templates/rag-vectara/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# RAG - Vectara - -This template performs RAG with `Vectara` vectorstore. - -## Environment Setup - -Also, ensure the following environment variables are set: -* `VECTARA_CUSTOMER_ID` -* `VECTARA_CORPUS_ID` -* `VECTARA_API_KEY` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-vectara -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-vectara -``` - -And add the following code to your `server.py` file: -```python -from rag_vectara import chain as rag_vectara_chain - -add_routes(app, rag_vectara_chain, path="/rag-vectara") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "vectara-demo" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-vectara/playground](http://127.0.0.1:8000/rag-vectara/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-vectara") -``` diff --git a/templates/rag-vectara/pyproject.toml b/templates/rag-vectara/pyproject.toml deleted file mode 100644 index 4e891cef904..00000000000 --- a/templates/rag-vectara/pyproject.toml +++ /dev/null @@ -1,38 +0,0 @@ -[tool.poetry] -name = "rag-vectara" -version = "0.2.0" -description = "RAG using vectara retriever" -authors = [ - "Ofer Mendelevitch ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -[tool.poetry.group.dev.dependencies.python-dotenv] -extras = [ - "cli", -] -version = "^1.0.0" - -[tool.langserve] -export_module = "rag_vectara" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Vectara" -integrations = ["OpenAI", "Vectara"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-vectara/rag_vectara.ipynb b/templates/rag-vectara/rag_vectara.ipynb deleted file mode 100644 index f4a99f73648..00000000000 --- a/templates/rag-vectara/rag_vectara.ipynb +++ /dev/null @@ -1,57 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "8692a430", - "metadata": {}, - "source": [ - "# Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag-vectara\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41db5e30", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_vectara = RemoteRunnable(\"http://localhost:8000/rag-vectara\")\n", - "rag_app_vectara.invoke(\"How does agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.11.6 64-bit", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.6" - }, - "vscode": { - "interpreter": { - "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-vectara/rag_vectara/__init__.py b/templates/rag-vectara/rag_vectara/__init__.py deleted file mode 100644 index e4acd587a15..00000000000 --- a/templates/rag-vectara/rag_vectara/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_vectara.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-vectara/rag_vectara/chain.py 
b/templates/rag-vectara/rag_vectara/chain.py deleted file mode 100644 index fd799bb9263..00000000000 --- a/templates/rag-vectara/rag_vectara/chain.py +++ /dev/null @@ -1,29 +0,0 @@ -import os - -from langchain_community.vectorstores import Vectara -from langchain_community.vectorstores.vectara import SummaryConfig, VectaraQueryConfig -from langchain_core.pydantic_v1 import BaseModel - -if os.environ.get("VECTARA_CUSTOMER_ID", None) is None: - raise Exception("Missing `VECTARA_CUSTOMER_ID` environment variable.") -if os.environ.get("VECTARA_CORPUS_ID", None) is None: - raise Exception("Missing `VECTARA_CORPUS_ID` environment variable.") -if os.environ.get("VECTARA_API_KEY", None) is None: - raise Exception("Missing `VECTARA_API_KEY` environment variable.") - -# Setup the Vectara vectorstore with your Corpus ID and API Key -vectara = Vectara() - -# Define the query configuration: -summary_config = SummaryConfig(is_enabled=True, max_results=5, response_lang="eng") -config = VectaraQueryConfig(k=10, lambda_val=0.005, summary_config=summary_config) - -rag = Vectara().as_rag(config) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = rag.with_types(input_type=Question) diff --git a/templates/rag-vectara/tests/__init__.py b/templates/rag-vectara/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rag-weaviate/LICENSE b/templates/rag-weaviate/LICENSE deleted file mode 100644 index d0af411b99a..00000000000 --- a/templates/rag-weaviate/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/templates/rag-weaviate/README.md b/templates/rag-weaviate/README.md deleted file mode 100644 index c6453e93861..00000000000 --- a/templates/rag-weaviate/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# RAG - Weaviate - -This template performs RAG with `Weaviate` vectorstore. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. 
- -Also, ensure the following environment variables are set: -* `WEAVIATE_ENVIRONMENT` -* `WEAVIATE_API_KEY` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rag-weaviate -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rag-weaviate -``` - -And add the following code to your `server.py` file: -```python -from rag_weaviate import chain as rag_weaviate_chain - -add_routes(app, rag_weaviate_chain, path="/rag-weaviate") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rag-weaviate/playground](http://127.0.0.1:8000/rag-weaviate/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rag-weaviate") -``` diff --git a/templates/rag-weaviate/pyproject.toml b/templates/rag-weaviate/pyproject.toml deleted file mode 100644 index 9b2c5014c17..00000000000 --- a/templates/rag-weaviate/pyproject.toml +++ /dev/null @@ -1,40 +0,0 @@ -[tool.poetry] -name = "rag-weaviate" -version = "0.1.0" -description = "RAG using Weaviate retriever" -authors = [ - "Erika Cardenas ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -weaviate-client = ">=3.24.2" -langchain-text-splitters = ">=0.0.1,<0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -[tool.poetry.group.dev.dependencies.python-dotenv] -extras = [ - "cli", -] -version = "^1.0.0" - -[tool.langserve] -export_module = "rag_weaviate" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Weaviate" -integrations = ["Weaviate", "OpenAI"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-weaviate/rag_weaviate.ipynb b/templates/rag-weaviate/rag_weaviate.ipynb deleted file mode 100644 index 11416f01b10..00000000000 --- a/templates/rag-weaviate/rag_weaviate.ipynb +++ /dev/null @@ -1,57 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "8692a430", - "metadata": {}, - "source": [ - "# Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_ext, path=\"/rag-weaviate\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41db5e30", - "metadata": {}, - "outputs": [], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "rag_app_weaviate = RemoteRunnable(\"http://localhost:8000/rag-weaviate\")\n", - "rag_app_weaviate.invoke(\"How does 
agent memory work?\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.11.6 64-bit", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.6" - }, - "vscode": { - "interpreter": { - "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/rag-weaviate/rag_weaviate/__init__.py b/templates/rag-weaviate/rag_weaviate/__init__.py deleted file mode 100644 index a1483502d64..00000000000 --- a/templates/rag-weaviate/rag_weaviate/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from rag_weaviate.chain import chain - -__all__ = ["chain"] diff --git a/templates/rag-weaviate/rag_weaviate/chain.py b/templates/rag-weaviate/rag_weaviate/chain.py deleted file mode 100644 index e2e56230b9a..00000000000 --- a/templates/rag-weaviate/rag_weaviate/chain.py +++ /dev/null @@ -1,61 +0,0 @@ -import os - -from langchain_community.chat_models import ChatOpenAI -from langchain_community.document_loaders import WebBaseLoader -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Weaviate -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_text_splitters import RecursiveCharacterTextSplitter - -if os.environ.get("WEAVIATE_API_KEY", None) is None: - raise Exception("Missing `WEAVIATE_API_KEY` environment variable.") - -if os.environ.get("WEAVIATE_ENVIRONMENT", None) is None: - raise Exception("Missing `WEAVIATE_ENVIRONMENT` environment variable.") - -WEAVIATE_INDEX_NAME = os.environ.get("WEAVIATE_INDEX", "langchain-test") - -### Ingest code - you may need to run this the first time -# Load -loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") -data = loader.load() - -# # Split -text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -all_splits = text_splitter.split_documents(data) - -# # Add to vectorDB -# vectorstore = Weaviate.from_documents( -# documents=all_splits, embedding=OpenAIEmbeddings(), index_name=WEAVIATE_INDEX_NAME -# ) -# retriever = vectorstore.as_retriever() - -vectorstore = Weaviate.from_existing_index(WEAVIATE_INDEX_NAME, OpenAIEmbeddings()) -retriever = vectorstore.as_retriever() - -# RAG prompt -template = """Answer the question based only on the following context: -{context} -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -# RAG -model = ChatOpenAI() -chain = ( - RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) - | prompt - | model - | StrOutputParser() -) - - -# Add typing for input -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rag-weaviate/tests/__init__.py b/templates/rag-weaviate/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/research-assistant/LICENSE b/templates/research-assistant/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/research-assistant/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 
LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/research-assistant/README.md b/templates/research-assistant/README.md deleted file mode 100644 index ba835b09352..00000000000 --- a/templates/research-assistant/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# Research assistant - -This template implements a version of -[GPT Researcher](https://github.com/assafelovic/gpt-researcher) that you can use -as a starting point for a research agent. - -## Environment Setup - -The default template relies on `ChatOpenAI` and `DuckDuckGo`, so you will need the -following environment variable: - -- `OPENAI_API_KEY` - -And to use the `Tavily` LLM-optimized search engine, you will need: - -- `TAVILY_API_KEY` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package research-assistant -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add research-assistant -``` - -And add the following code to your `server.py` file: -```python -from research_assistant import chain as research_assistant_chain - -add_routes(app, research_assistant_chain, path="/research-assistant") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/research-assistant/playground](http://127.0.0.1:8000/research-assistant/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/research-assistant") -``` \ No newline at end of file diff --git a/templates/research-assistant/pyproject.toml b/templates/research-assistant/pyproject.toml deleted file mode 100644 index 4e52f8c21e2..00000000000 --- a/templates/research-assistant/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[tool.poetry] -name = "research-assistant" -version = "0.0.1" -description = "Uses GPT Researcher as a research agent" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -beautifulsoup4 = "^4.12.2" -duckduckgo-search = "^3.9.5" -tavily-python = "^0.2.6" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "research_assistant" -export_attr = "chain" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["OpenAI"] -tags = ["data", "agents"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/research-assistant/research_assistant/__init__.py b/templates/research-assistant/research_assistant/__init__.py deleted file mode 100644 index ddf5c2c68eb..00000000000 --- a/templates/research-assistant/research_assistant/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from research_assistant.chain import chain - -__all__ = ["chain"] diff --git a/templates/research-assistant/research_assistant/chain.py b/templates/research-assistant/research_assistant/chain.py deleted file mode 100644 index c0414b00bcf..00000000000 --- a/templates/research-assistant/research_assistant/chain.py +++ /dev/null @@ -1,16 +0,0 @@ -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough - -from research_assistant.search.web import chain as search_chain -from research_assistant.writer import chain as writer_chain - -chain_notypes = ( - RunnablePassthrough().assign(research_summary=search_chain) | writer_chain -) - - -class InputType(BaseModel): - question: str - - -chain = chain_notypes.with_types(input_type=InputType) diff --git a/templates/research-assistant/research_assistant/search/__init__.py b/templates/research-assistant/research_assistant/search/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/research-assistant/research_assistant/search/web.py b/templates/research-assistant/research_assistant/search/web.py deleted file mode 100644 index 52ea08542d1..00000000000 --- a/templates/research-assistant/research_assistant/search/web.py +++ /dev/null @@ -1,180 +0,0 @@ -import json -from typing import Any - -import requests -from bs4 import BeautifulSoup -from 
langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever -from langchain_community.chat_models import ChatOpenAI -from langchain_community.utilities import DuckDuckGoSearchAPIWrapper -from langchain_core.messages import SystemMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import ( - ConfigurableField, - Runnable, - RunnableLambda, - RunnableParallel, - RunnablePassthrough, -) - -RESULTS_PER_QUESTION = 3 - -ddg_search = DuckDuckGoSearchAPIWrapper() - - -def scrape_text(url: str): - # Send a GET request to the webpage - try: - response = requests.get(url) - - # Check if the request was successful - if response.status_code == 200: - # Parse the content of the request with BeautifulSoup - soup = BeautifulSoup(response.text, "html.parser") - - # Extract all text from the webpage - page_text = soup.get_text(separator=" ", strip=True) - - # Print the extracted text - return page_text - else: - return f"Failed to retrieve the webpage: Status code {response.status_code}" - except Exception as e: - print(e) - return f"Failed to retrieve the webpage: {e}" - - -def web_search(query: str, num_results: int): - results = ddg_search.results(query, num_results) - return [r["link"] for r in results] - - -get_links: Runnable[Any, Any] = ( - RunnablePassthrough() - | RunnableLambda( - lambda x: [ - {"url": url, "question": x["question"]} - for url in web_search(query=x["question"], num_results=RESULTS_PER_QUESTION) - ] - ) -).configurable_alternatives( - ConfigurableField("search_engine"), - default_key="duckduckgo", - tavily=RunnableLambda(lambda x: x["question"]) - | RunnableParallel( - { - "question": RunnablePassthrough(), - "results": TavilySearchAPIRetriever(k=RESULTS_PER_QUESTION), - } - ) - | RunnableLambda( - lambda x: [ - {"url": result.metadata["source"], "question": x["question"]} - for result in x["results"] - ] - ), -) - - -SEARCH_PROMPT = ChatPromptTemplate.from_messages( - [ - ("system", "{agent_prompt}"), - ( - "user", - "Write 3 google search queries to search online that form an " - "objective opinion from the following: {question}\n" - "You must respond with a list of strings in the following format: " - '["query 1", "query 2", "query 3"].', - ), - ] -) - -AUTO_AGENT_INSTRUCTIONS = """ -This task involves researching a given topic, regardless of its complexity or the availability of a definitive answer. The research is conducted by a specific agent, defined by its type and role, with each agent requiring distinct instructions. -Agent -The agent is determined by the field of the topic and the specific name of the agent that could be utilized to research the topic provided. Agents are categorized by their area of expertise, and each agent type is associated with a corresponding emoji. - -examples: -task: "should I invest in apple stocks?" -response: -{ - "agent": "💰 Finance Agent", - "agent_role_prompt: "You are a seasoned finance analyst AI assistant. Your primary goal is to compose comprehensive, astute, impartial, and methodically arranged financial reports based on provided data and trends." -} -task: "could reselling sneakers become profitable?" -response: -{ - "agent": "📈 Business Analyst Agent", - "agent_role_prompt": "You are an experienced AI business analyst assistant. Your main objective is to produce comprehensive, insightful, impartial, and systematically structured business reports based on provided business data, market trends, and strategic analysis." 
-} -task: "what are the most interesting sites in Tel Aviv?" -response: -{ - "agent": "🌍 Travel Agent", - "agent_role_prompt": "You are a world-travelled AI tour guide assistant. Your main purpose is to draft engaging, insightful, unbiased, and well-structured travel reports on given locations, including history, attractions, and cultural insights." -} -""" # noqa: E501 -CHOOSE_AGENT_PROMPT = ChatPromptTemplate.from_messages( - [SystemMessage(content=AUTO_AGENT_INSTRUCTIONS), ("user", "task: {task}")] ) - -SUMMARY_TEMPLATE = """{text} - ------------ - -Using the above text, answer in short the following question: - -> {question} - ------------ -if the question cannot be answered using the text, simply summarize the text. Include all factual information, numbers, stats etc if available.""" # noqa: E501 -SUMMARY_PROMPT = ChatPromptTemplate.from_template(SUMMARY_TEMPLATE) - -scrape_and_summarize: Runnable[Any, Any] = ( - RunnableParallel( - { - "question": lambda x: x["question"], - "text": lambda x: scrape_text(x["url"])[:10000], - "url": lambda x: x["url"], - } - ) - | RunnableParallel( - { - "summary": SUMMARY_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(), - "url": lambda x: x["url"], - } - ) - | RunnableLambda(lambda x: f"Source Url: {x['url']}\nSummary: {x['summary']}") -) - -multi_search = get_links | scrape_and_summarize.map() | (lambda x: "\n".join(x)) - - -def load_json(s): - try: - return json.loads(s) - except Exception: - return {} - - -search_query = SEARCH_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser() | load_json -choose_agent = ( - CHOOSE_AGENT_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser() | load_json -) - -get_search_queries = ( - RunnablePassthrough().assign( - agent_prompt=RunnableParallel({"task": lambda x: x}) - | choose_agent - | (lambda x: x.get("agent_role_prompt")) - ) - | search_query -) - - -chain = ( - get_search_queries - | (lambda x: [{"question": q} for q in x]) - | multi_search.map() - | (lambda x: "\n\n".join(x)) -) diff --git a/templates/research-assistant/research_assistant/writer.py b/templates/research-assistant/research_assistant/writer.py deleted file mode 100644 index 3ec60a2dd98..00000000000 --- a/templates/research-assistant/research_assistant/writer.py +++ /dev/null @@ -1,75 +0,0 @@ -from langchain_community.chat_models import ChatOpenAI -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import ConfigurableField - -WRITER_SYSTEM_PROMPT = "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text." # noqa: E501 - - -# Report prompts from https://github.com/assafelovic/gpt-researcher/blob/master/gpt_researcher/master/prompts.py -RESEARCH_REPORT_TEMPLATE = """Information: --------- -{research_summary} --------- - -Using the above information, answer the following question or topic: "{question}" in a detailed report -- \ -The report should focus on the answer to the question, should be well structured, informative, \ -in depth, with facts and numbers if available and a minimum of 1,200 words. - -You should strive to write the report as long as you can using all relevant and necessary information provided. -You must write the report with markdown syntax. -You MUST determine your own concrete and valid opinion based on the given information. Do NOT defer to general and meaningless conclusions.
-Write all used source urls at the end of the report, and make sure to not add duplicated sources, but only one reference for each. -You must write the report in apa format. -Please do your best, this is very important to my career.""" # noqa: E501 - - -RESOURCE_REPORT_TEMPLATE = """Information: --------- -{research_summary} --------- - -Based on the above information, generate a bibliography recommendation report for the following question or topic: "{question}". \ -The report should provide a detailed analysis of each recommended resource, explaining how each source can contribute to finding answers to the research question. \ -Focus on the relevance, reliability, and significance of each source. \ -Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax. \ -Include relevant facts, figures, and numbers whenever available. \ -The report should have a minimum length of 1,200 words. - -Please do your best, this is very important to my career.""" # noqa: E501 - -OUTLINE_REPORT_TEMPLATE = """Information: --------- -{research_summary} --------- - -Using the above information, generate an outline for a research report in Markdown syntax for the following question or topic: "{question}". \ -The outline should provide a well-structured framework for the research report, including the main sections, subsections, and key points to be covered. \ -The research report should be detailed, informative, in-depth, and a minimum of 1,200 words. \ -Use appropriate Markdown syntax to format the outline and ensure readability. - -Please do your best, this is very important to my career.""" # noqa: E501 - -model = ChatOpenAI(temperature=0) -prompt = ChatPromptTemplate.from_messages( - [ - ("system", WRITER_SYSTEM_PROMPT), - ("user", RESEARCH_REPORT_TEMPLATE), - ] -).configurable_alternatives( - ConfigurableField("report_type"), - default_key="research_report", - resource_report=ChatPromptTemplate.from_messages( - [ - ("system", WRITER_SYSTEM_PROMPT), - ("user", RESOURCE_REPORT_TEMPLATE), - ] - ), - outline_report=ChatPromptTemplate.from_messages( - [ - ("system", WRITER_SYSTEM_PROMPT), - ("user", OUTLINE_REPORT_TEMPLATE), - ] - ), -) -chain = prompt | model | StrOutputParser() diff --git a/templates/research-assistant/tests/__init__.py b/templates/research-assistant/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/retrieval-agent-fireworks/.gitignore b/templates/retrieval-agent-fireworks/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/retrieval-agent-fireworks/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/retrieval-agent-fireworks/LICENSE b/templates/retrieval-agent-fireworks/LICENSE deleted file mode 100644 index fc0602feecd..00000000000 --- a/templates/retrieval-agent-fireworks/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2024 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/retrieval-agent-fireworks/README.md b/templates/retrieval-agent-fireworks/README.md deleted file mode 100644 index e2e39520d66..00000000000 --- a/templates/retrieval-agent-fireworks/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Retrieval agent - Fireworks, Hugging Face - -This package uses open source models hosted on `Fireworks AI` to do retrieval using an agent architecture. By default, this does retrieval over `Arxiv`. - -We will use `Mixtral8x7b-instruct-v0.1`, which is shown in this blog to yield reasonable -results with function calling even though it is not fine-tuned for this task: https://huggingface.co/blog/open-source-llms-as-agents - - -## Environment Setup - -There are various great ways to run OSS models. We will use FireworksAI as an easy way to run the models. See [here](https://python.langchain.com/docs/integrations/providers/fireworks) for more information. - -Set the `FIREWORKS_API_KEY` environment variable to access Fireworks. - - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package retrieval-agent-fireworks -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add retrieval-agent-fireworks -``` - -And add the following code to your `server.py` file: -```python -from retrieval_agent_fireworks import chain as retrieval_agent_fireworks_chain - -add_routes(app, retrieval_agent_fireworks_chain, path="/retrieval-agent-fireworks") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/retrieval-agent-fireworks/playground](http://127.0.0.1:8000/retrieval-agent-fireworks/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/retrieval-agent-fireworks") -``` \ No newline at end of file diff --git a/templates/retrieval-agent-fireworks/pyproject.toml b/templates/retrieval-agent-fireworks/pyproject.toml deleted file mode 100644 index d5c0ce4cfd9..00000000000 --- a/templates/retrieval-agent-fireworks/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[tool.poetry] -name = "retrieval-agent-fireworks" -version = "0.0.1" -description = "Retrieval agent for open source models hosted on Fireworks" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -arxiv = "^2.0.0" -langchain-community = ">=0.0.17,<0.2" -langchainhub = "^0.1.14" -fireworks-ai = "^0.11.2" - - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "retrieval_agent_fireworks" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "research" -author = "Docugami" -integrations = ["HuggingFace"] -tags = ["local", "agents"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/retrieval-agent-fireworks/retrieval_agent_fireworks/__init__.py b/templates/retrieval-agent-fireworks/retrieval_agent_fireworks/__init__.py deleted file mode 100644 index fdf3d35c08b..00000000000 --- a/templates/retrieval-agent-fireworks/retrieval_agent_fireworks/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from retrieval_agent_fireworks.chain import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/retrieval-agent-fireworks/retrieval_agent_fireworks/chain.py b/templates/retrieval-agent-fireworks/retrieval_agent_fireworks/chain.py deleted file mode 100644 index 8398515f668..00000000000 --- a/templates/retrieval-agent-fireworks/retrieval_agent_fireworks/chain.py +++ /dev/null @@ -1,110 +0,0 @@ -from typing import List - -from langchain import hub -from langchain.agents import AgentExecutor -from langchain.agents.format_scratchpad import format_log_to_str -from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser -from langchain.callbacks.manager import CallbackManagerForRetrieverRun -from langchain_community.chat_models.fireworks import ChatFireworks -from langchain_community.utilities.arxiv import ArxivAPIWrapper -from langchain_core.documents import Document -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.retrievers import BaseRetriever -from langchain_core.tools.render import render_text_description -from langchain_core.tools.retriever import create_retriever_tool - -MODEL_ID = "accounts/fireworks/models/mixtral-8x7b-instruct" - - -class ArxivRetriever(BaseRetriever, 
ArxivAPIWrapper): - """`Arxiv` retriever. - - It wraps load() to get_relevant_documents(). - It uses all ArxivAPIWrapper arguments without any change. - """ - - get_full_documents: bool = False - - def _get_relevant_documents( - self, query: str, *, run_manager: CallbackManagerForRetrieverRun - ) -> List[Document]: - try: - if self.is_arxiv_identifier(query): - results = self.arxiv_search( - id_list=query.split(), - max_results=self.top_k_results, - ).results() - else: - results = self.arxiv_search( # type: ignore - query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results - ).results() - except self.arxiv_exceptions as ex: - return [Document(page_content=f"Arxiv exception: {ex}")] - docs = [ - Document( - page_content=result.summary, - metadata={ - "Published": result.updated.date(), - "Title": result.title, - "Authors": ", ".join(a.name for a in result.authors), - }, - ) - for result in results - ] - return docs - - -# Set up tool(s) -description = ( - "A wrapper around Arxiv.org " - "Useful for when you need to answer questions about Physics, Mathematics, " - "Computer Science, Quantitative Biology, Quantitative Finance, Statistics, " - "Electrical Engineering, and Economics " - "from scientific articles on arxiv.org. " - "Input should be a search query." -) -arxiv_tool = create_retriever_tool(ArxivRetriever(), "arxiv", description) -tools = [arxiv_tool] - -# Set up LLM -llm = ChatFireworks( - model=MODEL_ID, - model_kwargs={ - "temperature": 0, - "max_tokens": 2048, - "top_p": 1, - }, - cache=True, -) - -# setup ReAct style prompt -prompt = hub.pull("hwchase17/react-json") -prompt = prompt.partial( - tools=render_text_description(tools), - tool_names=", ".join([t.name for t in tools]), -) - -# define the agent -model_with_stop = llm.bind(stop=["\nObservation"]) -agent = ( - { - "input": lambda x: x["input"], - "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]), - } - | prompt - | model_with_stop - | ReActJsonSingleInputOutputParser() -) - - -class InputType(BaseModel): - input: str - - -# instantiate AgentExecutor -agent_executor = AgentExecutor( - agent=agent, - tools=tools, - verbose=True, - handle_parsing_errors=True, -).with_types(input_type=InputType) diff --git a/templates/retrieval-agent-fireworks/tests/__init__.py b/templates/retrieval-agent-fireworks/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/retrieval-agent/.gitignore b/templates/retrieval-agent/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/retrieval-agent/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/retrieval-agent/LICENSE b/templates/retrieval-agent/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/retrieval-agent/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/retrieval-agent/README.md b/templates/retrieval-agent/README.md deleted file mode 100644 index 45486693649..00000000000 --- a/templates/retrieval-agent/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Retrieval agent - -This package uses `Azure OpenAI` to do retrieval using an agent architecture. -By default, this does retrieval over `Arxiv`. - -## Environment Setup - -Since we are using Azure OpenAI, we will need to set the following environment variables: - -```shell -export AZURE_OPENAI_ENDPOINT=... -export AZURE_OPENAI_API_VERSION=... -export AZURE_OPENAI_API_KEY=... -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package retrieval-agent -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add retrieval-agent -``` - -And add the following code to your `server.py` file: -```python -from retrieval_agent import chain as retrieval_agent_chain - -add_routes(app, retrieval_agent_chain, path="/retrieval-agent") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/retrieval-agent/playground](http://127.0.0.1:8000/retrieval-agent/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/retrieval-agent") -``` \ No newline at end of file diff --git a/templates/retrieval-agent/pyproject.toml b/templates/retrieval-agent/pyproject.toml deleted file mode 100644 index e4c6c42e2d8..00000000000 --- a/templates/retrieval-agent/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "retrieval-agent" -version = "0.0.1" -description = "Retrieval agent for Azure OpenAI" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -arxiv = "^2.0.0" -langchain-openai = "^0.0.2.post1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "retrieval_agent" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["OpenAI", "Azure"] -tags = ["data", "agents"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/retrieval-agent/retrieval_agent/__init__.py b/templates/retrieval-agent/retrieval_agent/__init__.py deleted file mode 100644 index 543e3fd8556..00000000000 --- a/templates/retrieval-agent/retrieval_agent/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from retrieval_agent.chain import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/retrieval-agent/retrieval_agent/chain.py b/templates/retrieval-agent/retrieval_agent/chain.py deleted file mode 100644 index e360f4ba2ad..00000000000 --- a/templates/retrieval-agent/retrieval_agent/chain.py +++ /dev/null @@ -1,120 +0,0 @@ -import os -from typing import List, Tuple - -from langchain.agents import AgentExecutor -from langchain.agents.format_scratchpad import format_to_openai_function_messages -from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser -from langchain.callbacks.manager import CallbackManagerForRetrieverRun -from langchain_community.tools.convert_to_openai import format_tool_to_openai_function -from langchain_community.utilities.arxiv import ArxivAPIWrapper -from langchain_core.documents import Document -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.retrievers import BaseRetriever -from langchain_core.tools.retriever import create_retriever_tool -from langchain_openai import AzureChatOpenAI - - -class ArxivRetriever(BaseRetriever, ArxivAPIWrapper): - """`Arxiv` retriever. - - It wraps load() to get_relevant_documents(). - It uses all ArxivAPIWrapper arguments without any change. 
- """ - - get_full_documents: bool = False - - def _get_relevant_documents( - self, query: str, *, run_manager: CallbackManagerForRetrieverRun - ) -> List[Document]: - try: - if self.is_arxiv_identifier(query): - results = self.arxiv_search( - id_list=query.split(), - max_results=self.top_k_results, - ).results() - else: - results = self.arxiv_search( # type: ignore - query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results - ).results() - except self.arxiv_exceptions as ex: - return [Document(page_content=f"Arxiv exception: {ex}")] - docs = [ - Document( - page_content=result.summary, - metadata={ - "Published": result.updated.date(), - "Title": result.title, - "Authors": ", ".join(a.name for a in result.authors), - }, - ) - for result in results - ] - return docs - - -description = ( - "A wrapper around Arxiv.org " - "Useful for when you need to answer questions about Physics, Mathematics, " - "Computer Science, Quantitative Biology, Quantitative Finance, Statistics, " - "Electrical Engineering, and Economics " - "from scientific articles on arxiv.org. " - "Input should be a search query." -) - -# Create the tool -arxiv_tool = create_retriever_tool(ArxivRetriever(), "arxiv", description) -tools = [arxiv_tool] -llm = AzureChatOpenAI( - temperature=0, - azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), - api_key=os.getenv("AZURE_OPENAI_API_KEY"), - api_version=os.getenv("AZURE_OPENAI_API_VERSION"), -) -assistant_system_message = """You are a helpful research assistant. \ -Lookup relevant information as needed.""" -prompt = ChatPromptTemplate.from_messages( - [ - ("system", assistant_system_message), - MessagesPlaceholder(variable_name="chat_history"), - ("user", "{input}"), - MessagesPlaceholder(variable_name="agent_scratchpad"), - ] -) - -llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools]) - - -def _format_chat_history(chat_history: List[Tuple[str, str]]): - buffer = [] - for human, ai in chat_history: - buffer.append(HumanMessage(content=human)) - buffer.append(AIMessage(content=ai)) - return buffer - - -agent = ( - { - "input": lambda x: x["input"], - "chat_history": lambda x: _format_chat_history(x["chat_history"]), - "agent_scratchpad": lambda x: format_to_openai_function_messages( - x["intermediate_steps"] - ), - } - | prompt - | llm_with_tools - | OpenAIFunctionsAgentOutputParser() -) - - -class AgentInput(BaseModel): - input: str - chat_history: List[Tuple[str, str]] = Field( - ..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}} - ) - - -agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True).with_types( - input_type=AgentInput -) diff --git a/templates/retrieval-agent/tests/__init__.py b/templates/retrieval-agent/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rewrite-retrieve-read/README.md b/templates/rewrite-retrieve-read/README.md deleted file mode 100644 index b1fe704be87..00000000000 --- a/templates/rewrite-retrieve-read/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Rewrite-Retrieve-Read - -This template implements a method for query transformation (re-writing) -in the paper [Query Rewriting for Retrieval-Augmented Large Language Models](https://arxiv.org/pdf/2305.14283.pdf) to optimize for RAG. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. 
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package rewrite_retrieve_read -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add rewrite_retrieve_read -``` - -And add the following code to your `server.py` file: -```python -from rewrite_retrieve_read.chain import chain as rewrite_retrieve_read_chain - -add_routes(app, rewrite_retrieve_read_chain, path="/rewrite-retrieve-read") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/rewrite_retrieve_read/playground](http://127.0.0.1:8000/rewrite_retrieve_read/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/rewrite_retrieve_read") -``` diff --git a/templates/rewrite-retrieve-read/main.py b/templates/rewrite-retrieve-read/main.py deleted file mode 100644 index deedf470341..00000000000 --- a/templates/rewrite-retrieve-read/main.py +++ /dev/null @@ -1,4 +0,0 @@ -from rewrite_retrieve_read.chain import chain - -if __name__ == "__main__": - chain.invoke("man that sam bankman fried trial was crazy! 
what is langchain?") diff --git a/templates/rewrite-retrieve-read/pyproject.toml b/templates/rewrite-retrieve-read/pyproject.toml deleted file mode 100644 index ed8e9554dca..00000000000 --- a/templates/rewrite-retrieve-read/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "rewrite-retrieve-read" -version = "0.0.1" -description = "Query transformation using the rewrite-retrieve-read to improve retrieval" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -duckduckgo-search = "^3.9.3" -openai = "<2" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "rewrite_retrieve_read.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI"] -tags = ["paper", "prompt-hub"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/rewrite-retrieve-read/rewrite_retrieve_read/__init__.py b/templates/rewrite-retrieve-read/rewrite_retrieve_read/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/rewrite-retrieve-read/rewrite_retrieve_read/chain.py b/templates/rewrite-retrieve-read/rewrite_retrieve_read/chain.py deleted file mode 100644 index adcca37a3e6..00000000000 --- a/templates/rewrite-retrieve-read/rewrite_retrieve_read/chain.py +++ /dev/null @@ -1,59 +0,0 @@ -from langchain_community.chat_models import ChatOpenAI -from langchain_community.utilities import DuckDuckGoSearchAPIWrapper -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough - -template = """Answer the users question based only on the following context: - - -{context} - - -Question: {question} -""" -prompt = ChatPromptTemplate.from_template(template) - -model = ChatOpenAI(temperature=0) - -search = DuckDuckGoSearchAPIWrapper() - - -def retriever(query): - return search.run(query) - - -template = """Provide a better search query for \ -web search engine to answer the given question, end \ -the queries with ’**’. Question: \ -{x} Answer:""" -rewrite_prompt = ChatPromptTemplate.from_template(template) - -# Parser to remove the `**` - - -def _parse(text): - return text.strip("**") - - -rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse - -chain = ( - { - "context": {"x": RunnablePassthrough()} | rewriter | retriever, - "question": RunnablePassthrough(), - } - | prompt - | model - | StrOutputParser() -) - -# Add input type for playground - - -class Question(BaseModel): - __root__: str - - -chain = chain.with_types(input_type=Question) diff --git a/templates/rewrite-retrieve-read/tests/__init__.py b/templates/rewrite-retrieve-read/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/robocorp-action-server/.gitignore b/templates/robocorp-action-server/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/robocorp-action-server/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/robocorp-action-server/LICENSE b/templates/robocorp-action-server/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/robocorp-action-server/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/robocorp-action-server/README.md b/templates/robocorp-action-server/README.md deleted file mode 100644 index ce045d2522a..00000000000 --- a/templates/robocorp-action-server/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Robocorp Action Server - agent - -This template enables using [Robocorp Action Server](https://github.com/robocorp/robocorp) served actions as tools for an Agent. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package robocorp-action-server -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add robocorp-action-server -``` - -And add the following code to your `server.py` file: - -```python -from robocorp_action_server import agent_executor as action_server_chain - -add_routes(app, action_server_chain, path="/robocorp-action-server") -``` - -### Running the Action Server - -To run the Action Server, you need to have the Robocorp Action Server installed - -```bash -pip install -U robocorp-action-server -``` - -Then you can run the Action Server with: - -```bash -action-server new -cd ./your-project-name -action-server start -``` - -### Configure LangSmith (Optional) - -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -### Start LangServe instance - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/robocorp-action-server/playground](http://127.0.0.1:8000/robocorp-action-server/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/robocorp-action-server") -``` diff --git a/templates/robocorp-action-server/pyproject.toml b/templates/robocorp-action-server/pyproject.toml deleted file mode 100644 index 02407720065..00000000000 --- a/templates/robocorp-action-server/pyproject.toml +++ /dev/null @@ -1,25 +0,0 @@ -[tool.poetry] -name = "robocorp-action-server" -version = "0.0.1" -description = "" -authors = ["Robocorp Technologies "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -langchain-openai = ">=0.0.2,<0.2" -langchain-robocorp = ">=0.0.3,<0.2" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "robocorp_action_server" -export_attr = "agent_executor" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/robocorp-action-server/robocorp_action_server/__init__.py b/templates/robocorp-action-server/robocorp_action_server/__init__.py deleted file mode 100644 index a2d9f02d626..00000000000 --- a/templates/robocorp-action-server/robocorp_action_server/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from robocorp_action_server.agent import agent_executor - -__all__ = ["agent_executor"] diff --git a/templates/robocorp-action-server/robocorp_action_server/agent.py b/templates/robocorp-action-server/robocorp_action_server/agent.py deleted file mode 100644 index fd6f04fde68..00000000000 --- a/templates/robocorp-action-server/robocorp_action_server/agent.py +++ /dev/null @@ -1,36 +0,0 @@ -from langchain.agents import AgentExecutor, OpenAIFunctionsAgent -from langchain_core.messages import SystemMessage -from langchain_core.pydantic_v1 import BaseModel -from langchain_openai import ChatOpenAI -from langchain_robocorp import ActionServerToolkit - -# Initialize LLM chat model -llm = ChatOpenAI(model="gpt-4", temperature=0) - -# Initialize Action Server Toolkit -toolkit = ActionServerToolkit(url="http://localhost:8080") -tools = toolkit.get_tools() - -# Initialize Agent -system_message = SystemMessage(content="You are a helpful assistant") -prompt = OpenAIFunctionsAgent.create_prompt(system_message) -agent = OpenAIFunctionsAgent( - llm=llm, - prompt=prompt, - tools=tools, -) - -# Initialize Agent executor -agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) - - -# Typings for Langserve playground -class Input(BaseModel): - input: str - - -class Output(BaseModel): - output: str - - -agent_executor = agent_executor.with_types(input_type=Input, output_type=Output) # type: ignore[arg-type, assignment] diff --git 
a/templates/robocorp-action-server/tests/__init__.py b/templates/robocorp-action-server/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/self-query-qdrant/.gitignore b/templates/self-query-qdrant/.gitignore deleted file mode 100644 index 4f29079507a..00000000000 --- a/templates/self-query-qdrant/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.idea -tests diff --git a/templates/self-query-qdrant/README.md b/templates/self-query-qdrant/README.md deleted file mode 100644 index fb67fd88b46..00000000000 --- a/templates/self-query-qdrant/README.md +++ /dev/null @@ -1,162 +0,0 @@ -# Self-query - Qdrant - -This template performs [self-querying](https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/) -using `Qdrant` and OpenAI. By default, it uses an artificial dataset of 10 documents, but you can replace it with your own dataset. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -Set the `QDRANT_URL` to the URL of your Qdrant instance. If you use [Qdrant Cloud](https://cloud.qdrant.io) -you have to set the `QDRANT_API_KEY` environment variable as well. If you do not set any of them, -the template will try to connect to a local Qdrant instance at `http://localhost:6333`. - -```shell -export QDRANT_URL= -export QDRANT_API_KEY= - -export OPENAI_API_KEY= -``` - -## Usage - -To use this package, install the LangChain CLI first: - -```shell -pip install -U "langchain-cli[serve]" -``` - -Create a new LangChain project and install this package as the only one: - -```shell -langchain app new my-app --package self-query-qdrant -``` - -To add this to an existing project, run: - -```shell -langchain app add self-query-qdrant -``` - -### Defaults - -Before you launch the server, you need to create a Qdrant collection and index the documents. -It can be done by running the following command: - -```python -from self_query_qdrant.chain import initialize - -initialize() -``` - -Add the following code to your `app/server.py` file: - -```python -from self_query_qdrant.chain import chain - -add_routes(app, chain, path="/self-query-qdrant") -``` - -The default dataset consists of 10 documents about dishes, along with their price and restaurant information. -You can find the documents in the `packages/self-query-qdrant/self_query_qdrant/defaults.py` file. -Here is one of the documents: - -```python -from langchain_core.documents import Document - -Document( - page_content="Spaghetti with meatballs and tomato sauce", - metadata={ - "price": 12.99, - "restaurant": { - "name": "Olive Garden", - "location": ["New York", "Chicago", "Los Angeles"], - }, - }, -) -``` - -Self-querying allows performing semantic search over the documents, with some additional filtering -based on the metadata. For example, you can search for the dishes that cost less than $15 and are served in New York. - -### Customization - -All the examples above assume that you want to launch the template with just the defaults.
-If you want to customize the template, you can do it by passing the parameters to the `create_chain` function -in the `app/server.py` file: - -```python -from langchain_community.llms import Cohere -from langchain_community.embeddings import HuggingFaceEmbeddings -from langchain.chains.query_constructor.schema import AttributeInfo - -from self_query_qdrant.chain import create_chain - -model_name = "sentence-transformers/all-mpnet-base-v2" -chain = create_chain( - llm=Cohere(), - embeddings=HuggingFaceEmbeddings(model_name=model_name), - document_contents="Descriptions of cats, along with their names and breeds.", - metadata_field_info=[ - AttributeInfo(name="name", description="Name of the cat", type="string"), - AttributeInfo(name="breed", description="Cat's breed", type="string"), - ], - collection_name="cats", -) -``` - -The same goes for the `initialize` function that creates a Qdrant collection and indexes the documents: - -```python -from langchain_core.documents import Document -from langchain_community.embeddings import HuggingFaceEmbeddings - -from self_query_qdrant.chain import initialize - -model_name = "sentence-transformers/all-mpnet-base-v2" -initialize( - embeddings=HuggingFaceEmbeddings(model_name=model_name), - collection_name="cats", - documents=[ - Document( - page_content="A mean lazy old cat who destroys furniture and eats lasagna", - metadata={"name": "Garfield", "breed": "Tabby"}, - ), - ... - ] -) -``` - -The template is flexible and might be used for different sets of documents easily. - -### LangSmith - -(Optional) If you have access to LangSmith, configure it to help trace, monitor and debug LangChain applications. If you don't have access, skip this section. - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -### Local Server - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -You can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -Access the playground at [http://127.0.0.1:8000/self-query-qdrant/playground](http://127.0.0.1:8000/self-query-qdrant/playground) - -Access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/self-query-qdrant") -``` diff --git a/templates/self-query-qdrant/pyproject.toml b/templates/self-query-qdrant/pyproject.toml deleted file mode 100644 index 69850e30ae1..00000000000 --- a/templates/self-query-qdrant/pyproject.toml +++ /dev/null @@ -1,38 +0,0 @@ -[tool.poetry] -name = "self-query-qdrant" -version = "0.1.0" -description = "Self-querying retriever using Qdrant" -authors = ["Kacper Łukawski "] -license = "Apache 2.0" -readme = "README.md" -packages = [{include = "self_query_qdrant"}] - -[tool.poetry.dependencies] -python = ">=3.9,<3.13" -langchain = "^0.1" -qdrant-client = ">=1.6" -lark = "^1.1.8" -tiktoken = "^0.5.1" -langchain-openai = "^0.0.8" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -[tool.poetry.group.dev.dependencies.python-dotenv] -extras = [ - "cli", -] -version = "^1.0.0" - -[tool.langserve] -export_module = "self_query_qdrant" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Qdrant" -integrations = ["Qdrant", "OpenAI"] -tags = ["research", "agents"] - -[build-system] 
-requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/self-query-qdrant/self_query_qdrant/__init__.py b/templates/self-query-qdrant/self_query_qdrant/__init__.py deleted file mode 100644 index 886cf90159b..00000000000 --- a/templates/self-query-qdrant/self_query_qdrant/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from self_query_qdrant.chain import chain - -__all__ = ["chain"] diff --git a/templates/self-query-qdrant/self_query_qdrant/chain.py b/templates/self-query-qdrant/self_query_qdrant/chain.py deleted file mode 100644 index 0e828177437..00000000000 --- a/templates/self-query-qdrant/self_query_qdrant/chain.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -from typing import List, Optional - -from langchain.chains.query_constructor.schema import AttributeInfo -from langchain.retrievers import SelfQueryRetriever -from langchain_community.llms import BaseLLM -from langchain_community.vectorstores.qdrant import Qdrant -from langchain_core.documents import Document -from langchain_core.embeddings import Embeddings -from langchain_core.output_parsers.string import StrOutputParser -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from langchain_openai import OpenAI, OpenAIEmbeddings -from qdrant_client import QdrantClient - -from self_query_qdrant import defaults, helper, prompts - - -class Query(BaseModel): - __root__: str - - -def create_chain( - llm: Optional[BaseLLM] = None, - embeddings: Optional[Embeddings] = None, - document_contents: str = defaults.DEFAULT_DOCUMENT_CONTENTS, - metadata_field_info: List[AttributeInfo] = defaults.DEFAULT_METADATA_FIELD_INFO, - collection_name: str = defaults.DEFAULT_COLLECTION_NAME, -): - """ - Create a chain that can be used to query a Qdrant vector store with a self-querying - capability. By default, this chain will use the OpenAI LLM and OpenAIEmbeddings, and - work with the default document contents and metadata field info. You can override - these defaults by passing in your own values. - :param llm: an LLM to use for generating text - :param embeddings: an Embeddings to use for generating queries - :param document_contents: a description of the document set - :param metadata_field_info: list of metadata attributes - :param collection_name: name of the Qdrant collection to use - :return: - """ - llm = llm or OpenAI() - embeddings = embeddings or OpenAIEmbeddings() - - # Set up a vector store to store your vectors and metadata - client = QdrantClient( - url=os.environ.get("QDRANT_URL", "http://localhost:6333"), - api_key=os.environ.get("QDRANT_API_KEY"), - ) - vectorstore = Qdrant( - client=client, - collection_name=collection_name, - embeddings=embeddings, - ) - - # Set up a retriever to query your vector store with self-querying capabilities - retriever = SelfQueryRetriever.from_llm( - llm, vectorstore, document_contents, metadata_field_info, verbose=True - ) - - context = RunnableParallel( - context=retriever | helper.combine_documents, - query=RunnablePassthrough(), - ) - pipeline = context | prompts.LLM_CONTEXT_PROMPT | llm | StrOutputParser() - return pipeline.with_types(input_type=Query) - - -def initialize( - embeddings: Optional[Embeddings] = None, - collection_name: str = defaults.DEFAULT_COLLECTION_NAME, - documents: List[Document] = defaults.DEFAULT_DOCUMENTS, -): - """ - Initialize a vector store with a set of documents. By default, the documents will be - compatible with the default metadata field info. 
You can override these defaults by - passing in your own values. - :param embeddings: an Embeddings to use for generating queries - :param collection_name: name of the Qdrant collection to use - :param documents: a list of documents to initialize the vector store with - :return: - """ - embeddings = embeddings or OpenAIEmbeddings() - - # Set up a vector store to store your vectors and metadata - Qdrant.from_documents( - documents, - embedding=embeddings, - collection_name=collection_name, - url=os.environ.get("QDRANT_URL", "http://localhost:6333"), - api_key=os.environ.get("QDRANT_API_KEY"), - ) - - -# Create the default chain -chain = create_chain() diff --git a/templates/self-query-qdrant/self_query_qdrant/defaults.py b/templates/self-query-qdrant/self_query_qdrant/defaults.py deleted file mode 100644 index 84dfb98560f..00000000000 --- a/templates/self-query-qdrant/self_query_qdrant/defaults.py +++ /dev/null @@ -1,134 +0,0 @@ -from langchain.chains.query_constructor.schema import AttributeInfo -from langchain_core.documents import Document - -# Qdrant collection name -DEFAULT_COLLECTION_NAME = "restaurants" - -# Here is a description of the dataset and metadata attributes. Metadata attributes will -# be used to filter the results of the query beyond the semantic search. -DEFAULT_DOCUMENT_CONTENTS = ( - "Dishes served at different restaurants, along with the restaurant information" -) -DEFAULT_METADATA_FIELD_INFO = [ - AttributeInfo( - name="price", - description="The price of the dish", - type="float", - ), - AttributeInfo( - name="restaurant.name", - description="The name of the restaurant", - type="string", - ), - AttributeInfo( - name="restaurant.location", - description="Name of the city where the restaurant is located", - type="string or list[string]", - ), -] - -# A default set of documents to use for the vector store. This is a list of Document -# objects, which have a page_content field and a metadata field. The metadata field is a -# dictionary of metadata attributes compatible with the metadata field info above. 
-DEFAULT_DOCUMENTS = [ - Document( - page_content="Pepperoni pizza with extra cheese, crispy crust", - metadata={ - "price": 10.99, - "restaurant": { - "name": "Pizza Hut", - "location": ["New York", "Chicago"], - }, - }, - ), - Document( - page_content="Spaghetti with meatballs and tomato sauce", - metadata={ - "price": 12.99, - "restaurant": { - "name": "Olive Garden", - "location": ["New York", "Chicago", "Los Angeles"], - }, - }, - ), - Document( - page_content="Chicken tikka masala with naan", - metadata={ - "price": 14.99, - "restaurant": { - "name": "Indian Oven", - "location": ["New York", "Los Angeles"], - }, - }, - ), - Document( - page_content="Chicken teriyaki with rice", - metadata={ - "price": 11.99, - "restaurant": { - "name": "Sakura", - "location": ["New York", "Chicago", "Los Angeles"], - }, - }, - ), - Document( - page_content="Scabbard fish with banana and passion fruit sauce", - metadata={ - "price": 19.99, - "restaurant": { - "name": "A Concha", - "location": ["San Francisco"], - }, - }, - ), - Document( - page_content="Pielmieni with sour cream", - metadata={ - "price": 13.99, - "restaurant": { - "name": "Russian House", - "location": ["New York", "Chicago"], - }, - }, - ), - Document( - page_content="Chicken biryani with raita", - metadata={ - "price": 14.99, - "restaurant": { - "name": "Indian Oven", - "location": ["Los Angeles"], - }, - }, - ), - Document( - page_content="Tomato soup with croutons", - metadata={ - "price": 7.99, - "restaurant": { - "name": "Olive Garden", - "location": ["New York", "Chicago", "Los Angeles"], - }, - }, - ), - Document( - page_content="Vegan burger with sweet potato fries", - metadata={ - "price": 12.99, - "restaurant": { - "name": "Burger King", - "location": ["New York", "Los Angeles"], - }, - }, - ), - Document( - page_content="Chicken nuggets with french fries", - metadata={ - "price": 9.99, - "restaurant": { - "name": "McDonald's", - "location": ["San Francisco", "New York", "Los Angeles"], - }, - }, - ), -] diff --git a/templates/self-query-qdrant/self_query_qdrant/helper.py b/templates/self-query-qdrant/self_query_qdrant/helper.py deleted file mode 100644 index ab3156efb77..00000000000 --- a/templates/self-query-qdrant/self_query_qdrant/helper.py +++ /dev/null @@ -1,27 +0,0 @@ -from string import Formatter -from typing import List - -from langchain_core.documents import Document - -document_template = """ -PASSAGE: {page_content} -METADATA: {metadata} -""" - - -def combine_documents(documents: List[Document]) -> str: - """ - Combine a list of documents into a single string that might be passed further down - to a language model. - :param documents: list of documents to combine - :return: - """ - formatter = Formatter() - return "\n\n".join( - formatter.format( - document_template, - page_content=document.page_content, - metadata=document.metadata, - ) - for document in documents - ) diff --git a/templates/self-query-qdrant/self_query_qdrant/prompts.py b/templates/self-query-qdrant/self_query_qdrant/prompts.py deleted file mode 100644 index bb478eb8188..00000000000 --- a/templates/self-query-qdrant/self_query_qdrant/prompts.py +++ /dev/null @@ -1,16 +0,0 @@ -from langchain_core.prompts import PromptTemplate - -llm_context_prompt_template = """ -Answer the user query using provided passages. Each passage has metadata given as -a nested JSON object you can also use. When answering, cite source name of the passages -you are answering from below the answer in a unique bullet point list. 
- -If you don't know the answer, just say that you don't know, don't try to make up an answer. - ----- -{context} ----- -Query: {query} -""" # noqa: E501 - -LLM_CONTEXT_PROMPT = PromptTemplate.from_template(llm_context_prompt_template) diff --git a/templates/self-query-supabase/.gitignore b/templates/self-query-supabase/.gitignore deleted file mode 100644 index 4c49bd78f1d..00000000000 --- a/templates/self-query-supabase/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.env diff --git a/templates/self-query-supabase/README.md b/templates/self-query-supabase/README.md deleted file mode 100644 index eaa83f43dad..00000000000 --- a/templates/self-query-supabase/README.md +++ /dev/null @@ -1,127 +0,0 @@ -# Self-query - Supabase - -This template allows natural language structured querying of `Supabase`. - -[Supabase](https://supabase.com/docs) is an open-source alternative to `Firebase`, built on top of [PostgreSQL](https://en.wikipedia.org/wiki/PostgreSQL). - -It uses [pgvector](https://github.com/pgvector/pgvector) to store embeddings within your tables. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -To get your `OPENAI_API_KEY`, navigate to [API keys](https://platform.openai.com/account/api-keys) on your OpenAI account and create a new secret key. - -To find your `SUPABASE_URL` and `SUPABASE_SERVICE_KEY`, head to your Supabase project's [API settings](https://supabase.com/dashboard/project/_/settings/api). - -- `SUPABASE_URL` corresponds to the Project URL -- `SUPABASE_SERVICE_KEY` corresponds to the `service_role` API key - - -```shell -export SUPABASE_URL= -export SUPABASE_SERVICE_KEY= -export OPENAI_API_KEY= -``` - -## Setup Supabase Database - -Use these steps to setup your Supabase database if you haven't already. - -1. Head over to https://database.new to provision your Supabase database. -2. 
In the studio, jump to the [SQL editor](https://supabase.com/dashboard/project/_/sql/new) and run the following script to enable `pgvector` and setup your database as a vector store: - - ```sql - -- Enable the pgvector extension to work with embedding vectors - create extension if not exists vector; - - -- Create a table to store your documents - create table - documents ( - id uuid primary key, - content text, -- corresponds to Document.pageContent - metadata jsonb, -- corresponds to Document.metadata - embedding vector (1536) -- 1536 works for OpenAI embeddings, change as needed - ); - - -- Create a function to search for documents - create function match_documents ( - query_embedding vector (1536), - filter jsonb default '{}' - ) returns table ( - id uuid, - content text, - metadata jsonb, - similarity float - ) language plpgsql as $$ - #variable_conflict use_column - begin - return query - select - id, - content, - metadata, - 1 - (documents.embedding <=> query_embedding) as similarity - from documents - where metadata @> filter - order by documents.embedding <=> query_embedding; - end; - $$; - ``` - -## Usage - -To use this package, install the LangChain CLI first: - -```shell -pip install -U langchain-cli -``` - -Create a new LangChain project and install this package as the only one: - -```shell -langchain app new my-app --package self-query-supabase -``` - -To add this to an existing project, run: - -```shell -langchain app add self-query-supabase -``` - -Add the following code to your `server.py` file: -```python -from self_query_supabase.chain import chain as self_query_supabase_chain - -add_routes(app, self_query_supabase_chain, path="/self-query-supabase") -``` - -(Optional) If you have access to LangSmith, configure it to help trace, monitor and debug LangChain applications. If you don't have access, skip this section. - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -You can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -Access the playground at [http://127.0.0.1:8000/self-query-supabase/playground](http://127.0.0.1:8000/self-query-supabase/playground) - -Access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/self-query-supabase") -``` - -TODO: Instructions to set up the Supabase database and install the package. 
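A minimal indexing sketch that would cover that TODO might look like the following. It is illustrative only: it assumes the `documents` table and `match_documents` function created above, and uses sample movie metadata matching the attributes the retriever filters on.

```python
import os

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores.supabase import SupabaseVectorStore
from langchain_core.documents import Document
from supabase.client import create_client

supabase = create_client(os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"])

# Example documents whose metadata matches the attributes the self-query retriever filters on
docs = [
    Document(
        page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
        metadata={
            "year": 1993,
            "genre": "science fiction",
            "director": "Steven Spielberg",
            "rating": 7.7,
        },
    ),
]

# Embed the documents and store them in the `documents` table via the `match_documents` query
SupabaseVectorStore.from_documents(
    docs,
    OpenAIEmbeddings(),
    client=supabase,
    table_name="documents",
    query_name="match_documents",
)
```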
diff --git a/templates/self-query-supabase/pyproject.toml b/templates/self-query-supabase/pyproject.toml deleted file mode 100644 index c369c586e55..00000000000 --- a/templates/self-query-supabase/pyproject.toml +++ /dev/null @@ -1,40 +0,0 @@ -[tool.poetry] -name = "self-query-supabase" -version = "0.1.0" -description = "QA with Supabase using natural language" -authors = [ - "Greg Richardson ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -tiktoken = "^0.5.1" -supabase = "^1.2.0" -lark = "^1.1.8" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -[tool.poetry.group.dev.dependencies.python-dotenv] -extras = [ - "cli", -] -version = "^1.0.0" - -[tool.langserve] -export_module = "self_query_supabase.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "Supabase" -integrations = ["OpenAI", "Supabase"] -tags = ["vectordbs"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/self-query-supabase/self_query_supabase/__init__.py b/templates/self-query-supabase/self_query_supabase/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/self-query-supabase/self_query_supabase/chain.py b/templates/self-query-supabase/self_query_supabase/chain.py deleted file mode 100644 index 15e668869e8..00000000000 --- a/templates/self-query-supabase/self_query_supabase/chain.py +++ /dev/null @@ -1,54 +0,0 @@ -import os - -from langchain.chains.query_constructor.base import AttributeInfo -from langchain.retrievers.self_query.base import SelfQueryRetriever -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.llms.openai import OpenAI -from langchain_community.vectorstores.supabase import SupabaseVectorStore -from langchain_core.runnables import RunnableParallel, RunnablePassthrough -from supabase.client import create_client - -supabase_url = os.environ.get("SUPABASE_URL") -supabase_key = os.environ.get("SUPABASE_SERVICE_KEY") -supabase = create_client(supabase_url, supabase_key) - -embeddings = OpenAIEmbeddings() - -vectorstore = SupabaseVectorStore( - client=supabase, - embedding=embeddings, - table_name="documents", - query_name="match_documents", -) - -# Adjust this based on the metadata you store in the `metadata` JSON column -metadata_field_info = [ - AttributeInfo( - name="genre", - description="The genre of the movie", - type="string or list[string]", - ), - AttributeInfo( - name="year", - description="The year the movie was released", - type="integer", - ), - AttributeInfo( - name="director", - description="The name of the movie director", - type="string", - ), - AttributeInfo( - name="rating", description="A 1-10 rating for the movie", type="float" - ), -] - -# Adjust this based on the type of documents you store -document_content_description = "Brief summary of a movie" -llm = OpenAI(temperature=0) - -retriever = SelfQueryRetriever.from_llm( - llm, vectorstore, document_content_description, metadata_field_info, verbose=True -) - -chain = RunnableParallel({"query": RunnablePassthrough()}) | retriever diff --git a/templates/self-query-supabase/tests/__init__.py b/templates/self-query-supabase/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/shopping-assistant/README.md b/templates/shopping-assistant/README.md deleted file mode 100644 index 2e3ea7e7bd0..00000000000 --- a/templates/shopping-assistant/README.md +++ 
/dev/null @@ -1,69 +0,0 @@ -# Shopping assistant - Ionic - -This template creates a `shopping assistant` that helps users find products that they are looking for. - -This template will use `Ionic` to search for products. - -## Environment Setup - -This template will use `OpenAI` by default. -Be sure that `OPENAI_API_KEY` is set in your environment. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package shopping-assistant -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add shopping-assistant -``` - -And add the following code to your `server.py` file: -```python -from shopping_assistant.agent import agent_executor as shopping_assistant_chain - -add_routes(app, shopping_assistant_chain, path="/shopping-assistant") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/shopping-assistant/playground](http://127.0.0.1:8000/shopping-assistant/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/shopping-assistant") -``` diff --git a/templates/shopping-assistant/pyproject.toml b/templates/shopping-assistant/pyproject.toml deleted file mode 100644 index ef3e3abf8a6..00000000000 --- a/templates/shopping-assistant/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "shopping-assistant" -version = "0.0.1" -description = "A template for a shopping assistant agent" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.12,<4.0" -langchain = "^0.1" -openai = "<2" -ionic-langchain = "^0.2.2" -langchain-openai = "^0.0.5" -langchainhub = "^0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "shopping_assistant.agent" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "chatbot" -author = "LangChain" -integrations = ["Ionic"] -tags = ["conversation", "agents"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/shopping-assistant/shopping_assistant/__init__.py b/templates/shopping-assistant/shopping_assistant/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/shopping-assistant/shopping_assistant/agent.py b/templates/shopping-assistant/shopping_assistant/agent.py deleted file mode 100644 index a243e0704aa..00000000000 --- a/templates/shopping-assistant/shopping_assistant/agent.py +++ /dev/null @@ -1,48 +0,0 @@ -from typing import List, Tuple - -from ionic_langchain.tool import IonicTool -from 
langchain.agents import AgentExecutor, create_openai_tools_agent -from langchain_core.messages import AIMessage, SystemMessage -from langchain_core.prompts import ( - ChatPromptTemplate, - HumanMessagePromptTemplate, - MessagesPlaceholder, -) -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.tools import tool -from langchain_openai import ChatOpenAI - -tools = [tool(IonicTool())] - -llm = ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo-1106", streaming=True) - -# You can modify these! -AI_CONTENT = """ -I should use the full pdp url that the tool provides me. -Always include query parameters -""" -SYSTEM_CONTENT = """ -You are a shopping assistant. -You help humans find the best product given their {input}. -""" -messages = [ - SystemMessage(content=SYSTEM_CONTENT), - HumanMessagePromptTemplate.from_template("{input}"), - AIMessage(content=AI_CONTENT), - MessagesPlaceholder(variable_name="agent_scratchpad"), -] - -prompt = ChatPromptTemplate.from_messages(messages) -agent = create_openai_tools_agent(llm, tools, prompt) - - -class AgentInput(BaseModel): - input: str - chat_history: List[Tuple[str, str]] = Field( - ..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}} - ) - - -agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True).with_types( - input_type=AgentInput -) diff --git a/templates/skeleton-of-thought/.gitignore b/templates/skeleton-of-thought/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/skeleton-of-thought/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/skeleton-of-thought/LICENSE b/templates/skeleton-of-thought/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/skeleton-of-thought/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/skeleton-of-thought/README.md b/templates/skeleton-of-thought/README.md deleted file mode 100644 index f6a0d8a1f2c..00000000000 --- a/templates/skeleton-of-thought/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# Skeleton-of-Thought - -It implements [Skeleton-of-Thought: Prompting LLMs for Efficient Parallel Generation](https://arxiv.org/abs/2307.15337) paper. - -This technique makes it possible to generate longer generations more quickly by first generating a skeleton, then generating each point of the outline. 
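As a rough illustration of the pattern (separate from this template's own `chain.py`, with an invented question and simplified prompts), the skeleton is drafted in one call and the points are then expanded concurrently:

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

llm = ChatOpenAI()

# Stage 1: draft a short numbered skeleton of the answer
skeleton = (
    ChatPromptTemplate.from_template(
        "Give a short numbered skeleton (3-5 points, a few words each) answering: {question}"
    )
    | llm
    | StrOutputParser()
)

# Stage 2: expand a single skeleton point in one or two sentences
expand = (
    ChatPromptTemplate.from_template(
        "Question: {question}\nSkeleton:\n{skeleton}\n"
        "Expand only this point in 1-2 sentences: {point}"
    )
    | llm
    | StrOutputParser()
)

question = "Why did the Roman Empire fall?"
outline = skeleton.invoke({"question": question})
points = [line for line in outline.splitlines() if line.strip()]

# .batch() fans the per-point calls out concurrently, which is where the speedup comes from
expansions = expand.batch(
    [{"question": question, "skeleton": outline, "point": p} for p in points]
)
print("\n\n".join(expansions))
```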
- -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -To get your `OPENAI_API_KEY`, navigate to [API keys](https://platform.openai.com/account/api-keys) on your OpenAI account and create a new secret key. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package skeleton-of-thought -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add skeleton-of-thought -``` - -And add the following code to your `server.py` file: -```python -from skeleton_of_thought import chain as skeleton_of_thought_chain - -add_routes(app, skeleton_of_thought_chain, path="/skeleton-of-thought") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/skeleton-of-thought/playground](http://127.0.0.1:8000/skeleton-of-thought/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/skeleton-of-thought") -``` \ No newline at end of file diff --git a/templates/skeleton-of-thought/pyproject.toml b/templates/skeleton-of-thought/pyproject.toml deleted file mode 100644 index fafc2ab06eb..00000000000 --- a/templates/skeleton-of-thought/pyproject.toml +++ /dev/null @@ -1,30 +0,0 @@ -[tool.poetry] -name = "skeleton-of-thought" -version = "0.0.1" -description = "Generate longer outputs by building sections from a skeleton outline" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "^0.28.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "skeleton_of_thought" -export_attr = "chain" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["OpenAI"] -tags = ["research", "paper"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/skeleton-of-thought/skeleton_of_thought/__init__.py b/templates/skeleton-of-thought/skeleton_of_thought/__init__.py deleted file mode 100644 index 7514cd35650..00000000000 --- a/templates/skeleton-of-thought/skeleton_of_thought/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from skeleton_of_thought.chain import chain - -__all__ = ["chain"] diff --git a/templates/skeleton-of-thought/skeleton_of_thought/chain.py b/templates/skeleton-of-thought/skeleton_of_thought/chain.py deleted file mode 100644 index 0a7bd692f3c..00000000000 --- a/templates/skeleton-of-thought/skeleton_of_thought/chain.py +++ /dev/null @@ -1,96 +0,0 @@ 
-from langchain_community.chat_models import ChatOpenAI -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough - -skeleton_generator_template = """[User:] You’re an organizer responsible for only \ -giving the skeleton (not the full content) for answering the question. -Provide the skeleton in a list of points (numbered 1., 2., 3., etc.) to answer \ -the question. \ -Instead of writing a full sentence, each skeleton point should be very short \ -with only 3∼5 words. \ -Generally, the skeleton should have 3∼10 points. Now, please provide the skeleton \ -for the following question. -{question} -Skeleton: -[Assistant:] 1.""" - -skeleton_generator_prompt = ChatPromptTemplate.from_template( - skeleton_generator_template -) - -skeleton_generator_chain = ( - skeleton_generator_prompt | ChatOpenAI() | StrOutputParser() | (lambda x: "1. " + x) -) - -point_expander_template = """[User:] You’re responsible for continuing \ -the writing of one and only one point in the overall answer to the following question. -{question} -The skeleton of the answer is -{skeleton} -Continue and only continue the writing of point {point_index}. \ -Write it **very shortly** in 1∼2 sentence and do not continue with other points! -[Assistant:] {point_index}. {point_skeleton}""" - -point_expander_prompt = ChatPromptTemplate.from_template(point_expander_template) - -point_expander_chain = RunnablePassthrough.assign( - continuation=point_expander_prompt | ChatOpenAI() | StrOutputParser() -) | (lambda x: x["point_skeleton"].strip() + " " + x["continuation"]) - - -def parse_numbered_list(input_str): - """Parses a numbered list into a list of dictionaries - - Each element having two keys: - 'index' for the index in the numbered list, and 'point' for the content. - """ - # Split the input string into lines - lines = input_str.split("\n") - - # Initialize an empty list to store the parsed items - parsed_list = [] - - for line in lines: - # Split each line at the first period to separate the index from the content - parts = line.split(". ", 1) - - if len(parts) == 2: - # Convert the index part to an integer - # and strip any whitespace from the content - index = int(parts[0]) - point = parts[1].strip() - - # Add a dictionary to the parsed list - parsed_list.append({"point_index": index, "point_skeleton": point}) - - return parsed_list - - -def create_list_elements(_input): - skeleton = _input["skeleton"] - numbered_list = parse_numbered_list(skeleton) - for el in numbered_list: - el["skeleton"] = skeleton - el["question"] = _input["question"] - return numbered_list - - -def get_final_answer(expanded_list): - final_answer_str = "Here's a comprehensive answer:\n\n" - for i, el in enumerate(expanded_list): - final_answer_str += f"{i+1}. 
{el}\n\n" - return final_answer_str - - -class ChainInput(BaseModel): - question: str - - -chain = ( - RunnablePassthrough.assign(skeleton=skeleton_generator_chain) - | create_list_elements - | point_expander_chain.map() - | get_final_answer -).with_types(input_type=ChainInput) diff --git a/templates/skeleton-of-thought/tests/__init__.py b/templates/skeleton-of-thought/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/solo-performance-prompting-agent/LICENSE b/templates/solo-performance-prompting-agent/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/solo-performance-prompting-agent/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/solo-performance-prompting-agent/README.md b/templates/solo-performance-prompting-agent/README.md deleted file mode 100644 index e4252a7f7fc..00000000000 --- a/templates/solo-performance-prompting-agent/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Solo performance prompting agent - -This template creates an agent that transforms a single LLM -into a cognitive synergist by engaging in multi-turn self-collaboration -with multiple personas. - -A `cognitive synergist` refers to an intelligent agent that collaborates -with multiple minds, combining their individual strengths and knowledge, -to enhance problem-solving and overall performance in complex tasks. -By dynamically identifying and simulating different personas based -on task inputs, SPP unleashes the potential of cognitive synergy in LLMs. - -This template will use the `DuckDuckGo` search API. - -## Environment Setup - -This template will use `OpenAI` by default. -Be sure that `OPENAI_API_KEY` is set in your environment. 
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package solo-performance-prompting-agent -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add solo-performance-prompting-agent -``` - -And add the following code to your `server.py` file: -```python -from solo_performance_prompting_agent.agent import agent_executor as solo_performance_prompting_agent_chain - -add_routes(app, solo_performance_prompting_agent_chain, path="/solo-performance-prompting-agent") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/solo-performance-prompting-agent/playground](http://127.0.0.1:8000/solo-performance-prompting-agent/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/solo-performance-prompting-agent") -``` \ No newline at end of file diff --git a/templates/solo-performance-prompting-agent/pyproject.toml b/templates/solo-performance-prompting-agent/pyproject.toml deleted file mode 100644 index fe79626b8b4..00000000000 --- a/templates/solo-performance-prompting-agent/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "solo-performance-prompting-agent" -version = "0.0.1" -description = "Agent that transforms a single LLM into a cognitive synergist that has multiple points of view" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -duckduckgo-search = "^3.9.3" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "solo_performance_prompting_agent.agent" -export_attr = "agent_executor" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["OpenAI"] -tags = ["data", "agents"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/__init__.py b/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/agent.py b/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/agent.py deleted file mode 100644 index 3e9c272f08c..00000000000 --- a/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/agent.py +++ /dev/null @@ -1,38 +0,0 @@ -from langchain.agents import 
AgentExecutor -from langchain.agents.format_scratchpad import format_xml -from langchain.tools import DuckDuckGoSearchRun -from langchain_community.llms import OpenAI -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.tools.render import render_text_description - -from solo_performance_prompting_agent.parser import parse_output -from solo_performance_prompting_agent.prompts import conversational_prompt - -_model = OpenAI() -_tools = [DuckDuckGoSearchRun()] -_prompt = conversational_prompt.partial( - tools=render_text_description(_tools), - tool_names=", ".join([t.name for t in _tools]), -) -_llm_with_stop = _model.bind(stop=["", ""]) - -agent = ( - { - "question": lambda x: x["question"], - "agent_scratchpad": lambda x: format_xml(x["intermediate_steps"]), - } - | _prompt - | _llm_with_stop - | parse_output -) - - -class AgentInput(BaseModel): - question: str - - -agent_executor = AgentExecutor( - agent=agent, tools=_tools, verbose=True, handle_parsing_errors=True -).with_types(input_type=AgentInput) - -agent_executor = agent_executor | (lambda x: x["output"]) diff --git a/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/parser.py b/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/parser.py deleted file mode 100644 index 0fceca50947..00000000000 --- a/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/parser.py +++ /dev/null @@ -1,18 +0,0 @@ -from langchain_core.agents import AgentAction, AgentFinish - - -def parse_output(message: str): - FINAL_ANSWER_ACTION = "" - includes_answer = FINAL_ANSWER_ACTION in message - if includes_answer: - answer = message.split(FINAL_ANSWER_ACTION)[1].strip() - if "" in answer: - answer = answer.split("")[0].strip() - return AgentFinish(return_values={"output": answer}, log=message) - elif "" in message: - tool, tool_input = message.split("") - _tool = tool.split("")[1] - _tool_input = tool_input.split("")[1] - if "" in _tool_input: - _tool_input = _tool_input.split("")[0] - return AgentAction(tool=_tool, tool_input=_tool_input, log=message) diff --git a/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/prompts.py b/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/prompts.py deleted file mode 100644 index e3a1858adcd..00000000000 --- a/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/prompts.py +++ /dev/null @@ -1,54 +0,0 @@ -from langchain_core.prompts import ChatPromptTemplate - -template = """When faced with a task, begin by identifying the participants who will contribute to solving the task. Then, initiate a multi-round collaboration process until a final solution is reached. The participants will - give critical comments and detailed suggestions whenever necessary. - The experts also have access to {tools} and can use them based on their expertise. - In order to use a tool, the participants can use and tags. They will then get back a response in the form - For example, if they have a tool called 'search' that could run a google search, in order to search for the weather in SF they would respond: - - searchweather in SF - 64 degrees - - When they are done, they can respond with the answer to the conversation. - Once the participants have reached a final solution, they can respond with the final answer in the form - Here are some examples: - --- - Example 1: Use numbers 6 12 1 1 and basic arithmetic operations (+ - * /) to obtain 24. 
You need to use all numbers, and each number can only be used once. - Participants: AI Assistant (you); Math Expert - Start collaboration! - Math Expert: Let's analyze the task in detail. You need to make sure that you meet the requirement, that you need to use exactly the four numbers (6 12 1 1) to construct 24. To reach 24, you can think - of the common divisors of 24 such as 4, 6, 8, 3 and try to construct these first. Also you need to think of potential additions that can reach 24, such as 12 + 12. - AI Assistant (you): Thanks for the hints! Here's one initial solution: (12 / (1 + 1)) * 6 = 24 - Math Expert: Let's check the answer step by step. (1+1) = 2, (12 / 2) = 6, 6 * 6 = 36 which is not 24! The answer is not correct. Can you fix this by considering other combinations? Please do not make - similar mistakes. - AI Assistant (you): Thanks for pointing out the mistake. Here is a revised solution considering 24 can also be reached by 3 * 8: (6 + 1 + 1) * (12 / 4) = 24. - Math Expert: Let's first check if the calculation is correct. (6 + 1 + 1) = 8, 12 / 4 = 3, 8 * 3 = 24. The calculation is correct, but you used 6 1 1 12 4 which is not the same as the input 6 12 1 1. Can you - avoid using a number that is not part of the input? - AI Assistant (you): You are right, here is a revised solution considering 24 can be reached by 12 + 12 and without using any additional numbers: 6 * (1 - 1) + 12 = 24. - Math Expert: Let's check the answer again. 1 - 1 = 0, 6 * 0 = 0, 0 + 12 = 12. I believe you are very close, here is a hint: try to change the "1 - 1" to "1 + 1". - AI Assistant (you): Sure, here is the corrected answer: 6 * (1+1) + 12 = 24 - Math Expert: Let's verify the solution. 1 + 1 = 2, 6 * 2 = 12, 12 + 12 = 12. You used 1 1 6 12 which is identical to the input 6 12 1 1. Everything looks good! - Finish collaboration! - 6 * (1 + 1) + 12 = 24 - - --- - Example 2: Who is the father of the longest serving US president? - Participants: AI Assistant (you); History Expert - Start collaboration! - History Expert: The longest serving US president is Franklin D. Roosevelt. He served for 12 years and 39 days. We need to run a search to find out who is his father. - AI Assistant (you): Thanks for the hints! Let me run a search: searchWho is the father of Franklin D. Roosevelt? - James Roosevelt I - AI Assistant (you): James Roosevelt I is the father of Franklin D. Roosevelt, the longest serving US President. - History Expert: Everything looks good! - Finish collaboration! - James Roosevelt I is the father of Franklin D. Roosevelt, the longest serving US President. - --- - Now, identify the participants and collaboratively solve the following task step by step.""" # noqa: E501 - -conversational_prompt = ChatPromptTemplate.from_messages( - [ - ("system", template), - ("user", "{question}"), - ("ai", "{agent_scratchpad}"), - ] -) diff --git a/templates/solo-performance-prompting-agent/tests/__init__.py b/templates/solo-performance-prompting-agent/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/sql-llama2/README.md b/templates/sql-llama2/README.md deleted file mode 100644 index 4a399812782..00000000000 --- a/templates/sql-llama2/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# SQL - LLamA2 - -This template enables a user to interact with a `SQL` database using natural language. 
- -It uses `LLamA2-13b` hosted by [Replicate](https://python.langchain.com/docs/integrations/llms/replicate), but can be adapted to any API that supports LLaMA2 including [Fireworks](https://python.langchain.com/docs/integrations/chat/fireworks). - -The template includes an example database of 2023 NBA rosters. - -For more information on how to build this database, see [here](https://github.com/facebookresearch/llama-recipes/blob/main/demo_apps/StructuredLlama.ipynb). - -## Environment Setup - -Ensure the `REPLICATE_API_TOKEN` is set in your environment. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package sql-llama2 -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add sql-llama2 -``` - -And add the following code to your `server.py` file: -```python -from sql_llama2 import chain as sql_llama2_chain - -add_routes(app, sql_llama2_chain, path="/sql-llama2") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/sql-llama2/playground](http://127.0.0.1:8000/sql-llama2/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/sql-llama2") -``` diff --git a/templates/sql-llama2/pyproject.toml b/templates/sql-llama2/pyproject.toml deleted file mode 100644 index f75930403cf..00000000000 --- a/templates/sql-llama2/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "sql-llama2" -version = "0.1.0" -description = "QA with a SQL database using natural language and LLaMA2-13b" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -replicate = ">=0.15.4" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "sql_llama2" -export_attr = "chain" - -[tool.templates-hub] -use-case = "sql" -author = "LangChain" -integrations = ["Replicate"] -tags = ["sql"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/sql-llama2/sql_llama2.ipynb b/templates/sql-llama2/sql_llama2.ipynb deleted file mode 100644 index 51c0d76410e..00000000000 --- a/templates/sql-llama2/sql_llama2.ipynb +++ /dev/null @@ -1,66 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "22f3f9f9-80ee-4da1-ba12-105a0ce74203", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain, path=\"/sql_llama2\")\n", - "```\n", - "\n", - "This template includes an example DB of 2023 NBA 
rosters.\n", - "\n", - "We can ask questions related to NBA players. " - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "4545c603-77ec-4c15-b9c0-a70529eebed0", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\" Sure thing! Here's the natural language response based on the given SQL query and response:\\n\\nKlay Thompson plays for the Golden State Warriors.\"" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "sql_app = RemoteRunnable(\"http://0.0.0.0:8001/sql_llama2\")\n", - "sql_app.invoke({\"question\": \"What team is Klay Thompson on?\"})" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/sql-llama2/sql_llama2/__init__.py b/templates/sql-llama2/sql_llama2/__init__.py deleted file mode 100644 index a1340c49ff6..00000000000 --- a/templates/sql-llama2/sql_llama2/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from sql_llama2.chain import chain - -__all__ = ["chain"] diff --git a/templates/sql-llama2/sql_llama2/chain.py b/templates/sql-llama2/sql_llama2/chain.py deleted file mode 100644 index 3ace5f1c5b7..00000000000 --- a/templates/sql-llama2/sql_llama2/chain.py +++ /dev/null @@ -1,83 +0,0 @@ -from pathlib import Path - -from langchain_community.llms import Replicate -from langchain_community.utilities import SQLDatabase -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough - -# make sure to set REPLICATE_API_TOKEN in your environment -# use llama-2-13b model in replicate -replicate_id = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d" # noqa: E501 -llm = Replicate( - model=replicate_id, - model_kwargs={"temperature": 0.01, "max_length": 500, "top_p": 1}, -) - -db_path = Path(__file__).parent / "nba_roster.db" -rel = db_path.relative_to(Path.cwd()) -db_string = f"sqlite:///{rel}" -db = SQLDatabase.from_uri(db_string, sample_rows_in_table_info=0) - - -def get_schema(_): - return db.get_table_info() - - -def run_query(query): - return db.run(query) - - -template_query = """Based on the table schema below, write a SQL query that would answer the user's question: -{schema} - -Question: {question} -SQL Query:""" # noqa: E501 -prompt = ChatPromptTemplate.from_messages( - [ - ("system", "Given an input question, convert it to a SQL query. No pre-amble."), - ("human", template_query), - ] -) - -sql_response = ( - RunnablePassthrough.assign(schema=get_schema) - | prompt - | llm.bind(stop=["\nSQLResult:"]) - | StrOutputParser() -) - -template_response = """Based on the table schema below, question, sql query, and sql response, write a natural language response: -{schema} - -Question: {question} -SQL Query: {query} -SQL Response: {response}""" # noqa: E501 - -prompt_response = ChatPromptTemplate.from_messages( - [ - ( - "system", - "Given an input question and SQL response, convert it to a natural " - "language answer. 
No pre-amble.", - ), - ("human", template_response), - ] -) - - -# Supply the input types to the prompt -class InputType(BaseModel): - question: str - - -chain = ( - RunnablePassthrough.assign(query=sql_response).with_types(input_type=InputType) - | RunnablePassthrough.assign( - schema=get_schema, - response=lambda x: db.run(x["query"]), - ) - | prompt_response - | llm -) diff --git a/templates/sql-llama2/sql_llama2/nba_roster.db b/templates/sql-llama2/sql_llama2/nba_roster.db deleted file mode 100644 index 9ea4e20c367..00000000000 Binary files a/templates/sql-llama2/sql_llama2/nba_roster.db and /dev/null differ diff --git a/templates/sql-llama2/tests/__init__.py b/templates/sql-llama2/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/sql-llamacpp/README.md b/templates/sql-llamacpp/README.md deleted file mode 100644 index 86541e8b8be..00000000000 --- a/templates/sql-llamacpp/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# SQL - llama.cpp - -This template enables a user to interact with a `SQL` database using natural language. - -It uses [Mistral-7b](https://mistral.ai/news/announcing-mistral-7b/) via [llama.cpp](https://github.com/ggerganov/llama.cpp) to run inference locally on a Mac laptop. - -## Environment Setup - -To set up the environment, use the following steps: - -```shell -wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh -bash Miniforge3-MacOSX-arm64.sh -conda create -n llama python=3.9.16 -conda activate /Users/rlm/miniforge3/envs/llama -CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir -``` - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package sql-llamacpp -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add sql-llamacpp -``` - -And add the following code to your `server.py` file: -```python -from sql_llamacpp import chain as sql_llamacpp_chain - -add_routes(app, sql_llamacpp_chain, path="/sql-llamacpp") -``` - -The package will download the Mistral-7b model from [here](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF). You can select other files and specify their download path (browse [here](https://huggingface.co/TheBloke)). - -This package includes an example DB of 2023 NBA rosters. You can see instructions to build this DB [here](https://github.com/facebookresearch/llama-recipes/blob/main/demo_apps/StructuredLlama.ipynb). - -(Optional) Configure LangSmith for tracing, monitoring and debugging LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). 
If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -You can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -You can access the playground at [http://127.0.0.1:8000/sql-llamacpp/playground](http://127.0.0.1:8000/sql-llamacpp/playground) - -You can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/sql-llamacpp") -``` diff --git a/templates/sql-llamacpp/pyproject.toml b/templates/sql-llamacpp/pyproject.toml deleted file mode 100644 index 69111dedccc..00000000000 --- a/templates/sql-llamacpp/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "sql-llamacpp" -version = "0.1.0" -description = "Private QA with a SQL database using natural language and a local LLM via llama.cpp" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -llama-cpp-python = ">=0.1.79" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "sql_llamacpp" -export_attr = "chain" - -[tool.templates-hub] -use-case = "sql" -author = "LangChain" -integrations = ["LLamacpp"] -tags = ["sql"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/sql-llamacpp/sql-llamacpp.ipynb b/templates/sql-llamacpp/sql-llamacpp.ipynb deleted file mode 100644 index deb3fe2d843..00000000000 --- a/templates/sql-llamacpp/sql-llamacpp.ipynb +++ /dev/null @@ -1,66 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "a0314df0-da99-4086-a96f-b14df05b3362", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain, path=\"/sql_llamacpp\")\n", - "```\n", - "\n", - "This template includes an example DB of 2023 NBA rosters.\n", - "\n", - "We can ask questions related to NBA players. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "ff5869c6-2065-48f3-bb43-52a515968276", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'\\nNatural Language Response: Klay Thompson plays for the Golden State Warriors.'" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "sql_app = RemoteRunnable(\"http://0.0.0.0:8001/sql_llamacpp\")\n", - "sql_app.invoke({\"question\": \"What team is Klay Thompson on?\"})" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/sql-llamacpp/sql_llamacpp/__init__.py b/templates/sql-llamacpp/sql_llamacpp/__init__.py deleted file mode 100644 index ce4ab364355..00000000000 --- a/templates/sql-llamacpp/sql_llamacpp/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from sql_llamacpp.chain import chain - -__all__ = ["chain"] diff --git a/templates/sql-llamacpp/sql_llamacpp/chain.py b/templates/sql-llamacpp/sql_llamacpp/chain.py deleted file mode 100644 index 1dd53f69a5e..00000000000 --- a/templates/sql-llamacpp/sql_llamacpp/chain.py +++ /dev/null @@ -1,136 +0,0 @@ -# Get LLM -import os -from pathlib import Path - -import requests -from langchain.memory import ConversationBufferMemory -from langchain_community.llms import LlamaCpp -from langchain_community.utilities import SQLDatabase -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableLambda, RunnablePassthrough - -# File name and URL -file_name = "mistral-7b-instruct-v0.1.Q4_K_M.gguf" -url = ( - "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/" - "mistral-7b-instruct-v0.1.Q4_K_M.gguf" -) -# Check if file is present in the current directory -if not os.path.exists(file_name): - print(f"'{file_name}' not found. Downloading...") - # Download the file - response = requests.get(url) - response.raise_for_status() # Raise an exception for HTTP errors - with open(file_name, "wb") as f: - f.write(response.content) - print(f"'{file_name}' has been downloaded.") -else: - print(f"'{file_name}' already exists in the current directory.") - -# Add the LLM downloaded from HF -model_path = file_name -n_gpu_layers = 1 # Metal set to 1 is enough. - -# Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip. 
-n_batch = 512 - -llm = LlamaCpp( - model_path=model_path, - n_gpu_layers=n_gpu_layers, - n_batch=n_batch, - n_ctx=2048, - # f16_kv MUST set to True - # otherwise you will run into problem after a couple of calls - f16_kv=True, - verbose=True, -) - -db_path = Path(__file__).parent / "nba_roster.db" -rel = db_path.relative_to(Path.cwd()) -db_string = f"sqlite:///{rel}" -db = SQLDatabase.from_uri(db_string, sample_rows_in_table_info=0) - - -def get_schema(_): - return db.get_table_info() - - -def run_query(query): - return db.run(query) - - -# Prompt - -template = """Based on the table schema below, write a SQL query that would answer the user's question: -{schema} - -Question: {question} -SQL Query:""" # noqa: E501 -prompt = ChatPromptTemplate.from_messages( - [ - ("system", "Given an input question, convert it to a SQL query. No pre-amble."), - MessagesPlaceholder(variable_name="history"), - ("human", template), - ] -) - -memory = ConversationBufferMemory(return_messages=True) - -# Chain to query with memory - -sql_chain = ( - RunnablePassthrough.assign( - schema=get_schema, - history=RunnableLambda(lambda x: memory.load_memory_variables(x)["history"]), - ) - | prompt - | llm.bind(stop=["\nSQLResult:"]) - | StrOutputParser() -) - - -def save(input_output): - output = {"output": input_output.pop("output")} - memory.save_context(input_output, output) - return output["output"] - - -sql_response_memory = RunnablePassthrough.assign(output=sql_chain) | save - -# Chain to answer -template = """Based on the table schema below, question, sql query, and sql response, write a natural language response: -{schema} - -Question: {question} -SQL Query: {query} -SQL Response: {response}""" # noqa: E501 -prompt_response = ChatPromptTemplate.from_messages( - [ - ( - "system", - "Given an input question and SQL response, convert it to a natural " - "language answer. No pre-amble.", - ), - ("human", template), - ] -) - - -# Supply the input types to the prompt -class InputType(BaseModel): - question: str - - -chain = ( - RunnablePassthrough.assign(query=sql_response_memory).with_types( - input_type=InputType - ) - | RunnablePassthrough.assign( - schema=get_schema, - response=lambda x: db.run(x["query"]), - ) - | prompt_response - | llm -) diff --git a/templates/sql-llamacpp/sql_llamacpp/nba_roster.db b/templates/sql-llamacpp/sql_llamacpp/nba_roster.db deleted file mode 100644 index 9ea4e20c367..00000000000 Binary files a/templates/sql-llamacpp/sql_llamacpp/nba_roster.db and /dev/null differ diff --git a/templates/sql-llamacpp/tests/__init__.py b/templates/sql-llamacpp/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/sql-ollama/README.md b/templates/sql-ollama/README.md deleted file mode 100644 index 7264ac1a185..00000000000 --- a/templates/sql-ollama/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# SQL - Ollama - -This template enables a user to interact with a SQL database using natural language. - -It uses [Zephyr-7b](https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha) via [Ollama](https://ollama.ai/library/zephyr) to run inference locally on a Mac laptop. - -## Environment Setup - -Before using this template, you need to set up Ollama and SQL database. - -1. Follow instructions [here](https://python.langchain.com/docs/integrations/chat/ollama) to download Ollama. - -2. Download your LLM of interest: - - * This package uses `zephyr`: `ollama pull zephyr` - * You can choose from many LLMs [here](https://ollama.ai/library) - -3. 
This package includes an example DB of 2023 NBA rosters. You can see instructions to build this DB [here](https://github.com/facebookresearch/llama-recipes/blob/main/demo_apps/StructuredLlama.ipynb). - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package sql-ollama -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add sql-ollama -``` - -And add the following code to your `server.py` file: - -```python -from sql_ollama import chain as sql_ollama_chain - -add_routes(app, sql_ollama_chain, path="/sql-ollama") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/sql-ollama/playground](http://127.0.0.1:8000/sql-ollama/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/sql-ollama") -``` \ No newline at end of file diff --git a/templates/sql-ollama/pyproject.toml b/templates/sql-ollama/pyproject.toml deleted file mode 100644 index de462021751..00000000000 --- a/templates/sql-ollama/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "sql-ollama" -version = "0.1.0" -description = "Private QA with a SQL database using natural language and a local LLM" -authors = [ - "Lance Martin ", -] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "sql_ollama" -export_attr = "chain" - -[tool.templates-hub] -use-case = "sql" -author = "LangChain" -integrations = ["Ollama"] -tags = ["sql"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/sql-ollama/sql-ollama.ipynb b/templates/sql-ollama/sql-ollama.ipynb deleted file mode 100644 index 0c086a20d11..00000000000 --- a/templates/sql-ollama/sql-ollama.ipynb +++ /dev/null @@ -1,66 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "d55f5fd9-21eb-433d-9259-0a588d9197c0", - "metadata": {}, - "source": [ - "## Run Template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain, path=\"/sql_ollama\")\n", - "```\n", - "\n", - "This template includes an example DB of 2023 NBA rosters.\n", - "\n", - "We can ask questions related to NBA players. " - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "50c27e82-92d8-4fa1-8bc4-b6544e59773d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage(content=' Sure! 
Here\\'s the natural language response based on the input:\\n\\n\"Klay Thompson plays for the Golden State Warriors.\"')" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "sql_app = RemoteRunnable(\"http://0.0.0.0:8001/sql_ollama\")\n", - "sql_app.invoke({\"question\": \"What team is Klay Thompson on?\"})" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/sql-ollama/sql_ollama/__init__.py b/templates/sql-ollama/sql_ollama/__init__.py deleted file mode 100644 index c3d1ef029b2..00000000000 --- a/templates/sql-ollama/sql_ollama/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from sql_ollama.chain import chain - -__all__ = ["chain"] diff --git a/templates/sql-ollama/sql_ollama/chain.py b/templates/sql-ollama/sql_ollama/chain.py deleted file mode 100644 index de36474d46c..00000000000 --- a/templates/sql-ollama/sql_ollama/chain.py +++ /dev/null @@ -1,102 +0,0 @@ -from pathlib import Path - -from langchain.memory import ConversationBufferMemory -from langchain_community.chat_models import ChatOllama -from langchain_community.utilities import SQLDatabase -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableLambda, RunnablePassthrough - -# Add the LLM downloaded from Ollama -ollama_llm = "zephyr" -llm = ChatOllama(model=ollama_llm) - - -db_path = Path(__file__).parent / "nba_roster.db" -rel = db_path.relative_to(Path.cwd()) -db_string = f"sqlite:///{rel}" -db = SQLDatabase.from_uri(db_string, sample_rows_in_table_info=0) - - -def get_schema(_): - return db.get_table_info() - - -def run_query(query): - return db.run(query) - - -# Prompt - -template = """Based on the table schema below, write a SQL query that would answer the user's question: -{schema} - -Question: {question} -SQL Query:""" # noqa: E501 -prompt = ChatPromptTemplate.from_messages( - [ - ("system", "Given an input question, convert it to a SQL query. 
No pre-amble."), - MessagesPlaceholder(variable_name="history"), - ("human", template), - ] -) - -memory = ConversationBufferMemory(return_messages=True) - -# Chain to query with memory - -sql_chain = ( - RunnablePassthrough.assign( - schema=get_schema, - history=RunnableLambda(lambda x: memory.load_memory_variables(x)["history"]), - ) - | prompt - | llm.bind(stop=["\nSQLResult:"]) - | StrOutputParser() -) - - -def save(input_output): - output = {"output": input_output.pop("output")} - memory.save_context(input_output, output) - return output["output"] - - -sql_response_memory = RunnablePassthrough.assign(output=sql_chain) | save - -# Chain to answer -template = """Based on the table schema below, question, sql query, and sql response, write a natural language response: -{schema} - -Question: {question} -SQL Query: {query} -SQL Response: {response}""" # noqa: E501 -prompt_response = ChatPromptTemplate.from_messages( - [ - ( - "system", - "Given an input question and SQL response, convert it to a natural " - "language answer. No pre-amble.", - ), - ("human", template), - ] -) - - -# Supply the input types to the prompt -class InputType(BaseModel): - question: str - - -chain = ( - RunnablePassthrough.assign(query=sql_response_memory).with_types( - input_type=InputType - ) - | RunnablePassthrough.assign( - schema=get_schema, - response=lambda x: db.run(x["query"]), - ) - | prompt_response - | llm -) diff --git a/templates/sql-ollama/sql_ollama/nba_roster.db b/templates/sql-ollama/sql_ollama/nba_roster.db deleted file mode 100644 index 9ea4e20c367..00000000000 Binary files a/templates/sql-ollama/sql_ollama/nba_roster.db and /dev/null differ diff --git a/templates/sql-ollama/tests/__init__.py b/templates/sql-ollama/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/sql-pgvector/.gitignore b/templates/sql-pgvector/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/sql-pgvector/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/sql-pgvector/LICENSE b/templates/sql-pgvector/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/sql-pgvector/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
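As a quick sanity check, here is a minimal sketch of driving the `sql-ollama` chain above directly from Python rather than through LangServe. It assumes Ollama is running locally with the `zephyr` model pulled and that the code is executed from a directory where the bundled `nba_roster.db` path resolves; the exact wording of the answer will vary from run to run.

```python
# Hypothetical local smoke test for the sql-ollama chain defined above.
from sql_ollama.chain import chain

result = chain.invoke({"question": "What team is Klay Thompson on?"})
# ChatOllama returns an AIMessage; the text of the answer lives in .content.
print(result.content)  # e.g. "Klay Thompson plays for the Golden State Warriors."
```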
diff --git a/templates/sql-pgvector/README.md b/templates/sql-pgvector/README.md deleted file mode 100644 index 584367c1dbb..00000000000 --- a/templates/sql-pgvector/README.md +++ /dev/null @@ -1,105 +0,0 @@ -# SQL - Postgres + pgvector - -This template enables user to use `pgvector` for combining `PostgreSQL` with semantic search / RAG. - -It uses [PGVector](https://github.com/pgvector/pgvector) extension as shown in the [RAG empowered SQL cookbook](https://github.com/langchain-ai/langchain/blob/master/cookbook/retrieval_in_sql.ipynb) - -## Environment Setup - -If you are using `ChatOpenAI` as your LLM, make sure the `OPENAI_API_KEY` is set in your environment. You can change both the LLM and embeddings model inside `chain.py` - -And you can configure the following environment variables -for use by the template (defaults are in parentheses) - -- `POSTGRES_USER` (postgres) -- `POSTGRES_PASSWORD` (test) -- `POSTGRES_DB` (vectordb) -- `POSTGRES_HOST` (localhost) -- `POSTGRES_PORT` (5432) - -If you don't have a postgres instance, you can run one locally in docker: - -```bash -docker run \ - --name some-postgres \ - -e POSTGRES_PASSWORD=test \ - -e POSTGRES_USER=postgres \ - -e POSTGRES_DB=vectordb \ - -p 5432:5432 \ - postgres:16 -``` - -And to start again later, use the `--name` defined above: -```bash -docker start some-postgres -``` - -### PostgreSQL Database setup - -Apart from having `pgvector` extension enabled, you will need to do some setup before being able to run semantic search within your SQL queries. - -In order to run RAG over your PostgreSQL database you will need to generate the embeddings for the specific columns you want. - -This process is covered in the [RAG empowered SQL cookbook](https://github.com/langchain-ai/langchain/blob/master/cookbook/retrieval_in_sql.ipynb), but the overall approach consist of: -1. Querying for unique values in the column -2. Generating embeddings for those values -3. Store the embeddings in a separate column or in an auxiliary table. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package sql-pgvector -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add sql-pgvector -``` - -And add the following code to your `server.py` file: -```python -from sql_pgvector import chain as sql_pgvector_chain - -add_routes(app, sql_pgvector_chain, path="/sql-pgvector") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/sql-pgvector/playground](http://127.0.0.1:8000/sql-pgvector/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/sql-pgvector") -``` diff --git a/templates/sql-pgvector/pyproject.toml b/templates/sql-pgvector/pyproject.toml deleted file mode 100644 index 3fb878df6c3..00000000000 --- a/templates/sql-pgvector/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "sql-pgvector" -version = "0.0.1" -description = "Use pgvector for combining postgreSQL with semantic search / RAG" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "<2" -psycopg2 = "^2.9.9" -tiktoken = "^0.5.1" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "sql_pgvector" -export_attr = "chain" - -[tool.templates-hub] -use-case = "sql" -author = "LangChain" -integrations = ["OpenAI"] -tags = ["sql"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/sql-pgvector/sql_pgvector/__init__.py b/templates/sql-pgvector/sql_pgvector/__init__.py deleted file mode 100644 index de9a683ba0d..00000000000 --- a/templates/sql-pgvector/sql_pgvector/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from sql_pgvector.chain import chain - -__all__ = ["chain"] diff --git a/templates/sql-pgvector/sql_pgvector/chain.py b/templates/sql-pgvector/sql_pgvector/chain.py deleted file mode 100644 index cbc49716ad3..00000000000 --- a/templates/sql-pgvector/sql_pgvector/chain.py +++ /dev/null @@ -1,118 +0,0 @@ -import os -import re - -from langchain.sql_database import SQLDatabase -from langchain_community.chat_models import ChatOpenAI -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnableLambda, RunnablePassthrough - -from sql_pgvector.prompt_templates import final_template, postgresql_template - -""" -IMPORTANT: For using this template, you will need to -follow the setup steps in the readme file -""" - -if os.environ.get("OPENAI_API_KEY", None) is None: - raise Exception("Missing `OPENAI_API_KEY` environment variable") - -postgres_user = os.environ.get("POSTGRES_USER", "postgres") -postgres_password = os.environ.get("POSTGRES_PASSWORD", "test") -postgres_db = os.environ.get("POSTGRES_DB", "vectordb") -postgres_host = os.environ.get("POSTGRES_HOST", "localhost") -postgres_port = os.environ.get("POSTGRES_PORT", "5432") - -# Connect to DB -# Replace with your own -CONNECTION_STRING = ( - f"postgresql+psycopg2://{postgres_user}:{postgres_password}" - f"@{postgres_host}:{postgres_port}/{postgres_db}" -) -db = 
SQLDatabase.from_uri(CONNECTION_STRING) - -# Choose LLM and embeddings model -llm = ChatOpenAI(temperature=0) -embeddings_model = OpenAIEmbeddings() - - -# # Ingest code - you will need to run this the first time -# # Insert your query e.g. "SELECT Name FROM Track" -# column_to_embed = db.run('replace-with-your-own-select-query') -# column_values = [s[0] for s in eval(column_to_embed)] -# embeddings = embeddings_model.embed_documents(column_values) - -# for i in range(len(embeddings)): -# value = column_values[i].replace("'", "''") -# embedding = embeddings[i] - -# # Replace with your own SQL command for your column and table. -# sql_command = ( -# f'UPDATE "Track" SET "embeddings" = ARRAY{embedding} WHERE "Name" =' -# + f"'{value}'" -# ) -# db.run(sql_command) - - -# ----------------- -# Define functions -# ----------------- -def get_schema(_): - return db.get_table_info() - - -def run_query(query): - return db.run(query) - - -def replace_brackets(match): - words_inside_brackets = match.group(1).split(", ") - embedded_words = [ - str(embeddings_model.embed_query(word)) for word in words_inside_brackets - ] - return "', '".join(embedded_words) - - -def get_query(query): - sql_query = re.sub(r"\[([\w\s,]+)\]", replace_brackets, query) - return sql_query - - -# ----------------------- -# Now we create the chain -# ----------------------- - -query_generation_prompt = ChatPromptTemplate.from_messages( - [("system", postgresql_template), ("human", "{question}")] -) - -sql_query_chain = ( - RunnablePassthrough.assign(schema=get_schema) - | query_generation_prompt - | llm.bind(stop=["\nSQLResult:"]) - | StrOutputParser() -) - - -final_prompt = ChatPromptTemplate.from_messages( - [("system", final_template), ("human", "{question}")] -) - -full_chain = ( - RunnablePassthrough.assign(query=sql_query_chain) - | RunnablePassthrough.assign( - schema=get_schema, - response=RunnableLambda(lambda x: db.run(get_query(x["query"]))), - ) - | final_prompt - | llm -) - - -class InputType(BaseModel): - question: str - - -chain = full_chain.with_types(input_type=InputType) diff --git a/templates/sql-pgvector/sql_pgvector/prompt_templates.py b/templates/sql-pgvector/sql_pgvector/prompt_templates.py deleted file mode 100644 index a9718c5d7b9..00000000000 --- a/templates/sql-pgvector/sql_pgvector/prompt_templates.py +++ /dev/null @@ -1,50 +0,0 @@ -postgresql_template = ( - "You are a Postgres expert. Given an input question, first create a " - "syntactically correct Postgres query to run, then look at the results " - "of the query and return the answer to the input question.\n" - "Unless the user specifies in the question a specific number of " - "examples to obtain, query for at most 5 results using the LIMIT clause " - "as per Postgres. You can order the results to return the most " - "informative data in the database.\n" - "Never query for all columns from a table. You must query only the " - "columns that are needed to answer the question. Wrap each column name " - 'in double quotes (") to denote them as delimited identifiers.\n' - "Pay attention to use only the column names you can see in the tables " - "below. Be careful to not query for columns that do not exist. 
Also, " - "pay attention to which column is in which table.\n" - "Pay attention to use date('now') function to get the current date, " - 'if the question involves "today".\n\n' - "You can use an extra extension which allows you to run semantic " - "similarity using <-> operator on tables containing columns named " - '"embeddings".\n' - "<-> operator can ONLY be used on embeddings vector columns.\n" - "The embeddings value for a given row typically represents the semantic " - "meaning of that row.\n" - "The vector represents an embedding representation of the question, " - "given below. \n" - "Do NOT fill in the vector values directly, but rather specify a " - "`[search_word]` placeholder, which should contain the word that would " - "be embedded for filtering.\n" - "For example, if the user asks for songs about 'the feeling of " - "loneliness' the query could be:\n" - '\'SELECT "[whatever_table_name]"."SongName" FROM ' - '"[whatever_table_name]" ORDER BY "embeddings" <-> \'[loneliness]\' ' - "LIMIT 5'\n\n" - "Use the following format:\n\n" - "Question: \n" - "SQLQuery: \n" - "SQLResult: \n" - "Answer: \n\n" - "Only use the following tables:\n\n" - "{schema}\n" -) - - -final_template = ( - "Based on the table schema below, question, sql query, and sql response, " - "write a natural language response:\n" - "{schema}\n\n" - "Question: {question}\n" - "SQL Query: {query}\n" - "SQL Response: {response}" -) diff --git a/templates/sql-pgvector/tests/__init__.py b/templates/sql-pgvector/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/sql-research-assistant/.gitignore b/templates/sql-research-assistant/.gitignore deleted file mode 100644 index bee8a64b79a..00000000000 --- a/templates/sql-research-assistant/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/templates/sql-research-assistant/LICENSE b/templates/sql-research-assistant/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/sql-research-assistant/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
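For clarity, here is a small self-contained sketch of the `[search_word]` placeholder substitution that the `sql-pgvector` chain above applies before executing a generated query. The embedding function, table, and column names are stand-ins for illustration only; the real chain uses `embeddings_model.embed_query` inside `replace_brackets`/`get_query`.

```python
import re


def fake_embed(word):
    # Stand-in for embeddings_model.embed_query(word); returns a tiny vector.
    return [0.1, 0.2, 0.3]


def replace_brackets(match):
    words_inside_brackets = match.group(1).split(", ")
    return "', '".join(str(fake_embed(word)) for word in words_inside_brackets)


generated_sql = (
    'SELECT "Track"."Name" FROM "Track" '
    "ORDER BY \"embeddings\" <-> '[loneliness]' LIMIT 5"
)
# The bracketed word is swapped for a vector literal before the query runs.
executable_sql = re.sub(r"\[([\w\s,]+)\]", replace_brackets, generated_sql)
print(executable_sql)
# ... ORDER BY "embeddings" <-> '[0.1, 0.2, 0.3]' LIMIT 5
```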
diff --git a/templates/sql-research-assistant/README.md b/templates/sql-research-assistant/README.md deleted file mode 100644 index 359f04e4ac5..00000000000 --- a/templates/sql-research-assistant/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# SQL - Research assistant - -This package does research over a SQL database - -## Usage - -This package relies on multiple models, which have the following dependencies: - -- OpenAI: set the `OPENAI_API_KEY` environment variables -- Ollama: [install and run Ollama](https://python.langchain.com/docs/integrations/chat/ollama) -- llama2 (on Ollama): `ollama pull llama2` (otherwise you will get 404 errors from Ollama) - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package sql-research-assistant -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add sql-research-assistant -``` - -And add the following code to your `server.py` file: -```python -from sql_research_assistant import chain as sql_research_assistant_chain - -add_routes(app, sql_research_assistant_chain, path="/sql-research-assistant") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/sql-research-assistant/playground](http://127.0.0.1:8000/sql-research-assistant/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/sql-research-assistant") -``` \ No newline at end of file diff --git a/templates/sql-research-assistant/pyproject.toml b/templates/sql-research-assistant/pyproject.toml deleted file mode 100644 index 76ceda06513..00000000000 --- a/templates/sql-research-assistant/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "sql-research-assistant" -version = "0.0.1" -description = "Uses GPT Researcher as a research agent over SQL" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -openai = "^0.28.1" -bs4 = "^0.0.1" -duckduckgo-search = "^4.1.0" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" -fastapi = ">=0.104.0,<1" -sse-starlette = "^1.6.5" - -[tool.langserve] -export_module = "sql_research_assistant" -export_attr = "chain" - -[tool.templates-hub] -use-case = "research" -author = "LangChain" -integrations = ["Ollama", "OpenAI"] -tags = ["data", "agents", "sql", "local-models"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/sql-research-assistant/sql_research_assistant/__init__.py b/templates/sql-research-assistant/sql_research_assistant/__init__.py deleted 
file mode 100644 index 2f4d63e1fd3..00000000000 --- a/templates/sql-research-assistant/sql_research_assistant/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from sql_research_assistant.chain import chain - -__all__ = ["chain"] diff --git a/templates/sql-research-assistant/sql_research_assistant/chain.py b/templates/sql-research-assistant/sql_research_assistant/chain.py deleted file mode 100644 index d04e14efd8b..00000000000 --- a/templates/sql-research-assistant/sql_research_assistant/chain.py +++ /dev/null @@ -1,22 +0,0 @@ -from langchain.pydantic_v1 import BaseModel -from langchain_core.runnables import RunnablePassthrough - -from sql_research_assistant.search.web import chain as search_chain -from sql_research_assistant.writer import chain as writer_chain - -chain_notypes = ( - RunnablePassthrough().assign(research_summary=search_chain) | writer_chain -) - - -class InputType(BaseModel): - question: str - - -chain = chain_notypes.with_types(input_type=InputType) - - -if __name__ == "__main__": - print( - chain.invoke({"question": "who is typically older: point guards or centers?"}) - ) diff --git a/templates/sql-research-assistant/sql_research_assistant/search/__init__.py b/templates/sql-research-assistant/sql_research_assistant/search/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/sql-research-assistant/sql_research_assistant/search/nba_roster.db b/templates/sql-research-assistant/sql_research_assistant/search/nba_roster.db deleted file mode 100644 index 9ea4e20c367..00000000000 Binary files a/templates/sql-research-assistant/sql_research_assistant/search/nba_roster.db and /dev/null differ diff --git a/templates/sql-research-assistant/sql_research_assistant/search/sql.py b/templates/sql-research-assistant/sql_research_assistant/search/sql.py deleted file mode 100644 index 28ebcfa4981..00000000000 --- a/templates/sql-research-assistant/sql_research_assistant/search/sql.py +++ /dev/null @@ -1,93 +0,0 @@ -from pathlib import Path - -from langchain.memory import ConversationBufferMemory -from langchain.pydantic_v1 import BaseModel -from langchain_community.chat_models import ChatOllama, ChatOpenAI -from langchain_community.utilities import SQLDatabase -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnablePassthrough - -# Add the LLM downloaded from Ollama -ollama_llm = "llama2" -llm = ChatOllama(model=ollama_llm) - - -db_path = Path(__file__).parent / "nba_roster.db" -rel = db_path.relative_to(Path.cwd()) -db_string = f"sqlite:///{rel}" -db = SQLDatabase.from_uri(db_string, sample_rows_in_table_info=2) - - -def get_schema(_): - return db.get_table_info() - - -def run_query(query): - return db.run(query) - - -# Prompt - -template = """Based on the table schema below, write a SQL query that would answer the user's question: -{schema} - -Question: {question} -SQL Query:""" # noqa: E501 -prompt = ChatPromptTemplate.from_messages( - [ - ("system", "Given an input question, convert it to a SQL query. 
No pre-amble."), - ("human", template), - ] -) - -memory = ConversationBufferMemory(return_messages=True) - -# Chain to query with memory - -sql_chain = ( - RunnablePassthrough.assign( - schema=get_schema, - ) - | prompt - | llm.bind(stop=["\nSQLResult:"]) - | StrOutputParser() - | (lambda x: x.split("\n\n")[0]) -) - - -# Chain to answer -template = """Based on the table schema below, question, sql query, and sql response, write a natural language response: -{schema} - -Question: {question} -SQL Query: {query} -SQL Response: {response}""" # noqa: E501 -prompt_response = ChatPromptTemplate.from_messages( - [ - ( - "system", - "Given an input question and SQL response, convert it to a natural " - "language answer. No pre-amble.", - ), - ("human", template), - ] -) - - -# Supply the input types to the prompt -class InputType(BaseModel): - question: str - - -sql_answer_chain = ( - RunnablePassthrough.assign(query=sql_chain).with_types(input_type=InputType) - | RunnablePassthrough.assign( - schema=get_schema, - response=lambda x: db.run(x["query"]), - ) - | RunnablePassthrough.assign( - answer=prompt_response | ChatOpenAI() | StrOutputParser() - ) - | (lambda x: f"Question: {x['question']}\n\nAnswer: {x['answer']}") -) diff --git a/templates/sql-research-assistant/sql_research_assistant/search/web.py b/templates/sql-research-assistant/sql_research_assistant/search/web.py deleted file mode 100644 index cb8b8aa41aa..00000000000 --- a/templates/sql-research-assistant/sql_research_assistant/search/web.py +++ /dev/null @@ -1,151 +0,0 @@ -import json -from typing import Any - -import requests -from bs4 import BeautifulSoup -from langchain_community.chat_models import ChatOpenAI -from langchain_community.utilities import DuckDuckGoSearchAPIWrapper -from langchain_core.messages import SystemMessage -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import ( - Runnable, - RunnableLambda, - RunnableParallel, - RunnablePassthrough, -) - -from sql_research_assistant.search.sql import sql_answer_chain - -RESULTS_PER_QUESTION = 3 - -ddg_search = DuckDuckGoSearchAPIWrapper() - - -def scrape_text(url: str): - # Send a GET request to the webpage - try: - response = requests.get(url) - - # Check if the request was successful - if response.status_code == 200: - # Parse the content of the request with BeautifulSoup - soup = BeautifulSoup(response.text, "html.parser") - - # Extract all text from the webpage - page_text = soup.get_text(separator=" ", strip=True) - - # Print the extracted text - return page_text - else: - return f"Failed to retrieve the webpage: Status code {response.status_code}" - except Exception as e: - print(e) - return f"Failed to retrieve the webpage: {e}" - - -def web_search(query: str, num_results: int): - results = ddg_search.results(query, num_results) - return [r["link"] for r in results] - - -SEARCH_PROMPT = ChatPromptTemplate.from_messages( - [ - ("system", "{agent_prompt}"), - ( - "user", - "Write 3 google search queries to search online that form an " - "objective opinion from the following: {question}\n" - "You must respond with a list of strings in the following format: " - '["query 1", "query 2", "query 3"].', - ), - ] -) - -AUTO_AGENT_INSTRUCTIONS = """ -This task involves researching a given topic, regardless of its complexity or the availability of a definitive answer. 
The research is conducted by a specific agent, defined by its type and role, with each agent requiring distinct instructions. -Agent -The agent is determined by the field of the topic and the specific name of the agent that could be utilized to research the topic provided. Agents are categorized by their area of expertise, and each agent type is associated with a corresponding emoji. - -examples: -task: "should I invest in apple stocks?" -response: -{ - "agent": "💰 Finance Agent", - "agent_role_prompt: "You are a seasoned finance analyst AI assistant. Your primary goal is to compose comprehensive, astute, impartial, and methodically arranged financial reports based on provided data and trends." -} -task: "could reselling sneakers become profitable?" -response: -{ - "agent": "📈 Business Analyst Agent", - "agent_role_prompt": "You are an experienced AI business analyst assistant. Your main objective is to produce comprehensive, insightful, impartial, and systematically structured business reports based on provided business data, market trends, and strategic analysis." -} -task: "what are the most interesting sites in Tel Aviv?" -response: -{ - "agent: "🌍 Travel Agent", - "agent_role_prompt": "You are a world-travelled AI tour guide assistant. Your main purpose is to draft engaging, insightful, unbiased, and well-structured travel reports on given locations, including history, attractions, and cultural insights." -} -""" # noqa: E501 -CHOOSE_AGENT_PROMPT = ChatPromptTemplate.from_messages( - [SystemMessage(content=AUTO_AGENT_INSTRUCTIONS), ("user", "task: {task}")] -) - -SUMMARY_TEMPLATE = """{text} - ------------ - -Using the above text, answer in short the following question: - -> {question} - ------------ -if the question cannot be answered using the text, imply summarize the text. 
Include all factual information, numbers, stats etc if available.""" # noqa: E501 -SUMMARY_PROMPT = ChatPromptTemplate.from_template(SUMMARY_TEMPLATE) - -scrape_and_summarize: Runnable[Any, Any] = ( - RunnableParallel( - { - "question": lambda x: x["question"], - "text": lambda x: scrape_text(x["url"])[:10000], - "url": lambda x: x["url"], - } - ) - | RunnableParallel( - { - "summary": SUMMARY_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(), - "url": lambda x: x["url"], - } - ) - | RunnableLambda(lambda x: f"Source Url: {x['url']}\nSummary: {x['summary']}") -) - - -def load_json(s): - try: - return json.loads(s) - except Exception: - return {} - - -search_query = SEARCH_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser() | load_json -choose_agent = ( - CHOOSE_AGENT_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser() | load_json -) - -get_search_queries = ( - RunnablePassthrough().assign( - agent_prompt=RunnableParallel({"task": lambda x: x}) - | choose_agent - | (lambda x: x.get("agent_role_prompt")) - ) - | search_query -) - - -chain = ( - get_search_queries - | (lambda x: [{"question": q} for q in x]) - | sql_answer_chain.map() - | (lambda x: "\n\n".join(x)) -) diff --git a/templates/sql-research-assistant/sql_research_assistant/writer.py b/templates/sql-research-assistant/sql_research_assistant/writer.py deleted file mode 100644 index 3ec60a2dd98..00000000000 --- a/templates/sql-research-assistant/sql_research_assistant/writer.py +++ /dev/null @@ -1,75 +0,0 @@ -from langchain_community.chat_models import ChatOpenAI -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import ConfigurableField - -WRITER_SYSTEM_PROMPT = "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text." # noqa: E501 - - -# Report prompts from https://github.com/assafelovic/gpt-researcher/blob/master/gpt_researcher/master/prompts.py -RESEARCH_REPORT_TEMPLATE = """Information: --------- -{research_summary} --------- - -Using the above information, answer the following question or topic: "{question}" in a detailed report -- \ -The report should focus on the answer to the question, should be well structured, informative, \ -in depth, with facts and numbers if available and a minimum of 1,200 words. - -You should strive to write the report as long as you can using all relevant and necessary information provided. -You must write the report with markdown syntax. -You MUST determine your own concrete and valid opinion based on the given information. Do NOT deter to general and meaningless conclusions. -Write all used source urls at the end of the report, and make sure to not add duplicated sources, but only one reference for each. -You must write the report in apa format. -Please do your best, this is very important to my career.""" # noqa: E501 - - -RESOURCE_REPORT_TEMPLATE = """Information: --------- -{research_summary} --------- - -Based on the above information, generate a bibliography recommendation report for the following question or topic: "{question}". \ -The report should provide a detailed analysis of each recommended resource, explaining how each source can contribute to finding answers to the research question. \ -Focus on the relevance, reliability, and significance of each source. \ -Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax. 
\ -Include relevant facts, figures, and numbers whenever available. \ -The report should have a minimum length of 1,200 words. - -Please do your best, this is very important to my career.""" # noqa: E501 - -OUTLINE_REPORT_TEMPLATE = """Information: --------- -{research_summary} --------- - -Using the above information, generate an outline for a research report in Markdown syntax for the following question or topic: "{question}". \ -The outline should provide a well-structured framework for the research report, including the main sections, subsections, and key points to be covered. \ -The research report should be detailed, informative, in-depth, and a minimum of 1,200 words. \ -Use appropriate Markdown syntax to format the outline and ensure readability. - -Please do your best, this is very important to my career.""" # noqa: E501 - -model = ChatOpenAI(temperature=0) -prompt = ChatPromptTemplate.from_messages( - [ - ("system", WRITER_SYSTEM_PROMPT), - ("user", RESEARCH_REPORT_TEMPLATE), - ] -).configurable_alternatives( - ConfigurableField("report_type"), - default_key="research_report", - resource_report=ChatPromptTemplate.from_messages( - [ - ("system", WRITER_SYSTEM_PROMPT), - ("user", RESOURCE_REPORT_TEMPLATE), - ] - ), - outline_report=ChatPromptTemplate.from_messages( - [ - ("system", WRITER_SYSTEM_PROMPT), - ("user", OUTLINE_REPORT_TEMPLATE), - ] - ), -) -chain = prompt | model | StrOutputParser() diff --git a/templates/sql-research-assistant/tests/__init__.py b/templates/sql-research-assistant/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/stepback-qa-prompting/README.md b/templates/stepback-qa-prompting/README.md deleted file mode 100644 index 30f00147965..00000000000 --- a/templates/stepback-qa-prompting/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# Step-Back Question-Answering - -This template replicates the "Step-Back" prompting technique that improves performance on complex questions by first asking a "step back" question. - -This technique can be combined with regular question-answering applications by doing retrieval on both the original and step-back question. - -Read more about this in the [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](https://arxiv.org/abs/2310.06117) paper and an excellent blog post by Cobus Greyling [here](https://cobusgreyling.medium.com/a-new-prompt-engineering-technique-has-been-introduced-called-step-back-prompting-b00e8954cacb) - -We will modify the prompts slightly to work better with chat models in this template. - -## Environment Setup - -Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. - -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package stepback-qa-prompting -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add stepback-qa-prompting -``` - -And add the following code to your `server.py` file: -```python -from stepback_qa_prompting.chain import chain as stepback_qa_prompting_chain - -add_routes(app, stepback_qa_prompting_chain, path="/stepback-qa-prompting") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). 
-If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/stepback-qa-prompting/playground](http://127.0.0.1:8000/stepback-qa-prompting/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/stepback-qa-prompting") -``` \ No newline at end of file diff --git a/templates/stepback-qa-prompting/main.py b/templates/stepback-qa-prompting/main.py deleted file mode 100644 index 05e0455b49b..00000000000 --- a/templates/stepback-qa-prompting/main.py +++ /dev/null @@ -1,4 +0,0 @@ -from stepback_qa_prompting.chain import chain - -if __name__ == "__main__": - chain.invoke({"question": "was chatgpt around while trump was president?"}) diff --git a/templates/stepback-qa-prompting/pyproject.toml b/templates/stepback-qa-prompting/pyproject.toml deleted file mode 100644 index cff3f6193cf..00000000000 --- a/templates/stepback-qa-prompting/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "stepback-qa-prompting" -version = "0.0.1" -description = "Use Step-Back prompting to improve retrieval" -authors = [] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -duckduckgo-search = "^3.9.3" -openai = "<2" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "stepback_qa_prompting.chain" -export_attr = "chain" - -[tool.templates-hub] -use-case = "rag" -author = "LangChain" -integrations = ["OpenAI"] -tags = ["paper"] - -[build-system] -requires = [ - "poetry-core", -] -build-backend = "poetry.core.masonry.api" diff --git a/templates/stepback-qa-prompting/stepback_qa_prompting/__init__.py b/templates/stepback-qa-prompting/stepback_qa_prompting/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/stepback-qa-prompting/stepback_qa_prompting/chain.py b/templates/stepback-qa-prompting/stepback_qa_prompting/chain.py deleted file mode 100644 index 66efcf16678..00000000000 --- a/templates/stepback-qa-prompting/stepback_qa_prompting/chain.py +++ /dev/null @@ -1,75 +0,0 @@ -from langchain_community.chat_models import ChatOpenAI -from langchain_community.utilities import DuckDuckGoSearchAPIWrapper -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate -from langchain_core.runnables import RunnableLambda - -search = DuckDuckGoSearchAPIWrapper(max_results=4) - - -def retriever(query): - return search.run(query) - - -# Few Shot Examples -examples = [ - { - "input": "Could the members of The Police perform lawful arrests?", - "output": "what can the members of The Police do?", - }, - { - "input": "Jan Sindel’s was born in what country?", - "output": "what is Jan Sindel’s personal history?", - }, -] -# We now transform these to example messages -example_prompt = ChatPromptTemplate.from_messages( - [ - ("human", "{input}"), - ("ai", "{output}"), - ] -) 
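# (Each example dict above is rendered by example_prompt into a human/ai
# message pair; FewShotChatMessagePromptTemplate below stitches those
# rendered pairs into the step-back prompt.)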
-few_shot_prompt = FewShotChatMessagePromptTemplate( - example_prompt=example_prompt, - examples=examples, -) - -prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are an expert at world knowledge. Your task is to step back " - "and paraphrase a question to a more generic step-back question, which " - "is easier to answer. Here are a few examples:", - ), - # Few shot examples - few_shot_prompt, - # New question - ("user", "{question}"), - ] -) - -question_gen = prompt | ChatOpenAI(temperature=0) | StrOutputParser() - -response_prompt_template = """You are an expert of world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant. - -{normal_context} -{step_back_context} - -Original Question: {question} -Answer:""" # noqa: E501 -response_prompt = ChatPromptTemplate.from_template(response_prompt_template) - -chain = ( - { - # Retrieve context using the normal question - "normal_context": RunnableLambda(lambda x: x["question"]) | retriever, - # Retrieve context using the step-back question - "step_back_context": question_gen | retriever, - # Pass on the question - "question": lambda x: x["question"], - } - | response_prompt - | ChatOpenAI(temperature=0) - | StrOutputParser() -) diff --git a/templates/stepback-qa-prompting/tests/__init__.py b/templates/stepback-qa-prompting/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/summarize-anthropic/LICENSE b/templates/summarize-anthropic/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/summarize-anthropic/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/templates/summarize-anthropic/README.md b/templates/summarize-anthropic/README.md deleted file mode 100644 index b987c89aed1..00000000000 --- a/templates/summarize-anthropic/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Summarize - Anthropic - -This template uses `Anthropic`'s `claude-3-sonnet-20240229` to summarize long documents. - -It leverages a large context window of 100k tokens, allowing for summarization of documents over 100 pages. - -You can see the summarization prompt in `chain.py`. - -## Environment Setup - -Set the `ANTHROPIC_API_KEY` environment variable to access the Anthropic models. 
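For local experimentation, one way to supply the key from Python is sketched below; the value is a placeholder, and in a real deployment you would normally export the variable in your shell or process environment instead.

```python
import os

# Placeholder for illustration only; substitute your real Anthropic API key.
os.environ.setdefault("ANTHROPIC_API_KEY", "<your-anthropic-api-key>")
```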
- -## Usage - -To use this package, you should first have the LangChain CLI installed: - -```shell -pip install -U langchain-cli -``` - -To create a new LangChain project and install this as the only package, you can do: - -```shell -langchain app new my-app --package summarize-anthropic -``` - -If you want to add this to an existing project, you can just run: - -```shell -langchain app add summarize-anthropic -``` - -And add the following code to your `server.py` file: -```python -from summarize_anthropic import chain as summarize_anthropic_chain - -add_routes(app, summarize_anthropic_chain, path="/summarize-anthropic") -``` - -(Optional) Let's now configure LangSmith. -LangSmith will help us trace, monitor and debug LangChain applications. -You can sign up for LangSmith [here](https://smith.langchain.com/). -If you don't have access, you can skip this section - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY= -export LANGCHAIN_PROJECT= # if not specified, defaults to "default" -``` - -If you are inside this directory, then you can spin up a LangServe instance directly by: - -```shell -langchain serve -``` - -This will start the FastAPI app with a server is running locally at -[http://localhost:8000](http://localhost:8000) - -We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) -We can access the playground at [http://127.0.0.1:8000/summarize-anthropic/playground](http://127.0.0.1:8000/summarize-anthropic/playground) - -We can access the template from code with: - -```python -from langserve.client import RemoteRunnable - -runnable = RemoteRunnable("http://localhost:8000/summarize-anthropic") -``` diff --git a/templates/summarize-anthropic/docs/LLaVA.pdf b/templates/summarize-anthropic/docs/LLaVA.pdf deleted file mode 100644 index 88da76cecd5..00000000000 Binary files a/templates/summarize-anthropic/docs/LLaVA.pdf and /dev/null differ diff --git a/templates/summarize-anthropic/pyproject.toml b/templates/summarize-anthropic/pyproject.toml deleted file mode 100644 index 6207d8710d9..00000000000 --- a/templates/summarize-anthropic/pyproject.toml +++ /dev/null @@ -1,29 +0,0 @@ -[tool.poetry] -name = "summarize-anthropic" -version = "0.1.0" -description = "This template uses Anthropic's `Claude2` to summarize long documents." -authors = ["Lance Martin "] -readme = "README.md" - -[tool.poetry.dependencies] -python = ">=3.8.1,<4.0" -langchain = "^0.1" -langchainhub = ">=0.1.13" -langchain-anthropic = "^0.1.4" - -[tool.poetry.group.dev.dependencies] -langchain-cli = ">=0.0.21" - -[tool.langserve] -export_module = "summarize_anthropic" -export_attr = "chain" - -[tool.templates-hub] -use-case = "summarization" -author = "LangChain" -integrations = ["Anthropic"] -tags = ["summarization"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/templates/summarize-anthropic/summarize_anthropic.ipynb b/templates/summarize-anthropic/summarize_anthropic.ipynb deleted file mode 100644 index 12615ed04bc..00000000000 --- a/templates/summarize-anthropic/summarize_anthropic.ipynb +++ /dev/null @@ -1,129 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "b4ea3722", - "metadata": {}, - "source": [ - "## Document Loading\n", - "\n", - "Load a blog post on agents." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f4162356-c370-43d7-b34a-4e6af7a1e4c9", - "metadata": {}, - "outputs": [], - "source": [ - "! 
pip install pdf2image" - ] - }, - { - "cell_type": "markdown", - "id": "6ff363da", - "metadata": {}, - "source": [ - "Load academic papers -" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "d79e191a", - "metadata": {}, - "outputs": [], - "source": [ - "import arxiv\n", - "from langchain_community.chat_models import ChatAnthropic\n", - "from langchain_community.document_loaders import ArxivLoader, UnstructuredPDFLoader\n", - "\n", - "# Load a paper to use\n", - "paper = next(arxiv.Search(query=\"Visual Instruction Tuning\").results())\n", - "paper.download_pdf(filename=\"downloaded-paper.pdf\")\n", - "loader = UnstructuredPDFLoader(\"downloaded-paper.pdf\")\n", - "doc = loader.load()[0]" - ] - }, - { - "cell_type": "markdown", - "id": "db964a34", - "metadata": {}, - "source": [ - "Or try loading blog posts -" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "b2f10100", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.document_loaders import WebBaseLoader\n", - "\n", - "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", - "text = loader.load()" - ] - }, - { - "cell_type": "markdown", - "id": "361fcf5c", - "metadata": {}, - "source": [ - "## Run template\n", - "\n", - "In `server.py`, set -\n", - "```\n", - "add_routes(app, chain_rag_conv, path=\"/summarize-anthropic\")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "3c673b53", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\" Here is my attempt to complete the requested tasks:\\n\\n\\nThe scientists made a robot friend that can see pictures and talk about them. First they let a big talking robot make up fun conversations about pictures. Then they taught their robot friend to have conversations too. Their robot friend got pretty good at seeing pictures and talking about them!\\n\\n\\n\\nVisual Instruction Tuning\\nRecipe adapted from Haotian Liu et al.\\n\\nIngredients:\\n- 1 large language model (we used GPT-4)\\n- 158,000 images with text descriptions \\n- 1 vision model (we used CLIP)\\n- 1 large multimodal model (we used LLaMA)\\n\\nInstructions:\\n1. Have the GPT-4 model make up conversations, long descriptions, and complex stories about the images, using only the text descriptions as hints. \\n2. Connect the vision model and language model together into one big multimodal model. This will be your robot friend!\\n3. Show the robot friend examples of the conversations GPT-4 made up.\\n4. Let the robot friend practice having conversations about images, using what it learned from GPT-4.\\n5. Test how well it can see and talk about new images!\\n\\n\\n\\nOh Muse, sing of scientists wise, \\nWho built a robot with ears and eyes, \\nTaught by a speaker most eloquent, \\nThey molded silicon assistant.\\n\\nThis visual machine could see and tell \\nOf images with knowledge it knew well. \\nWhen humans speak in words precise, \\nThe robot answers concise and wise.\\n\\nThough simpler than human true creation, \\nIt showed the path for innovation. \\nMore data, bigger models in time, \\nMay lead to an assistant sublime.\\n\\n\\n\\nOh great, another computer vision model. What is it this time, some overhyped multimodal beast trained on mountains of compute? Bah! Back in my day we built models with only a handful of pixels and a Commodore 64. 
And they worked just fine, not like these newfangled neural nets that gobble up GPUs like candy.\\n\\nLet's see, they got GPT-4 to hallucinate training data from captions? Well that hardly seems rigorous. Those mechaturkeys will fabricate anything if you let them. And they didn't even collect real human annotations? Training multimodal models is hard enough without using made-up data. \\n\\nI'm sure their LaVa MuDdLe model does decently on narrow tasks, but don't be surprised when it fails catastrophically in the real world. These things may seem clever, but they're as dumb as a bag of rocks under the hood. Mark my words, we should keep them far away from any mission critical systems, or we'll regret it!\\n\\nBah! Call me when we build real intelligence, not these statistical hackjobs.\\n\"" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langserve.client import RemoteRunnable\n", - "\n", - "summarization = RemoteRunnable(\"http://localhost:8000/summarize-anthropic\")\n", - "summarization.invoke({\"text\": doc.page_content})" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/templates/summarize-anthropic/summarize_anthropic/__init__.py b/templates/summarize-anthropic/summarize_anthropic/__init__.py deleted file mode 100644 index 84e4c58fed0..00000000000 --- a/templates/summarize-anthropic/summarize_anthropic/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from summarize_anthropic.chain import chain - -__all__ = ["chain"] diff --git a/templates/summarize-anthropic/summarize_anthropic/chain.py b/templates/summarize-anthropic/summarize_anthropic/chain.py deleted file mode 100644 index c1f493aa01c..00000000000 --- a/templates/summarize-anthropic/summarize_anthropic/chain.py +++ /dev/null @@ -1,8 +0,0 @@ -from langchain import hub -from langchain_anthropic import ChatAnthropic -from langchain_core.output_parsers import StrOutputParser - -# Create chain -prompt = hub.pull("hwchase17/anthropic-paper-qa") -model = ChatAnthropic(model="claude-3-sonnet-20240229", max_tokens=4096) -chain = prompt | model | StrOutputParser() diff --git a/templates/summarize-anthropic/tests/__init__.py b/templates/summarize-anthropic/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/templates/vertexai-chuck-norris/LICENSE b/templates/vertexai-chuck-norris/LICENSE deleted file mode 100644 index 426b6509034..00000000000 --- a/templates/vertexai-chuck-norris/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/templates/vertexai-chuck-norris/README.md b/templates/vertexai-chuck-norris/README.md
deleted file mode 100644
index c894058f948..00000000000
--- a/templates/vertexai-chuck-norris/README.md
+++ /dev/null
@@ -1,83 +0,0 @@
-# Vertex AI - Chuck Norris
-
-This template makes jokes about Chuck Norris using `Google Cloud Vertex AI PaLM2`.
-
-## Environment Setup
-
-First, make sure you have a Google Cloud project with
-an active billing account, and have the [gcloud CLI installed](https://cloud.google.com/sdk/docs/install).
-
-Configure [application default credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc):
-
-```shell
-gcloud auth application-default login
-```
-
-To set a default Google Cloud project to use, run this command and set [the project ID](https://support.google.com/googleapi/answer/7014113?hl=en) of the project you want to use:
-```shell
-gcloud config set project [PROJECT-ID]
-```
-
-Enable the [Vertex AI API](https://console.cloud.google.com/apis/library/aiplatform.googleapis.com) for the project:
-```shell
-gcloud services enable aiplatform.googleapis.com
-```
-
-## Usage
-
-To use this package, you should first have the LangChain CLI installed:
-
-```shell
-pip install -U langchain-cli
-```
-
-To create a new LangChain project and install this as the only package, you can do:
-
-```shell
-langchain app new my-app --package vertexai-chuck-norris
-```
-
-If you want to add this to an existing project, you can just run:
-
-```shell
-langchain app add vertexai-chuck-norris
-```
-
-And add the following code to your `server.py` file:
-```python
-from vertexai_chuck_norris.chain import chain as vertexai_chuck_norris_chain
-
-add_routes(app, vertexai_chuck_norris_chain, path="/vertexai-chuck-norris")
-```
-
-(Optional) Let's now configure LangSmith.
-LangSmith will help us trace, monitor and debug LangChain applications.
-You can sign up for LangSmith [here](https://smith.langchain.com/).
-If you don't have access, you can skip this section
-
-```shell
-export LANGCHAIN_TRACING_V2=true
-export LANGCHAIN_API_KEY=
-export LANGCHAIN_PROJECT= # if not specified, defaults to "default"
-```
-
-If you are inside this directory, then you can spin up a LangServe instance directly by:
-
-```shell
-langchain serve
-```
-
-This will start the FastAPI app with a server running locally at
-[http://localhost:8000](http://localhost:8000)
-
-We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
-We can access the playground at [http://127.0.0.1:8000/vertexai-chuck-norris/playground](http://127.0.0.1:8000/vertexai-chuck-norris/playground)
-
-We can access the template from code with:
-
-```python
-from langserve.client import RemoteRunnable
-
-runnable = RemoteRunnable("http://localhost:8000/vertexai-chuck-norris")
-```
diff --git a/templates/vertexai-chuck-norris/pyproject.toml b/templates/vertexai-chuck-norris/pyproject.toml
deleted file mode 100644
index a741e23ab4e..00000000000
--- a/templates/vertexai-chuck-norris/pyproject.toml
+++ /dev/null
@@ -1,30 +0,0 @@
-[tool.poetry]
-name = "vertexai-chuck-norris"
-version = "0.0.1"
-description = "This template makes jokes about Chuck Norris using Vertex AI PaLM2"
-authors = []
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = ">=3.8.1,<4.0"
-langchain = "^0.1"
-google-cloud-aiplatform = "^1.36.4"
-
-[tool.poetry.group.dev.dependencies]
-langchain-cli = ">=0.0.21"
-fastapi = ">=0.104.0,<1"
-sse-starlette = "^1.6.5"
-
-[tool.langserve]
-export_module = "vertexai_chuck_norris.chain"
-export_attr = "chain"
-
-[tool.templates-hub]
-use-case = "chatbot"
-author = "LangChain"
-integrations = ["Google"]
-tags = ["conversation"]
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
diff --git a/templates/vertexai-chuck-norris/tests/__init__.py b/templates/vertexai-chuck-norris/tests/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/templates/vertexai-chuck-norris/vertexai_chuck_norris/__init__.py b/templates/vertexai-chuck-norris/vertexai_chuck_norris/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/templates/vertexai-chuck-norris/vertexai_chuck_norris/chain.py b/templates/vertexai-chuck-norris/vertexai_chuck_norris/chain.py
deleted file mode 100644
index 13544c12a5e..00000000000
--- a/templates/vertexai-chuck-norris/vertexai_chuck_norris/chain.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from langchain_community.chat_models import ChatVertexAI
-from langchain_core.prompts import ChatPromptTemplate
-
-_prompt = ChatPromptTemplate.from_template(
-    "Tell me a joke about Chuck Norris and {text}"
-)
-_model = ChatVertexAI()
-
-# if you update this, you MUST also update ../pyproject.toml
-# with the new `tool.langserve.export_attr`
-chain = _prompt | _model
diff --git a/templates/xml-agent/LICENSE b/templates/xml-agent/LICENSE
deleted file mode 100644
index 426b6509034..00000000000
--- a/templates/xml-agent/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2023 LangChain, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/templates/xml-agent/README.md b/templates/xml-agent/README.md
deleted file mode 100644
index ccb9b9a456f..00000000000
--- a/templates/xml-agent/README.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# XML - agent
-
-This package creates an agent that uses `XML` syntax to communicate
-its decisions of what actions to take.
-It uses `Anthropic's Claude` models for writing XML syntax and
-optionally looks up things on the internet using `DuckDuckGo`.
-
-## Environment Setup
-
-The following environment variable needs to be set:
-
-- `ANTHROPIC_API_KEY`: Required for using Anthropic
-
-## Usage
-
-To use this package, you should first have the LangChain CLI installed:
-
-```shell
-pip install -U langchain-cli
-```
-
-To create a new LangChain project and install this as the only package, you can do:
-
-```shell
-langchain app new my-app --package xml-agent
-```
-
-If you want to add this to an existing project, you can just run:
-
-```shell
-langchain app add xml-agent
-```
-
-And add the following code to your `server.py` file:
-```python
-from xml_agent import agent_executor as xml_agent_chain
-
-add_routes(app, xml_agent_chain, path="/xml-agent")
-```
-
-(Optional) Let's now configure LangSmith.
-LangSmith will help us trace, monitor and debug LangChain applications.
-You can sign up for LangSmith [here](https://smith.langchain.com/).
-If you don't have access, you can skip this section
-
-```shell
-export LANGCHAIN_TRACING_V2=true
-export LANGCHAIN_API_KEY=
-export LANGCHAIN_PROJECT= # if not specified, defaults to "default"
-```
-
-If you are inside this directory, then you can spin up a LangServe instance directly by:
-
-```shell
-langchain serve
-```
-
-This will start the FastAPI app with a server running locally at
-[http://localhost:8000](http://localhost:8000)
-
-We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
-We can access the playground at [http://127.0.0.1:8000/xml-agent/playground](http://127.0.0.1:8000/xml-agent/playground)
-
-We can access the template from code with:
-
-```python
-from langserve.client import RemoteRunnable
-
-runnable = RemoteRunnable("http://localhost:8000/xml-agent")
-```
diff --git a/templates/xml-agent/main.py b/templates/xml-agent/main.py
deleted file mode 100644
index 02647eb8451..00000000000
--- a/templates/xml-agent/main.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from xml_agent.agent import agent_executor
-
-if __name__ == "__main__":
-    question = "who won the women's world cup in 2023?"
-    print(agent_executor.invoke({"question": question, "chat_history": []}))
diff --git a/templates/xml-agent/pyproject.toml b/templates/xml-agent/pyproject.toml
deleted file mode 100644
index 5cb6ab0ce4d..00000000000
--- a/templates/xml-agent/pyproject.toml
+++ /dev/null
@@ -1,30 +0,0 @@
-[tool.poetry]
-name = "xml-agent"
-version = "0.1.0"
-description = "Agent that uses XML syntax to communicate its decisions of what actions to take"
-authors = ["Lance Martin "]
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = ">=3.8.1,<4.0"
-langchain = "^0.1"
-langchainhub = ">=0.1.13"
-duckduckgo-search = "^3.8.3"
-langchain-anthropic = "^0.1.4"
-
-[tool.poetry.group.dev.dependencies]
-langchain-cli = ">=0.0.21"
-
-[tool.langserve]
-export_module = "xml_agent"
-export_attr = "agent_executor"
-
-[tool.templates-hub]
-use-case = "agent"
-author = "LangChain"
-integrations = ["Anthropic"]
-tags = ["agents"]
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
diff --git a/templates/xml-agent/tests/__init__.py b/templates/xml-agent/tests/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/templates/xml-agent/xml_agent/__init__.py b/templates/xml-agent/xml_agent/__init__.py
deleted file mode 100644
index 0ddde78c9db..00000000000
--- a/templates/xml-agent/xml_agent/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from xml_agent.agent import agent_executor
-
-__all__ = ["agent_executor"]
diff --git a/templates/xml-agent/xml_agent/agent.py b/templates/xml-agent/xml_agent/agent.py
deleted file mode 100644
index 39627569bfa..00000000000
--- a/templates/xml-agent/xml_agent/agent.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from typing import List, Tuple
-
-from langchain.agents import AgentExecutor
-from langchain.agents.format_scratchpad import format_xml
-from langchain.tools import DuckDuckGoSearchRun
-from langchain_anthropic import ChatAnthropic
-from langchain_core.messages import AIMessage, HumanMessage
-from langchain_core.pydantic_v1 import BaseModel, Field
-from langchain_core.tools.render import render_text_description
-
-from xml_agent.prompts import conversational_prompt, parse_output
-
-
-def _format_chat_history(chat_history: List[Tuple[str, str]]):
-    buffer = []
-    for human, ai in chat_history:
-        buffer.append(HumanMessage(content=human))
-        buffer.append(AIMessage(content=ai))
-    return buffer
-
-
-model = ChatAnthropic(model="claude-3-sonnet-20240229")
-
-tools = [DuckDuckGoSearchRun()]
-
-prompt = conversational_prompt.partial(
-    tools=render_text_description(tools),
-    tool_names=", ".join([t.name for t in tools]),
-)
-llm_with_stop = model.bind(stop=["</tool_input>"])
-
-agent = (
-    {
-        "question": lambda x: x["question"],
-        "agent_scratchpad": lambda x: format_xml(x["intermediate_steps"]),
-        "chat_history": lambda x: _format_chat_history(x["chat_history"]),
-    }
-    | prompt
-    | llm_with_stop
-    | parse_output
-)
-
-
-class AgentInput(BaseModel):
-    question: str
-    chat_history: List[Tuple[str, str]] = Field(..., extra={"widget": {"type": "chat"}})
-
-
-agent_executor = AgentExecutor(
-    agent=agent, tools=tools, verbose=True, handle_parsing_errors=True
-).with_types(input_type=AgentInput)
-
-agent_executor = agent_executor | (lambda x: x["output"])
diff --git a/templates/xml-agent/xml_agent/prompts.py b/templates/xml-agent/xml_agent/prompts.py
deleted file mode 100644
index d39e0908858..00000000000
--- a/templates/xml-agent/xml_agent/prompts.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from langchain_core.agents import AgentAction, AgentFinish
-from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
-
-template = """You are a helpful assistant. Help the user answer any questions.
-
-You have access to the following tools:
-
-{tools}
-
-In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
-For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
-
-<tool>search</tool><tool_input>weather in SF</tool_input>
-<observation>64 degrees</observation>
-
-When you are done, you can respond as normal to the user.
-
-Example 1:
-
-Human: Hi!
-
-Assistant: Hi! How are you?
-
-Human: What is the weather in SF?
-Assistant: <tool>search</tool><tool_input>weather in SF</tool_input>
-<observation>64 degrees</observation>
-It is 64 degrees in SF
-
-
-Begin!""" # noqa: E501
-
-conversational_prompt = ChatPromptTemplate.from_messages(
-    [
-        ("system", template),
-        MessagesPlaceholder(variable_name="chat_history"),
-        ("user", "{question}"),
-        ("ai", "{agent_scratchpad}"),
-    ]
-)
-
-
-def parse_output(message):
-    text = message.content
-    if "</tool>" in text:
-        tool, tool_input = text.split("</tool>")
-        _tool = tool.split("<tool>")[1]
-        _tool_input = tool_input.split("<tool_input>")[1]
-        if "</tool_input>" in _tool_input:
-            _tool_input = _tool_input.split("</tool_input>")[0]
-        return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
-    else:
-        return AgentFinish(return_values={"output": text}, log=text)
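As a quick illustration of how `parse_output` above routes a model reply (a sketch, not part of the template; it assumes the `<tool>`/`<tool_input>` tags shown in the prompt and that `parse_output` from `prompts.py` is in scope), a message containing tool tags is parsed into an `AgentAction`, while plain text becomes an `AgentFinish`:

```python
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import AIMessage

# A reply that requests a tool call is parsed into an AgentAction.
action = parse_output(
    AIMessage(content="<tool>search</tool><tool_input>weather in SF</tool_input>")
)
assert isinstance(action, AgentAction)
assert action.tool == "search"
assert action.tool_input == "weather in SF"

# A reply with no tool tags is treated as the final answer.
finish = parse_output(AIMessage(content="It is 64 degrees in SF"))
assert isinstance(finish, AgentFinish)
assert finish.return_values == {"output": "It is 64 degrees in SF"}
```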