Compare commits


1 Commit

Author: Chester Curme
SHA1: d55355f00a
Message: check for deprecated imports
Date: 2024-03-31 10:03:02 -04:00
1346 changed files with 72027 additions and 58937 deletions

View File

@@ -19,7 +19,6 @@ if __name__ == "__main__":
"test": set(),
"extended-test": set(),
}
docs_edited = False
if len(files) == 300:
# max diff length is 300 files - there are likely files missing
@@ -48,17 +47,6 @@ if __name__ == "__main__":
found = True
if found:
dirs_to_run["extended-test"].add(dir_)
elif file.startswith("libs/standard-tests"):
# TODO: update to include all packages that rely on standard-tests (all partner packages)
# note: won't run on external repo partners
dirs_to_run["lint"].add("libs/standard-tests")
dirs_to_run["test"].add("libs/partners/mistralai")
dirs_to_run["test"].add("libs/partners/openai")
dirs_to_run["test"].add("libs/partners/anthropic")
dirs_to_run["test"].add("libs/partners/ai21")
dirs_to_run["test"].add("libs/partners/fireworks")
dirs_to_run["test"].add("libs/partners/groq")
elif file.startswith("libs/cli"):
# todo: add cli makefile
pass
@@ -77,8 +65,6 @@ if __name__ == "__main__":
"an update for this new library!"
)
elif any(file.startswith(p) for p in ["docs/", "templates/", "cookbook/"]):
if file.startswith("docs/"):
docs_edited = True
dirs_to_run["lint"].add(".")
outputs = {
@@ -87,7 +73,6 @@ if __name__ == "__main__":
),
"dirs-to-test": list(dirs_to_run["test"] | dirs_to_run["extended-test"]),
"dirs-to-extended-test": list(dirs_to_run["extended-test"]),
"docs-edited": "true" if docs_edited else "",
}
for key, value in outputs.items():
json_output = json.dumps(value)
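
For orientation (an editor's sketch, not part of the commit): step outputs computed by this script are conventionally written as `key=value` lines to the file GitHub Actions exposes via `$GITHUB_OUTPUT`; the actual sink lies outside the visible hunk.

```python
# Hedged sketch of the output-writing convention, with illustrative values only.
import json
import os

outputs = {"dirs-to-test": ["libs/core"], "docs-edited": ""}
with open(os.environ["GITHUB_OUTPUT"], "a") as f:
    for key, value in outputs.items():
        json_output = json.dumps(value)  # matches the encoding in the hunk above
        f.write(f"{key}={json_output}\n")
```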

View File

@@ -13,16 +13,13 @@ MIN_VERSION_LIBS = [
def get_min_version(version: str) -> str:
# base regex for x.x.x with cases for rc/post/etc
# valid strings: https://peps.python.org/pep-0440/#public-version-identifiers
vstring = r"\d+(?:\.\d+){0,2}(?:(?:a|b|rc|\.post|\.dev)\d+)?"
# case ^x.x.x
_match = re.match(f"^\\^({vstring})$", version)
_match = re.match(r"^\^(\d+(?:\.\d+){0,2})$", version)
if _match:
return _match.group(1)
# case >=x.x.x,<y.y.y
_match = re.match(f"^>=({vstring}),<({vstring})$", version)
_match = re.match(r"^>=(\d+(?:\.\d+){0,2}),<(\d+(?:\.\d+){0,2})$", version)
if _match:
_min = _match.group(1)
_max = _match.group(2)
@@ -30,7 +27,7 @@ def get_min_version(version: str) -> str:
return _min
# case x.x.x
_match = re.match(f"^({vstring})$", version)
_match = re.match(r"^(\d+(?:\.\d+){0,2})$", version)
if _match:
return _match.group(1)
@@ -55,9 +52,6 @@ def get_min_version_from_toml(toml_path: str):
# Get the version string
version_string = dependencies[lib]
if isinstance(version_string, dict):
version_string = version_string["version"]
# Use parse_version to get the minimum supported version from version_string
min_version = get_min_version(version_string)
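
To make the regex change concrete, here is a hedged, self-contained sketch combining the three cases with the broader `vstring` pattern from the hunk; the helper name and the asserted values are illustrative, not taken from the script.

```python
import re

# Pattern from the hunk: x[.y[.z]] plus an optional PEP 440 a/b/rc/.post/.dev suffix.
VSTRING = r"\d+(?:\.\d+){0,2}(?:(?:a|b|rc|\.post|\.dev)\d+)?"

def min_version(spec: str) -> str:
    # case ^x.y.z -> x.y.z
    m = re.match(rf"^\^({VSTRING})$", spec)
    if m:
        return m.group(1)
    # case >=x.y.z,<a.b.c -> x.y.z
    m = re.match(rf"^>=({VSTRING}),<({VSTRING})$", spec)
    if m:
        return m.group(1)
    # case x.y.z (exact pin)
    m = re.match(rf"^({VSTRING})$", spec)
    if m:
        return m.group(1)
    raise ValueError(f"unrecognized version spec: {spec}")

assert min_version("^0.1.33rc1") == "0.1.33rc1"  # rc suffix now parses
assert min_version(">=1.22.4,<2") == "1.22.4"
```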

View File

@@ -58,7 +58,6 @@ jobs:
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
@@ -78,7 +77,6 @@ jobs:
MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
run: |
make integration_tests

View File

@@ -215,7 +215,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for airbyte
MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
run: make integration_tests
working-directory: ${{ inputs.working-directory }}

View File

@@ -1,59 +0,0 @@
name: release note experiments
run-name: Release note for ${{ inputs.working-directory }} by @${{ github.actor }}
on:
workflow_dispatch:
inputs:
working-directory:
required: true
type: string
default: 'libs/langchain'
env:
PYTHON_VERSION: "3.11"
POETRY_VERSION: "1.7.1"
jobs:
build:
runs-on: ubuntu-latest
outputs:
pkg-name: ${{ steps.check-version.outputs.pkg-name }}
version: ${{ steps.check-version.outputs.version }}
steps:
- uses: actions/checkout@v4
- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ env.PYTHON_VERSION }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: release
- name: Check Version
id: check-version
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
echo pkg-name="$(poetry version | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
echo version="$(poetry version --short)" >> $GITHUB_OUTPUT
release-notes:
needs:
- build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ env.PYTHON_VERSION }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: release
- name: Generate Release Notes
env:
TAG_NAME: ${{ needs.build.outputs.pkg-name }}-v${{ needs.build.outputs.version }}
RELEASE_NAME: ${{ needs.build.outputs.pkg-name }}==${{ needs.build.outputs.version }}
run: |
echo "TAG_NAME=${TAG_NAME}"
echo "RELEASE_NAME=${RELEASE_NAME}"

View File

@@ -36,7 +36,6 @@ jobs:
dirs-to-lint: ${{ steps.set-matrix.outputs.dirs-to-lint }}
dirs-to-test: ${{ steps.set-matrix.outputs.dirs-to-test }}
dirs-to-extended-test: ${{ steps.set-matrix.outputs.dirs-to-extended-test }}
docs-edited: ${{ steps.set-matrix.outputs.docs-edited }}
lint:
name: cd ${{ matrix.working-directory }}
needs: [ build ]
@@ -61,9 +60,9 @@ jobs:
working-directory: ${{ matrix.working-directory }}
secrets: inherit
test-doc-imports:
test_doc_imports:
needs: [ build ]
if: ${{ needs.build.outputs.dirs-to-test != '[]' || needs.build.outputs.docs-edited }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
uses: ./.github/workflows/_test_doc_imports.yml
secrets: inherit
@@ -141,7 +140,7 @@ jobs:
echo "$STATUS" | grep 'nothing to commit, working tree clean'
ci_success:
name: "CI Success"
needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests, test-doc-imports]
needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests]
if: |
always()
runs-on: ubuntu-latest

View File

@@ -10,21 +10,19 @@ env:
jobs:
build:
defaults:
run:
working-directory: libs/langchain
runs-on: ubuntu-latest
environment: Scheduled testing
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
working-directory:
- "libs/partners/openai"
- "libs/partners/anthropic"
# - "libs/partners/ai21" # standard-tests broken
- "libs/partners/fireworks"
# - "libs/partners/groq" # rate-limited
- "libs/partners/mistralai"
# - "libs/partners/together" # rate-limited
name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
name: Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v4
@@ -33,7 +31,7 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ matrix.working-directory }}
working-directory: libs/langchain
cache-key: scheduled
- name: 'Authenticate to Google Cloud'
@@ -42,15 +40,26 @@ jobs:
with:
credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_REGION }}
- name: Install dependencies
working-directory: ${{ matrix.working-directory }}
working-directory: libs/langchain
shell: bash
run: |
echo "Running scheduled tests, installing dependencies with poetry..."
poetry install --with=test_integration,test
- name: Run integration tests
working-directory: ${{ matrix.working-directory }}
- name: Install deps outside pyproject
if: ${{ startsWith(inputs.working-directory, 'libs/community/') }}
shell: bash
run: poetry run pip install "boto3<2" "google-cloud-aiplatform<2"
- name: Run tests
shell: bash
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
@@ -61,16 +70,11 @@ jobs:
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
run: |
make integration_test
make scheduled_tests
- name: Ensure the tests did not create any additional files
working-directory: ${{ matrix.working-directory }}
shell: bash
run: |
set -eu

View File

@@ -34,40 +34,34 @@ conda install langchain -c conda-forge
## 🤔 What is LangChain?
**LangChain** is a framework for developing applications powered by large language models (LLMs).
**LangChain** is a framework for developing applications powered by language models. It enables applications that:
- **Are context-aware**: connect a language model to sources of context (prompt instructions, few shot examples, content to ground its response in, etc.)
- **Reason**: rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.)
For these applications, LangChain simplifies the entire application lifecycle:
This framework consists of several parts.
- **LangChain Libraries**: The Python and JavaScript libraries. Contains interfaces and integrations for a myriad of components, a basic runtime for combining these components into chains and agents, and off-the-shelf implementations of chains and agents.
- **[LangChain Templates](templates)**: A collection of easily deployable reference architectures for a wide variety of tasks.
- **[LangServe](https://github.com/langchain-ai/langserve)**: A library for deploying LangChain chains as a REST API.
- **[LangSmith](https://smith.langchain.com)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
- **[LangGraph](https://python.langchain.com/docs/langgraph)**: LangGraph is a library for building stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain. It extends the LangChain Expression Language with the ability to coordinate multiple chains (or actors) across multiple steps of computation in a cyclic manner.
- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/docs/expression_language/) and [components](https://python.langchain.com/docs/modules/). Integrate with hundreds of [third-party providers](https://python.langchain.com/docs/integrations/platforms/).
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://python.langchain.com/docs/langsmith/) so that you can constantly optimize and deploy with confidence.
- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/docs/langserve).
### Open-source libraries
- **`langchain-core`**: Base abstractions and LangChain Expression Language.
- **`langchain-community`**: Third party integrations.
- Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
- **[LangGraph](https://python.langchain.com/docs/langgraph)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
### Productionization:
- **[LangSmith](https://python.langchain.com/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
### Deployment:
- **[LangServe](https://python.langchain.com/docs/langserve)**: A library for deploying LangChain chains as REST APIs.
The LangChain libraries themselves are made up of several different packages.
- **[`langchain-core`](libs/core)**: Base abstractions and LangChain Expression Language.
- **[`langchain-community`](libs/community)**: Third party integrations.
- **[`langchain`](libs/langchain)**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](docs/static/svg/langchain_stack.svg "LangChain Architecture Overview")
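
As a quick illustration of the package split described above (an editor's sketch assuming all three distributions are installed; not part of the README itself):

```python
# Each import resolves from a different distribution in the split.
from langchain_core.prompts import ChatPromptTemplate  # base abstractions + LCEL
from langchain_community.llms import Ollama            # third-party integrations
from langchain_openai import ChatOpenAI                # partner package built on langchain-core
```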
## 🧱 What can you build with LangChain?
**❓ Question answering with RAG**
**❓ Retrieval augmented generation**
- [Documentation](https://python.langchain.com/docs/use_cases/question_answering/)
- End-to-end Example: [Chat LangChain](https://chat.langchain.com) and [repo](https://github.com/langchain-ai/chat-langchain)
**🧱 Extracting structured output**
**💬 Analyzing structured data**
- [Documentation](https://python.langchain.com/docs/use_cases/extraction/)
- End-to-end Example: [SQL Llama2 Template](https://github.com/langchain-ai/langchain-extract/)
- [Documentation](https://python.langchain.com/docs/use_cases/qa_structured/sql)
- End-to-end Example: [SQL Llama2 Template](https://github.com/langchain-ai/langchain/tree/master/templates/sql-llama2)
**🤖 Chatbots**
@@ -78,51 +72,34 @@ And much more! Head to the [Use cases](https://python.langchain.com/docs/use_cas
## 🚀 How does LangChain help?
The main value props of the LangChain libraries are:
1. **Components**: composable building blocks, tools and integrations for working with language models. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not
1. **Components**: composable tools and integrations for working with language models. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not
2. **Off-the-shelf chains**: built-in assemblages of components for accomplishing higher-level tasks
Off-the-shelf chains make it easy to get started. Components make it easy to customize existing chains and build new ones.
## LangChain Expression Language (LCEL)
LCEL is the foundation of many of LangChain's components, and is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.
- **[Overview](https://python.langchain.com/docs/expression_language/)**: LCEL and its benefits
- **[Interface](https://python.langchain.com/docs/expression_language/interface)**: The standard interface for LCEL objects
- **[Primitives](https://python.langchain.com/docs/expression_language/primitives)**: More on the primitives LCEL includes
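
A minimal LCEL sketch of the "prompt + LLM" composition mentioned above (assumes `langchain-openai` is installed and `OPENAI_API_KEY` is set; the prompt text is illustrative):

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# The | operator composes runnables: prompt -> chat model -> string parser.
chain = (
    ChatPromptTemplate.from_template("Summarize in one sentence: {text}")
    | ChatOpenAI(temperature=0)
    | StrOutputParser()
)
print(chain.invoke({"text": "LCEL composes chains declaratively."}))
```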
## Components
Components fall into the following **modules**:
**📃 Model I/O:**
This includes [prompt management](https://python.langchain.com/docs/modules/model_io/prompts/), [prompt optimization](https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/), a generic interface for [chat models](https://python.langchain.com/docs/modules/model_io/chat/) and [LLMs](https://python.langchain.com/docs/modules/model_io/llms/), and common utilities for working with [model outputs](https://python.langchain.com/docs/modules/model_io/output_parsers/).
This includes prompt management, prompt optimization, a generic interface for all LLMs, and common utilities for working with LLMs.
**📚 Retrieval:**
Retrieval Augmented Generation involves [loading data](https://python.langchain.com/docs/modules/data_connection/document_loaders/) from a variety of sources, [preparing it](https://python.langchain.com/docs/modules/data_connection/document_loaders/), [then retrieving it](https://python.langchain.com/docs/modules/data_connection/retrievers/) for use in the generation step.
Data Augmented Generation involves specific types of chains that first interact with an external data source to fetch data for use in the generation step. Examples include summarization of long pieces of text and question/answering over specific data sources.
**🤖 Agents:**
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/docs/modules/agents/), a [selection of agents](https://python.langchain.com/docs/modules/agents/agent_types/) to choose from, and examples of end-to-end agents.
Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end-to-end agents.
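
To ground the agent loop described above, here is a hedged sketch using the `create_react_agent` helper that appears elsewhere in this diff (it assumes an OpenAI key; the stub tool is ours, purely for self-containment):

```python
from langchain import hub
from langchain.agents import AgentExecutor, Tool, create_react_agent
from langchain_openai import ChatOpenAI

# Stub tool so the sketch runs end to end; a real agent would wire in search etc.
tools = [Tool(name="Search", func=lambda q: "stub result", description="Look things up.")]

prompt = hub.pull("hwchase17/react")  # standard ReAct prompt from the hub
agent = create_react_agent(ChatOpenAI(temperature=0), tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
print(executor.invoke({"input": "What is ChatGPT?"}))
```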
## 📖 Documentation
Please see [here](https://python.langchain.com) for full documentation, which includes:
- [Getting started](https://python.langchain.com/docs/get_started/introduction): installation, setting up the environment, simple examples
- [Use case](https://python.langchain.com/docs/use_cases/) walkthroughs and best practice [guides](https://python.langchain.com/docs/guides/)
- Overviews of the [interfaces](https://python.langchain.com/docs/expression_language/), [components](https://python.langchain.com/docs/modules/), and [integrations](https://python.langchain.com/docs/integrations/providers)
You can also check out the full [API Reference docs](https://api.python.langchain.com).
## 🌐 Ecosystem
- [🦜🛠️ LangSmith](https://python.langchain.com/docs/langsmith/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
- [🦜🕸️ LangGraph](https://python.langchain.com/docs/langgraph): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploying LangChain runnables and chains as REST APIs.
- [LangChain Templates](https://python.langchain.com/docs/templates/): Example applications hosted with LangServe.
- Overview of the [interfaces](https://python.langchain.com/docs/expression_language/), [modules](https://python.langchain.com/docs/modules/), and [integrations](https://python.langchain.com/docs/integrations/providers)
- [Use case](https://python.langchain.com/docs/use_cases/qa_structured/sql) walkthroughs and best practice [guides](https://python.langchain.com/docs/guides/adapters/openai)
- [LangSmith](https://python.langchain.com/docs/langsmith/), [LangServe](https://python.langchain.com/docs/langserve), and [LangChain Template](https://python.langchain.com/docs/templates/) overviews
- [Reference](https://api.python.langchain.com): full API docs
## 💁 Contributing

View File

@@ -38,9 +38,9 @@
"\n",
"To run locally, we use Ollama.ai. \n",
"\n",
"See [here](/docs/integrations/chat/ollama) for details on installation and setup.\n",
"See [here](https://python.langchain.com/docs/integrations/chat/ollama) for details on installation and setup.\n",
"\n",
"Also, see [here](/docs/guides/development/local_llms) for our full guide on local LLMs.\n",
"Also, see [here](https://python.langchain.com/docs/guides/local_llms) for our full guide on local LLMs.\n",
" \n",
"To use an external API, which is not private, we can use Replicate."
]

View File

@@ -535,9 +535,9 @@
" print(f\"--Generated {len(all_clusters)} clusters--\")\n",
"\n",
" # Summarization\n",
" template = \"\"\"Here is a sub-set of LangChain Expression Language doc. \n",
" template = \"\"\"Here is a sub-set of LangChain Expression Langauge doc. \n",
" \n",
" LangChain Expression Language provides a way to compose chain in LangChain.\n",
" LangChain Expression Langauge provides a way to compose chain in LangChain.\n",
" \n",
" Give a detailed summary of the documentation provided.\n",
" \n",

View File

@@ -191,15 +191,15 @@
"source": [
"## Multi-vector retriever\n",
"\n",
"Use [multi-vector-retriever](/docs/modules/data_connection/retrievers/multi_vector#summary).\n",
"Use [multi-vector-retriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector#summary).\n",
"\n",
"Summaries are used to retrieve raw tables and / or raw chunks of text.\n",
"\n",
"### Text and Table summaries\n",
"\n",
"Here, we use Ollama to run LLaMA2 locally. \n",
"Here, we use ollama.ai to run LLaMA2 locally. \n",
"\n",
"See details on installation [here](/docs/guides/development/local_llms)."
"See details on installation [here](https://python.langchain.com/docs/guides/local_llms)."
]
},
{

File diff suppressed because one or more lines are too long

View File

@@ -59,7 +59,7 @@
},
"outputs": [],
"source": [
"llm = ChatOpenAI(model=\"gpt-4\", temperature=1.0)"
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=1.0)"
]
},
{

View File

@@ -933,7 +933,7 @@
"**Answer**: The LangChain class includes various types of retrievers such as:\n",
"\n",
"- ArxivRetriever\n",
"- AzureAISearchRetriever\n",
"- AzureCognitiveSearchRetriever\n",
"- BM25Retriever\n",
"- ChaindeskRetriever\n",
"- ChatGPTPluginRetriever\n",
@@ -993,7 +993,7 @@
{
"data": {
"text/plain": [
"{'question': 'LangChain possesses a variety of retrievers including:\\n\\n1. ArxivRetriever\\n2. AzureAISearchRetriever\\n3. BM25Retriever\\n4. ChaindeskRetriever\\n5. ChatGPTPluginRetriever\\n6. ContextualCompressionRetriever\\n7. DocArrayRetriever\\n8. ElasticSearchBM25Retriever\\n9. EnsembleRetriever\\n10. GoogleVertexAISearchRetriever\\n11. AmazonKendraRetriever\\n12. KNNRetriever\\n13. LlamaIndexGraphRetriever\\n14. LlamaIndexRetriever\\n15. MergerRetriever\\n16. MetalRetriever\\n17. MilvusRetriever\\n18. MultiQueryRetriever\\n19. ParentDocumentRetriever\\n20. PineconeHybridSearchRetriever\\n21. PubMedRetriever\\n22. RePhraseQueryRetriever\\n23. RemoteLangChainRetriever\\n24. SelfQueryRetriever\\n25. SVMRetriever\\n26. TFIDFRetriever\\n27. TimeWeightedVectorStoreRetriever\\n28. VespaRetriever\\n29. WeaviateHybridSearchRetriever\\n30. WebResearchRetriever\\n31. WikipediaRetriever\\n32. ZepRetriever\\n33. ZillizRetriever\\n\\nIt also includes self query translators like:\\n\\n1. ChromaTranslator\\n2. DeepLakeTranslator\\n3. MyScaleTranslator\\n4. PineconeTranslator\\n5. QdrantTranslator\\n6. WeaviateTranslator\\n\\nAnd remote retrievers like:\\n\\n1. RemoteLangChainRetriever'}"
"{'question': 'LangChain possesses a variety of retrievers including:\\n\\n1. ArxivRetriever\\n2. AzureCognitiveSearchRetriever\\n3. BM25Retriever\\n4. ChaindeskRetriever\\n5. ChatGPTPluginRetriever\\n6. ContextualCompressionRetriever\\n7. DocArrayRetriever\\n8. ElasticSearchBM25Retriever\\n9. EnsembleRetriever\\n10. GoogleVertexAISearchRetriever\\n11. AmazonKendraRetriever\\n12. KNNRetriever\\n13. LlamaIndexGraphRetriever\\n14. LlamaIndexRetriever\\n15. MergerRetriever\\n16. MetalRetriever\\n17. MilvusRetriever\\n18. MultiQueryRetriever\\n19. ParentDocumentRetriever\\n20. PineconeHybridSearchRetriever\\n21. PubMedRetriever\\n22. RePhraseQueryRetriever\\n23. RemoteLangChainRetriever\\n24. SelfQueryRetriever\\n25. SVMRetriever\\n26. TFIDFRetriever\\n27. TimeWeightedVectorStoreRetriever\\n28. VespaRetriever\\n29. WeaviateHybridSearchRetriever\\n30. WebResearchRetriever\\n31. WikipediaRetriever\\n32. ZepRetriever\\n33. ZillizRetriever\\n\\nIt also includes self query translators like:\\n\\n1. ChromaTranslator\\n2. DeepLakeTranslator\\n3. MyScaleTranslator\\n4. PineconeTranslator\\n5. QdrantTranslator\\n6. WeaviateTranslator\\n\\nAnd remote retrievers like:\\n\\n1. RemoteLangChainRetriever'}"
]
},
"execution_count": 31,
@@ -1117,7 +1117,7 @@
"The LangChain class includes various types of retrievers such as:\n",
"\n",
"- ArxivRetriever\n",
"- AzureAISearchRetriever\n",
"- AzureCognitiveSearchRetriever\n",
"- BM25Retriever\n",
"- ChaindeskRetriever\n",
"- ChatGPTPluginRetriever\n",

View File

@@ -84,7 +84,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
"chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, verbose=True)"
]
},

View File

@@ -229,7 +229,7 @@
" prompt = hub.pull(\"rlm/rag-prompt\")\n",
"\n",
" # LLM\n",
" llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0, streaming=True)\n",
" llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0, streaming=True)\n",
"\n",
" # Post-processing\n",
" def format_docs(docs):\n",

View File

@@ -236,7 +236,7 @@
" prompt = hub.pull(\"rlm/rag-prompt\")\n",
"\n",
" # LLM\n",
" llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
" llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
" # Post-processing\n",
" def format_docs(docs):\n",

View File

@@ -1,818 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "70b333e6",
"metadata": {},
"source": [
"[![View Article](https://img.shields.io/badge/View%20Article-blue)](https://www.mongodb.com/developer/products/atlas/advanced-rag-langchain-mongodb/)\n"
]
},
{
"cell_type": "markdown",
"id": "d84a72ea",
"metadata": {},
"source": [
"# Adding Semantic Caching and Memory to your RAG Application using MongoDB and LangChain\n",
"\n",
"In this notebook, we will see how to use the new MongoDBCache and MongoDBChatMessageHistory in your RAG application.\n"
]
},
{
"cell_type": "markdown",
"id": "65527202",
"metadata": {},
"source": [
"## Step 1: Install required libraries\n",
"\n",
"- **datasets**: Python library to get access to datasets available on Hugging Face Hub\n",
"\n",
"- **langchain**: Python toolkit for LangChain\n",
"\n",
"- **langchain-mongodb**: Python package to use MongoDB as a vector store, semantic cache, chat history store etc. in LangChain\n",
"\n",
"- **langchain-openai**: Python package to use OpenAI models with LangChain\n",
"\n",
"- **pymongo**: Python toolkit for MongoDB\n",
"\n",
"- **pandas**: Python library for data analysis, exploration, and manipulation"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cbc22fa4",
"metadata": {},
"outputs": [],
"source": [
"! pip install -qU datasets langchain langchain-mongodb langchain-openai pymongo pandas"
]
},
{
"cell_type": "markdown",
"id": "39c41e87",
"metadata": {},
"source": [
"## Step 2: Setup pre-requisites\n",
"\n",
"* Set the MongoDB connection string. Follow the steps [here](https://www.mongodb.com/docs/manual/reference/connection-string/) to get the connection string from the Atlas UI.\n",
"\n",
"* Set the OpenAI API key. Steps to obtain an API key as [here](https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "b56412ae",
"metadata": {},
"outputs": [],
"source": [
"import getpass"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "16a20d7a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Enter your MongoDB connection string:········\n"
]
}
],
"source": [
"MONGODB_URI = getpass.getpass(\"Enter your MongoDB connection string:\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "978682d4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Enter your OpenAI API key:········\n"
]
}
],
"source": [
"OPENAI_API_KEY = getpass.getpass(\"Enter your OpenAI API key:\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "606081c5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"········\n"
]
}
],
"source": [
"# Optional-- If you want to enable Langsmith -- good for debugging\n",
"import os\n",
"\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "f6b8302c",
"metadata": {},
"source": [
"## Step 3: Download the dataset\n",
"\n",
"We will be using MongoDB's [embedded_movies](https://huggingface.co/datasets/MongoDB/embedded_movies) dataset"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "1a3433a6",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from datasets import load_dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aee5311b",
"metadata": {},
"outputs": [],
"source": [
"# Ensure you have an HF_TOKEN in your development enviornment:\n",
"# access tokens can be created or copied from the Hugging Face platform (https://huggingface.co/docs/hub/en/security-tokens)\n",
"\n",
"# Load MongoDB's embedded_movies dataset from Hugging Face\n",
"# https://huggingface.co/datasets/MongoDB/airbnb_embeddings\n",
"\n",
"data = load_dataset(\"MongoDB/embedded_movies\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "1d630a26",
"metadata": {},
"outputs": [],
"source": [
"df = pd.DataFrame(data[\"train\"])"
]
},
{
"cell_type": "markdown",
"id": "a1f94f43",
"metadata": {},
"source": [
"## Step 4: Data analysis\n",
"\n",
"Make sure length of the dataset is what we expect, drop Nones etc."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "b276df71",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>fullplot</th>\n",
" <th>type</th>\n",
" <th>plot_embedding</th>\n",
" <th>num_mflix_comments</th>\n",
" <th>runtime</th>\n",
" <th>writers</th>\n",
" <th>imdb</th>\n",
" <th>countries</th>\n",
" <th>rated</th>\n",
" <th>plot</th>\n",
" <th>title</th>\n",
" <th>languages</th>\n",
" <th>metacritic</th>\n",
" <th>directors</th>\n",
" <th>awards</th>\n",
" <th>genres</th>\n",
" <th>poster</th>\n",
" <th>cast</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Young Pauline is left a lot of money when her ...</td>\n",
" <td>movie</td>\n",
" <td>[0.00072939653, -0.026834568, 0.013515796, -0....</td>\n",
" <td>0</td>\n",
" <td>199.0</td>\n",
" <td>[Charles W. Goddard (screenplay), Basil Dickey...</td>\n",
" <td>{'id': 4465, 'rating': 7.6, 'votes': 744}</td>\n",
" <td>[USA]</td>\n",
" <td>None</td>\n",
" <td>Young Pauline is left a lot of money when her ...</td>\n",
" <td>The Perils of Pauline</td>\n",
" <td>[English]</td>\n",
" <td>NaN</td>\n",
" <td>[Louis J. Gasnier, Donald MacKenzie]</td>\n",
" <td>{'nominations': 0, 'text': '1 win.', 'wins': 1}</td>\n",
" <td>[Action]</td>\n",
" <td>https://m.media-amazon.com/images/M/MV5BMzgxOD...</td>\n",
" <td>[Pearl White, Crane Wilbur, Paul Panzer, Edwar...</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" fullplot type \\\n",
"0 Young Pauline is left a lot of money when her ... movie \n",
"\n",
" plot_embedding num_mflix_comments \\\n",
"0 [0.00072939653, -0.026834568, 0.013515796, -0.... 0 \n",
"\n",
" runtime writers \\\n",
"0 199.0 [Charles W. Goddard (screenplay), Basil Dickey... \n",
"\n",
" imdb countries rated \\\n",
"0 {'id': 4465, 'rating': 7.6, 'votes': 744} [USA] None \n",
"\n",
" plot title \\\n",
"0 Young Pauline is left a lot of money when her ... The Perils of Pauline \n",
"\n",
" languages metacritic directors \\\n",
"0 [English] NaN [Louis J. Gasnier, Donald MacKenzie] \n",
"\n",
" awards genres \\\n",
"0 {'nominations': 0, 'text': '1 win.', 'wins': 1} [Action] \n",
"\n",
" poster \\\n",
"0 https://m.media-amazon.com/images/M/MV5BMzgxOD... \n",
"\n",
" cast \n",
"0 [Pearl White, Crane Wilbur, Paul Panzer, Edwar... "
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Previewing the contents of the data\n",
"df.head(1)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "22ab375d",
"metadata": {},
"outputs": [],
"source": [
"# Only keep records where the fullplot field is not null\n",
"df = df[df[\"fullplot\"].notna()]"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "fceed99a",
"metadata": {},
"outputs": [],
"source": [
"# Renaming the embedding field to \"embedding\" -- required by LangChain\n",
"df.rename(columns={\"plot_embedding\": \"embedding\"}, inplace=True)"
]
},
{
"cell_type": "markdown",
"id": "aedec13a",
"metadata": {},
"source": [
"## Step 5: Create a simple RAG chain using MongoDB as the vector store"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "11d292f3",
"metadata": {},
"outputs": [],
"source": [
"from langchain_mongodb import MongoDBAtlasVectorSearch\n",
"from pymongo import MongoClient\n",
"\n",
"# Initialize MongoDB python client\n",
"client = MongoClient(MONGODB_URI, appname=\"devrel.content.python\")\n",
"\n",
"DB_NAME = \"langchain_chatbot\"\n",
"COLLECTION_NAME = \"data\"\n",
"ATLAS_VECTOR_SEARCH_INDEX_NAME = \"vector_index\"\n",
"collection = client[DB_NAME][COLLECTION_NAME]"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "d8292d53",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"DeleteResult({'n': 1000, 'electionId': ObjectId('7fffffff00000000000000f6'), 'opTime': {'ts': Timestamp(1710523288, 1033), 't': 246}, 'ok': 1.0, '$clusterTime': {'clusterTime': Timestamp(1710523288, 1042), 'signature': {'hash': b\"i\\xa8\\xe9'\\x1ed\\xf2u\\xf3L\\xff\\xb1\\xf5\\xbfA\\x90\\xabJ\\x12\\x83\", 'keyId': 7299545392000008318}}, 'operationTime': Timestamp(1710523288, 1033)}, acknowledged=True)"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Delete any existing records in the collection\n",
"collection.delete_many({})"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "36c68914",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Data ingestion into MongoDB completed\n"
]
}
],
"source": [
"# Data Ingestion\n",
"records = df.to_dict(\"records\")\n",
"collection.insert_many(records)\n",
"\n",
"print(\"Data ingestion into MongoDB completed\")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "cbfca0b8",
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"# Using the text-embedding-ada-002 since that's what was used to create embeddings in the movies dataset\n",
"embeddings = OpenAIEmbeddings(\n",
" openai_api_key=OPENAI_API_KEY, model=\"text-embedding-ada-002\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "798e176c",
"metadata": {},
"outputs": [],
"source": [
"# Vector Store Creation\n",
"vector_store = MongoDBAtlasVectorSearch.from_connection_string(\n",
" connection_string=MONGODB_URI,\n",
" namespace=DB_NAME + \".\" + COLLECTION_NAME,\n",
" embedding=embeddings,\n",
" index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
" text_key=\"fullplot\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 49,
"id": "c71cd087",
"metadata": {},
"outputs": [],
"source": [
"# Using the MongoDB vector store as a retriever in a RAG chain\n",
"retriever = vector_store.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 5})"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "b6588cd3",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Generate context using the retriever, and pass the user question through\n",
"retrieve = {\n",
" \"context\": retriever | (lambda docs: \"\\n\\n\".join([d.page_content for d in docs])),\n",
" \"question\": RunnablePassthrough(),\n",
"}\n",
"template = \"\"\"Answer the question based only on the following context: \\\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\"\"\"\n",
"# Defining the chat prompt\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"# Defining the model to be used for chat completion\n",
"model = ChatOpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)\n",
"# Parse output as a string\n",
"parse_output = StrOutputParser()\n",
"\n",
"# Naive RAG chain\n",
"naive_rag_chain = retrieve | prompt | model | parse_output"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "aaae21f5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Once a Thief'"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
]
},
{
"cell_type": "markdown",
"id": "75f929ef",
"metadata": {},
"source": [
"## Step 6: Create a RAG chain with chat history"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "94e7bd4a",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import MessagesPlaceholder\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "5bb30860",
"metadata": {},
"outputs": [],
"source": [
"def get_session_history(session_id: str) -> MongoDBChatMessageHistory:\n",
" return MongoDBChatMessageHistory(\n",
" MONGODB_URI, session_id, database_name=DB_NAME, collection_name=\"history\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 50,
"id": "f51d0f35",
"metadata": {},
"outputs": [],
"source": [
"# Given a follow-up question and history, create a standalone question\n",
"standalone_system_prompt = \"\"\"\n",
"Given a chat history and a follow-up question, rephrase the follow-up question to be a standalone question. \\\n",
"Do NOT answer the question, just reformulate it if needed, otherwise return it as is. \\\n",
"Only return the final standalone question. \\\n",
"\"\"\"\n",
"standalone_question_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", standalone_system_prompt),\n",
" MessagesPlaceholder(variable_name=\"history\"),\n",
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"\n",
"question_chain = standalone_question_prompt | model | parse_output"
]
},
{
"cell_type": "code",
"execution_count": 51,
"id": "f3ef3354",
"metadata": {},
"outputs": [],
"source": [
"# Generate context by passing output of the question_chain i.e. the standalone question to the retriever\n",
"retriever_chain = RunnablePassthrough.assign(\n",
" context=question_chain\n",
" | retriever\n",
" | (lambda docs: \"\\n\\n\".join([d.page_content for d in docs]))\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "5afb7345",
"metadata": {},
"outputs": [],
"source": [
"# Create a prompt that includes the context, history and the follow-up question\n",
"rag_system_prompt = \"\"\"Answer the question based only on the following context: \\\n",
"{context}\n",
"\"\"\"\n",
"rag_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", rag_system_prompt),\n",
" MessagesPlaceholder(variable_name=\"history\"),\n",
" (\"human\", \"{question}\"),\n",
" ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 56,
"id": "f95f47d0",
"metadata": {},
"outputs": [],
"source": [
"# RAG chain\n",
"rag_chain = retriever_chain | rag_prompt | model | parse_output"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "9618d395",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'The best movie to watch when feeling down could be \"Last Action Hero.\" It\\'s a fun and action-packed film that blends reality and fantasy, offering an escape from the real world and providing an entertaining distraction.'"
]
},
"execution_count": 57,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# RAG chain with history\n",
"with_message_history = RunnableWithMessageHistory(\n",
" rag_chain,\n",
" get_session_history,\n",
" input_messages_key=\"question\",\n",
" history_messages_key=\"history\",\n",
")\n",
"with_message_history.invoke(\n",
" {\"question\": \"What is the best movie to watch when sad?\"},\n",
" {\"configurable\": {\"session_id\": \"1\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 58,
"id": "6e3080d1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I apologize for the confusion. Another movie that might lift your spirits when you\\'re feeling sad is \"Smilla\\'s Sense of Snow.\" It\\'s a mystery thriller that could engage your mind and distract you from your sadness with its intriguing plot and suspenseful storyline.'"
]
},
"execution_count": 58,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"with_message_history.invoke(\n",
" {\n",
" \"question\": \"Hmmm..I don't want to watch that one. Can you suggest something else?\"\n",
" },\n",
" {\"configurable\": {\"session_id\": \"1\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 59,
"id": "daea2953",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'For a lighter movie option, you might enjoy \"Cousins.\" It\\'s a comedy film set in Barcelona with action and humor, offering a fun and entertaining escape from reality. The storyline is engaging and filled with comedic moments that could help lift your spirits.'"
]
},
"execution_count": 59,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"with_message_history.invoke(\n",
" {\"question\": \"How about something more light?\"},\n",
" {\"configurable\": {\"session_id\": \"1\"}},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0de23a88",
"metadata": {},
"source": [
"## Step 7: Get faster responses using Semantic Cache\n",
"\n",
"**NOTE:** Semantic cache only caches the input to the LLM. When using it in retrieval chains, remember that documents retrieved can change between runs resulting in cache misses for semantically similar queries."
]
},
{
"cell_type": "code",
"execution_count": 61,
"id": "5d6b6741",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.globals import set_llm_cache\n",
"from langchain_mongodb.cache import MongoDBAtlasSemanticCache\n",
"\n",
"set_llm_cache(\n",
" MongoDBAtlasSemanticCache(\n",
" connection_string=MONGODB_URI,\n",
" embedding=embeddings,\n",
" collection_name=\"semantic_cache\",\n",
" database_name=DB_NAME,\n",
" index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
" wait_until_ready=True, # Optional, waits until the cache is ready to be used\n",
" )\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 62,
"id": "9825bc7b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 87.8 ms, sys: 670 µs, total: 88.5 ms\n",
"Wall time: 1.24 s\n"
]
},
{
"data": {
"text/plain": [
"'Once a Thief'"
]
},
"execution_count": 62,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
]
},
{
"cell_type": "code",
"execution_count": 63,
"id": "a5e518cf",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 43.5 ms, sys: 4.16 ms, total: 47.7 ms\n",
"Wall time: 255 ms\n"
]
},
{
"data": {
"text/plain": [
"'Once a Thief'"
]
},
"execution_count": 63,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
]
},
{
"cell_type": "code",
"execution_count": 64,
"id": "3d3d3ad3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 115 ms, sys: 171 µs, total: 115 ms\n",
"Wall time: 1.38 s\n"
]
},
{
"data": {
"text/plain": [
"'I would recommend watching \"Last Action Hero\" when sad, as it is a fun and action-packed film that can help lift your spirits.'"
]
},
"execution_count": 64,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"naive_rag_chain.invoke(\"Which movie do I watch when sad?\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "conda_pytorch_p310",
"language": "python",
"name": "conda_pytorch_p310"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -84,7 +84,7 @@
"from langchain.retrievers import KayAiRetriever\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
"retriever = KayAiRetriever.create(\n",
" dataset_id=\"company\", data_types=[\"PressRelease\"], num_contexts=6\n",
")\n",

View File

@@ -274,7 +274,7 @@
"db = SQLDatabase.from_uri(\n",
" CONNECTION_STRING\n",
") # We reconnect to db so the new columns are loaded as well.\n",
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
"\n",
"sql_query_chain = (\n",
" RunnablePassthrough.assign(schema=get_schema)\n",

View File

@@ -22,8 +22,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent, create_react_agent\n",
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
"from langchain.prompts import PromptTemplate\n",
@@ -85,7 +84,19 @@
"metadata": {},
"outputs": [],
"source": [
"prompt = hub.pull(\"hwchase17/react\")"
"prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
"suffix = \"\"\"Begin!\"\n",
"\n",
"{chat_history}\n",
"Question: {input}\n",
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")"
]
},
{
@@ -103,14 +114,16 @@
"metadata": {},
"outputs": [],
"source": [
"model = OpenAI()\n",
"agent = create_react_agent(model, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory)"
"llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 6,
"id": "ca4bc1fb",
"metadata": {},
"outputs": [
@@ -120,15 +133,15 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I should research ChatGPT to answer this question.\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I should research ChatGPT to answer this question.\n",
"Action: Search\n",
"Action Input: \"ChatGPT\"\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001B[0m\n",
"Action Input: \"ChatGPT\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -140,40 +153,10 @@
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
"\u001B[0;31mKeyboardInterrupt\u001B[0m Traceback (most recent call last)",
"Cell \u001B[0;32mIn[36], line 1\u001B[0m\n\u001B[0;32m----> 1\u001B[0m \u001B[43magent_executor\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43minvoke\u001B[49m\u001B[43m(\u001B[49m\u001B[43m{\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43minput\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m:\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mWhat is ChatGPT?\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m}\u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/chains/base.py:163\u001B[0m, in \u001B[0;36mChain.invoke\u001B[0;34m(self, input, config, **kwargs)\u001B[0m\n\u001B[1;32m 161\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mBaseException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 162\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_chain_error(e)\n\u001B[0;32m--> 163\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m e\n\u001B[1;32m 164\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_chain_end(outputs)\n\u001B[1;32m 166\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m include_run_info:\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/chains/base.py:153\u001B[0m, in \u001B[0;36mChain.invoke\u001B[0;34m(self, input, config, **kwargs)\u001B[0m\n\u001B[1;32m 150\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 151\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_validate_inputs(inputs)\n\u001B[1;32m 152\u001B[0m outputs \u001B[38;5;241m=\u001B[39m (\n\u001B[0;32m--> 153\u001B[0m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call\u001B[49m\u001B[43m(\u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 154\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m new_arg_supported\n\u001B[1;32m 155\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_call(inputs)\n\u001B[1;32m 156\u001B[0m )\n\u001B[1;32m 158\u001B[0m final_outputs: Dict[\u001B[38;5;28mstr\u001B[39m, Any] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mprep_outputs(\n\u001B[1;32m 159\u001B[0m inputs, outputs, return_only_outputs\n\u001B[1;32m 160\u001B[0m )\n\u001B[1;32m 161\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mBaseException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1432\u001B[0m, in \u001B[0;36mAgentExecutor._call\u001B[0;34m(self, inputs, run_manager)\u001B[0m\n\u001B[1;32m 1430\u001B[0m \u001B[38;5;66;03m# We now enter the agent loop (until it returns something).\u001B[39;00m\n\u001B[1;32m 1431\u001B[0m \u001B[38;5;28;01mwhile\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_should_continue(iterations, time_elapsed):\n\u001B[0;32m-> 1432\u001B[0m next_step_output \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_take_next_step\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1433\u001B[0m \u001B[43m \u001B[49m\u001B[43mname_to_tool_map\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1434\u001B[0m \u001B[43m \u001B[49m\u001B[43mcolor_mapping\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1435\u001B[0m \u001B[43m \u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1436\u001B[0m \u001B[43m \u001B[49m\u001B[43mintermediate_steps\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1437\u001B[0m \u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1438\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1439\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(next_step_output, AgentFinish):\n\u001B[1;32m 1440\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_return(\n\u001B[1;32m 1441\u001B[0m next_step_output, intermediate_steps, run_manager\u001B[38;5;241m=\u001B[39mrun_manager\n\u001B[1;32m 1442\u001B[0m )\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1138\u001B[0m, in \u001B[0;36mAgentExecutor._take_next_step\u001B[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001B[0m\n\u001B[1;32m 1129\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m_take_next_step\u001B[39m(\n\u001B[1;32m 1130\u001B[0m \u001B[38;5;28mself\u001B[39m,\n\u001B[1;32m 1131\u001B[0m name_to_tool_map: Dict[\u001B[38;5;28mstr\u001B[39m, BaseTool],\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 1135\u001B[0m run_manager: Optional[CallbackManagerForChainRun] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 1136\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Union[AgentFinish, List[Tuple[AgentAction, \u001B[38;5;28mstr\u001B[39m]]]:\n\u001B[1;32m 1137\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_consume_next_step(\n\u001B[0;32m-> 1138\u001B[0m [\n\u001B[1;32m 1139\u001B[0m a\n\u001B[1;32m 1140\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m a \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_iter_next_step(\n\u001B[1;32m 1141\u001B[0m name_to_tool_map,\n\u001B[1;32m 1142\u001B[0m color_mapping,\n\u001B[1;32m 1143\u001B[0m inputs,\n\u001B[1;32m 1144\u001B[0m intermediate_steps,\n\u001B[1;32m 1145\u001B[0m run_manager,\n\u001B[1;32m 1146\u001B[0m )\n\u001B[1;32m 1147\u001B[0m ]\n\u001B[1;32m 1148\u001B[0m )\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1138\u001B[0m, in \u001B[0;36m<listcomp>\u001B[0;34m(.0)\u001B[0m\n\u001B[1;32m 1129\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m_take_next_step\u001B[39m(\n\u001B[1;32m 1130\u001B[0m \u001B[38;5;28mself\u001B[39m,\n\u001B[1;32m 1131\u001B[0m name_to_tool_map: Dict[\u001B[38;5;28mstr\u001B[39m, BaseTool],\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 1135\u001B[0m run_manager: Optional[CallbackManagerForChainRun] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 1136\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Union[AgentFinish, List[Tuple[AgentAction, \u001B[38;5;28mstr\u001B[39m]]]:\n\u001B[1;32m 1137\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_consume_next_step(\n\u001B[0;32m-> 1138\u001B[0m [\n\u001B[1;32m 1139\u001B[0m a\n\u001B[1;32m 1140\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m a \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_iter_next_step(\n\u001B[1;32m 1141\u001B[0m name_to_tool_map,\n\u001B[1;32m 1142\u001B[0m color_mapping,\n\u001B[1;32m 1143\u001B[0m inputs,\n\u001B[1;32m 1144\u001B[0m intermediate_steps,\n\u001B[1;32m 1145\u001B[0m run_manager,\n\u001B[1;32m 1146\u001B[0m )\n\u001B[1;32m 1147\u001B[0m ]\n\u001B[1;32m 1148\u001B[0m )\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1223\u001B[0m, in \u001B[0;36mAgentExecutor._iter_next_step\u001B[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001B[0m\n\u001B[1;32m 1221\u001B[0m \u001B[38;5;28;01myield\u001B[39;00m agent_action\n\u001B[1;32m 1222\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m agent_action \u001B[38;5;129;01min\u001B[39;00m actions:\n\u001B[0;32m-> 1223\u001B[0m \u001B[38;5;28;01myield\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_perform_agent_action\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1224\u001B[0m \u001B[43m \u001B[49m\u001B[43mname_to_tool_map\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcolor_mapping\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43magent_action\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\n\u001B[1;32m 1225\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1245\u001B[0m, in \u001B[0;36mAgentExecutor._perform_agent_action\u001B[0;34m(self, name_to_tool_map, color_mapping, agent_action, run_manager)\u001B[0m\n\u001B[1;32m 1243\u001B[0m tool_run_kwargs[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mllm_prefix\u001B[39m\u001B[38;5;124m\"\u001B[39m] \u001B[38;5;241m=\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 1244\u001B[0m \u001B[38;5;66;03m# We then call the tool on the tool input to get an observation\u001B[39;00m\n\u001B[0;32m-> 1245\u001B[0m observation \u001B[38;5;241m=\u001B[39m \u001B[43mtool\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrun\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1246\u001B[0m \u001B[43m \u001B[49m\u001B[43magent_action\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mtool_input\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1247\u001B[0m \u001B[43m \u001B[49m\u001B[43mverbose\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mverbose\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1248\u001B[0m \u001B[43m \u001B[49m\u001B[43mcolor\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mcolor\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1249\u001B[0m \u001B[43m \u001B[49m\u001B[43mcallbacks\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mget_child\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mif\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01melse\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[1;32m 1250\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mtool_run_kwargs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1251\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1252\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m 1253\u001B[0m tool_run_kwargs \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39magent\u001B[38;5;241m.\u001B[39mtool_run_logging_kwargs()\n",
"File \u001B[0;32m~/code/langchain/libs/core/langchain_core/tools.py:422\u001B[0m, in \u001B[0;36mBaseTool.run\u001B[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001B[0m\n\u001B[1;32m 420\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m (\u001B[38;5;167;01mException\u001B[39;00m, \u001B[38;5;167;01mKeyboardInterrupt\u001B[39;00m) \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 421\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_tool_error(e)\n\u001B[0;32m--> 422\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m e\n\u001B[1;32m 423\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m 424\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_tool_end(observation, color\u001B[38;5;241m=\u001B[39mcolor, name\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mname, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)\n",
"File \u001B[0;32m~/code/langchain/libs/core/langchain_core/tools.py:381\u001B[0m, in \u001B[0;36mBaseTool.run\u001B[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001B[0m\n\u001B[1;32m 378\u001B[0m parsed_input \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_parse_input(tool_input)\n\u001B[1;32m 379\u001B[0m tool_args, tool_kwargs \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_to_args_and_kwargs(parsed_input)\n\u001B[1;32m 380\u001B[0m observation \u001B[38;5;241m=\u001B[39m (\n\u001B[0;32m--> 381\u001B[0m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_run\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mtool_args\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mtool_kwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 382\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m new_arg_supported\n\u001B[1;32m 383\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_run(\u001B[38;5;241m*\u001B[39mtool_args, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mtool_kwargs)\n\u001B[1;32m 384\u001B[0m )\n\u001B[1;32m 385\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m ValidationError \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 386\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mhandle_validation_error:\n",
"File \u001B[0;32m~/code/langchain/libs/core/langchain_core/tools.py:588\u001B[0m, in \u001B[0;36mTool._run\u001B[0;34m(self, run_manager, *args, **kwargs)\u001B[0m\n\u001B[1;32m 579\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfunc:\n\u001B[1;32m 580\u001B[0m new_argument_supported \u001B[38;5;241m=\u001B[39m signature(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfunc)\u001B[38;5;241m.\u001B[39mparameters\u001B[38;5;241m.\u001B[39mget(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcallbacks\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[1;32m 581\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m (\n\u001B[1;32m 582\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfunc(\n\u001B[1;32m 583\u001B[0m \u001B[38;5;241m*\u001B[39margs,\n\u001B[1;32m 584\u001B[0m callbacks\u001B[38;5;241m=\u001B[39mrun_manager\u001B[38;5;241m.\u001B[39mget_child() \u001B[38;5;28;01mif\u001B[39;00m run_manager \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 585\u001B[0m \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs,\n\u001B[1;32m 586\u001B[0m )\n\u001B[1;32m 587\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m new_argument_supported\n\u001B[0;32m--> 588\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfunc\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 589\u001B[0m )\n\u001B[1;32m 590\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mNotImplementedError\u001B[39;00m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mTool does not support sync\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n",
"File \u001B[0;32m~/code/langchain/libs/community/langchain_community/utilities/google_search.py:94\u001B[0m, in \u001B[0;36mGoogleSearchAPIWrapper.run\u001B[0;34m(self, query)\u001B[0m\n\u001B[1;32m 92\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"Run query through GoogleSearch and parse result.\"\"\"\u001B[39;00m\n\u001B[1;32m 93\u001B[0m snippets \u001B[38;5;241m=\u001B[39m []\n\u001B[0;32m---> 94\u001B[0m results \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_google_search_results\u001B[49m\u001B[43m(\u001B[49m\u001B[43mquery\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mnum\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mk\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 95\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mlen\u001B[39m(results) \u001B[38;5;241m==\u001B[39m \u001B[38;5;241m0\u001B[39m:\n\u001B[1;32m 96\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mNo good Google Search Result was found\u001B[39m\u001B[38;5;124m\"\u001B[39m\n",
"File \u001B[0;32m~/code/langchain/libs/community/langchain_community/utilities/google_search.py:62\u001B[0m, in \u001B[0;36mGoogleSearchAPIWrapper._google_search_results\u001B[0;34m(self, search_term, **kwargs)\u001B[0m\n\u001B[1;32m 60\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msiterestrict:\n\u001B[1;32m 61\u001B[0m cse \u001B[38;5;241m=\u001B[39m cse\u001B[38;5;241m.\u001B[39msiterestrict()\n\u001B[0;32m---> 62\u001B[0m res \u001B[38;5;241m=\u001B[39m \u001B[43mcse\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mlist\u001B[49m\u001B[43m(\u001B[49m\u001B[43mq\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43msearch_term\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcx\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mgoogle_cse_id\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mexecute\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 63\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m res\u001B[38;5;241m.\u001B[39mget(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mitems\u001B[39m\u001B[38;5;124m\"\u001B[39m, [])\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/googleapiclient/_helpers.py:130\u001B[0m, in \u001B[0;36mpositional.<locals>.positional_decorator.<locals>.positional_wrapper\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 128\u001B[0m \u001B[38;5;28;01melif\u001B[39;00m positional_parameters_enforcement \u001B[38;5;241m==\u001B[39m POSITIONAL_WARNING:\n\u001B[1;32m 129\u001B[0m logger\u001B[38;5;241m.\u001B[39mwarning(message)\n\u001B[0;32m--> 130\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mwrapped\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/googleapiclient/http.py:923\u001B[0m, in \u001B[0;36mHttpRequest.execute\u001B[0;34m(self, http, num_retries)\u001B[0m\n\u001B[1;32m 920\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mheaders[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcontent-length\u001B[39m\u001B[38;5;124m\"\u001B[39m] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mstr\u001B[39m(\u001B[38;5;28mlen\u001B[39m(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mbody))\n\u001B[1;32m 922\u001B[0m \u001B[38;5;66;03m# Handle retries for server-side errors.\u001B[39;00m\n\u001B[0;32m--> 923\u001B[0m resp, content \u001B[38;5;241m=\u001B[39m \u001B[43m_retry_request\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 924\u001B[0m \u001B[43m \u001B[49m\u001B[43mhttp\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 925\u001B[0m \u001B[43m \u001B[49m\u001B[43mnum_retries\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 926\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mrequest\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\n\u001B[1;32m 927\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_sleep\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 928\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_rand\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 929\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mstr\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43muri\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 930\u001B[0m \u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mstr\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mmethod\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 931\u001B[0m \u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 932\u001B[0m \u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mheaders\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 933\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 935\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m callback \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mresponse_callbacks:\n\u001B[1;32m 936\u001B[0m callback(resp)\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/googleapiclient/http.py:191\u001B[0m, in \u001B[0;36m_retry_request\u001B[0;34m(http, num_retries, req_type, sleep, rand, uri, method, *args, **kwargs)\u001B[0m\n\u001B[1;32m 189\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 190\u001B[0m exception \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[0;32m--> 191\u001B[0m resp, content \u001B[38;5;241m=\u001B[39m \u001B[43mhttp\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[43m(\u001B[49m\u001B[43muri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 192\u001B[0m \u001B[38;5;66;03m# Retry on SSL errors and socket timeout errors.\u001B[39;00m\n\u001B[1;32m 193\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m _ssl_SSLError \u001B[38;5;28;01mas\u001B[39;00m ssl_error:\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1724\u001B[0m, in \u001B[0;36mHttp.request\u001B[0;34m(self, uri, method, body, headers, redirections, connection_type)\u001B[0m\n\u001B[1;32m 1722\u001B[0m content \u001B[38;5;241m=\u001B[39m \u001B[38;5;124mb\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 1723\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m-> 1724\u001B[0m (response, content) \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_request\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1725\u001B[0m \u001B[43m \u001B[49m\u001B[43mconn\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mauthority\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43muri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrequest_uri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mredirections\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcachekey\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1726\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1727\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 1728\u001B[0m is_timeout \u001B[38;5;241m=\u001B[39m \u001B[38;5;28misinstance\u001B[39m(e, socket\u001B[38;5;241m.\u001B[39mtimeout)\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1444\u001B[0m, in \u001B[0;36mHttp._request\u001B[0;34m(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey)\u001B[0m\n\u001B[1;32m 1441\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m auth:\n\u001B[1;32m 1442\u001B[0m auth\u001B[38;5;241m.\u001B[39mrequest(method, request_uri, headers, body)\n\u001B[0;32m-> 1444\u001B[0m (response, content) \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_conn_request\u001B[49m\u001B[43m(\u001B[49m\u001B[43mconn\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrequest_uri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1446\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m auth:\n\u001B[1;32m 1447\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m auth\u001B[38;5;241m.\u001B[39mresponse(response, body):\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1366\u001B[0m, in \u001B[0;36mHttp._conn_request\u001B[0;34m(self, conn, request_uri, method, body, headers)\u001B[0m\n\u001B[1;32m 1364\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 1365\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m conn\u001B[38;5;241m.\u001B[39msock \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[0;32m-> 1366\u001B[0m \u001B[43mconn\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconnect\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1367\u001B[0m conn\u001B[38;5;241m.\u001B[39mrequest(method, request_uri, body, headers)\n\u001B[1;32m 1368\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m socket\u001B[38;5;241m.\u001B[39mtimeout:\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1156\u001B[0m, in \u001B[0;36mHTTPSConnectionWithTimeout.connect\u001B[0;34m(self)\u001B[0m\n\u001B[1;32m 1154\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m has_timeout(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mtimeout):\n\u001B[1;32m 1155\u001B[0m sock\u001B[38;5;241m.\u001B[39msettimeout(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mtimeout)\n\u001B[0;32m-> 1156\u001B[0m \u001B[43msock\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconnect\u001B[49m\u001B[43m(\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mhost\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mport\u001B[49m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1158\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msock \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_context\u001B[38;5;241m.\u001B[39mwrap_socket(sock, server_hostname\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mhost)\n\u001B[1;32m 1160\u001B[0m \u001B[38;5;66;03m# Python 3.3 compatibility: emulate the check_hostname behavior\u001B[39;00m\n",
"\u001B[0;31mKeyboardInterrupt\u001B[0m: "
]
}
],
"source": [
"agent_executor.invoke({\"input\": \"What is ChatGPT?\"})"
"agent_chain.run(input=\"What is ChatGPT?\")"
]
},
{
@@ -196,15 +179,15 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to find out who developed ChatGPT\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to find out who developed ChatGPT\n",
"Action: Search\n",
"Action Input: Who developed ChatGPT\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001B[0m\n",
"Action Input: Who developed ChatGPT\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -219,7 +202,7 @@
}
],
"source": [
"agent_executor.invoke({\"input\": \"Who developed it?\"})"
"agent_chain.run(input=\"Who developed it?\")"
]
},
{
@@ -234,14 +217,14 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"Action: Summary\n",
"Action Input: My daughter 5 years old\u001B[0m\n",
"Action Input: My daughter 5 years old\u001b[0m\n",
"\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001B[32;1m\u001B[1;3mThis is a conversation between a human and a bot:\n",
"\u001b[32;1m\u001b[1;3mThis is a conversation between a human and a bot:\n",
"\n",
"Human: What is ChatGPT?\n",
"AI: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\n",
@@ -249,16 +232,16 @@
"AI: ChatGPT was developed by OpenAI.\n",
"\n",
"Write a summary of the conversation for My daughter 5 years old:\n",
"\u001B[0m\n",
"\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"Observation: \u001B[33;1m\u001B[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot. It was created by OpenAI and can send and receive images while chatting.\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot created by OpenAI that can send and receive images while chatting.\u001B[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot. It was created by OpenAI and can send and receive images while chatting.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot created by OpenAI that can send and receive images while chatting.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -273,8 +256,8 @@
}
],
"source": [
"agent_executor.invoke(\n",
" {\"input\": \"Thanks. Summarize the conversation, for my daughter 5 years old.\"}\n",
"agent_chain.run(\n",
" input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
")"
]
},
@@ -306,17 +289,9 @@
}
],
"source": [
"print(agent_executor.memory.buffer)"
"print(agent_chain.memory.buffer)"
]
},
{
"cell_type": "markdown",
"id": "84ca95c30e262e00",
"metadata": {
"collapsed": false
},
"source": []
},
{
"cell_type": "markdown",
"id": "cc3d0aa4",
@@ -365,9 +340,25 @@
" ),\n",
"]\n",
"\n",
"prompt = hub.pull(\"hwchase17/react\")\n",
"agent = create_react_agent(model, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory)"
"prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
"suffix = \"\"\"Begin!\"\n",
"\n",
"{chat_history}\n",
"Question: {input}\n",
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")\n",
"\n",
"llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
]
},
{
@@ -382,15 +373,15 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I should research ChatGPT to answer this question.\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I should research ChatGPT to answer this question.\n",
"Action: Search\n",
"Action Input: \"ChatGPT\"\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001B[0m\n",
"Action Input: \"ChatGPT\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -405,7 +396,7 @@
}
],
"source": [
"agent_executor.invoke({\"input\": \"What is ChatGPT?\"})"
"agent_chain.run(input=\"What is ChatGPT?\")"
]
},
{
@@ -420,15 +411,15 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to find out who developed ChatGPT\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to find out who developed ChatGPT\n",
"Action: Search\n",
"Action Input: Who developed ChatGPT\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001B[0m\n",
"Action Input: Who developed ChatGPT\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -443,7 +434,7 @@
}
],
"source": [
"agent_executor.invoke({\"input\": \"Who developed it?\"})"
"agent_chain.run(input=\"Who developed it?\")"
]
},
{
@@ -458,14 +449,14 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"Action: Summary\n",
"Action Input: My daughter 5 years old\u001B[0m\n",
"Action Input: My daughter 5 years old\u001b[0m\n",
"\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001B[32;1m\u001B[1;3mThis is a conversation between a human and a bot:\n",
"\u001b[32;1m\u001b[1;3mThis is a conversation between a human and a bot:\n",
"\n",
"Human: What is ChatGPT?\n",
"AI: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\n",
@@ -473,16 +464,16 @@
"AI: ChatGPT was developed by OpenAI.\n",
"\n",
"Write a summary of the conversation for My daughter 5 years old:\n",
"\u001B[0m\n",
"\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"Observation: \u001B[33;1m\u001B[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot developed by OpenAI. It is designed to have conversations with humans and can also send and receive images.\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI that can have conversations with humans and send and receive images.\u001B[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot developed by OpenAI. It is designed to have conversations with humans and can also send and receive images.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI that can have conversations with humans and send and receive images.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -497,8 +488,8 @@
}
],
"source": [
"agent_executor.invoke(\n",
" {\"input\": \"Thanks. Summarize the conversation, for my daughter 5 years old.\"}\n",
"agent_chain.run(\n",
" input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
")"
]
},
@@ -533,7 +524,7 @@
}
],
"source": [
"print(agent_executor.memory.buffer)"
"print(agent_chain.memory.buffer)"
]
}
],
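The notebook diff above swaps the newer `AgentExecutor` construction (a prompt pulled from the hub plus `create_react_agent`) for the legacy `ZeroShotAgent`/`LLMChain` assembly, and its `invoke` calls for the older `run` calls. As a minimal side-by-side sketch of the two calling conventions, assuming `model`, `tools`, `memory`, and the legacy `agent_chain` are built as in the notebook cells shown:

```python
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent

# Newer convention: build the executor from a hub prompt, then call
# `invoke` with a dict keyed by the prompt's input variables; the
# result is also a dict.
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory)
result = agent_executor.invoke({"input": "What is ChatGPT?"})

# Legacy convention: `run` takes keyword arguments matching the prompt's
# input variables and returns the output string directly.
answer = agent_chain.run(input="What is ChatGPT?")
```

Both paths thread the same `memory` through the executor, which is why the follow-up question "Who developed it?" in the transcripts resolves against the earlier turns.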

View File

@@ -1,199 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"id": "c48812ed-35bd-4fbe-9a2c-6c7335e5645e",
"metadata": {},
"outputs": [],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"@tool\n",
"def multiply(x: float, y: float) -> float:\n",
" \"\"\"Multiply 'x' times 'y'.\"\"\"\n",
" return x * y\n",
"\n",
"\n",
"@tool\n",
"def exponentiate(x: float, y: float) -> float:\n",
" \"\"\"Raise 'x' to the 'y'.\"\"\"\n",
" return x**y\n",
"\n",
"\n",
"@tool\n",
"def add(x: float, y: float) -> float:\n",
" \"\"\"Add 'x' and 'y'.\"\"\"\n",
" return x + y\n",
"\n",
"\n",
"tools = [multiply, exponentiate, add]\n",
"\n",
"gpt35 = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0).bind_tools(tools)\n",
"claude3 = ChatAnthropic(model=\"claude-3-sonnet-20240229\").bind_tools(tools)\n",
"llm_with_tools = gpt35.configurable_alternatives(\n",
" ConfigurableField(id=\"llm\"), default_key=\"gpt35\", claude3=claude3\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9c186263-1b98-4cb2-b6d1-71f65eb0d811",
"metadata": {},
"source": [
"# LangGraph"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "28fc2c60-7dbc-428a-8983-1a6a15ea30d2",
"metadata": {},
"outputs": [],
"source": [
"import operator\n",
"from typing import Annotated, Sequence, TypedDict\n",
"\n",
"from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langgraph.graph import END, StateGraph\n",
"\n",
"\n",
"class AgentState(TypedDict):\n",
" messages: Annotated[Sequence[BaseMessage], operator.add]\n",
"\n",
"\n",
"def should_continue(state):\n",
" return \"continue\" if state[\"messages\"][-1].tool_calls else \"end\"\n",
"\n",
"\n",
"def call_model(state, config):\n",
" return {\"messages\": [llm_with_tools.invoke(state[\"messages\"], config=config)]}\n",
"\n",
"\n",
"def _invoke_tool(tool_call):\n",
" tool = {tool.name: tool for tool in tools}[tool_call[\"name\"]]\n",
" return ToolMessage(tool.invoke(tool_call[\"args\"]), tool_call_id=tool_call[\"id\"])\n",
"\n",
"\n",
"tool_executor = RunnableLambda(_invoke_tool)\n",
"\n",
"\n",
"def call_tools(state):\n",
" last_message = state[\"messages\"][-1]\n",
" return {\"messages\": tool_executor.batch(last_message.tool_calls)}\n",
"\n",
"\n",
"workflow = StateGraph(AgentState)\n",
"workflow.add_node(\"agent\", call_model)\n",
"workflow.add_node(\"action\", call_tools)\n",
"workflow.set_entry_point(\"agent\")\n",
"workflow.add_conditional_edges(\n",
" \"agent\",\n",
" should_continue,\n",
" {\n",
" \"continue\": \"action\",\n",
" \"end\": END,\n",
" },\n",
")\n",
"workflow.add_edge(\"action\", \"agent\")\n",
"graph = workflow.compile()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3710e724-2595-4625-ba3a-effb81e66e4a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'messages': [HumanMessage(content=\"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"),\n",
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6yMU2WsS4Bqgi1WxFHxtfJRc', 'function': {'arguments': '{\"x\": 8, \"y\": 2.743}', 'name': 'exponentiate'}, 'type': 'function'}, {'id': 'call_GAL3dQiKFF9XEV0RrRLPTvVp', 'function': {'arguments': '{\"x\": 17.24, \"y\": -918.1241}', 'name': 'add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 58, 'prompt_tokens': 168, 'total_tokens': 226}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-528302fc-7acf-4c11-82c4-119ccf40c573-0', tool_calls=[{'name': 'exponentiate', 'args': {'x': 8, 'y': 2.743}, 'id': 'call_6yMU2WsS4Bqgi1WxFHxtfJRc'}, {'name': 'add', 'args': {'x': 17.24, 'y': -918.1241}, 'id': 'call_GAL3dQiKFF9XEV0RrRLPTvVp'}]),\n",
" ToolMessage(content='300.03770462067547', tool_call_id='call_6yMU2WsS4Bqgi1WxFHxtfJRc'),\n",
" ToolMessage(content='-900.8841', tool_call_id='call_GAL3dQiKFF9XEV0RrRLPTvVp'),\n",
" AIMessage(content='The result of \\\\(3 + 5^{2.743}\\\\) is approximately 300.04, and the result of \\\\(17.24 - 918.1241\\\\) is approximately -900.88.', response_metadata={'token_usage': {'completion_tokens': 44, 'prompt_tokens': 251, 'total_tokens': 295}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'stop', 'logprobs': None}, id='run-d1161669-ed09-4b18-94bd-6d8530df5aa8-0')]}"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"graph.invoke(\n",
" {\n",
" \"messages\": [\n",
" HumanMessage(\n",
" \"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"\n",
" )\n",
" ]\n",
" }\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "073c074e-d722-42e0-85ec-c62c079207e4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'messages': [HumanMessage(content=\"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"),\n",
" AIMessage(content=[{'text': \"Okay, let's break this down into two parts:\", 'type': 'text'}, {'id': 'toolu_01DEhqcXkXTtzJAiZ7uMBeDC', 'input': {'x': 3, 'y': 5}, 'name': 'add', 'type': 'tool_use'}], response_metadata={'id': 'msg_01AkLGH8sxMHaH15yewmjwkF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 450, 'output_tokens': 81}}, id='run-f35bfae8-8ded-4f8a-831b-0940d6ad16b6-0', tool_calls=[{'name': 'add', 'args': {'x': 3, 'y': 5}, 'id': 'toolu_01DEhqcXkXTtzJAiZ7uMBeDC'}]),\n",
" ToolMessage(content='8.0', tool_call_id='toolu_01DEhqcXkXTtzJAiZ7uMBeDC'),\n",
" AIMessage(content=[{'id': 'toolu_013DyMLrvnrto33peAKMGMr1', 'input': {'x': 8.0, 'y': 2.743}, 'name': 'exponentiate', 'type': 'tool_use'}], response_metadata={'id': 'msg_015Fmp8aztwYcce2JDAFfce3', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 545, 'output_tokens': 75}}, id='run-48aaeeeb-a1e5-48fd-a57a-6c3da2907b47-0', tool_calls=[{'name': 'exponentiate', 'args': {'x': 8.0, 'y': 2.743}, 'id': 'toolu_013DyMLrvnrto33peAKMGMr1'}]),\n",
" ToolMessage(content='300.03770462067547', tool_call_id='toolu_013DyMLrvnrto33peAKMGMr1'),\n",
" AIMessage(content=[{'text': 'So 3 plus 5 raised to the 2.743 power is 300.04.\\n\\nFor the second part:', 'type': 'text'}, {'id': 'toolu_01UTmMrGTmLpPrPCF1rShN46', 'input': {'x': 17.24, 'y': -918.1241}, 'name': 'add', 'type': 'tool_use'}], response_metadata={'id': 'msg_015TkhfRBENPib2RWAxkieH6', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 638, 'output_tokens': 105}}, id='run-45fb62e3-d102-4159-881d-241c5dbadeed-0', tool_calls=[{'name': 'add', 'args': {'x': 17.24, 'y': -918.1241}, 'id': 'toolu_01UTmMrGTmLpPrPCF1rShN46'}]),\n",
" ToolMessage(content='-900.8841', tool_call_id='toolu_01UTmMrGTmLpPrPCF1rShN46'),\n",
" AIMessage(content='Therefore, 17.24 - 918.1241 = -900.8841', response_metadata={'id': 'msg_01LgKnRuUcSyADCpxv9tPoYD', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 759, 'output_tokens': 24}}, id='run-1008254e-ccd1-497c-8312-9550dd77bd08-0')]}"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"graph.invoke(\n",
" {\n",
" \"messages\": [\n",
" HumanMessage(\n",
" \"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"\n",
" )\n",
" ]\n",
" },\n",
" config={\"configurable\": {\"llm\": \"claude3\"}},\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
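The notebook deleted above wires a minimal LangGraph tool-calling loop: `should_continue` routes back to the `action` node whenever the last message carries tool calls, and to `END` otherwise. As a hedged usage sketch (assuming the `graph`, `HumanMessage`, and tools defined in the cells above), the compiled graph can also be streamed so that each node's state update surfaces as it happens rather than only the final message list:

```python
# Minimal streaming sketch, reusing `graph` and `HumanMessage` from the
# deleted notebook. Each yielded step maps a node name ("agent" or
# "action") to the state delta that node returned.
for step in graph.stream(
    {"messages": [HumanMessage("what's 3 plus 5 raised to the 2.743")]}
):
    print(step)
```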

View File

@@ -3811,7 +3811,7 @@
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
"qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
]
},

View File

@@ -424,7 +424,7 @@
" DialogueAgentWithTools(\n",
" name=name,\n",
" system_message=SystemMessage(content=system_message),\n",
" model=ChatOpenAI(model=\"gpt-4\", temperature=0.2),\n",
" model=ChatOpenAI(model_name=\"gpt-4\", temperature=0.2),\n",
" tool_names=tools,\n",
" top_k_results=2,\n",
" )\n",

View File

@@ -601,7 +601,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)"
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)"
]
},
{
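The three hunks above all make the same substitution on `ChatOpenAI`: the `model` keyword becomes the older `model_name` spelling. In `langchain_openai`, `model_name` is the underlying field and `model` its alias, so the two forms construct the same client; a small sketch (assuming `OPENAI_API_KEY` is set in the environment):

```python
from langchain_openai import ChatOpenAI

# Equivalent constructions: `model` is an alias for the `model_name`
# field, so either keyword selects the same underlying chat model.
llm_a = ChatOpenAI(model="gpt-4", temperature=0)
llm_b = ChatOpenAI(model_name="gpt-4", temperature=0)
assert llm_a.model_name == llm_b.model_name
```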

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View File

@@ -9,10 +9,6 @@
## Tutorials
### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)
### [Build with Langchain - Advanced by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)
### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)
### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)
### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)
### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)
@@ -25,10 +21,10 @@
### Featured courses on Deeplearning.AI
- [LangChain for LLM Application Development](https://www.deeplearning.ai/short-courses/langchain-for-llm-application-development/)
- [LangChain Chat with Your Data](https://www.deeplearning.ai/short-courses/langchain-chat-with-your-data/)
- [Functions, Tools and Agents with LangChain](https://www.deeplearning.ai/short-courses/functions-tools-agents-langchain/)
- [Build LLM Apps with LangChain.js](https://www.deeplearning.ai/short-courses/build-llm-apps-with-langchain-js/)
- [LangChain for LLM Application Development](https://learn.deeplearning.ai/langchain)
- [LangChain Chat with Your Data](https://learn.deeplearning.ai/langchain-chat-with-your-data)
- [Functions, Tools and Agents with LangChain](https://learn.deeplearning.ai/functions-tools-agents-langchain)
- [Build LLM Apps with LangChain.js](https://learn.deeplearning.ai/courses/build-llm-apps-with-langchain-js)
### Online courses
@@ -39,7 +35,6 @@
- [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)
- [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)
- [edX](https://www.edx.org/search?q=langchain)
- [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain)
## Short Tutorials

View File

@@ -98,7 +98,7 @@ To run unit tests in Docker:
make docker_tests
```
There are also [integration tests and code-coverage](/docs/contributing/testing/) available.
There are also [integration tests and code-coverage](./testing) available.
### Only develop langchain_core or langchain_experimental

View File

@@ -1,4 +1,7 @@
# Technical logistics
---
sidebar_position: 3
---
# Contribute Documentation
LangChain documentation consists of two components:

View File

@@ -1,2 +0,0 @@
label: 'Documentation'
position: 3

View File

@@ -1,138 +0,0 @@
---
sidebar_label: "Style guide"
---
# LangChain Documentation Style Guide
## Introduction
As LangChain continues to grow, the surface area of documentation required to cover it continues to grow too.
This page provides guidelines for anyone writing documentation for LangChain, as well as some of our philosophies around
organization and structure.
## Philosophy
LangChain's documentation aspires to follow the [Diataxis framework](https://diataxis.fr).
Under this framework, all documentation falls under one of four categories:
- **Tutorials**: Lessons that take the reader by the hand through a series of conceptual steps to complete a project.
- An example of this is our [LCEL streaming guide](/docs/expression_language/streaming).
- Our guide on [custom components](/docs/modules/model_io/chat/custom_chat_model) is another one.
- **How-to guides**: Guides that take the reader through the steps required to solve a real-world problem.
- The clearest examples of this are our [Use case](/docs/use_cases/) quickstart pages.
- **Reference**: Technical descriptions of the machinery and how to operate it.
- Our [Runnable interface](/docs/expression_language/interface) page is an example of this.
- The [API reference pages](https://api.python.langchain.com/) are another.
- **Explanation**: Explanations that clarify and illuminate a particular topic.
- The [LCEL primitives pages](/docs/expression_language/primitives/sequence) are an example of this.
Each category serves a distinct purpose and requires a specific approach to writing and structuring the content.
## Taxonomy
Keeping the above in mind, we have sorted LangChain's docs into categories. It is helpful to think in these terms
when contributing new documentation:
### Getting started
The [getting started section](/docs/get_started/introduction) includes a high-level introduction to LangChain, a quickstart that
tours LangChain's various features, and logistical instructions around installation and project setup.
It contains elements of **How-to guides** and **Explanations**.
### Use cases
[Use cases](/docs/use_cases/) are guides that are meant to show how to use LangChain to accomplish a specific task (RAG, information extraction, etc.).
The quickstarts should be good entrypoints for first-time LangChain developers who prefer to learn by getting something practical prototyped,
then taking the pieces apart retrospectively. These should mirror what LangChain is good at.
The quickstart pages here should fit the **How-to guide** category, with the other pages intended to be **Explanations** of more
in-depth concepts and strategies that accompany the main happy paths.
:::note
The below sections are listed roughly in order of increasing level of abstraction.
:::
### Expression Language
[LangChain Expression Language (LCEL)](/docs/expression_language/) is the fundamental way that most LangChain components fit together, and this section is designed to teach
developers how to use it to build with LangChain's primitives effectively.
This section should contain **Tutorials** that teach how to stream and use LCEL primitives for more abstract tasks, **Explanations** of specific behaviors,
and some **References** for how to use different methods in the Runnable interface.
### Components
The [components section](/docs/modules) covers concepts one level of abstraction higher than LCEL.
Abstract base classes like `BaseChatModel` and `BaseRetriever` should be covered here, as well as core implementations of these base classes,
such as `ChatPromptTemplate` and `RecursiveCharacterTextSplitter`. Customization guides belong here too.
This section should contain mostly conceptual **Tutorials**, **References**, and **Explanations** of the components they cover.
:::note
As a general rule of thumb, the `Expression Language` and `Components` sections (with the exception of the `Composition` section of components) should
cover only components that exist in `langchain_core`.
:::
### Integrations
The [integrations](/docs/integrations/platforms/) are specific implementations of components. These often involve third-party APIs and services.
If this is the case, as a general rule, these are maintained by the third-party partner.
This section should contain mostly **Explanations** and **References**, though the actual content here is more flexible than in other sections and more at the
discretion of the third-party provider.
:::note
Concepts covered in `Integrations` should generally exist in `langchain_community` or specific partner packages.
:::
### Guides and Ecosystem
The [Guides](/docs/guides) and [Ecosystem](/docs/langsmith/) sections should contain guides that address higher-level problems than the sections above.
This includes, but is not limited to, considerations around productionization and development workflows.
These should contain mostly **How-to guides**, **Explanations**, and **Tutorials**.
### API references
LangChain's API references should act as **References** (as the name implies), with some **Explanation**-focused content as well.
## Sample developer journey
We have set up our docs to assist a developer who is new to LangChain. Let's walk through the intended path:
- The developer lands on https://python.langchain.com, and reads through the introduction and the diagram.
- If they are just curious, they may be drawn to the [Quickstart](/docs/get_started/quickstart) to get a high-level tour of what LangChain contains.
- If they have a specific task in mind that they want to accomplish, they will be drawn to the Use-Case section. The use-case should provide a good, concrete hook that shows the value LangChain can provide them and be a good entrypoint to the framework.
- They can then move to learn more about the fundamentals of LangChain through the Expression Language sections.
- Next, they can learn about LangChain's various components and integrations.
- Finally, they can get additional knowledge through the Guides.
This is only an ideal, of course; sections will inevitably reference lower- or higher-level concepts that are documented in other sections.
## Guidelines
Here are some other guidelines you should think about when writing and organizing documentation.
### Linking to other sections
Because sections of the docs do not exist in a vacuum, it is important to link to other sections as often as possible
to allow a developer to learn more about an unfamiliar topic inline.
This includes linking to the API references as well as conceptual sections!
### Conciseness
In general, take a less-is-more approach. If a section with a good explanation of a concept already exists, you should link to it rather than
re-explain it, unless the concept you are documenting presents some new wrinkle.
Be concise, including in code samples.
### General style
- Use active voice and present tense whenever possible.
- Use examples and code snippets to illustrate concepts and usage.
- Use appropriate header levels (`#`, `##`, `###`, etc.) to organize the content hierarchically.
- Use bullet points and numbered lists to break down information into easily digestible chunks.
- Use tables (especially for **Reference** sections) and diagrams often to present information visually.
- Include the table of contents for longer documentation pages to help readers navigate the content, but hide it for shorter pages.

View File

@@ -12,7 +12,7 @@ As an open-source project in a rapidly developing field, we are extremely open t
There are many ways to contribute to LangChain. Here are some common ways people contribute:
- [**Documentation**](/docs/contributing/documentation/style_guide): Help improve our docs, including this one!
- [**Documentation**](./documentation.mdx): Help improve our docs, including this one!
- [**Code**](./code.mdx): Help us write code, fix bugs, or improve our infrastructure.
- [**Integrations**](integrations.mdx): Help us integrate with your favorite vendors and tools.
- [**Discussions**](https://github.com/langchain-ai/langchain/discussions): Help answer usage questions and discuss issues with users.

View File

@@ -3,7 +3,7 @@ sidebar_position: 5
---
# Contribute Integrations
To begin, make sure you have all the dependencies outlined in the guide on [Contributing Code](/docs/contributing/code/).
To begin, make sure you have all the dependencies outlined in the guide on [Contributing Code](./code).
There are a few different places you can contribute integrations for LangChain:
@@ -133,7 +133,7 @@ By default, this will include stubs for a Chat Model, an LLM, and/or a Vector St
Some basic tests are presented in the `tests/` directory. You should add more tests to cover your package's functionality.
For information on running and implementing tests, see the [Testing guide](/docs/contributing/testing/).
For information on running and implementing tests, see the [Testing guide](./testing).
### Write documentation
@@ -190,9 +190,12 @@ Maintainer steps (Contributors should **not** do these):
## Partner package in external repo
Partner packages in external repos must be coordinated between the LangChain team and
the partner organization to ensure that they are maintained and updated.
If you are creating a partner package in an external repo, you should follow the same steps as above,
but you will need to set up your own CI/CD and package management.
If you're interested in creating a partner package in an external repo, please start
with one in the LangChain repo, and then reach out to the LangChain team to discuss
how to move it to an external repo.
Name your package `langchain-{partner}-{integration}` (for example, a package from a hypothetical provider "Acme" for its chat API might be named `langchain-acme-chat`).
Even so, you must still create the `libs/partners/{partner}-{integration}` folder in the `LangChain` monorepo
and add a `README.md` file with a link to the external repo.
See this [example](https://github.com/langchain-ai/langchain/tree/master/libs/partners/google-genai).
This keeps all partner packages discoverable in the `LangChain` documentation.

View File

@@ -41,7 +41,7 @@ There are other files in the root directory level, but their presence should be
The `/docs` directory contains the content for the documentation that is shown
at https://python.langchain.com/ and the associated API Reference https://api.python.langchain.com/en/latest/langchain_api_reference.html.
See the [documentation](/docs/contributing/documentation/style_guide) guidelines to learn how to contribute to the documentation.
See the [documentation](./documentation) guidelines to learn how to contribute to the documentation.
## Code

View File

@@ -0,0 +1,205 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e89f490d",
"metadata": {},
"source": [
"# Agents\n",
"\n",
"You can pass a Runnable into an agent. Make sure you have `langchainhub` installed: `pip install langchainhub`"
]
},
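{
"cell_type": "code",
"execution_count": null,
"id": "install-deps-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A quick install sketch mirroring the note above; langchain-community is\n",
"# assumed here because ChatAnthropic is imported from it in the next cell.\n",
"%pip install --upgrade --quiet langchain langchainhub langchain-community"
]
},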
{
"cell_type": "code",
"execution_count": 8,
"id": "af4381de",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"from langchain.agents import AgentExecutor, tool\n",
"from langchain.agents.output_parsers import XMLAgentOutputParser\n",
"from langchain_community.chat_models import ChatAnthropic"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "24cc8134",
"metadata": {},
"outputs": [],
"source": [
"model = ChatAnthropic(model=\"claude-2\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "67c0b0e4",
"metadata": {},
"outputs": [],
"source": [
"@tool\n",
"def search(query: str) -> str:\n",
" \"\"\"Search things about current events.\"\"\"\n",
" return \"32 degrees\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "7203b101",
"metadata": {},
"outputs": [],
"source": [
"tool_list = [search]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "b68e756d",
"metadata": {},
"outputs": [],
"source": [
"# Get the prompt to use - you can modify this!\n",
"prompt = hub.pull(\"hwchase17/xml-agent-convo\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "61ab3e9a",
"metadata": {},
"outputs": [],
"source": [
"# Logic for going from intermediate steps to a string to pass into model\n",
"# This is pretty tied to the prompt\n",
"def convert_intermediate_steps(intermediate_steps):\n",
" log = \"\"\n",
" for action, observation in intermediate_steps:\n",
" log += (\n",
" f\"<tool>{action.tool}</tool><tool_input>{action.tool_input}\"\n",
" f\"</tool_input><observation>{observation}</observation>\"\n",
" )\n",
" return log\n",
"\n",
"\n",
"# Logic for converting tools to string to go in prompt\n",
"def convert_tools(tools):\n",
" return \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in tools])"
]
},
{
"cell_type": "markdown",
"id": "260f5988",
"metadata": {},
"source": [
"Building an agent from a runnable usually involves a few things:\n",
"\n",
"1. Data processing for the intermediate steps. These need to be represented in a way that the language model can recognize them. This should be pretty tightly coupled to the instructions in the prompt\n",
"\n",
"2. The prompt itself\n",
"\n",
"3. The model, complete with stop tokens if needed\n",
"\n",
"4. The output parser - should be in sync with how the prompt specifies things to be formatted."
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "e92f1d6f",
"metadata": {},
"outputs": [],
"source": [
"agent = (\n",
" {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: convert_intermediate_steps(\n",
" x[\"intermediate_steps\"]\n",
" ),\n",
" }\n",
" | prompt.partial(tools=convert_tools(tool_list))\n",
" | model.bind(stop=[\"</tool_input>\", \"</final_answer>\"])\n",
" | XMLAgentOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "6ce6ec7a",
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "fb5cb2e3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m <tool>search</tool><tool_input>weather in New York\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m <tool>search</tool>\n",
"<tool_input>weather in New York\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m <final_answer>The weather in New York is 32 degrees\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'input': 'whats the weather in New york?',\n",
" 'output': 'The weather in New York is 32 degrees'}"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"whats the weather in New york?\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bce86dd8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "1e997ab7",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "f09fd305",

View File

@@ -0,0 +1,163 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "cf4fb76d-c534-485b-8b51-a0714ee3b82e",
"metadata": {},
"source": [
"# Routing by semantic similarity\n",
"\n",
"With LCEL you can easily add [custom routing logic](/docs/expression_language/how_to/routing#using-a-custom-function) to your chain to dynamically determine the chain logic based on user input. All you need to do is define a function that given an input returns a `Runnable`.\n",
"\n",
"One especially useful technique is to use embeddings to route a query to the most relevant prompt. Here's a very simple example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b793a0aa",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "eef9020a-5f7c-4291-98eb-fa73f17d4b92",
"metadata": {},
"outputs": [],
"source": [
"from langchain.utils.math import cosine_similarity\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"physics_template = \"\"\"You are a very smart physics professor. \\\n",
"You are great at answering questions about physics in a concise and easy to understand manner. \\\n",
"When you don't know the answer to a question you admit that you don't know.\n",
"\n",
"Here is a question:\n",
"{query}\"\"\"\n",
"\n",
"math_template = \"\"\"You are a very good mathematician. You are great at answering math questions. \\\n",
"You are so good because you are able to break down hard problems into their component parts, \\\n",
"answer the component parts, and then put them together to answer the broader question.\n",
"\n",
"Here is a question:\n",
"{query}\"\"\"\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"prompt_templates = [physics_template, math_template]\n",
"prompt_embeddings = embeddings.embed_documents(prompt_templates)\n",
"\n",
"\n",
"def prompt_router(input):\n",
" query_embedding = embeddings.embed_query(input[\"query\"])\n",
" similarity = cosine_similarity([query_embedding], prompt_embeddings)[0]\n",
" most_similar = prompt_templates[similarity.argmax()]\n",
" print(\"Using MATH\" if most_similar == math_template else \"Using PHYSICS\")\n",
" return PromptTemplate.from_template(most_similar)\n",
"\n",
"\n",
"chain = (\n",
" {\"query\": RunnablePassthrough()}\n",
" | RunnableLambda(prompt_router)\n",
" | ChatOpenAI()\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "4d22b0f3-24f2-4a47-9440-065b57ebcdbd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using PHYSICS\n",
"A black hole is a region in space where gravity is extremely strong, so strong that nothing, not even light, can escape its gravitational pull. It is formed when a massive star collapses under its own gravity during a supernova explosion. The collapse causes an incredibly dense mass to be concentrated in a small volume, creating a gravitational field that is so intense that it warps space and time. Black holes have a boundary called the event horizon, which marks the point of no return for anything that gets too close. Beyond the event horizon, the gravitational pull is so strong that even light cannot escape, hence the name \"black hole.\" While we have a good understanding of black holes, there is still much to learn, especially about what happens inside them.\n"
]
}
],
"source": [
"print(chain.invoke(\"What's a black hole\"))"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "f261910d-1de1-4a01-8c8a-308db02b81de",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using MATH\n",
"Thank you for your kind words! I will do my best to break down the concept of a path integral for you.\n",
"\n",
"In mathematics and physics, a path integral is a mathematical tool used to calculate the probability amplitude or wave function of a particle or system of particles. It was introduced by Richard Feynman and is an integral over all possible paths that a particle can take to go from an initial state to a final state.\n",
"\n",
"To understand the concept better, let's consider an example. Suppose we have a particle moving from point A to point B in space. Classically, we would describe this particle's motion using a definite trajectory, but in quantum mechanics, particles can simultaneously take multiple paths from A to B.\n",
"\n",
"The path integral formalism considers all possible paths that the particle could take and assigns a probability amplitude to each path. These probability amplitudes are then added up, taking into account the interference effects between different paths.\n",
"\n",
"To calculate a path integral, we need to define an action, which is a mathematical function that describes the behavior of the system. The action is usually expressed in terms of the particle's position, velocity, and time.\n",
"\n",
"Once we have the action, we can write down the path integral as an integral over all possible paths. Each path is weighted by a factor determined by the action and the principle of least action, which states that a particle takes a path that minimizes the action.\n",
"\n",
"Mathematically, the path integral is expressed as:\n",
"\n",
"∫ e^(iS/ħ) D[x(t)]\n",
"\n",
"Here, S is the action, ħ is the reduced Planck's constant, and D[x(t)] represents the integration over all possible paths x(t) of the particle.\n",
"\n",
"By evaluating this integral, we can obtain the probability amplitude for the particle to go from the initial state to the final state. The absolute square of this amplitude gives us the probability of finding the particle in a particular state.\n",
"\n",
"Path integrals have proven to be a powerful tool in various areas of physics, including quantum mechanics, quantum field theory, and statistical mechanics. They allow us to study complex systems and calculate probabilities that are difficult to obtain using other methods.\n",
"\n",
"I hope this explanation helps you understand the concept of a path integral. If you have any further questions, feel free to ask!\n"
]
}
],
"source": [
"print(chain.invoke(\"What's a path integral\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0c1732a-01ca-4d10-977c-29ed7480972b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,11 @@
---
sidebar_position: 3
---
# Cookbook
import DocCardList from "@theme/DocCardList";
Example code for accomplishing common tasks with the LangChain Expression Language (LCEL). These examples show how to compose different Runnable (the core LCEL interface) components to achieve various tasks. If you're just getting acquainted with LCEL, the [Prompt + LLM](/docs/expression_language/cookbook/prompt_llm_parser) page is a good place to start.
<DocCardList />

View File

@@ -0,0 +1,194 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "5062941a",
"metadata": {},
"source": [
"# Adding memory\n",
"\n",
"This shows how to add memory to an arbitrary chain. Right now, you can use the memory classes but need to hook it up manually"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18753dee",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "7998efd8",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You are a helpful chatbot\"),\n",
" MessagesPlaceholder(variable_name=\"history\"),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "fa0087f3",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationBufferMemory(return_messages=True)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "06b531ae",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history': []}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"memory.load_memory_variables({})"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "d9437af6",
"metadata": {},
"outputs": [],
"source": [
"chain = (\n",
" RunnablePassthrough.assign(\n",
" history=RunnableLambda(memory.load_memory_variables) | itemgetter(\"history\")\n",
" )\n",
" | prompt\n",
" | model\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "bed1e260",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Hello Bob! How can I assist you today?', additional_kwargs={}, example=False)"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inputs = {\"input\": \"hi im bob\"}\n",
"response = chain.invoke(inputs)\n",
"response"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "890475b4",
"metadata": {},
"outputs": [],
"source": [
"memory.save_context(inputs, {\"output\": response.content})"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e8fcb77f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history': [HumanMessage(content='hi im bob', additional_kwargs={}, example=False),\n",
" AIMessage(content='Hello Bob! How can I assist you today?', additional_kwargs={}, example=False)]}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"memory.load_memory_variables({})"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "d837d5c3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Your name is Bob.', additional_kwargs={}, example=False)"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inputs = {\"input\": \"whats my name\"}\n",
"response = chain.invoke(inputs)\n",
"response"
]
},
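{
"cell_type": "markdown",
"id": "manual-save-note",
"metadata": {},
"source": [
"As a final sketch (these cells are not in the original notebook, but reuse the `memory`, `inputs`, and `response` objects defined above): remember to save each new exchange yourself."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "manual-save-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Nothing is saved automatically; repeat the manual bookkeeping each turn.\n",
"memory.save_context(inputs, {\"output\": response.content})\n",
"memory.load_memory_variables({})"
]
}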
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -5,19 +5,9 @@
"id": "4927a727-b4c8-453c-8c83-bd87b4fcac14",
"metadata": {},
"source": [
"# Moderation chain\n",
"# Adding moderation\n",
"\n",
"This notebook walks through examples of how to use a moderation chain, and several common ways for doing so. \n",
"Moderation chains are useful for detecting text that could be hateful, violent, etc. This can be useful to apply on both user input, but also on the output of a Language Model. \n",
"Some API providers specifically prohibit you, or your end users, from generating some \n",
"types of harmful content. To comply with this (and to just generally prevent your application from being harmful) \n",
"you may want to add a moderation chain to your sequences in order to make sure any output \n",
"the LLM generates is not harmful.\n",
"\n",
"If the content passed into the moderation chain is harmful, there is not one best way to handle it.\n",
"It probably depends on your application. Sometimes you may want to throw an error \n",
"(and have your application handle that). Other times, you may want to return something to \n",
"the user explaining that the text was harmful."
"This shows how to add in moderation (or other safeguards) around your LLM application."
]
},
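{
"cell_type": "markdown",
"id": "moderation-sketch-note",
"metadata": {},
"source": [
"As a minimal sketch (not part of the original notebook), one way to add such a safeguard is LangChain's `OpenAIModerationChain`; the class is real, but the wiring below is illustrative:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "moderation-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# A sketch: run text through OpenAI's moderation endpoint.\n",
"# Requires OPENAI_API_KEY; pass error=True to raise instead of returning a notice.\n",
"from langchain.chains import OpenAIModerationChain\n",
"\n",
"moderate = OpenAIModerationChain()\n",
"moderate.invoke({\"input\": \"some model output to check\"})"
]
},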
{

View File

@@ -34,7 +34,7 @@
"from langchain.agents import AgentExecutor, load_tools\n",
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
"from langchain_community.tools import WikipediaQueryRun\n",
"from langchain.tools import WikipediaQueryRun\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"from langchain_core.prompt_values import ChatPromptValue\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",

View File

@@ -0,0 +1,492 @@
{
"cells": [
{
"cell_type": "raw",
"id": "abe47592-909c-4844-bf44-9e55c2fb4bfa",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: RAG\n",
"---\n"
]
},
{
"cell_type": "markdown",
"id": "91c5ef3d",
"metadata": {},
"source": [
"Let's look at adding in a retrieval step to a prompt and LLM, which adds up to a \"retrieval-augmented generation\" chain"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "7f25d9e9-d192-42e9-af50-5660a4bfb0d9",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai faiss-cpu tiktoken"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "33be32af",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "bfc47ec1",
"metadata": {},
"outputs": [],
"source": [
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
")\n",
"retriever = vectorstore.as_retriever()\n",
"\n",
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"\n",
"model = ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "eae31755",
"metadata": {},
"outputs": [],
"source": [
"chain = (\n",
" {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | model\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "f3040b0c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Harrison worked at Kensho.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"where did harrison work?\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e1d20c7c",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\n",
"Answer in the following language: {language}\n",
"\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"\n",
"chain = (\n",
" {\n",
" \"context\": itemgetter(\"question\") | retriever,\n",
" \"question\": itemgetter(\"question\"),\n",
" \"language\": itemgetter(\"language\"),\n",
" }\n",
" | prompt\n",
" | model\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "7ee8b2d4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Harrison ha lavorato a Kensho.'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"question\": \"where did harrison work\", \"language\": \"italian\"})"
]
},
{
"cell_type": "markdown",
"id": "f007669c",
"metadata": {},
"source": [
"## Conversational Retrieval Chain\n",
"\n",
"We can easily add in conversation history. This primarily means adding in chat_message_history"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "3f30c348",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string\n",
"from langchain_core.prompts import format_document\n",
"from langchain_core.runnables import RunnableParallel"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "64ab1dbf",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"\n",
"_template = \"\"\"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n",
"\n",
"Chat History:\n",
"{chat_history}\n",
"Follow Up Input: {question}\n",
"Standalone question:\"\"\"\n",
"CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "7d628c97",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\"\"\"\n",
"ANSWER_PROMPT = ChatPromptTemplate.from_template(template)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "f60a5d0f",
"metadata": {},
"outputs": [],
"source": [
"DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
"\n",
"\n",
"def _combine_documents(\n",
" docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator=\"\\n\\n\"\n",
"):\n",
" doc_strings = [format_document(doc, document_prompt) for doc in docs]\n",
" return document_separator.join(doc_strings)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "5c32cc89",
"metadata": {},
"outputs": [],
"source": [
"_inputs = RunnableParallel(\n",
" standalone_question=RunnablePassthrough.assign(\n",
" chat_history=lambda x: get_buffer_string(x[\"chat_history\"])\n",
" )\n",
" | CONDENSE_QUESTION_PROMPT\n",
" | ChatOpenAI(temperature=0)\n",
" | StrOutputParser(),\n",
")\n",
"_context = {\n",
" \"context\": itemgetter(\"standalone_question\") | retriever | _combine_documents,\n",
" \"question\": lambda x: x[\"standalone_question\"],\n",
"}\n",
"conversational_qa_chain = _inputs | _context | ANSWER_PROMPT | ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "135c8205",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Harrison was employed at Kensho.')"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversational_qa_chain.invoke(\n",
" {\n",
" \"question\": \"where did harrison work?\",\n",
" \"chat_history\": [],\n",
" }\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "424e7e7a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Harrison worked at Kensho.')"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversational_qa_chain.invoke(\n",
" {\n",
" \"question\": \"where did he work?\",\n",
" \"chat_history\": [\n",
" HumanMessage(content=\"Who wrote this notebook?\"),\n",
" AIMessage(content=\"Harrison\"),\n",
" ],\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "c5543183",
"metadata": {},
"source": [
"### With Memory and returning source documents\n",
"\n",
"This shows how to use memory with the above. For memory, we need to manage that outside at the memory. For returning the retrieved documents, we just need to pass them through all the way."
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "e31dd17c",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.memory import ConversationBufferMemory"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "d4bffe94",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationBufferMemory(\n",
" return_messages=True, output_key=\"answer\", input_key=\"question\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "733be985",
"metadata": {},
"outputs": [],
"source": [
"# First we add a step to load memory\n",
"# This adds a \"memory\" key to the input object\n",
"loaded_memory = RunnablePassthrough.assign(\n",
" chat_history=RunnableLambda(memory.load_memory_variables) | itemgetter(\"history\"),\n",
")\n",
"# Now we calculate the standalone question\n",
"standalone_question = {\n",
" \"standalone_question\": {\n",
" \"question\": lambda x: x[\"question\"],\n",
" \"chat_history\": lambda x: get_buffer_string(x[\"chat_history\"]),\n",
" }\n",
" | CONDENSE_QUESTION_PROMPT\n",
" | ChatOpenAI(temperature=0)\n",
" | StrOutputParser(),\n",
"}\n",
"# Now we retrieve the documents\n",
"retrieved_documents = {\n",
" \"docs\": itemgetter(\"standalone_question\") | retriever,\n",
" \"question\": lambda x: x[\"standalone_question\"],\n",
"}\n",
"# Now we construct the inputs for the final prompt\n",
"final_inputs = {\n",
" \"context\": lambda x: _combine_documents(x[\"docs\"]),\n",
" \"question\": itemgetter(\"question\"),\n",
"}\n",
"# And finally, we do the part that returns the answers\n",
"answer = {\n",
" \"answer\": final_inputs | ANSWER_PROMPT | ChatOpenAI(),\n",
" \"docs\": itemgetter(\"docs\"),\n",
"}\n",
"# And now we put it all together!\n",
"final_chain = loaded_memory | standalone_question | retrieved_documents | answer"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "806e390c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'answer': AIMessage(content='Harrison was employed at Kensho.'),\n",
" 'docs': [Document(page_content='harrison worked at kensho')]}"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inputs = {\"question\": \"where did harrison work?\"}\n",
"result = final_chain.invoke(inputs)\n",
"result"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "977399fd",
"metadata": {},
"outputs": [],
"source": [
"# Note that the memory does not save automatically\n",
"# This will be improved in the future\n",
"# For now you need to save it yourself\n",
"memory.save_context(inputs, {\"answer\": result[\"answer\"].content})"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "f94f7de4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history': [HumanMessage(content='where did harrison work?'),\n",
" AIMessage(content='Harrison was employed at Kensho.')]}"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"memory.load_memory_variables({})"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "88f2b7cd",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'answer': AIMessage(content='Harrison actually worked at Kensho.'),\n",
" 'docs': [Document(page_content='harrison worked at kensho')]}"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inputs = {\"question\": \"but where did he really work?\"}\n",
"result = final_chain.invoke(inputs)\n",
"result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "207a2782",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,225 @@
{
"cells": [
{
"cell_type": "raw",
"id": "c14da114-1a4a-487d-9cff-e0e8c30ba366",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 3\n",
"title: Querying a SQL DB\n",
"---\n"
]
},
{
"cell_type": "markdown",
"id": "506e9636",
"metadata": {},
"source": [
"We can replicate our SQLDatabaseChain with Runnables."
]
},
{
"cell_type": "code",
"id": "b3121aa8",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "7a927516",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
"{schema}\n",
"\n",
"Question: {question}\n",
"SQL Query:\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "3f51f386",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.utilities import SQLDatabase"
]
},
{
"cell_type": "markdown",
"id": "7c3449d6-684b-416e-ba16-90a035835a88",
"metadata": {},
"source": [
"We'll need the Chinook sample DB for this example. There's many places to download it from, e.g. https://database.guide/2-sample-databases-sqlite/"
]
},
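{
"cell_type": "markdown",
"id": "chinook-standin-note",
"metadata": {},
"source": [
"If you don't want to download anything, the next cell is a minimal stand-in (not part of the original notebook): it creates a tiny SQLite file with an `Employee` table so the chain below has something to query. The schema here is a simplified assumption, not the real Chinook layout."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "chinook-standin-code",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical stand-in: build a tiny SQLite DB with an Employee table\n",
"# so sql_response and full_chain below can run end to end.\n",
"import sqlite3\n",
"\n",
"conn = sqlite3.connect(\"Chinook.db\")\n",
"conn.execute(\n",
"    \"CREATE TABLE IF NOT EXISTS Employee (EmployeeId INTEGER PRIMARY KEY, LastName TEXT)\"\n",
")\n",
"conn.executemany(\n",
"    \"INSERT INTO Employee (LastName) VALUES (?)\", [(\"Adams\",), (\"Edwards\",)]\n",
")\n",
"conn.commit()\n",
"conn.close()"
]
},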
{
"cell_type": "code",
"execution_count": 20,
"id": "2ccca6fc",
"metadata": {},
"outputs": [],
"source": [
"db = SQLDatabase.from_uri(\"sqlite:///./Chinook.db\")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "05ba88ee",
"metadata": {},
"outputs": [],
"source": [
"def get_schema(_):\n",
" return db.get_table_info()"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "a4eda902",
"metadata": {},
"outputs": [],
"source": [
"def run_query(query):\n",
" return db.run(query)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "5046cb17",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"\n",
"sql_response = (\n",
" RunnablePassthrough.assign(schema=get_schema)\n",
" | prompt\n",
" | model.bind(stop=[\"\\nSQLResult:\"])\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "a5552039",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'SELECT COUNT(*) FROM Employee'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sql_response.invoke({\"question\": \"How many employees are there?\"})"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "d6fee130",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Based on the table schema below, question, sql query, and sql response, write a natural language response:\n",
"{schema}\n",
"\n",
"Question: {question}\n",
"SQL Query: {query}\n",
"SQL Response: {response}\"\"\"\n",
"prompt_response = ChatPromptTemplate.from_template(template)"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "923aa634",
"metadata": {},
"outputs": [],
"source": [
"full_chain = (\n",
" RunnablePassthrough.assign(query=sql_response).assign(\n",
" schema=get_schema,\n",
" response=lambda x: db.run(x[\"query\"]),\n",
" )\n",
" | prompt_response\n",
" | model\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "e94963d8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='There are 8 employees.', additional_kwargs={}, example=False)"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"full_chain.invoke({\"question\": \"How many employees are there?\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f358d7b-a721-4db3-9f92-f06913428afc",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,122 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "29781123",
"metadata": {},
"source": [
"# Using tools\n",
"\n",
"You can use any Tools with Runnables easily."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a5c579dd-2e22-41b0-a789-346dfdecb5a2",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai duckduckgo-search"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "9232d2a9",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools import DuckDuckGoSearchRun\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a0c64d2c",
"metadata": {},
"outputs": [],
"source": [
"search = DuckDuckGoSearchRun()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "391969b6",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"turn the following user input into a search query for a search engine:\n",
"\n",
"{input}\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"\n",
"model = ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e3d9d20d",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model | StrOutputParser() | search"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "55f2967d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'What sports games are on TV today & tonight? Watch and stream live sports on TV today, tonight, tomorrow. Today\\'s 2023 sports TV schedule includes football, basketball, baseball, hockey, motorsports, soccer and more. Watch on TV or stream online on ESPN, FOX, FS1, CBS, NBC, ABC, Peacock, Paramount+, fuboTV, local channels and many other networks. MLB Games Tonight: How to Watch on TV, Streaming & Odds - Thursday, September 7. Seattle Mariners\\' Julio Rodriguez greets teammates in the dugout after scoring against the Oakland Athletics in a ... Circle - Country Music and Lifestyle. Live coverage of all the MLB action today is available to you, with the information provided below. The Brewers will look to pick up a road win at PNC Park against the Pirates on Wednesday at 12:35 PM ET. Check out the latest odds and with BetMGM Sportsbook. Use bonus code \"GNPLAY\" for special offers! MLB Games Tonight: How to Watch on TV, Streaming & Odds - Tuesday, September 5. Houston Astros\\' Kyle Tucker runs after hitting a double during the fourth inning of a baseball game against the Los Angeles Angels, Sunday, Aug. 13, 2023, in Houston. (AP Photo/Eric Christian Smith) (APMedia) The Houston Astros versus the Texas Rangers is one of ... The second half of tonight\\'s college football schedule still has some good games remaining to watch on your television.. We\\'ve already seen an exciting one when Colorado upset TCU. And we saw some ...'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"input\": \"I'd like to figure out what games are tonight\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a16949cf-00ea-43c6-a6aa-797ad4f6918d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv",
"language": "python",
"name": "poetry-venv"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -440,7 +440,7 @@
"id": "e6833844-f1c4-444c-a3d2-31b3c6b31d46",
"metadata": {},
"source": [
"We then use the `RunnableParallel` to prepare the expected inputs into the prompt by using the entries for the retrieved documents as well as the original user question, using the retriever for document search, and `RunnablePassthrough` to pass the users question:"
"We then use the `RunnableParallel` to prepare the expected inputs into the prompt by using the entries for the retrieved documents as well as the original user question, using the retriever for document search, and RunnablePassthrough to pass the users question:"
]
},
{
@@ -509,7 +509,7 @@
"source": [
"## Next steps\n",
"\n",
"We recommend reading our [Advantages of LCEL](/docs/expression_language/why) section next to see a side-by-side comparison of the code needed to produce common functionality with and without LCEL."
"We recommend reading our [Why use LCEL](/docs/expression_language/why) section next to see a side-by-side comparison of the code needed to produce common functionality with and without LCEL."
]
}
],

View File

@@ -1,25 +1,13 @@
{
"cells": [
{
"cell_type": "raw",
"id": "fe63ffaf",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 2\n",
"title: \"Binding: Attach runtime args\"\n",
"keywords: [RunnableBinding, LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "711752cb-4f15-42a3-9838-a0c67f397771",
"metadata": {},
"source": [
"# Binding: Attach runtime args\n",
"# Bind runtime args\n",
"\n",
"Sometimes we want to invoke a Runnable within a Runnable sequence with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use `Runnable.bind()` to pass these arguments in.\n",
"Sometimes we want to invoke a Runnable within a Runnable sequence with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use `Runnable.bind()` to easily pass these arguments in.\n",
"\n",
"Suppose we have a simple prompt + model sequence:"
]
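},
{
"cell_type": "markdown",
"id": "bind-sketch-note",
"metadata": {},
"source": [
"As a minimal sketch (assuming `langchain-openai` is installed; the stop token here is arbitrary), binding a constant argument looks like this:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bind-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
"# bind() attaches constant kwargs (here a stop sequence) to every invocation\n",
"model = ChatOpenAI().bind(stop=[\"\\n\"])\n",
"chain = prompt | model | StrOutputParser()\n",
"chain.invoke({\"topic\": \"bears\"})"
]
}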

View File

@@ -1,17 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "9ede5870",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 7\n",
"title: \"Configure runtime chain internals\"\n",
"keywords: [ConfigurableField, configurable_fields, ConfigurableAlternatives, configurable_alternatives, LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "39eaf61b",
@@ -63,7 +51,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
@@ -285,8 +273,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_openai import ChatOpenAI"
]

View File

@@ -5,9 +5,9 @@
"id": "b45110ef",
"metadata": {},
"source": [
"# Create a runnable with the @chain decorator\n",
"# Create a runnable with the `@chain` decorator\n",
"\n",
"You can also turn an arbitrary function into a chain by adding a `@chain` decorator. This is functionaly equivalent to wrapping in a [`RunnableLambda`](/docs/expression_language/primitives/functions).\n",
"You can also turn an arbitrary function into a chain by adding a `@chain` decorator. This is functionaly equivalent to wrapping in a [`RunnableLambda`](./functions).\n",
"\n",
"This will have the benefit of improved observability by tracing your chain correctly. Any calls to runnables inside this function will be traced as nested childen.\n",
"\n",

View File

@@ -0,0 +1,310 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "19c9cbd6",
"metadata": {},
"source": [
"# Add fallbacks\n",
"\n",
"There are many possible points of failure in an LLM application, whether that be issues with LLM API's, poor model outputs, issues with other integrations, etc. Fallbacks help you gracefully handle and isolate these issues.\n",
"\n",
"Crucially, fallbacks can be applied not only on the LLM level but on the whole runnable level."
]
},
{
"cell_type": "markdown",
"id": "a6bb9ba9",
"metadata": {},
"source": [
"## Handling LLM API Errors\n",
"\n",
"This is maybe the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit rate limits, any number of things. Therefore, using fallbacks can help protect against these types of things.\n",
"\n",
"IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retrying and not failing."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ebb61b1f",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "d3e893bf",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_openai import ChatOpenAI"
]
},
{
"cell_type": "markdown",
"id": "4847c82d",
"metadata": {},
"source": [
"First, let's mock out what happens if we hit a RateLimitError from OpenAI"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "dfdd8bf5",
"metadata": {},
"outputs": [],
"source": [
"from unittest.mock import patch\n",
"\n",
"import httpx\n",
"from openai import RateLimitError\n",
"\n",
"request = httpx.Request(\"GET\", \"/\")\n",
"response = httpx.Response(200, request=request)\n",
"error = RateLimitError(\"rate limit\", response=response, body=\"\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e6fdffc1",
"metadata": {},
"outputs": [],
"source": [
"# Note that we set max_retries = 0 to avoid retrying on RateLimits, etc\n",
"openai_llm = ChatOpenAI(max_retries=0)\n",
"anthropic_llm = ChatAnthropic()\n",
"llm = openai_llm.with_fallbacks([anthropic_llm])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "584461ab",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hit error\n"
]
}
],
"source": [
"# Let's use just the OpenAI LLm first, to show that we run into an error\n",
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(openai_llm.invoke(\"Why did the chicken cross the road?\"))\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "4fc1e673",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content=' I don\\'t actually know why the chicken crossed the road, but here are some possible humorous answers:\\n\\n- To get to the other side!\\n\\n- It was too chicken to just stand there. \\n\\n- It wanted a change of scenery.\\n\\n- It wanted to show the possum it could be done.\\n\\n- It was on its way to a poultry farmers\\' convention.\\n\\nThe joke plays on the double meaning of \"the other side\" - literally crossing the road to the other side, or the \"other side\" meaning the afterlife. So it\\'s an anti-joke, with a silly or unexpected pun as the answer.' additional_kwargs={} example=False\n"
]
}
],
"source": [
"# Now let's try with fallbacks to Anthropic\n",
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(llm.invoke(\"Why did the chicken cross the road?\"))\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
{
"cell_type": "markdown",
"id": "f00bea25",
"metadata": {},
"source": [
"We can use our \"LLM with Fallbacks\" as we would a normal LLM."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "4f8eaaa0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content=\" I don't actually know why the kangaroo crossed the road, but I'm happy to take a guess! Maybe the kangaroo was trying to get to the other side to find some tasty grass to eat. Or maybe it was trying to get away from a predator or other danger. Kangaroos do need to cross roads and other open areas sometimes as part of their normal activities. Whatever the reason, I'm sure the kangaroo looked both ways before hopping across!\" additional_kwargs={} example=False\n"
]
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You're a nice assistant who always includes a compliment in your response\",\n",
" ),\n",
" (\"human\", \"Why did the {animal} cross the road\"),\n",
" ]\n",
")\n",
"chain = prompt | llm\n",
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(chain.invoke({\"animal\": \"kangaroo\"}))\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
{
"cell_type": "markdown",
"id": "ef9f0f39-0b9f-4723-a394-f61c98c75d41",
"metadata": {},
"source": [
"### Specifying errors to handle\n",
"\n",
"We can also specify the errors to handle if we want to be more specific about when the fallback is invoked:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e4069ca4-1c16-4915-9a8c-b2732869ae27",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hit error\n"
]
}
],
"source": [
"llm = openai_llm.with_fallbacks(\n",
" [anthropic_llm], exceptions_to_handle=(KeyboardInterrupt,)\n",
")\n",
"\n",
"chain = prompt | llm\n",
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(chain.invoke({\"animal\": \"kangaroo\"}))\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
{
"cell_type": "markdown",
"id": "8d62241b",
"metadata": {},
"source": [
"## Fallbacks for Sequences\n",
"\n",
"We can also create fallbacks for sequences, that are sequences themselves. Here we do that with two different models: ChatOpenAI and then normal OpenAI (which does not use a chat model). Because OpenAI is NOT a chat model, you likely want a different prompt."
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "6d0b8056",
"metadata": {},
"outputs": [],
"source": [
"# First let's create a chain with a ChatModel\n",
"# We add in a string output parser here so the outputs between the two are the same type\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"chat_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You're a nice assistant who always includes a compliment in your response\",\n",
" ),\n",
" (\"human\", \"Why did the {animal} cross the road\"),\n",
" ]\n",
")\n",
"# Here we're going to use a bad model name to easily create a chain that will error\n",
"chat_model = ChatOpenAI(model_name=\"gpt-fake\")\n",
"bad_chain = chat_prompt | chat_model | StrOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "8d1fc2a5",
"metadata": {},
"outputs": [],
"source": [
"# Now lets create a chain with the normal OpenAI model\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"\n",
"prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n",
"\n",
"Question: Why did the {animal} cross the road?\"\"\"\n",
"prompt = PromptTemplate.from_template(prompt_template)\n",
"llm = OpenAI()\n",
"good_chain = prompt | llm"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "283bfa44",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n\\nAnswer: The turtle crossed the road to get to the other side, and I have to say he had some impressive determination.'"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# We can now create a final chain which combines the two\n",
"chain = bad_chain.with_fallbacks([good_chain])\n",
"chain.invoke({\"animal\": \"turtle\"})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,206 @@
{
"cells": [
{
"cell_type": "raw",
"id": "ce0e08fd",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 2\n",
"title: \"RunnableLambda: Run Custom Functions\"\n",
"keywords: [RunnableLambda, LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "fbc4bf6e",
"metadata": {},
"source": [
"# Run custom functions\n",
"\n",
"You can use arbitrary functions in the pipeline.\n",
"\n",
"Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single input and unpacks it into multiple argument."
]
},
{
"cell_type": "raw",
"id": "9a5fe916",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "6bb221b3",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"def length_function(text):\n",
" return len(text)\n",
"\n",
"\n",
"def _multiple_length_function(text1, text2):\n",
" return len(text1) * len(text2)\n",
"\n",
"\n",
"def multiple_length_function(_dict):\n",
" return _multiple_length_function(_dict[\"text1\"], _dict[\"text2\"])\n",
"\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"what is {a} + {b}\")\n",
"model = ChatOpenAI()\n",
"\n",
"chain1 = prompt | model\n",
"\n",
"chain = (\n",
" {\n",
" \"a\": itemgetter(\"foo\") | RunnableLambda(length_function),\n",
" \"b\": {\"text1\": itemgetter(\"foo\"), \"text2\": itemgetter(\"bar\")}\n",
" | RunnableLambda(multiple_length_function),\n",
" }\n",
" | prompt\n",
" | model\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "5488ec85",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='3 + 9 equals 12.')"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"foo\": \"bar\", \"bar\": \"gah\"})"
]
},
{
"cell_type": "markdown",
"id": "4728ddd9-914d-42ce-ae9b-72c9ce8ec940",
"metadata": {},
"source": [
"## Accepting a Runnable Config\n",
"\n",
"Runnable lambdas can optionally accept a [RunnableConfig](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html#langchain_core.runnables.config.RunnableConfig), which they can use to pass callbacks, tags, and other configuration information to nested runs."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "80b3b5f6-5d58-44b9-807e-cce9a46bf49f",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableConfig"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ff0daf0c-49dd-4d21-9772-e5fa133c5f36",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"\n",
"def parse_or_fix(text: str, config: RunnableConfig):\n",
" fixing_chain = (\n",
" ChatPromptTemplate.from_template(\n",
" \"Fix the following text:\\n\\n```text\\n{input}\\n```\\nError: {error}\"\n",
" \" Don't narrate, just respond with the fixed data.\"\n",
" )\n",
" | ChatOpenAI()\n",
" | StrOutputParser()\n",
" )\n",
" for _ in range(3):\n",
" try:\n",
" return json.loads(text)\n",
" except Exception as e:\n",
" text = fixing_chain.invoke({\"input\": text, \"error\": e}, config)\n",
" return \"Failed to parse\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1a5e709e-9d75-48c7-bb9c-503251990505",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'foo': 'bar'}\n",
"Tokens Used: 65\n",
"\tPrompt Tokens: 56\n",
"\tCompletion Tokens: 9\n",
"Successful Requests: 1\n",
"Total Cost (USD): $0.00010200000000000001\n"
]
}
],
"source": [
"from langchain.callbacks import get_openai_callback\n",
"\n",
"with get_openai_callback() as cb:\n",
" output = RunnableLambda(parse_or_fix).invoke(\n",
" \"{foo: bar}\", {\"tags\": [\"my-tag\"], \"callbacks\": [cb]}\n",
" )\n",
" print(output)\n",
" print(cb)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "29f55c38",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,184 +1,10 @@
{
"cells": [
{
"cell_type": "raw",
"id": "ce0e08fd",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 3\n",
"title: \"Lambda: Run custom functions\"\n",
"keywords: [RunnableLambda, LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "fbc4bf6e",
"metadata": {},
"source": [
"# Run custom functions\n",
"\n",
"You can use arbitrary functions in the pipeline.\n",
"\n",
"Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single input and unpacks it into multiple argument."
]
},
{
"cell_type": "raw",
"id": "9a5fe916",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "6bb221b3",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"def length_function(text):\n",
" return len(text)\n",
"\n",
"\n",
"def _multiple_length_function(text1, text2):\n",
" return len(text1) * len(text2)\n",
"\n",
"\n",
"def multiple_length_function(_dict):\n",
" return _multiple_length_function(_dict[\"text1\"], _dict[\"text2\"])\n",
"\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"what is {a} + {b}\")\n",
"model = ChatOpenAI()\n",
"\n",
"chain1 = prompt | model\n",
"\n",
"chain = (\n",
" {\n",
" \"a\": itemgetter(\"foo\") | RunnableLambda(length_function),\n",
" \"b\": {\"text1\": itemgetter(\"foo\"), \"text2\": itemgetter(\"bar\")}\n",
" | RunnableLambda(multiple_length_function),\n",
" }\n",
" | prompt\n",
" | model\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "5488ec85",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='3 + 9 = 12', response_metadata={'token_usage': {'completion_tokens': 7, 'prompt_tokens': 14, 'total_tokens': 21}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'stop', 'logprobs': None}, id='run-bd204541-81fd-429a-ad92-dd1913af9b1c-0')"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"foo\": \"bar\", \"bar\": \"gah\"})"
]
},
{
"cell_type": "markdown",
"id": "4728ddd9-914d-42ce-ae9b-72c9ce8ec940",
"metadata": {},
"source": [
"## Accepting a Runnable Config\n",
"\n",
"Runnable lambdas can optionally accept a [RunnableConfig](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html#langchain_core.runnables.config.RunnableConfig), which they can use to pass callbacks, tags, and other configuration information to nested runs."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "80b3b5f6-5d58-44b9-807e-cce9a46bf49f",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableConfig"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ff0daf0c-49dd-4d21-9772-e5fa133c5f36",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"\n",
"def parse_or_fix(text: str, config: RunnableConfig):\n",
" fixing_chain = (\n",
" ChatPromptTemplate.from_template(\n",
" \"Fix the following text:\\n\\n```text\\n{input}\\n```\\nError: {error}\"\n",
" \" Don't narrate, just respond with the fixed data.\"\n",
" )\n",
" | ChatOpenAI()\n",
" | StrOutputParser()\n",
" )\n",
" for _ in range(3):\n",
" try:\n",
" return json.loads(text)\n",
" except Exception as e:\n",
" text = fixing_chain.invoke({\"input\": text, \"error\": e}, config)\n",
" return \"Failed to parse\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1a5e709e-9d75-48c7-bb9c-503251990505",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'foo': 'bar'}\n",
"Tokens Used: 62\n",
"\tPrompt Tokens: 56\n",
"\tCompletion Tokens: 6\n",
"Successful Requests: 1\n",
"Total Cost (USD): $9.6e-05\n"
]
}
],
"source": [
"from langchain_community.callbacks import get_openai_callback\n",
"\n",
"with get_openai_callback() as cb:\n",
" output = RunnableLambda(parse_or_fix).invoke(\n",
" \"{foo: bar}\", {\"tags\": [\"my-tag\"], \"callbacks\": [cb]}\n",
" )\n",
" print(output)\n",
" print(cb)"
]
},
{
"cell_type": "markdown",
"id": "922b48bd",
"metadata": {},
"source": [
"# Streaming\n",
"# Stream custom generator functions\n",
"\n",
"You can use generator functions (ie. functions that use the `yield` keyword, and behave like iterators) in a LCEL pipeline.\n",
"\n",
@@ -188,20 +14,39 @@
"- implementing a custom output parser\n",
"- modifying the output of a previous step, while preserving streaming capabilities\n",
"\n",
"Here's an example of a custom output parser for comma-separated lists:"
"Let's implement a custom output parser for comma-separated lists."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Sync version"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "29f55c38",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from typing import Iterator, List\n",
"\n",
"from langchain.prompts.chat import ChatPromptTemplate\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\n",
" \"Write a comma-separated list of 5 animals similar to: {animal}. Do not include numbers\"\n",
" \"Write a comma-separated list of 5 animals similar to: {animal}\"\n",
")\n",
"model = ChatOpenAI(temperature=0.0)\n",
"\n",
@@ -210,8 +55,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"id": "75aa946b",
"execution_count": 2,
"metadata": {},
"outputs": [
{
@@ -229,8 +73,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"id": "d002a7fe",
"execution_count": 3,
"metadata": {},
"outputs": [
{
@@ -239,7 +82,7 @@
"'lion, tiger, wolf, gorilla, panda'"
]
},
"execution_count": 8,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
@@ -250,8 +93,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"id": "f08b8a5b",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -277,8 +119,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"id": "02e414aa",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -287,8 +128,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"id": "7ed8799d",
"execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -310,17 +150,16 @@
},
{
"cell_type": "code",
"execution_count": 12,
"id": "9ea4ddc6",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['lion', 'tiger', 'wolf', 'gorilla', 'elephant']"
"['lion', 'tiger', 'wolf', 'gorilla', 'panda']"
]
},
"execution_count": 12,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -331,7 +170,6 @@
},
{
"cell_type": "markdown",
"id": "96e320ed",
"metadata": {},
"source": [
"## Async version"
@@ -339,8 +177,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"id": "569dbbef",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
@@ -367,8 +204,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"id": "7a76b713",
"execution_count": 9,
"metadata": {},
"outputs": [
{
@@ -390,8 +226,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"id": "3a650482",
"execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -400,7 +235,7 @@
"['lion', 'tiger', 'wolf', 'gorilla', 'panda']"
]
},
"execution_count": 15,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -426,9 +261,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
"nbformat_minor": 4
}
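The hunks above reference a custom generator parser whose body falls outside the diff context. As a minimal sketch of what such a parser can look like (assuming the `ChatOpenAI` model and prompt shown in the surrounding context lines), a function that buffers streamed tokens and yields completed list items preserves streaming end to end:

```python
from typing import Iterator, List

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template(
    "Write a comma-separated list of 5 animals similar to: {animal}"
)
str_chain = prompt | ChatOpenAI(temperature=0.0) | StrOutputParser()


def split_into_list(input: Iterator[str]) -> Iterator[List[str]]:
    # Buffer incoming token chunks until a comma appears, then yield the
    # completed item so downstream steps still receive output incrementally.
    buffer = ""
    for chunk in input:
        buffer += chunk
        while "," in buffer:
            comma_index = buffer.index(",")
            yield [buffer[:comma_index].strip()]
            buffer = buffer[comma_index + 1 :]
    yield [buffer.strip()]  # emit whatever remains after the final comma


list_chain = str_chain | split_into_list

for chunk in list_chain.stream({"animal": "bear"}):
    print(chunk, flush=True)
```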

View File

@@ -0,0 +1,9 @@
---
sidebar_position: 2
---
# How to
import DocCardList from "@theme/DocCardList";
<DocCardList />

View File

@@ -29,10 +29,10 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.vectorstores import FAISS\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings"
]
},

View File

@@ -6,8 +6,8 @@
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: \"Parallel: Format data\"\n",
"sidebar_position: 0\n",
"title: \"RunnableParallel: Manipulating data\"\n",
"keywords: [RunnableParallel, RunnableMap, LCEL]\n",
"---"
]
@@ -17,13 +17,13 @@
"id": "b022ab74-794d-4c54-ad47-ff9549ddb9d2",
"metadata": {},
"source": [
"# Formatting inputs & output\n",
"# Manipulating inputs & output\n",
"\n",
"The `RunnableParallel` primitive is essentially a dict whose values are runnables (or things that can be coerced to runnables, like functions). It runs all of its values in parallel, and each value is called with the overall input of the `RunnableParallel`. The final return value is a dict with the results of each value under its appropriate key.\n",
"RunnableParallel can be useful for manipulating the output of one Runnable to match the input format of the next Runnable in a sequence.\n",
"\n",
"It is useful for parallelizing operations, but can also be useful for manipulating the output of one Runnable to match the input format of the next Runnable in a sequence.\n",
"Here the input to prompt is expected to be a map with keys \"context\" and \"question\". The user input is just the question. So we need to get the context using our retriever and passthrough the user input under the \"question\" key.\n",
"\n",
"Here the input to prompt is expected to be a map with keys \"context\" and \"question\". The user input is just the question. So we need to get the context using our retriever and passthrough the user input under the \"question\" key.\n"
"\n"
]
},
{
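The code cell that implements this map falls outside the hunk above. A minimal sketch of the pattern described, assuming the same toy FAISS store used elsewhere in these docs:

```python
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Toy single-document retriever (example data, not from the diff).
vectorstore = FAISS.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()

template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# The dict literal is coerced into a RunnableParallel: the retriever fills
# "context" while RunnablePassthrough forwards the raw question string.
retrieval_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)

retrieval_chain.invoke("where did harrison work?")
```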

View File

@@ -1,14 +1,14 @@
{
"cells": [
{
"cell_type": "raw",
"cell_type": "markdown",
"id": "d35de667-0352-4bfb-a890-cebe7f676fe7",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 5\n",
"title: \"Passthrough: Pass through inputs\"\n",
"keywords: [RunnablePassthrough, LCEL]\n",
"sidebar_position: 1\n",
"title: \"RunnablePassthrough: Passing data through\"\n",
"keywords: [RunnablePassthrough, RunnableParallel, LCEL]\n",
"---"
]
},
@@ -19,7 +19,11 @@
"source": [
"# Passing data through\n",
"\n",
"RunnablePassthrough on its own allows you to pass inputs unchanged. This typically is used in conjuction with RunnableParallel to pass data through to a new key in the map. \n",
"RunnablePassthrough allows to pass inputs unchanged or with the addition of extra keys. This typically is used in conjuction with RunnableParallel to assign data to a new key in the map. \n",
"\n",
"RunnablePassthrough() called on it's own, will simply take the input and pass it through. \n",
"\n",
"RunnablePassthrough called with assign (`RunnablePassthrough.assign(...)`) will take the input, and will add the extra arguments passed to the assign function. \n",
"\n",
"See the example below:"
]
@@ -56,6 +60,7 @@
"\n",
"runnable = RunnableParallel(\n",
" passed=RunnablePassthrough(),\n",
" extra=RunnablePassthrough.assign(mult=lambda x: x[\"num\"] * 3),\n",
" modified=lambda x: x[\"num\"] + 1,\n",
")\n",
"\n",
@@ -69,7 +74,9 @@
"source": [
"As seen above, `passed` key was called with `RunnablePassthrough()` and so it simply passed on `{'num': 1}`. \n",
"\n",
"We also set a second key in the map with `modified`. This uses a lambda to set a single value adding 1 to the num, which resulted in `modified` key with the value of `2`."
"In the second line, we used `RunnablePastshrough.assign` with a lambda that multiplies the numerical value by 3. In this cased, `extra` was set with `{'num': 1, 'mult': 3}` which is the original value with the `mult` key added. \n",
"\n",
"Finally, we also set a third key in the map with `modified` which uses a lambda to set a single value adding 1 to the num, which resulted in `modified` key with the value of `2`."
]
},
{
@@ -79,7 +86,7 @@
"source": [
"## Retrieval Example\n",
"\n",
"In the example below, we see a use case where we use `RunnablePassthrough` along with `RunnableParallel`. "
"In the example below, we see a use case where we use RunnablePassthrough along with RunnableMap. "
]
},
{

View File

@@ -7,7 +7,7 @@
"source": [
"---\n",
"sidebar_position: 3\n",
"title: \"Route logic based on input\"\n",
"title: \"RunnableBranch: Dynamically route logic based on input\"\n",
"keywords: [RunnableBranch, LCEL]\n",
"---"
]
@@ -25,7 +25,7 @@
"\n",
"There are two ways to perform routing:\n",
"\n",
"1. Conditionally return runnables from a [`RunnableLambda`](/docs/expression_language/primitives/functions) (recommended)\n",
"1. Conditionally return runnables from a [`RunnableLambda`](./functions) (recommended)\n",
"2. Using a `RunnableBranch`.\n",
"\n",
"We'll illustrate both methods using a two step sequence where the first step classifies an input question as being about `LangChain`, `Anthropic`, or `Other`, then routes to a corresponding prompt chain."
@@ -42,23 +42,22 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "8a8a1967",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Anthropic'"
"' Anthropic'"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
"output_type": "display_data"
}
],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
@@ -74,7 +73,7 @@
"\n",
"Classification:\"\"\"\n",
" )\n",
" | ChatAnthropic(model_name=\"claude-3-haiku-20240307\")\n",
" | ChatAnthropic()\n",
" | StrOutputParser()\n",
")\n",
"\n",
@@ -91,33 +90,42 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "89d7722d",
"metadata": {},
"outputs": [],
"source": [
"langchain_chain = PromptTemplate.from_template(\n",
" \"\"\"You are an expert in langchain. \\\n",
"langchain_chain = (\n",
" PromptTemplate.from_template(\n",
" \"\"\"You are an expert in langchain. \\\n",
"Always answer questions starting with \"As Harrison Chase told me\". \\\n",
"Respond to the following question:\n",
"\n",
"Question: {question}\n",
"Answer:\"\"\"\n",
") | ChatAnthropic(model_name=\"claude-3-haiku-20240307\")\n",
"anthropic_chain = PromptTemplate.from_template(\n",
" \"\"\"You are an expert in anthropic. \\\n",
" )\n",
" | ChatAnthropic()\n",
")\n",
"anthropic_chain = (\n",
" PromptTemplate.from_template(\n",
" \"\"\"You are an expert in anthropic. \\\n",
"Always answer questions starting with \"As Dario Amodei told me\". \\\n",
"Respond to the following question:\n",
"\n",
"Question: {question}\n",
"Answer:\"\"\"\n",
") | ChatAnthropic(model_name=\"claude-3-haiku-20240307\")\n",
"general_chain = PromptTemplate.from_template(\n",
" \"\"\"Respond to the following question:\n",
" )\n",
" | ChatAnthropic()\n",
")\n",
"general_chain = (\n",
" PromptTemplate.from_template(\n",
" \"\"\"Respond to the following question:\n",
"\n",
"Question: {question}\n",
"Answer:\"\"\"\n",
") | ChatAnthropic(model_name=\"claude-3-haiku-20240307\")"
" )\n",
" | ChatAnthropic()\n",
")"
]
},
{
@@ -132,7 +140,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 10,
"id": "687492da",
"metadata": {},
"outputs": [],
@@ -148,7 +156,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 11,
"id": "02a33c86",
"metadata": {},
"outputs": [],
@@ -162,17 +170,17 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 12,
"id": "c2e977a4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"As Dario Amodei told me, to use Anthropic, you can start by exploring the company's website and learning about their mission, values, and the different services and products they offer. Anthropic is focused on developing safe and ethical AI systems, so they have a strong emphasis on transparency and responsible AI development. \\n\\nDepending on your specific needs, you can look into Anthropic's AI research and development services, which cover areas like natural language processing, computer vision, and reinforcement learning. They also offer consulting and advisory services to help organizations navigate the challenges and opportunities of AI integration.\\n\\nAdditionally, Anthropic has released some open-source AI models and tools that you can explore and experiment with. These can be a great way to get hands-on experience with Anthropic's approach to AI development.\\n\\nOverall, Anthropic aims to be a reliable and trustworthy partner in the AI space, so I'd encourage you to reach out to them directly to discuss how they can best support your specific requirements.\", response_metadata={'id': 'msg_01CtLFgFSwvTaJomrihE87Ra', 'content': [ContentBlock(text=\"As Dario Amodei told me, to use Anthropic, you can start by exploring the company's website and learning about their mission, values, and the different services and products they offer. Anthropic is focused on developing safe and ethical AI systems, so they have a strong emphasis on transparency and responsible AI development. \\n\\nDepending on your specific needs, you can look into Anthropic's AI research and development services, which cover areas like natural language processing, computer vision, and reinforcement learning. They also offer consulting and advisory services to help organizations navigate the challenges and opportunities of AI integration.\\n\\nAdditionally, Anthropic has released some open-source AI models and tools that you can explore and experiment with. These can be a great way to get hands-on experience with Anthropic's approach to AI development.\\n\\nOverall, Anthropic aims to be a reliable and trustworthy partner in the AI space, so I'd encourage you to reach out to them directly to discuss how they can best support your specific requirements.\", type='text')], 'model': 'claude-3-haiku-20240307', 'role': 'assistant', 'stop_reason': 'end_turn', 'stop_sequence': None, 'type': 'message', 'usage': Usage(input_tokens=53, output_tokens=219)})"
"AIMessage(content=' As Dario Amodei told me, to use Anthropic IPC you first need to import it:\\n\\n```python\\nfrom anthroipc import ic\\n```\\n\\nThen you can create a client and connect to the server:\\n\\n```python \\nclient = ic.connect()\\n```\\n\\nAfter that, you can call methods on the client and get responses:\\n\\n```python\\nresponse = client.ask(\"What is the meaning of life?\")\\nprint(response)\\n```\\n\\nYou can also register callbacks to handle events: \\n\\n```python\\ndef on_poke(event):\\n print(\"Got poked!\")\\n\\nclient.on(\\'poke\\', on_poke)\\n```\\n\\nAnd that\\'s the basics of using the Anthropic IPC client library for Python! Let me know if you have any other questions!', additional_kwargs={}, example=False)"
]
},
"execution_count": 6,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -183,17 +191,17 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 13,
"id": "48913dc6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"As Harrison Chase told me, using LangChain involves a few key steps:\\n\\n1. **Set up your environment**: Install the necessary Python packages, including the LangChain library itself, as well as any other dependencies your application might require, such as language models or other integrations.\\n\\n2. **Understand the core concepts**: LangChain revolves around a few core concepts, like Agents, Chains, and Tools. Familiarize yourself with these concepts and how they work together to build powerful language-based applications.\\n\\n3. **Identify your use case**: Determine what kind of task or application you want to build using LangChain, such as a chatbot, a question-answering system, or a document summarization tool.\\n\\n4. **Choose the appropriate components**: Based on your use case, select the right LangChain components, such as agents, chains, and tools, to build your application.\\n\\n5. **Integrate with language models**: LangChain is designed to work seamlessly with various language models, such as OpenAI's GPT-3 or Anthropic's models. Connect your chosen language model to your LangChain application.\\n\\n6. **Implement your application logic**: Use LangChain's building blocks to implement the specific functionality of your application, such as prompting the language model, processing the response, and integrating with other services or data sources.\\n\\n7. **Test and iterate**: Thoroughly test your application, gather feedback, and iterate on your design and implementation to improve its performance and user experience.\\n\\nAs Harrison Chase emphasized, LangChain provides a flexible and powerful framework for building language-based applications, making it easier to leverage the capabilities of modern language models. By following these steps, you can get started with LangChain and create innovative solutions tailored to your specific needs.\", response_metadata={'id': 'msg_01H3UXAAHG4TwxJLpxwuuVU7', 'content': [ContentBlock(text=\"As Harrison Chase told me, using LangChain involves a few key steps:\\n\\n1. **Set up your environment**: Install the necessary Python packages, including the LangChain library itself, as well as any other dependencies your application might require, such as language models or other integrations.\\n\\n2. **Understand the core concepts**: LangChain revolves around a few core concepts, like Agents, Chains, and Tools. Familiarize yourself with these concepts and how they work together to build powerful language-based applications.\\n\\n3. **Identify your use case**: Determine what kind of task or application you want to build using LangChain, such as a chatbot, a question-answering system, or a document summarization tool.\\n\\n4. **Choose the appropriate components**: Based on your use case, select the right LangChain components, such as agents, chains, and tools, to build your application.\\n\\n5. **Integrate with language models**: LangChain is designed to work seamlessly with various language models, such as OpenAI's GPT-3 or Anthropic's models. Connect your chosen language model to your LangChain application.\\n\\n6. **Implement your application logic**: Use LangChain's building blocks to implement the specific functionality of your application, such as prompting the language model, processing the response, and integrating with other services or data sources.\\n\\n7. 
**Test and iterate**: Thoroughly test your application, gather feedback, and iterate on your design and implementation to improve its performance and user experience.\\n\\nAs Harrison Chase emphasized, LangChain provides a flexible and powerful framework for building language-based applications, making it easier to leverage the capabilities of modern language models. By following these steps, you can get started with LangChain and create innovative solutions tailored to your specific needs.\", type='text')], 'model': 'claude-3-haiku-20240307', 'role': 'assistant', 'stop_reason': 'end_turn', 'stop_sequence': None, 'type': 'message', 'usage': Usage(input_tokens=50, output_tokens=400)})"
"AIMessage(content=' As Harrison Chase told me, to use LangChain you first need to sign up for an API key at platform.langchain.com. Once you have your API key, you can install the Python library and write a simple Python script to call the LangChain API. Here is some sample code to get started:\\n\\n```python\\nimport langchain\\n\\napi_key = \"YOUR_API_KEY\"\\n\\nlangchain.set_key(api_key)\\n\\nresponse = langchain.ask(\"What is the capital of France?\")\\n\\nprint(response.response)\\n```\\n\\nThis will send the question \"What is the capital of France?\" to the LangChain API and print the response. You can customize the request by providing parameters like max_tokens, temperature, etc. The LangChain Python library documentation has more details on the available options. The key things are getting an API key and calling langchain.ask() with your question text. Let me know if you have any other questions!', additional_kwargs={}, example=False)"
]
},
"execution_count": 7,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -204,17 +212,17 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 14,
"id": "a14d0dca",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='4', response_metadata={'id': 'msg_01UAKP81jTZu9fyiyFYhsbHc', 'content': [ContentBlock(text='4', type='text')], 'model': 'claude-3-haiku-20240307', 'role': 'assistant', 'stop_reason': 'end_turn', 'stop_sequence': None, 'type': 'message', 'usage': Usage(input_tokens=28, output_tokens=5)})"
"AIMessage(content=' 4', additional_kwargs={}, example=False)"
]
},
"execution_count": 8,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -241,19 +249,18 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": null,
"id": "2a101418",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"As Dario Amodei told me, to use Anthropic, you should first familiarize yourself with our mission and principles. Anthropic is committed to developing safe and beneficial artificial intelligence that can help solve important problems facing humanity. \\n\\nTo get started, I recommend exploring the resources on our website, which cover our research, products, and approach to AI development. You can also reach out to our team to learn more about how Anthropic's technology and services can support your specific needs.\\n\\nThe key is to engage with us in a way that aligns with our values of transparency, ethical AI, and a commitment to the wellbeing of humanity. We're here to collaborate and help you harness the power of advanced AI responsibly.\", response_metadata={'id': 'msg_0187BVnpniPDJnVvwf3M1LdY', 'content': [ContentBlock(text=\"As Dario Amodei told me, to use Anthropic, you should first familiarize yourself with our mission and principles. Anthropic is committed to developing safe and beneficial artificial intelligence that can help solve important problems facing humanity. \\n\\nTo get started, I recommend exploring the resources on our website, which cover our research, products, and approach to AI development. You can also reach out to our team to learn more about how Anthropic's technology and services can support your specific needs.\\n\\nThe key is to engage with us in a way that aligns with our values of transparency, ethical AI, and a commitment to the wellbeing of humanity. We're here to collaborate and help you harness the power of advanced AI responsibly.\", type='text')], 'model': 'claude-3-haiku-20240307', 'role': 'assistant', 'stop_reason': 'end_turn', 'stop_sequence': None, 'type': 'message', 'usage': Usage(input_tokens=53, output_tokens=160)})"
"AIMessage(content=\" As Dario Amodei told me, here are some ways to use Anthropic:\\n\\n- Sign up for an account on Anthropic's website to access tools like Claude, Constitutional AI, and Writer. \\n\\n- Use Claude for tasks like email generation, customer service chat, and QA. Claude can understand natural language prompts and provide helpful responses.\\n\\n- Use Constitutional AI if you need an AI assistant that is harmless, honest, and helpful. It is designed to be safe and aligned with human values.\\n\\n- Use Writer to generate natural language content for things like marketing copy, stories, reports, and more. Give it a topic and prompt and it will create high-quality written content.\\n\\n- Check out Anthropic's documentation and blog for tips, tutorials, examples, and announcements about new capabilities as they continue to develop their AI technology.\\n\\n- Follow Anthropic on social media or subscribe to their newsletter to stay up to date on new features and releases.\\n\\n- For most people, the easiest way to leverage Anthropic's technology is through their website - just create an account to get started!\", additional_kwargs={}, example=False)"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
"output_type": "display_data"
}
],
"source": [
@@ -270,19 +277,18 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"id": "8d8caf9b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"As Harrison Chase told me, using LangChain involves several key steps. First, you'll need to install the LangChain library and import the necessary modules. Then, you'll want to define your language model, any data sources you plan to use, and the specific tasks you want to accomplish, such as question answering, text generation, or agent-based reasoning. \\n\\nLangChain provides a flexible framework for building applications that leverage large language models. It includes abstractions for things like retrievers, prompts, and chains, which allow you to compose different components together to create powerful workflows. \\n\\nThe documentation on the LangChain website is excellent and covers many common use cases in detail. I'd recommend starting there to get a solid understanding of the core concepts and how to apply them to your specific needs. And of course, feel free to reach out if you have any other questions - I'm always happy to share more insights from my conversations with Harrison.\", response_metadata={'id': 'msg_01T1naS99wGPkEAP4LME8iAv', 'content': [ContentBlock(text=\"As Harrison Chase told me, using LangChain involves several key steps. First, you'll need to install the LangChain library and import the necessary modules. Then, you'll want to define your language model, any data sources you plan to use, and the specific tasks you want to accomplish, such as question answering, text generation, or agent-based reasoning. \\n\\nLangChain provides a flexible framework for building applications that leverage large language models. It includes abstractions for things like retrievers, prompts, and chains, which allow you to compose different components together to create powerful workflows. \\n\\nThe documentation on the LangChain website is excellent and covers many common use cases in detail. I'd recommend starting there to get a solid understanding of the core concepts and how to apply them to your specific needs. And of course, feel free to reach out if you have any other questions - I'm always happy to share more insights from my conversations with Harrison.\", type='text')], 'model': 'claude-3-haiku-20240307', 'role': 'assistant', 'stop_reason': 'end_turn', 'stop_sequence': None, 'type': 'message', 'usage': Usage(input_tokens=50, output_tokens=205)})"
"AIMessage(content=' As Harrison Chase told me, here is how you use LangChain:\\n\\nLangChain is an AI assistant that can have conversations, answer questions, and generate text. To use LangChain, you simply type or speak your input and LangChain will respond. \\n\\nYou can ask LangChain questions, have discussions, get summaries or explanations about topics, and request it to generate text on a subject. Some examples of interactions:\\n\\n- Ask general knowledge questions and LangChain will try to answer factually. For example \"What is the capital of France?\"\\n\\n- Have conversations on topics by taking turns speaking. You can prompt the start of a conversation by saying something like \"Let\\'s discuss machine learning\"\\n\\n- Ask for summaries or high-level explanations on subjects. For example \"Can you summarize the main themes in Shakespeare\\'s Hamlet?\" \\n\\n- Give creative writing prompts or requests to have LangChain generate text in different styles. For example \"Write a short children\\'s story about a mouse\" or \"Generate a poem in the style of Robert Frost about nature\"\\n\\n- Correct LangChain if it makes an inaccurate statement and provide the right information. This helps train it.\\n\\nThe key is interacting naturally and giving it clear prompts and requests', additional_kwargs={}, example=False)"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
"output_type": "display_data"
}
],
"source": [
@@ -291,150 +297,23 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": null,
"id": "26159af7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='4', response_metadata={'id': 'msg_01T6T3TS6hRCtU8JayN93QEi', 'content': [ContentBlock(text='4', type='text')], 'model': 'claude-3-haiku-20240307', 'role': 'assistant', 'stop_reason': 'end_turn', 'stop_sequence': None, 'type': 'message', 'usage': Usage(input_tokens=28, output_tokens=5)})"
"AIMessage(content=' 2 + 2 = 4', additional_kwargs={}, example=False)"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
"output_type": "display_data"
}
],
"source": [
"full_chain.invoke({\"question\": \"whats 2 + 2\"})"
]
},
{
"cell_type": "markdown",
"id": "fa0f589d",
"metadata": {},
"source": [
"# Routing by semantic similarity\n",
"\n",
"One especially useful technique is to use embeddings to route a query to the most relevant prompt. Here's an example."
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "a23457d7",
"metadata": {},
"outputs": [],
"source": [
"from langchain.utils.math import cosine_similarity\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"physics_template = \"\"\"You are a very smart physics professor. \\\n",
"You are great at answering questions about physics in a concise and easy to understand manner. \\\n",
"When you don't know the answer to a question you admit that you don't know.\n",
"\n",
"Here is a question:\n",
"{query}\"\"\"\n",
"\n",
"math_template = \"\"\"You are a very good mathematician. You are great at answering math questions. \\\n",
"You are so good because you are able to break down hard problems into their component parts, \\\n",
"answer the component parts, and then put them together to answer the broader question.\n",
"\n",
"Here is a question:\n",
"{query}\"\"\"\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"prompt_templates = [physics_template, math_template]\n",
"prompt_embeddings = embeddings.embed_documents(prompt_templates)\n",
"\n",
"\n",
"def prompt_router(input):\n",
" query_embedding = embeddings.embed_query(input[\"query\"])\n",
" similarity = cosine_similarity([query_embedding], prompt_embeddings)[0]\n",
" most_similar = prompt_templates[similarity.argmax()]\n",
" print(\"Using MATH\" if most_similar == math_template else \"Using PHYSICS\")\n",
" return PromptTemplate.from_template(most_similar)\n",
"\n",
"\n",
"chain = (\n",
" {\"query\": RunnablePassthrough()}\n",
" | RunnableLambda(prompt_router)\n",
" | ChatAnthropic(model_name=\"claude-3-haiku-20240307\")\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "664bb851",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using PHYSICS\n",
"As a physics professor, I would be happy to provide a concise and easy-to-understand explanation of what a black hole is.\n",
"\n",
"A black hole is an incredibly dense region of space-time where the gravitational pull is so strong that nothing, not even light, can escape from it. This means that if you were to get too close to a black hole, you would be pulled in and crushed by the intense gravitational forces.\n",
"\n",
"The formation of a black hole occurs when a massive star, much larger than our Sun, reaches the end of its life and collapses in on itself. This collapse causes the matter to become extremely dense, and the gravitational force becomes so strong that it creates a point of no return, known as the event horizon.\n",
"\n",
"Beyond the event horizon, the laws of physics as we know them break down, and the intense gravitational forces create a singularity, which is a point of infinite density and curvature in space-time.\n",
"\n",
"Black holes are fascinating and mysterious objects, and there is still much to be learned about their properties and behavior. If I were unsure about any specific details or aspects of black holes, I would readily admit that I do not have a complete understanding and would encourage further research and investigation.\n"
]
}
],
"source": [
"print(chain.invoke(\"What's a black hole\"))"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "df34e469",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using MATH\n",
"A path integral is a powerful mathematical concept in physics, particularly in the field of quantum mechanics. It was developed by the renowned physicist Richard Feynman as an alternative formulation of quantum mechanics.\n",
"\n",
"In a path integral, instead of considering a single, definite path that a particle might take from one point to another, as in classical mechanics, the particle is considered to take all possible paths simultaneously. Each path is assigned a complex-valued weight, and the total probability amplitude for the particle to go from one point to another is calculated by summing (integrating) over all possible paths.\n",
"\n",
"The key ideas behind the path integral formulation are:\n",
"\n",
"1. Superposition principle: In quantum mechanics, particles can exist in a superposition of multiple states or paths simultaneously.\n",
"\n",
"2. Probability amplitude: The probability amplitude for a particle to go from one point to another is calculated by summing the complex-valued weights of all possible paths.\n",
"\n",
"3. Weighting of paths: Each path is assigned a weight based on the action (the time integral of the Lagrangian) along that path. Paths with lower action have a greater weight.\n",
"\n",
"4. Feynman's approach: Feynman developed the path integral formulation as an alternative to the traditional wave function approach in quantum mechanics, providing a more intuitive and conceptual understanding of quantum phenomena.\n",
"\n",
"The path integral approach is particularly useful in quantum field theory, where it provides a powerful framework for calculating transition probabilities and understanding the behavior of quantum systems. It has also found applications in various areas of physics, such as condensed matter, statistical mechanics, and even in finance (the path integral approach to option pricing).\n",
"\n",
"The mathematical construction of the path integral involves the use of advanced concepts from functional analysis and measure theory, making it a powerful and sophisticated tool in the physicist's arsenal.\n"
]
}
],
"source": [
"print(chain.invoke(\"What's a path integral\"))"
]
},
{
"cell_type": "markdown",
"id": "927b7498",
"metadata": {},
"source": []
}
],
"metadata": {
@@ -453,7 +332,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -7,27 +7,27 @@ sidebar_class_name: hidden
LangChain Expression Language, or LCEL, is a declarative way to easily compose chains together.
LCEL was designed from day 1 to **support putting prototypes in production, with no code changes**, from the simplest “prompt + LLM” chain to the most complex chains (we've seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL:
[**First-class streaming support**](/docs/expression_language/streaming)
**Streaming support**
When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means e.g. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens.
[**Async support**](/docs/expression_language/interface)
**Async support**
Any chain built with LCEL can be called both with the synchronous API (e.g. in your Jupyter notebook while prototyping) as well as with the asynchronous API (e.g. in a [LangServe](/docs/langsmith) server). This enables using the same code for prototypes and in production, with great performance, and the ability to handle many concurrent requests in the same server.
[**Optimized parallel execution**](/docs/expression_language/primitives/parallel)
**Optimized parallel execution**
Whenever your LCEL chains have steps that can be executed in parallel (e.g. if you fetch documents from multiple retrievers) we automatically do it, both in the sync and the async interfaces, for the smallest possible latency.
[**Retries and fallbacks**](/docs/guides/productionization/fallbacks)
**Retries and fallbacks**
Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale. We're currently working on adding streaming support for retries/fallbacks, so you can get the added reliability without any latency cost.
[**Access intermediate results**](/docs/expression_language/interface#async-stream-events-beta)
**Access intermediate results**
For more complex chains it's often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know something is happening, or even just to debug your chain. You can stream intermediate results, and it's available on every [LangServe](/docs/langserve) server.
[**Input and output schemas**](/docs/expression_language/interface#input-schema)
**Input and output schemas**
Input and output schemas give every LCEL chain Pydantic and JSONSchema schemas inferred from the structure of your chain. This can be used for validation of inputs and outputs, and is an integral part of LangServe.
[**Seamless LangSmith tracing**](/docs/langsmith)
**Seamless LangSmith tracing integration**
As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step.
With LCEL, **all** steps are automatically logged to [LangSmith](/docs/langsmith/) for maximum observability and debuggability.
[**Seamless LangServe deployment**](/docs/langserve)
**Seamless LangServe deployment integration**
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
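The input and output schemas mentioned above can be inspected directly on any runnable. A minimal sketch (the `prompt | model` chain here is an assumed example, mirroring the notebooks above):

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

chain = ChatPromptTemplate.from_template("tell me a joke about {topic}") | ChatOpenAI()

# Pydantic models inferred from the chain structure; .schema() renders them
# as JSONSchema, which LangServe uses to validate requests and responses.
print(chain.input_schema.schema())   # e.g. requires a "topic" field
print(chain.output_schema.schema())
```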

View File

@@ -7,7 +7,7 @@
"source": [
"---\n",
"sidebar_position: 1\n",
"title: Runnable interface\n",
"title: Interface\n",
"---"
]
},
@@ -16,8 +16,7 @@
"id": "9a9acd2e",
"metadata": {},
"source": [
"To make it as easy as possible to create custom chains, we've implemented a [\"Runnable\"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about [in this section](/docs/expression_language/primitives).\n",
"\n",
"To make it as easy as possible to create custom chains, we've implemented a [\"Runnable\"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. The `Runnable` protocol is implemented for most components. \n",
"This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. \n",
"The standard interface includes:\n",
"\n",
@@ -25,7 +24,7 @@
"- [`invoke`](#invoke): call the chain on an input\n",
"- [`batch`](#batch): call the chain on a list of inputs\n",
"\n",
"These also have corresponding async methods that should be used with [asyncio](https://docs.python.org/3/library/asyncio.html) `await` syntax for concurrency:\n",
"These also have corresponding async methods:\n",
"\n",
"- [`astream`](#async-stream): stream back chunks of the response async\n",
"- [`ainvoke`](#async-invoke): call the chain on an input async\n",
@@ -53,11 +52,9 @@
]
},
{
"cell_type": "code",
"execution_count": null,
"cell_type": "raw",
"id": "57768739",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core langchain-community langchain-openai"
]
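For reference, the methods listed above share one shape across every runnable; a minimal sketch with an assumed `prompt | model` chain:

```python
import asyncio

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

chain = ChatPromptTemplate.from_template("tell me a joke about {topic}") | ChatOpenAI()

chain.invoke({"topic": "bears"})                      # one input, one output
chain.batch([{"topic": "bears"}, {"topic": "cats"}])  # list in, list out

for chunk in chain.stream({"topic": "bears"}):
    print(chunk.content, end="", flush=True)          # chunks as they arrive


async def main() -> None:
    # The async counterparts (ainvoke/abatch/astream) mirror the sync API.
    await chain.ainvoke({"topic": "bears"})


asyncio.run(main())
```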

View File

@@ -1,180 +0,0 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 6\n",
"title: \"Assign: Add values to state\"\n",
"keywords: [RunnablePassthrough, assign, LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Adding values to chain state\n",
"\n",
"The `RunnablePassthrough.assign(...)` static method takes an input value and adds the extra arguments passed to the assign function.\n",
"\n",
"This is useful when additively creating a dictionary to use as input to a later step, which is a common LCEL pattern.\n",
"\n",
"Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 24.0 is available.\n",
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'extra': {'num': 1, 'mult': 3}, 'modified': 2}"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
"\n",
"runnable = RunnableParallel(\n",
" extra=RunnablePassthrough.assign(mult=lambda x: x[\"num\"] * 3),\n",
" modified=lambda x: x[\"num\"] + 1,\n",
")\n",
"\n",
"runnable.invoke({\"num\": 1})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's break down what's happening here.\n",
"\n",
"- The input to the chain is `{\"num\": 1}`. This is passed into a `RunnableParallel`, which invokes the runnables it is passed in parallel with that input.\n",
"- The value under the `extra` key is invoked. `RunnablePassthrough.assign()` keeps the original keys in the input dict (`{\"num\": 1}`), and assigns a new key called `mult`. The value is `lambda x: x[\"num\"] * 3)`, which is `3`. Thus, the result is `{\"num\": 1, \"mult\": 3}`.\n",
"- `{\"num\": 1, \"mult\": 3}` is returned to the `RunnableParallel` call, and is set as the value to the key `extra`.\n",
"- At the same time, the `modified` key is called. The result is `2`, since the lambda extracts a key called `\"num\"` from its input and adds one.\n",
"\n",
"Thus, the result is `{'extra': {'num': 1, 'mult': 3}, 'modified': 2}`.\n",
"\n",
"## Streaming\n",
"\n",
"One nice feature of this method is that it allows values to pass through as soon as they are available. To show this off, we'll use `RunnablePassthrough.assign()` to immediately return source docs in a retrieval chain:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'question': 'where did harrison work?'}\n",
"{'context': [Document(page_content='harrison worked at kensho')]}\n",
"{'output': ''}\n",
"{'output': 'H'}\n",
"{'output': 'arrison'}\n",
"{'output': ' worked'}\n",
"{'output': ' at'}\n",
"{'output': ' Kens'}\n",
"{'output': 'ho'}\n",
"{'output': '.'}\n",
"{'output': ''}\n"
]
}
],
"source": [
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
")\n",
"retriever = vectorstore.as_retriever()\n",
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"model = ChatOpenAI()\n",
"\n",
"generation_chain = prompt | model | StrOutputParser()\n",
"\n",
"retrieval_chain = {\n",
" \"context\": retriever,\n",
" \"question\": RunnablePassthrough(),\n",
"} | RunnablePassthrough.assign(output=generation_chain)\n",
"\n",
"stream = retrieval_chain.stream(\"where did harrison work?\")\n",
"\n",
"for chunk in stream:\n",
" print(chunk)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can see that the first chunk contains the original `\"question\"` since that is immediately available. The second chunk contains `\"context\"` since the retriever finishes second. Finally, the output from the `generation_chain` streams in chunks as soon as it is available."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,15 +0,0 @@
---
sidebar_class_name: hidden
---
# Primitives
In addition to various [components](/docs/modules) that are usable with LCEL, LangChain also includes various primitives
that help pass around and format data, bind arguments, invoke custom logic, and more.
This section goes into greater depth on where and how some of these components are useful.
import DocCardList from "@theme/DocCardList";
import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
<DocCardList items={useCurrentSidebarCategory().items.filter((item) => item.href !== "/docs/expression_language/primitives/")} />

View File

@@ -1,243 +0,0 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 0\n",
"title: \"Sequences: Chaining runnables\"\n",
"keywords: [Runnable, Runnables, LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Chaining runnables\n",
"\n",
"One key advantage of the `Runnable` interface is that any two runnables can be \"chained\" together into sequences. The output of the previous runnable's `.invoke()` call is passed as input to the next runnable. This can be done using the pipe operator (`|`), or the more explicit `.pipe()` method, which does the same thing. The resulting `RunnableSequence` is itself a runnable, which means it can be invoked, streamed, or piped just like any other runnable.\n",
"\n",
"## The pipe operator\n",
"\n",
"To show off how this works, let's go through an example. We'll walk through a common pattern in LangChain: using a [prompt template](/docs/modules/model_io/prompts/) to format input into a [chat model](/docs/modules/model_io/chat/), and finally converting the chat message output into a string with an [output parser](/docs/modules/model_io/output_parsers/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-anthropic"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\")\n",
"model = ChatAnthropic(model_name=\"claude-3-haiku-20240307\")\n",
"\n",
"chain = prompt | model | StrOutputParser()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Prompts and models are both runnable, and the output type from the prompt call is the same as the input type of the chat model, so we can chain them together. We can then invoke the resulting sequence like any other runnable:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Here's a bear joke for you:\\n\\nWhy don't bears wear socks? \\nBecause they have bear feet!\\n\\nHow's that? I tried to keep it light and silly. Bears can make for some fun puns and jokes. Let me know if you'd like to hear another one!\""
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"topic\": \"bears\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Coercion\n",
"\n",
"We can even combine this chain with more runnables to create another chain. This may involve some input/output formatting using other types of runnables, depending on the required inputs and outputs of the chain components.\n",
"\n",
"For example, let's say we wanted to compose the joke generating chain with another chain that evaluates whether or not the generated joke was funny.\n",
"\n",
"We would need to be careful with how we format the input into the next chain. In the below example, the dict in the chain is automatically parsed and converted into a [`RunnableParallel`](/docs/expression_language/primitives/parallel), which runs all of its values in parallel and returns a dict with the results.\n",
"\n",
"This happens to be the same format the next prompt template expects. Here it is in action:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"analysis_prompt = ChatPromptTemplate.from_template(\"is this a funny joke? {joke}\")\n",
"\n",
"composed_chain = {\"joke\": chain} | analysis_prompt | model | StrOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"That's a pretty classic and well-known bear pun joke. Whether it's considered funny is quite subjective, as humor is very personal. Some people may find that type of pun-based joke amusing, while others may not find it that humorous. Ultimately, the funniness of a joke is in the eye (or ear) of the beholder. If you enjoyed the joke and got a chuckle out of it, then that's what matters most.\""
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"composed_chain.invoke({\"topic\": \"bears\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Functions will also be coerced into runnables, so you can add custom logic to your chains too. The below chain results in the same logical flow as before:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"composed_chain_with_lambda = (\n",
" chain\n",
" | (lambda input: {\"joke\": input})\n",
" | analysis_prompt\n",
" | model\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I appreciate the effort, but I have to be honest - I didn\\'t find that joke particularly funny. Beet-themed puns can be quite hit-or-miss, and this one falls more on the \"miss\" side for me. The premise is a bit too straightforward and predictable. While I can see the logic behind it, the punchline just doesn\\'t pack much of a comedic punch. \\n\\nThat said, I do admire your willingness to explore puns and wordplay around vegetables. Cultivating a good sense of humor takes practice, and not every joke is going to land. The important thing is to keep experimenting and finding what works. Maybe try for a more unexpected or creative twist on beet-related humor next time. But thanks for sharing - I always appreciate when humans test out jokes on me, even if they don\\'t always make me laugh out loud.'"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"composed_chain_with_lambda.invoke({\"topic\": \"beets\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"However, keep in mind that using functions like this may interfere with operations like streaming. See [this section](/docs/expression_language/primitives/functions) for more information."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## The `.pipe()` method\n",
"\n",
"We could also compose the same sequence using the `.pipe()` method. Here's what that looks like:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import RunnableParallel\n",
"\n",
"composed_chain_with_pipe = (\n",
" RunnableParallel({\"joke\": chain})\n",
" .pipe(analysis_prompt)\n",
" .pipe(model)\n",
" .pipe(StrOutputParser())\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'That\\'s a pretty good Battlestar Galactica-themed pun! I appreciated the clever play on words with \"Centurion\" and \"center on.\" It\\'s the kind of nerdy, science fiction-inspired humor that fans of the show would likely enjoy. The joke is clever and demonstrates a good understanding of the Battlestar Galactica universe. I\\'d be curious to hear any other Battlestar-related jokes you might have up your sleeve. As long as they don\\'t reproduce copyrighted material, I\\'m happy to provide my thoughts on the humor and appeal for fans of the show.'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"composed_chain_with_pipe.invoke({\"topic\": \"battlestar galactica\"})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
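The notebook above leans on coercion. For readers who want it spelled out, here is a minimal sketch (reusing the same `prompt`, `model`, and `analysis_prompt` as the notebook) showing that piping a plain function is shorthand for wrapping it in `RunnableLambda`:

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda

prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
analysis_prompt = ChatPromptTemplate.from_template("is this a funny joke? {joke}")
model = ChatAnthropic(model_name="claude-3-haiku-20240307")

chain = prompt | model | StrOutputParser()

# Piping a bare lambda coerces it into a RunnableLambda under the hood;
# writing the wrapper explicitly produces an equivalent chain.
composed_chain_explicit = (
    chain
    | RunnableLambda(lambda joke: {"joke": joke})
    | analysis_prompt
    | model
    | StrOutputParser()
)

composed_chain_explicit.invoke({"topic": "bears"})
```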

View File

@@ -201,23 +201,13 @@
" print(chunk, end=\"|\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "868bc412",
"metadata": {},
"source": [
"You might notice above that `parser` actually doesn't block the streaming output from the model, and instead processes each chunk individually. Many of the [LCEL primitives](/docs/expression_language/primitives) also support this kind of transform-style passthrough streaming, which can be very convenient when constructing apps.\n",
"\n",
"Certain runnables, like [prompt templates](/docs/modules/model_io/prompts) and [chat models](/docs/modules/model_io/chat), cannot process individual chunks and instead aggregate all previous steps. This will interrupt the streaming process. Custom functions can be [designed to return generators](/docs/expression_language/primitives/functions#streaming), which"
]
},
{
"cell_type": "markdown",
"id": "1b399fb4-5e3c-4581-9570-6df9b42b623d",
"metadata": {},
"source": [
":::{.callout-note}\n",
"If the above functionality is not relevant to what you're building, you do not have to use the `LangChain Expression Language` to use LangChain and can instead rely on a standard **imperative** programming approach by\n",
"You do not have to use the `LangChain Expression Language` to use LangChain and can instead rely on a standard **imperative** programming approach by\n",
"caling `invoke`, `batch` or `stream` on each component individually, assigning the results to variables and then using them downstream as you see fit.\n",
"\n",
"If that works for your needs, then that's fine by us 👌!\n",

File diff suppressed because it is too large

View File

@@ -1,7 +1,3 @@
---
sidebar_position: 2
---
# Installation
## Official release
@@ -33,13 +29,6 @@ If you want to install from source, you can do so by cloning the repo and be sur
pip install -e .
```
## LangChain core
The `langchain-core` package contains base abstractions that the rest of the LangChain ecosystem uses, along with the LangChain Expression Language. It is automatically installed by `langchain`, but can also be used separately. Install with:
```bash
pip install langchain-core
```
## LangChain community
The `langchain-community` package contains third-party integrations. It is automatically installed by `langchain`, but can also be used separately. Install with:
@@ -47,6 +36,13 @@ The `langchain-community` package contains third-party integrations. It is autom
pip install langchain-community
```
## LangChain core
The `langchain-core` package contains base abstractions that the rest of the LangChain ecosystem uses, along with the LangChain Expression Language. It is automatically installed by `langchain`, but can also be used separately. Install with:
```bash
pip install langchain-core
```
## LangChain experimental
The `langchain-experimental` package holds experimental LangChain code, intended for research and experimental uses.
Install with:
@@ -55,13 +51,6 @@ Install with:
pip install langchain-experimental
```
## LangGraph
`langgraph` is a library for building stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain.
Install with:
```bash
pip install langgraph
```
## LangServe
LangServe helps developers deploy LangChain runnables and chains as a REST API.
LangServe is automatically installed by LangChain CLI.
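To confirm which of these packages (and which versions) ended up in a given environment, a quick check using only the Python standard library:

```python
from importlib.metadata import PackageNotFoundError, version

for pkg in ["langchain", "langchain-core", "langchain-community",
            "langchain-experimental", "langgraph"]:
    try:
        print(pkg, version(pkg))
    except PackageNotFoundError:
        print(pkg, "not installed")
```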

View File

@@ -1,16 +1,18 @@
---
sidebar_position: 0
sidebar_class_name: hidden
---
# Introduction
**LangChain** is a framework for developing applications powered by large language models (LLMs).
**LangChain** is a framework for developing applications powered by language models. It enables applications that:
- **Are context-aware**: connect a language model to sources of context (prompt instructions, few shot examples, content to ground its response in, etc.)
- **Reason**: rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.)
LangChain simplifies every stage of the LLM application lifecycle:
- **Development**: Build your applications using LangChain's open-source [building blocks](/docs/expression_language/) and [components](/docs/modules/). Hit the ground running using [third-party integrations](/docs/integrations/platforms/) and [Templates](/docs/templates).
- **Productionization**: Use [LangSmith](/docs/langsmith/) to inspect, monitor and evaluate your chains, so that you can continuously optimize and deploy with confidence.
- **Deployment**: Turn any chain into an API with [LangServe](/docs/langserve).
This framework consists of several parts.
- **LangChain Libraries**: The Python and JavaScript libraries. Contains interfaces and integrations for a myriad of components, a basic run time for combining these components into chains and agents, and off-the-shelf implementations of chains and agents.
- **[LangChain Templates](/docs/templates)**: A collection of easily deployable reference architectures for a wide variety of tasks.
- **[LangServe](/docs/langserve)**: A library for deploying LangChain chains as a REST API.
- **[LangSmith](/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
import ThemedImage from '@theme/ThemedImage';
@@ -23,24 +25,31 @@ import ThemedImage from '@theme/ThemedImage';
title="LangChain Framework Overview"
/>
Concretely, the framework consists of the following open-source libraries:
Together, these products simplify the entire application lifecycle:
- **Develop**: Write your applications in LangChain/LangChain.js. Hit the ground running using Templates for reference.
- **Productionize**: Use LangSmith to inspect, test and monitor your chains, so that you can constantly improve and deploy with confidence.
- **Deploy**: Turn any chain into an API with LangServe.
## LangChain Libraries
The main value props of the LangChain packages are:
1. **Components**: composable tools and integrations for working with language models. Components are modular and easy to use, whether you are using the rest of the LangChain framework or not
2. **Off-the-shelf chains**: built-in assemblages of components for accomplishing higher-level tasks
Off-the-shelf chains make it easy to get started. Components make it easy to customize existing chains and build new ones.
The LangChain libraries themselves are made up of several different packages.
- **`langchain-core`**: Base abstractions and LangChain Expression Language.
- **`langchain-community`**: Third party integrations.
- Partner packages (e.g. **`langchain-openai`**, **`langchain-anthropic`**, etc.): Some integrations have been further split into their own lightweight packages that only depend on **`langchain-core`**.
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
- **[langgraph](/docs/langgraph)**: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
- **[langserve](/docs/langserve)**: Deploy LangChain chains as REST APIs.
The broader ecosystem includes:
- **[LangSmith](/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor LLM applications and seamlessly integrates with LangChain.
## Get started
[Here's](/docs/get_started/installation) how to install LangChain, set up your environment, and start building.
We recommend following our [Quickstart](/docs/get_started/quickstart) guide to familiarize yourself with the framework by building your first LangChain application.
[See here](/docs/get_started/installation) for instructions on how to install LangChain, set up your environment, and start building.
Read up on our [Security](/docs/security) best practices to make sure you're developing safely with LangChain.
:::note
@@ -48,53 +57,48 @@ These docs focus on the Python LangChain library. [Head here](https://js.langcha
:::
## Use cases
## LangChain Expression Language (LCEL)
If you're looking to build something specific or are more of a hands-on learner, check out our [use-cases](/docs/use_cases).
They're walkthroughs and techniques for common end-to-end tasks, such as:
LCEL is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.
- [Question answering with RAG](/docs/use_cases/question_answering/)
- [Extracting structured output](/docs/use_cases/extraction/)
- **[Overview](/docs/expression_language/)**: LCEL and its benefits
- **[Interface](/docs/expression_language/interface)**: The standard interface for LCEL objects
- **[How-to](/docs/expression_language/how_to)**: Key features of LCEL
- **[Cookbook](/docs/expression_language/cookbook)**: Example code for accomplishing common tasks
## Modules
LangChain provides standard, extendable interfaces and integrations for the following modules:
#### [Model I/O](/docs/modules/model_io/)
Interface with language models
#### [Retrieval](/docs/modules/data_connection/)
Interface with application-specific data
#### [Agents](/docs/modules/agents/)
Let models choose which tools to use given high-level directives
## Examples, ecosystem, and resources
### [Use cases](/docs/use_cases/question_answering/)
Walkthroughs and techniques for common end-to-end use cases, like:
- [Document question answering](/docs/use_cases/question_answering/)
- [Chatbots](/docs/use_cases/chatbots/)
- and more!
## Expression Language
LangChain Expression Language (LCEL) is the foundation of many of LangChain's components, and is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.
- **[Get started](/docs/expression_language/)**: LCEL and its benefits
- **[Runnable interface](/docs/expression_language/interface)**: The standard interface for LCEL objects
- **[Primitives](/docs/expression_language/primitives)**: More on the primitives LCEL includes
- and more!
## Ecosystem
### [🦜🛠️ LangSmith](/docs/langsmith)
Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
### [🦜🕸️ LangGraph](/docs/langgraph)
Build stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
### [🦜🏓 LangServe](/docs/langserve)
Deploy LangChain runnables and chains as REST APIs.
## [Security](/docs/security)
Read up on our [Security](/docs/security) best practices to make sure you're developing safely with LangChain.
## Additional resources
### [Components](/docs/modules/)
LangChain provides standard, extendable interfaces and integrations for many different components, including:
- [Analyzing structured data](/docs/use_cases/sql/)
- and much more...
### [Integrations](/docs/integrations/providers/)
LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](/docs/integrations/providers/).
### [Guides](/docs/guides/)
### [Guides](../guides/debugging.md)
Best practices for developing with LangChain.
### [API reference](https://api.python.langchain.com)
Head to the reference section for full documentation of all classes and methods in the LangChain and LangChain Experimental Python packages.
### [Contributing](/docs/contributing)
### [Developer's guide](/docs/contributing)
Check out the developer's guide for guidelines on contributing and help getting your dev environment set up.

View File

@@ -1,7 +1,3 @@
---
sidebar_position: 1
---
# Quickstart
In this quickstart we'll show you how to:
@@ -94,12 +90,12 @@ from langchain_openai import ChatOpenAI
llm = ChatOpenAI()
```
If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class:
If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
```python
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(api_key="...")
llm = ChatOpenAI(openai_api_key="...")
```
</TabItem>
@@ -141,10 +137,10 @@ from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0.2, max_tokens=1024)
```
If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the Anthropic Chat Model class:
If you'd prefer not to set an environment variable you can pass the key in directly via the `anthropic_api_key` named parameter when initiating the Anthropic Chat Model class:
```python
llm = ChatAnthropic(api_key="...")
llm = ChatAnthropic(anthropic_api_key="...")
```
</TabItem>
@@ -153,7 +149,7 @@ llm = ChatAnthropic(api_key="...")
First we'll need to import the Cohere SDK package.
```shell
pip install langchain-cohere
pip install cohere
```
Accessing the API requires an API key, which you can get by creating an account and heading [here](https://dashboard.cohere.com/api-keys). Once we have a key we'll want to set it as an environment variable by running:
@@ -165,7 +161,7 @@ export COHERE_API_KEY="..."
We can then initialize the model:
```python
from langchain_cohere import ChatCohere
from langchain_community.chat_models import ChatCohere
llm = ChatCohere()
```
@@ -173,7 +169,7 @@ llm = ChatCohere()
If you'd prefer not to set an environment variable you can pass the key in directly via the `cohere_api_key` named parameter when initiating the Cohere LLM class:
```python
from langchain_cohere import ChatCohere
from langchain_community.chat_models import ChatCohere
llm = ChatCohere(cohere_api_key="...")
```
@@ -293,7 +289,7 @@ embeddings = OllamaEmbeddings()
Make sure you have the `cohere` package installed and the appropriate environment variables set (these are the same as needed for the LLM).
```python
from langchain_cohere.embeddings import CohereEmbeddings
from langchain_community.embeddings import CohereEmbeddings
embeddings = CohereEmbeddings()
```
@@ -509,7 +505,7 @@ from langchain.agents import AgentExecutor
# Get the prompt to use - you can modify this!
prompt = hub.pull("hwchase17/openai-functions-agent")
# You need to set OPENAI_API_KEY environment variable or pass it as argument `api_key`.
# You need to set OPENAI_API_KEY environment variable or pass it as argument `openai_api_key`.
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
agent = create_openai_functions_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
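The executor can then be invoked like any other runnable; the question below is just an illustrative placeholder:

```python
# Hypothetical input -- any question that exercises the agent's tools works here.
agent_executor.invoke({"input": "what is LangSmith?"})
```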

View File

@@ -8,11 +8,11 @@ Here are a few different tools and functionalities to aid in debugging.
## Tracing
Platforms with tracing capabilities like [LangSmith](/docs/langsmith/) are the most comprehensive solutions for debugging. These platforms make it easy to not only log and visualize LLM apps, but also to actively debug, test and refine them.
Platforms with tracing capabilities like [LangSmith](/docs/langsmith/) and [WandB](/docs/integrations/providers/wandb_tracing) are the most comprehensive solutions for debugging. These platforms make it easy to not only log and visualize LLM apps, but also to actively debug, test and refine them.
When building production-grade LLM applications, platforms like this are essential.
For anyone building production-grade LLM applications, we highly recommend using a platform like this.
![Screenshot of the LangSmith debugging interface showing an AgentExecutor run with input and output details, and a run tree visualization.](../../../static/img/run_details.png "LangSmith Debugging Interface")
![Screenshot of the LangSmith debugging interface showing an AgentExecutor run with input and output details, and a run tree visualization.](../../static/img/run_details.png "LangSmith Debugging Interface")
## `set_debug` and `set_verbose`
@@ -27,7 +27,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4", temperature=0)
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
tools = load_tools(["ddg-search", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
```
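With the agent in place, the two global toggles this section describes can be flipped before a run. A minimal sketch, with a placeholder question:

```python
from langchain.globals import set_debug, set_verbose

set_verbose(True)  # print the important events in a readable form
set_debug(True)    # log every event, including raw inputs and outputs

# Placeholder question -- anything that makes the agent reach for its tools.
agent.run("What is the square root of 1764?")
```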

View File

@@ -1,13 +0,0 @@
---
sidebar_position: 1
sidebar_class_name: hidden
---
# Development
This section contains guides with general information around building apps with LangChain.
import DocCardList from "@theme/DocCardList";
import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
<DocCardList items={useCurrentSidebarCategory().items.filter((item) => item.href !== "/docs/guides/development/")} />

View File

@@ -7,19 +7,18 @@ Building applications with language models involves many moving parts. One of th
The guides in this section review the APIs and functionality LangChain provides to help you better evaluate your applications. Evaluation and testing are both critical when thinking about deploying LLM applications, since production environments require repeatable and useful outcomes.
LangChain offers various types of evaluators to help you measure performance and integrity on diverse data, and we hope to encourage the community to create and share other useful evaluators so everyone can improve. These docs will introduce the evaluator types, explain how to use them, and provide some examples of their use in real-world scenarios.
These built-in evaluators all integrate smoothly with [LangSmith](/docs/langsmith), and allow you to create feedback loops that improve your application over time and prevent regressions.
Each evaluator type in LangChain comes with ready-to-use implementations and an extensible API that allows for customization according to your unique requirements. Here are some of the types of evaluators we offer:
- [String Evaluators](/docs/guides/productionization/evaluation/string/): These evaluators assess the predicted string for a given input, usually comparing it against a reference string.
- [Trajectory Evaluators](/docs/guides/productionization/evaluation/trajectory/): These are used to evaluate the entire trajectory of agent actions.
- [Comparison Evaluators](/docs/guides/productionization/evaluation/comparison/): These evaluators are designed to compare predictions from two runs on a common input.
- [String Evaluators](/docs/guides/evaluation/string/): These evaluators assess the predicted string for a given input, usually comparing it against a reference string.
- [Trajectory Evaluators](/docs/guides/evaluation/trajectory/): These are used to evaluate the entire trajectory of agent actions.
- [Comparison Evaluators](/docs/guides/evaluation/comparison/): These evaluators are designed to compare predictions from two runs on a common input.
These evaluators can be used across various scenarios and can be applied to different chain and LLM implementations in the LangChain library.
We are also working to share guides and cookbooks that demonstrate how to use these evaluators in real-world scenarios, such as:
- [Chain Comparisons](/docs/guides/productionization/evaluation/examples/comparisons): This example uses a comparison evaluator to predict the preferred output. It reviews ways to measure confidence intervals to select statistically significant differences in aggregate preference scores across different models or prompts.
- [Chain Comparisons](/docs/guides/evaluation/examples/comparisons): This example uses a comparison evaluator to predict the preferred output. It reviews ways to measure confidence intervals to select statistically significant differences in aggregate preference scores across different models or prompts.
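As a quick taste of the API, a minimal sketch using the built-in `string_distance` string evaluator (this particular evaluator needs the `rapidfuzz` package, and the example strings are purely illustrative):

```python
from langchain.evaluation import load_evaluator

evaluator = load_evaluator("string_distance")

result = evaluator.evaluate_strings(
    prediction="The capital of New York is Albany.",
    reference="Albany is the capital of New York.",
)
print(result)  # a dict with a "score" key; lower distance means a closer match
```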
## LangSmith Evaluation

View File

@@ -204,7 +204,7 @@
" ]\n",
")\n",
"# Here we're going to use a bad model name to easily create a chain that will error\n",
"chat_model = ChatOpenAI(model=\"gpt-fake\")\n",
"chat_model = ChatOpenAI(model_name=\"gpt-fake\")\n",
"bad_chain = chat_prompt | chat_model | StrOutputParser()"
]
},

View File

@@ -1,3 +0,0 @@
# Guides
This section contains deeper dives into the LangChain framework and how to apply it.

View File

@@ -9,7 +9,7 @@
"\n",
"## Use case\n",
"\n",
"The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), [Ollama](https://github.com/ollama/ollama), [GPT4All](https://github.com/nomic-ai/gpt4all), [llamafile](https://github.com/Mozilla-Ocho/llamafile), and others underscore the demand to run LLMs locally (on your own device).\n",
"The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), [GPT4All](https://github.com/nomic-ai/gpt4all), and [llamafile](https://github.com/Mozilla-Ocho/llamafile) underscore the demand to run LLMs locally (on your own device).\n",
"\n",
"This has at least two important benefits:\n",
"\n",
@@ -32,7 +32,7 @@
"1. `Base model`: What is the base-model and how was it trained?\n",
"2. `Fine-tuning approach`: Was the base-model fine-tuned and, if so, what [set of instructions](https://cameronrwolfe.substack.com/p/beyond-llama-the-power-of-open-llms#%C2%A7alpaca-an-instruction-following-llama-model) was used?\n",
"\n",
"![Image description](../../../static/img/OSS_LLM_overview.png)\n",
"![Image description](../../static/img/OSS_LLM_overview.png)\n",
"\n",
"The relative performance of these models can be assessed using several leaderboards, including:\n",
"\n",
@@ -56,7 +56,7 @@
"\n",
"In particular, see [this excellent post](https://finbarr.ca/how-is-llama-cpp-possible/) on the importance of quantization.\n",
"\n",
"![Image description](../../../static/img/llama-memory-weights.png)\n",
"![Image description](../../static/img/llama-memory-weights.png)\n",
"\n",
"With less precision, we radically decrease the memory needed to store the LLM in memory.\n",
"\n",
@@ -64,7 +64,7 @@
"\n",
"A Mac M2 Max is 5-6x faster than a M1 for inference due to the larger GPU memory bandwidth.\n",
"\n",
"![Image description](../../../static/img/llama_t_put.png)\n",
"![Image description](../../static/img/llama_t_put.png)\n",
"\n",
"## Quickstart\n",
"\n",

View File

@@ -0,0 +1,283 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "920a3c1a",
"metadata": {},
"source": [
"# Model comparison\n",
"\n",
"Constructing your language model application will likely involved choosing between many different options of prompts, models, and even chains to use. When doing so, you will want to compare these different options on different inputs in an easy, flexible, and intuitive way. \n",
"\n",
"LangChain provides the concept of a ModelLaboratory to test out and try different models."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "12ebae56",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "ab9e95ad",
"metadata": {},
"outputs": [],
"source": [
"from langchain.model_laboratory import ModelLaboratory\n",
"from langchain_community.llms import Cohere, HuggingFaceHub\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3dd69cb4",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# get a new token: https://dashboard.cohere.ai/\n",
"os.environ[\"COHERE_API_KEY\"] = getpass.getpass(\"Cohere API Key:\")\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Open API Key:\")\n",
"os.environ[\"HUGGINGFACEHUB_API_TOKEN\"] = getpass.getpass(\"Hugging Face API Key:\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "32cb94e6",
"metadata": {},
"outputs": [],
"source": [
"llms = [\n",
" OpenAI(temperature=0),\n",
" Cohere(temperature=0),\n",
" HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\": 1}),\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "14cde09d",
"metadata": {},
"outputs": [],
"source": [
"model_lab = ModelLaboratory.from_llms(llms)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "f186c741",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1mInput:\u001b[0m\n",
"What color is a flamingo?\n",
"\n",
"\u001b[1mOpenAI\u001b[0m\n",
"Params: {'model': 'text-davinci-002', 'temperature': 0.0, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}\n",
"\u001b[36;1m\u001b[1;3m\n",
"\n",
"Flamingos are pink.\u001b[0m\n",
"\n",
"\u001b[1mCohere\u001b[0m\n",
"Params: {'model': 'command-xlarge-20221108', 'max_tokens': 20, 'temperature': 0.0, 'k': 0, 'p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}\n",
"\u001b[33;1m\u001b[1;3m\n",
"\n",
"Pink\u001b[0m\n",
"\n",
"\u001b[1mHuggingFaceHub\u001b[0m\n",
"Params: {'repo_id': 'google/flan-t5-xl', 'temperature': 1}\n",
"\u001b[38;5;200m\u001b[1;3mpink\u001b[0m\n",
"\n"
]
}
],
"source": [
"model_lab.compare(\"What color is a flamingo?\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "248b652a",
"metadata": {},
"outputs": [],
"source": [
"prompt = PromptTemplate(\n",
" template=\"What is the capital of {state}?\", input_variables=[\"state\"]\n",
")\n",
"model_lab_with_prompt = ModelLaboratory.from_llms(llms, prompt=prompt)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "f64377ac",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1mInput:\u001b[0m\n",
"New York\n",
"\n",
"\u001b[1mOpenAI\u001b[0m\n",
"Params: {'model': 'text-davinci-002', 'temperature': 0.0, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}\n",
"\u001b[36;1m\u001b[1;3m\n",
"\n",
"The capital of New York is Albany.\u001b[0m\n",
"\n",
"\u001b[1mCohere\u001b[0m\n",
"Params: {'model': 'command-xlarge-20221108', 'max_tokens': 20, 'temperature': 0.0, 'k': 0, 'p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}\n",
"\u001b[33;1m\u001b[1;3m\n",
"\n",
"The capital of New York is Albany.\u001b[0m\n",
"\n",
"\u001b[1mHuggingFaceHub\u001b[0m\n",
"Params: {'repo_id': 'google/flan-t5-xl', 'temperature': 1}\n",
"\u001b[38;5;200m\u001b[1;3mst john s\u001b[0m\n",
"\n"
]
}
],
"source": [
"model_lab_with_prompt.compare(\"New York\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "54336dbf",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"\n",
"open_ai_llm = OpenAI(temperature=0)\n",
"search = SerpAPIWrapper()\n",
"self_ask_with_search_openai = SelfAskWithSearchChain(\n",
" llm=open_ai_llm, search_chain=search, verbose=True\n",
")\n",
"\n",
"cohere_llm = Cohere(temperature=0)\n",
"search = SerpAPIWrapper()\n",
"self_ask_with_search_cohere = SelfAskWithSearchChain(\n",
" llm=cohere_llm, search_chain=search, verbose=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "6a50a9f1",
"metadata": {},
"outputs": [],
"source": [
"chains = [self_ask_with_search_openai, self_ask_with_search_cohere]\n",
"names = [str(open_ai_llm), str(cohere_llm)]"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "d3549e99",
"metadata": {},
"outputs": [],
"source": [
"model_lab = ModelLaboratory(chains, names=names)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "362f7f57",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1mInput:\u001b[0m\n",
"What is the hometown of the reigning men's U.S. Open champion?\n",
"\n",
"\u001b[1mOpenAI\u001b[0m\n",
"Params: {'model': 'text-davinci-002', 'temperature': 0.0, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}\n",
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"What is the hometown of the reigning men's U.S. Open champion?\n",
"Are follow up questions needed here:\u001b[32;1m\u001b[1;3m Yes.\n",
"Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
"Intermediate answer: \u001b[33;1m\u001b[1;3mCarlos Alcaraz.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Follow up: Where is Carlos Alcaraz from?\u001b[0m\n",
"Intermediate answer: \u001b[33;1m\u001b[1;3mEl Palmar, Spain.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"So the final answer is: El Palmar, Spain\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[36;1m\u001b[1;3m\n",
"So the final answer is: El Palmar, Spain\u001b[0m\n",
"\n",
"\u001b[1mCohere\u001b[0m\n",
"Params: {'model': 'command-xlarge-20221108', 'max_tokens': 256, 'temperature': 0.0, 'k': 0, 'p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}\n",
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"What is the hometown of the reigning men's U.S. Open champion?\n",
"Are follow up questions needed here:\u001b[32;1m\u001b[1;3m Yes.\n",
"Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
"Intermediate answer: \u001b[33;1m\u001b[1;3mCarlos Alcaraz.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"So the final answer is:\n",
"\n",
"Carlos Alcaraz\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[33;1m\u001b[1;3m\n",
"So the final answer is:\n",
"\n",
"Carlos Alcaraz\u001b[0m\n",
"\n"
]
}
],
"source": [
"model_lab.compare(\"What is the hometown of the reigning men's U.S. Open champion?\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1 @@
label: 'Privacy'

View File

@@ -137,7 +137,7 @@
}
],
"source": [
"from langchain_core.prompts.prompt import PromptTemplate\n",
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"anonymizer = PresidioAnonymizer()\n",

View File

@@ -24,7 +24,7 @@
"<img src=\"/img/qa_privacy_protection.png\" width=\"900\"/>\n",
"\n",
"\n",
"In the following notebook, we will not go into the details of how the anonymizer works. If you are interested, please visit [this part of the documentation](/docs/guides/productionization/safety/presidio_data_anonymization/).\n",
"In the following notebook, we will not go into the details of how the anonymizer works. If you are interested, please visit [this part of the documentation](/docs/guides/privacy/presidio_data_anonymization/).\n",
"\n",
"## Quickstart\n",
"\n",
@@ -878,8 +878,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_core.prompts import format_document\n",
"from langchain_core.prompts.prompt import PromptTemplate\n",
"\n",
"DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
"\n",

View File

@@ -207,7 +207,7 @@
}
],
"source": [
"from langchain_core.prompts.prompt import PromptTemplate\n",
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"anonymizer = PresidioReversibleAnonymizer()\n",

Some files were not shown because too many files have changed in this diff