Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-07 01:30:24 +00:00.

Compare commits: eugene/add ... dev2049/do
The 103 commits in this range, by SHA1:

043ef865ca, e37d647e26, 467ca6f025, 9e649462ce, 1cb04f2b26, e57ebf3922,
039f8f1abb, 44dc959584, 5cd12102be, 6eacd88ae7, fcd88bccb3, 10ba201d05,
49ca02711e, 785502edb3, ef7d015be5, 443ebe22f4, a395ff7c90, bf3f554357,
0c3de0a0b3, 224f73e978, 6c25f860fd, b0431c672b, 8c661baefb, 424a573266,
f9f08c4b69, a6ef20d7fe, 9d1280d451, 7388248b3e, 3bc0bf0079, 9600a48d53,
27e63b977a, 2aa3754024, 6c60251f52, 9928fb2193, f07b9fde74, ddc2d4c21e,
02632d52b3, 2ab0e1d526, 080eb1b3fc, ddd595fe81, 729e935ea4, 62d0a01a0f,
0ff59569dc, 56cb77a828, 22d844dc07, 616e9a93e0, a87a2524c7, 2abf6b9f17,
06e524416c, e68dfa7062, e80585bab0, 13c376345e, bf5a3c6dec, 88a3a56c1a,
5feb60f426, c06973261a, db6f7ed0ba, e027a38f33, 3df2d831f9, c8c2276ccb,
5525b704cc, a9bb3147d7, 8f8593aac5, c9f963e295, 3002c1d508, 7e8e21c914,
c75c0775e1, 55baa0d153, 440b8761f4, a8ded21b69, c9e2a01875, e2d7677526,
d5a0704544, c9a362e482, 7642f2159c, 1ed4228822, e46202829f, 8c28ad6dac,
c06a47a691, dfbf45f028, b8d48939a2, 9165267f8a, ba023d53ca, 9e2227ba11,
c998569c8f, 613bf9b514, 41e2394c9c, a4ac006658, 8966f61ca5, e28bdf4453,
0dc304ca80, 8e41143bf5, df0c33a005, 5c9205d5f4, 1ff7c958b0, 4c3ab55e94,
ef8b5f64bc, d6e0b9a43d, b96ab4b763, 0147f845f1, 3e12f0957a, c5ab9782c6,
6a9cdc43f5
.github/workflows/test.yml (vendored, 1 change)

@@ -4,6 +4,7 @@ on:
  push:
    branches: [master]
  pull_request:
  workflow_dispatch:

env:
  POETRY_VERSION: "1.4.2"
docs/_static/css/custom.css (vendored, 2 changes)

@@ -13,5 +13,5 @@ pre {
}

#my-component-root *, #headlessui-portal-root * {
    z-index: 1000000000000;
    z-index: 10000;
}
docs/_static/js/mendablesearch.js (vendored, 6 changes)

@@ -30,10 +30,7 @@ document.addEventListener('DOMContentLoaded', () => {
  const icon = React.createElement('p', {
    style: { color: '#ffffff', fontSize: '22px', width: '48px', height: '48px', margin: '0px', padding: '0px', display: 'flex', alignItems: 'center', justifyContent: 'center', textAlign: 'center' },
  }, [iconSpan1, iconSpan2]);

  const mendableFloatingButton = React.createElement(
    MendableFloatingButton,
    {

@@ -42,6 +39,7 @@ document.addEventListener('DOMContentLoaded', () => {
      anon_key: '82842b36-3ea6-49b2-9fb8-52cfc4bde6bf', // Mendable Search Public ANON key, ok to be public
      messageSettings: {
        openSourcesInNewTab: false,
        prettySources: true // Prettify the sources displayed now
      },
      icon: icon,
    }

@@ -52,7 +50,7 @@ document.addEventListener('DOMContentLoaded', () => {
  loadScript('https://unpkg.com/react@17/umd/react.production.min.js', () => {
    loadScript('https://unpkg.com/react-dom@17/umd/react-dom.production.min.js', () => {
      loadScript('https://unpkg.com/@mendable/search@0.0.93/dist/umd/mendable.min.js', initializeMendable);
      loadScript('https://unpkg.com/@mendable/search@0.0.102/dist/umd/mendable.min.js', initializeMendable);
    });
  });
});
docs/additional_resources.rst (new file, 17 lines)

@@ -0,0 +1,17 @@
Additional Resources
==========================

.. toctree::
   :maxdepth: 1
   :caption: Additional Resources
   :name: resources

   LangChainHub <https://github.com/hwchase17/langchain-hub>
   getting_started/tutorials.md
   Gallery <https://github.com/kyrolabs/awesome-langchain>
   ./additional_resources/deployments.md
   ./additional_resources/tracing.md
   ./additional_resources/model_laboratory.ipynb
   Discord <https://discord.gg/6adMQxSpJS>
   ./additional_resources/youtube.md
   Production Support <https://forms.gle/57d8AmXBYp8PP8tZA>
@@ -1,365 +0,0 @@
LangChain Gallery
=================

Lots of people have built some pretty awesome stuff with LangChain.
This is a collection of our favorites.
If you see any other demos that you think we should highlight, be sure to let us know!


Open Source
-----------

.. panels::
    :body: text-center

    ---

    .. link-button:: https://github.com/bborn/howdoi.ai
        :type: url
        :text: HowDoI.ai
        :classes: stretched-link btn-lg

    +++

    This is an experiment in building a large-language-model-backed chatbot. It can hold a conversation, remember previous comments/questions, and answer all types of queries (history, web search, movie data, weather, news, and more).

    ---

    .. link-button:: https://colab.research.google.com/drive/1sKSTjt9cPstl_WMZ86JsgEqFG-aSAwkn?usp=sharing
        :type: url
        :text: YouTube Transcription QA with Sources
        :classes: stretched-link btn-lg

    +++

    An end-to-end example of doing question answering on YouTube transcripts, returning the timestamps as sources to legitimize the answer.

    ---

    .. link-button:: https://github.com/normandmickey/MrsStax
        :type: url
        :text: QA Slack Bot
        :classes: stretched-link btn-lg

    +++

    This application is a Slack bot that uses LangChain and OpenAI's GPT-3 language model to provide domain-specific answers. You provide the documents.

    ---

    .. link-button:: https://github.com/OpenBioLink/ThoughtSource
        :type: url
        :text: ThoughtSource
        :classes: stretched-link btn-lg

    +++

    A central, open resource and community around data and tools related to chain-of-thought reasoning in large language models.

    ---

    .. link-button:: https://github.com/blackhc/llm-strategy
        :type: url
        :text: LLM Strategy
        :classes: stretched-link btn-lg

    +++

    This Python package adds a decorator llm_strategy that connects to an LLM (such as OpenAI's GPT-3) and uses the LLM to "implement" abstract methods in interface classes. It does this by forwarding requests to the LLM and converting the responses back to Python data using Python's @dataclasses.

    ---

    .. link-button:: https://github.com/JohnNay/llm-lobbyist
        :type: url
        :text: Zero-Shot Corporate Lobbyist
        :classes: stretched-link btn-lg

    +++

    A notebook showing how to use GPT to help with the work of a corporate lobbyist.

    ---

    .. link-button:: https://dagster.io/blog/chatgpt-langchain
        :type: url
        :text: Dagster Documentation ChatBot
        :classes: stretched-link btn-lg

    +++

    Build a GitHub support bot with GPT3, LangChain, and Python.

    ---

    .. link-button:: https://github.com/venuv/langchain_semantic_search
        :type: url
        :text: Google Folder Semantic Search
        :classes: stretched-link btn-lg

    +++

    A Jupyter notebook demonstrating how you could create a semantic search engine on documents in one of your Google Folders.

    ---

    .. link-button:: https://huggingface.co/spaces/team7/talk_with_wind
        :type: url
        :text: Talk With Wind
        :classes: stretched-link btn-lg

    +++

    Record sounds of anything (birds, wind, fire, train station) and chat with it.

    ---

    .. link-button:: https://huggingface.co/spaces/JavaFXpert/Chat-GPT-LangChain
        :type: url
        :text: ChatGPT LangChain
        :classes: stretched-link btn-lg

    +++

    This simple application demonstrates a conversational agent implemented with OpenAI GPT-3.5 and LangChain. When necessary, it leverages tools for complex math, searching the internet, and accessing news and weather.

    ---

    .. link-button:: https://huggingface.co/spaces/JavaFXpert/gpt-math-techniques
        :type: url
        :text: GPT Math Techniques
        :classes: stretched-link btn-lg

    +++

    A Hugging Face Spaces project showing off the benefits of using PAL for math problems.

    ---

    .. link-button:: https://colab.research.google.com/drive/1xt2IsFPGYMEQdoJFNgWNAjWGxa60VXdV
        :type: url
        :text: GPT Political Compass
        :classes: stretched-link btn-lg

    +++

    Measure the political compass of GPT.

    ---

    .. link-button:: https://github.com/hwchase17/notion-qa
        :type: url
        :text: Notion Database Question-Answering Bot
        :classes: stretched-link btn-lg

    +++

    An open source GitHub project that shows how to use LangChain to create a chatbot that can answer questions about an arbitrary Notion database.

    ---

    .. link-button:: https://github.com/jerryjliu/llama_index
        :type: url
        :text: LlamaIndex
        :classes: stretched-link btn-lg

    +++

    LlamaIndex (formerly GPT Index) is a project consisting of a set of data structures that are created using GPT-3 and can be traversed using GPT-3 in order to answer queries.

    ---

    .. link-button:: https://github.com/JavaFXpert/llm-grovers-search-party
        :type: url
        :text: Grover's Algorithm
        :classes: stretched-link btn-lg

    +++

    Leveraging Qiskit, OpenAI and LangChain to demonstrate Grover's algorithm.

    ---

    .. link-button:: https://huggingface.co/spaces/rituthombre/QNim
        :type: url
        :text: QNimGPT
        :classes: stretched-link btn-lg

    +++

    A chat UI to play Nim, where a player can select an opponent: either a quantum computer or an AI.

    ---

    .. link-button:: https://colab.research.google.com/drive/19WTIWC3prw5LDMHmRMvqNV2loD9FHls6?usp=sharing
        :type: url
        :text: ReAct TextWorld
        :classes: stretched-link btn-lg

    +++

    Leveraging the ReActTextWorldAgent to play TextWorld with an LLM!

    ---

    .. link-button:: https://github.com/jagilley/fact-checker
        :type: url
        :text: Fact Checker
        :classes: stretched-link btn-lg

    +++

    This repo is a simple demonstration of using LangChain to do fact-checking with prompt chaining.

    ---

    .. link-button:: https://github.com/arc53/docsgpt
        :type: url
        :text: DocsGPT
        :classes: stretched-link btn-lg

    +++

    Answer questions about the documentation of any project.

    ---

    .. link-button:: https://github.com/akshata29/chatpdf
        :type: url
        :text: Chat & Ask your data
        :classes: stretched-link btn-lg

    +++

    This sample demonstrates a few approaches for creating ChatGPT-like experiences over your own data. It uses OpenAI / Azure OpenAI Service to access the ChatGPT model (gpt-35-turbo and gpt3), and a vector store (Pinecone, Redis, and others) or Azure Cognitive Search for data indexing and retrieval.

Misc. Colab Notebooks
~~~~~~~~~~~~~~~~~~~~~

.. panels::
    :body: text-center

    ---

    .. link-button:: https://colab.research.google.com/drive/1AAyEdTz-Z6ShKvewbt1ZHUICqak0MiwR?usp=sharing
        :type: url
        :text: Wolfram Alpha in Conversational Agent
        :classes: stretched-link btn-lg

    +++

    Give ChatGPT a WolframAlpha neural implant.

    ---

    .. link-button:: https://colab.research.google.com/drive/1UsCLcPy8q5PMNQ5ytgrAAAHa124dzLJg?usp=sharing
        :type: url
        :text: Tool Updates in Agents
        :classes: stretched-link btn-lg

    +++

    Agent improvements (6th Jan 2023).

    ---

    .. link-button:: https://colab.research.google.com/drive/1UsCLcPy8q5PMNQ5ytgrAAAHa124dzLJg?usp=sharing
        :type: url
        :text: Conversational Agent with Tools (Langchain AGI)
        :classes: stretched-link btn-lg

    +++

    Langchain AGI (23rd Dec 2022).

Proprietary
-----------

.. panels::
    :body: text-center

    ---

    .. link-button:: https://twitter.com/sjwhitmore/status/1580593217153531908?s=20&t=neQvtZZTlp623U3LZwz3bQ
        :type: url
        :text: Daimon
        :classes: stretched-link btn-lg

    +++

    A chat-based AI personal assistant with long-term memory about you.

    ---

    .. link-button:: https://anysummary.app
        :type: url
        :text: Summarize any file with AI
        :classes: stretched-link btn-lg

    +++

    Summarize not only long docs, interview audio, or video files quickly, but also entire websites and YouTube videos. Share or download your generated summaries to collaborate with others, or revisit them at any time! Bonus: `@anysummary <https://twitter.com/anysummary>`_ on Twitter will also summarize any thread it is tagged in.

    ---

    .. link-button:: https://twitter.com/dory111111/status/1608406234646052870?s=20&t=XYlrbKM0ornJsrtGa0br-g
        :type: url
        :text: AI Assisted SQL Query Generator
        :classes: stretched-link btn-lg

    +++

    An app to write SQL using natural language and execute it against a real DB.

    ---

    .. link-button:: https://twitter.com/krrish_dh/status/1581028925618106368?s=20&t=neQvtZZTlp623U3LZwz3bQ
        :type: url
        :text: Clerkie
        :classes: stretched-link btn-lg

    +++

    A stack-tracing QA bot to help debug complex stack traces (especially the ones that go multi-function/file deep).

    ---

    .. link-button:: https://twitter.com/Raza_Habib496/status/1596880140490838017?s=20&t=6MqEQYWfSqmJwsKahjCVOA
        :type: url
        :text: Sales Email Writer
        :classes: stretched-link btn-lg

    +++

    By Raza Habib, this demo utilizes LangChain + SerpAPI + HumanLoop to write sales emails. Give it a company name and a person, and this application will use Google Search (via SerpAPI) to get more information on the company and the person, and then write them a sales message.

    ---

    .. link-button:: https://twitter.com/chillzaza_/status/1592961099384905730?s=20&t=EhU8jl0KyCPJ7vE9Rnz-cQ
        :type: url
        :text: Question-Answering on a Web Browser
        :classes: stretched-link btn-lg

    +++

    By Zahid Khawaja, this demo utilizes question answering to answer questions about a given website. A followup added this for `YouTube videos <https://twitter.com/chillzaza_/status/1593739682013220865?s=20&t=EhU8jl0KyCPJ7vE9Rnz-cQ>`_, and then another followup added it for `Wikipedia <https://twitter.com/chillzaza_/status/1594847151238037505?s=20&t=EhU8jl0KyCPJ7vE9Rnz-cQ>`_.

    ---

    .. link-button:: https://mynd.so
        :type: url
        :text: Mynd
        :classes: stretched-link btn-lg

    +++

    A journaling app for self-care that uses AI to uncover insights and patterns over time.


Articles on **Google Scholar**
-----------------------------

LangChain is used in many scientific and research projects.

**Google Scholar** presents a `list of the papers <https://scholar.google.com/scholar?q=%22langchain%22&hl=en&as_sdt=0,5&as_vis=1>`_
with references to LangChain.
docs/dependents.md (new file, 192 lines)

@@ -0,0 +1,192 @@
# Dependents

Dependents stats for `hwchase17/langchain`

[](https://github.com/hwchase17/langchain/network/dependents)
[&message=172&color=informational&logo=slickpic)](https://github.com/hwchase17/langchain/network/dependents)
[&message=4980&color=informational&logo=slickpic)](https://github.com/hwchase17/langchain/network/dependents)
[&message=17239&color=informational&logo=slickpic)](https://github.com/hwchase17/langchain/network/dependents)

[update: 2023-05-17; only dependent repositories with Stars > 100]

| Repository | Stars |
| :-------- | -----: |
|[openai/openai-cookbook](https://github.com/openai/openai-cookbook) | 35401 |
|[LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 32861 |
|[microsoft/TaskMatrix](https://github.com/microsoft/TaskMatrix) | 32766 |
|[hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 29560 |
|[reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 22315 |
|[imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 17474 |
|[openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 16923 |
|[mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 16112 |
|[jerryjliu/llama_index](https://github.com/jerryjliu/llama_index) | 15407 |
|[mlflow/mlflow](https://github.com/mlflow/mlflow) | 14345 |
|[GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 10372 |
|[databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 9919 |
|[AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 8177 |
|[logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 6807 |
|[imClumsyPanda/langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM) | 6087 |
|[arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 5292 |
|[e2b-dev/e2b](https://github.com/e2b-dev/e2b) | 4622 |
|[nsarrazin/serge](https://github.com/nsarrazin/serge) | 4076 |
|[madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 3952 |
|[zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 3952 |
|[go-skynet/LocalAI](https://github.com/go-skynet/LocalAI) | 3762 |
|[GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 3388 |
|[mmabrouk/chatgpt-wrapper](https://github.com/mmabrouk/chatgpt-wrapper) | 3243 |
|[zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 3189 |
|[wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 3050 |
|[marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 2930 |
|[gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 2710 |
|[PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 2545 |
|[project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 2479 |
|[whitead/paper-qa](https://github.com/whitead/paper-qa) | 2399 |
|[langgenius/dify](https://github.com/langgenius/dify) | 2344 |
|[GerevAI/gerev](https://github.com/GerevAI/gerev) | 2283 |
|[hwchase17/chat-langchain](https://github.com/hwchase17/chat-langchain) | 2266 |
|[guangzhengli/ChatFiles](https://github.com/guangzhengli/ChatFiles) | 1903 |
|[Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 1884 |
|[OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 1860 |
|[Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 1813 |
|[OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 1571 |
|[IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 1480 |
|[hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 1464 |
|[NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 1419 |
|[Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 1410 |
|[Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1363 |
|[paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1344 |
|[StanGirard/quivr](https://github.com/StanGirard/quivr) | 1330 |
|[lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1318 |
|[vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1286 |
|[agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1156 |
|[h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 1141 |
|[jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1106 |
|[yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 1072 |
|[ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1064 |
|[jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1057 |
|[juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1003 |
|[greshake/llm-security](https://github.com/greshake/llm-security) | 1002 |
|[visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 957 |
|[richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 918 |
|[irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 886 |
|[mmz-001/knowledge_gpt](https://github.com/mmz-001/knowledge_gpt) | 867 |
|[thomas-yanxin/LangChain-ChatGLM-Webui](https://github.com/thomas-yanxin/LangChain-ChatGLM-Webui) | 850 |
|[microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 837 |
|[peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 826 |
|[cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 782 |
|[hashintel/hash](https://github.com/hashintel/hash) | 778 |
|[seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 773 |
|[jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 738 |
|[corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 737 |
|[ai-sidekick/sidekick](https://github.com/ai-sidekick/sidekick) | 717 |
|[rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 703 |
|[poe-platform/api-bot-tutorial](https://github.com/poe-platform/api-bot-tutorial) | 689 |
|[SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 666 |
|[eyurtsev/kor](https://github.com/eyurtsev/kor) | 608 |
|[run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 559 |
|[namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 544 |
|[pieroit/cheshire-cat](https://github.com/pieroit/cheshire-cat) | 520 |
|[griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 514 |
|[getmetal/motorhead](https://github.com/getmetal/motorhead) | 481 |
|[hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 462 |
|[langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 452 |
|[jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 439 |
|[SamurAIGPT/ChatGPT-Developer-Plugins](https://github.com/SamurAIGPT/ChatGPT-Developer-Plugins) | 437 |
|[alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 433 |
|[michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 427 |
|[mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 425 |
|[mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 422 |
|[whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 421 |
|[freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 407 |
|[jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 395 |
|[yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 383 |
|[akshata29/chatpdf](https://github.com/akshata29/chatpdf) | 374 |
|[OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 368 |
|[ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 358 |
|[101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 357 |
|[mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 354 |
|[amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 343 |
|[msoedov/langcorn](https://github.com/msoedov/langcorn) | 334 |
|[showlab/VLog](https://github.com/showlab/VLog) | 330 |
|[continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 324 |
|[steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 323 |
|[daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 320 |
|[xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 308 |
|[StevenGrove/GPT4Tools](https://github.com/StevenGrove/GPT4Tools) | 301 |
|[logan-markewich/llama_index_starter_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 300 |
|[andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 299 |
|[Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 287 |
|[itamargol/openai](https://github.com/itamargol/openai) | 273 |
|[BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 267 |
|[momegas/megabots](https://github.com/momegas/megabots) | 259 |
|[bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 238 |
|[Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 232 |
|[ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 227 |
|[sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 227 |
|[explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 226 |
|[recalign/RecAlign](https://github.com/recalign/RecAlign) | 218 |
|[jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 218 |
|[alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 215 |
|[conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 213 |
|[MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 209 |
|[JohnSnowLabs/nlptest](https://github.com/JohnSnowLabs/nlptest) | 208 |
|[airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 197 |
|[langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 195 |
|[yvann-hub/Robby-chatbot](https://github.com/yvann-hub/Robby-chatbot) | 195 |
|[alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 192 |
|[daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 189 |
|[NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 187 |
|[kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 184 |
|[Anil-matcha/Website-to-Chatbot](https://github.com/Anil-matcha/Website-to-Chatbot) | 183 |
|[plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 180 |
|[OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 166 |
|[benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 166 |
|[jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 161 |
|[hardbyte/qabot](https://github.com/hardbyte/qabot) | 160 |
|[shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 153 |
|[radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 153 |
|[poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 152 |
|[paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 149 |
|[ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 149 |
|[fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 147 |
|[yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 144 |
|[homanp/superagent](https://github.com/homanp/superagent) | 143 |
|[realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 141 |
|[ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 141 |
|[ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 139 |
|[hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 138 |
|[Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 136 |
|[hirokidaichi/wanna](https://github.com/hirokidaichi/wanna) | 135 |
|[Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 134 |
|[jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 130 |
|[Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 130 |
|[filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 128 |
|[handrew/browserpilot](https://github.com/handrew/browserpilot) | 128 |
|[shauryr/S2QA](https://github.com/shauryr/S2QA) | 127 |
|[steamship-core/vercel-examples](https://github.com/steamship-core/vercel-examples) | 127 |
|[yasyf/summ](https://github.com/yasyf/summ) | 127 |
|[gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 126 |
|[jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 125 |
|[ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 124 |
|[Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 124 |
|[hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 124 |
|[menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 123 |
|[peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 123 |
|[chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 123 |
|[petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 115 |
|[su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 113 |
|[eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 113 |
|[zenml-io/zenml-projects](https://github.com/zenml-io/zenml-projects) | 112 |
|[pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 111 |
|[shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 109 |
|[WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 108 |
|[davila7/file-gpt](https://github.com/davila7/file-gpt) | 104 |
|[enhancedocs/enhancedocs](https://github.com/enhancedocs/enhancedocs) | 102 |
|[aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 101 |

_Generated by [github-dependents-info](https://github.com/nvuillam/github-dependents-info)_

[github-dependents-info --repo hwchase17/langchain --markdownfile dependents.md --minstars 100 --sort stars]
@@ -1,29 +1,11 @@
LangChain Ecosystem
===================

Guides for how other companies/products can be used with LangChain

Groups
----------

LangChain provides integration with many LLMs and systems:

- `LLM Providers <./modules/models/llms/integrations.html>`_
- `Chat Model Providers <./modules/models/chat/integrations.html>`_
- `Text Embedding Model Providers <./modules/models/text_embedding.html>`_
- `Document Loader Integrations <./modules/indexes/document_loaders.html>`_
- `Text Splitter Integrations <./modules/indexes/text_splitters.html>`_
- `Vectorstore Providers <./modules/indexes/vectorstores.html>`_
- `Retriever Providers <./modules/indexes/retrievers.html>`_
- `Tool Providers <./modules/agents/tools.html>`_
- `Toolkit Integrations <./modules/agents/toolkits.html>`_

Companies / Products
----------

Ecosystem
==========================

.. toctree::
   :maxdepth: 1
   :glob:
   :caption: Ecosystem
   :name: ecosystem

   ecosystem/*
   ./integrations.rst
   ./dependents.md
@@ -29,6 +29,10 @@ It implements a Question Answering app and contains instructions for deploying t

A minimal example on how to run LangChain on Vercel using Flask.

## [FastAPI + Vercel](https://github.com/msoedov/langcorn)

A minimal example on how to run LangChain on Vercel using FastAPI and LangCorn/Uvicorn.

## [Kinsta](https://github.com/kinsta/hello-world-langchain)

A minimal example on how to deploy LangChain to [Kinsta](https://kinsta.com) using Flask.
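To make the pattern behind these deployment templates concrete, here is a minimal sketch of a Flask app that exposes a LangChain LLM over HTTP. It is illustrative rather than one of the linked templates: the `/generate` route, port, and temperature are arbitrary choices, and it assumes `OPENAI_API_KEY` is set in the environment.

```python
# Minimal Flask + LangChain deployment sketch (illustrative, not a linked template).
from flask import Flask, jsonify, request
from langchain.llms import OpenAI  # assumes OPENAI_API_KEY is set in the environment

app = Flask(__name__)
llm = OpenAI(temperature=0.9)

@app.route("/generate", methods=["POST"])
def generate():
    prompt = request.json["prompt"]  # expects a JSON body like {"prompt": "..."}
    return jsonify({"completion": llm(prompt)})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)
```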
docs/get_started.rst (new file, 10 lines)

@@ -0,0 +1,10 @@
Get Started
==========================

.. toctree::
   :maxdepth: 1
   :caption: Get Started
   :name: get_started

   getting_started/installation.md
   getting_started/getting_started.md
@@ -37,6 +37,12 @@ import os

os.environ["OPENAI_API_KEY"] = "..."
```

If you want to set the API key dynamically (for instance, using a different key per user), you can pass the openai_api_key parameter when initializing the OpenAI class.

```python
from langchain.llms import OpenAI
llm = OpenAI(openai_api_key="OPENAI_API_KEY")
```

## Building a Language Model Application: LLMs
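A short sketch of the per-user pattern this new paragraph describes. The `USER_KEYS` dict and `llm_for_user` helper are hypothetical stand-ins for however an application stores each user's key; only the `openai_api_key` parameter itself comes from the diff above.

```python
from langchain.llms import OpenAI

# Hypothetical per-user key store; replace with your own lookup.
USER_KEYS = {"alice": "sk-alice-...", "bob": "sk-bob-..."}

def llm_for_user(user_id: str) -> OpenAI:
    # Bind each OpenAI wrapper to that user's own key instead of the
    # process-wide OPENAI_API_KEY environment variable.
    return OpenAI(openai_api_key=USER_KEYS[user_id])

llm = llm_for_user("alice")
```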
@@ -4,7 +4,9 @@ This is a collection of `LangChain` tutorials on `YouTube`.

⛓ icon marks a new video [last update 2023-05-15]

### [LangChain Tutorials](https://www.youtube.com/watch?v=FuqdVNB_8c0&list=PL9V0lbeJ69brU-ojMpU1Y7Ic58Tap0Cw6) by [Edrick](https://www.youtube.com/@edrickdch):
- ⛓ [LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF](https://youtu.be/FuqdVNB_8c0)

[LangChain Crash Course: Build an AutoGPT app in 25 minutes](https://youtu.be/MlK6SIjcjE8) by [Nicholas Renotte](https://www.youtube.com/@NicholasRenotte)
docs/index.rst (193 changes)

@@ -1,193 +1,28 @@
Welcome to LangChain
Introduction
==========================

Overview
----------------

| **LangChain** is a framework for developing applications powered by language models. We believe that the most powerful and differentiated applications will not only call out to a language model, but will also be:
1. *Data-aware*: connect a language model to other sources of data
2. *Agentic*: allow a language model to interact with its environment
1. **Data-aware**: Connect a language model to other sources of data
2. **Agentic**: Allow a language model to interact with its environment

| The LangChain framework is designed around these principles.

| This is the Python specific portion of the documentation. For a purely conceptual guide to LangChain, see `here <https://docs.langchain.com/docs/>`_. For the JavaScript documentation, see `here <https://js.langchain.com/docs/>`_.

Getting Started
----------------

| How to get started using LangChain to create a Language Model application.

- `Quickstart Guide <./getting_started/getting_started.html>`_

| Concepts and terminology.

- `Concepts and terminology <./getting_started/concepts.html>`_

| Tutorials created by community experts and presented on YouTube.

- `Tutorials <./getting_started/tutorials.html>`_

.. toctree::
   :maxdepth: 2
   :caption: Getting Started
   :name: getting_started
   :hidden:

   getting_started/getting_started.md
   getting_started/concepts.md
   getting_started/tutorials.md


Modules
-----------

| These modules are the core abstractions which we view as the building blocks of any LLM-powered application.
For each module LangChain provides standard, extendable interfaces. LangChain also provides external integrations and even end-to-end implementations for off-the-shelf use.

| The docs for each module contain quickstart examples, how-to guides, reference docs, and conceptual guides.

| The modules are (from least to most complex):

- `Models <./modules/models.html>`_: Supported model types and integrations.

- `Prompts <./modules/prompts.html>`_: Prompt management, optimization, and serialization.

- `Memory <./modules/memory.html>`_: Memory refers to state that is persisted between calls of a chain/agent.

- `Indexes <./modules/indexes.html>`_: Language models become much more powerful when combined with application-specific data - this module contains interfaces and integrations for loading, querying and updating external data.

- `Chains <./modules/chains.html>`_: Chains are structured sequences of calls (to an LLM or to a different utility).

- `Agents <./modules/agents.html>`_: An agent is a Chain in which an LLM, given a high-level directive and a set of tools, repeatedly decides an action, executes the action and observes the outcome until the high-level directive is complete.

- `Callbacks <./modules/callbacks/getting_started.html>`_: Callbacks let you log and stream the intermediate steps of any chain, making it easy to observe, debug, and evaluate the internals of an application.

.. note::
   This is the Python specific portion of the documentation. See here for the `Conceptual documentation <https://docs.langchain.com/docs/>`_ and here for the `JavaScript documentation <https://js.langchain.com/docs/>`_.

.. toctree::
   :maxdepth: 1
   :caption: Modules
   :name: modules
   :hidden:

   ./modules/models.rst
   ./modules/prompts.rst
   ./modules/indexes.md
   ./modules/memory.md
   ./modules/chains.md
   ./modules/agents.md
   ./modules/callbacks/getting_started.ipynb

Use Cases
----------

| Best practices and built-in implementations for common LangChain use cases:

- `Autonomous Agents <./use_cases/autonomous_agents.html>`_: Autonomous agents are long-running agents that take many steps in an attempt to accomplish an objective. Examples include AutoGPT and BabyAGI.

- `Agent Simulations <./use_cases/agent_simulations.html>`_: Putting agents in a sandbox and observing how they interact with each other and react to events can be an effective way to evaluate their long-range reasoning and planning abilities.

- `Personal Assistants <./use_cases/personal_assistants.html>`_: One of the primary LangChain use cases. Personal assistants need to take actions, remember interactions, and have knowledge about your data.

- `Question Answering <./use_cases/question_answering.html>`_: Another common LangChain use case. Answering questions over specific documents, only utilizing the information in those documents to construct an answer.

- `Chatbots <./use_cases/chatbots.html>`_: Language models love to chat, making this a very natural use of them.

- `Querying Tabular Data <./use_cases/tabular.html>`_: Recommended reading if you want to use language models to query structured data (CSVs, SQL, dataframes, etc).

- `Code Understanding <./use_cases/code.html>`_: Recommended reading if you want to use language models to analyze code.

- `Interacting with APIs <./use_cases/apis.html>`_: Enabling language models to interact with APIs is extremely powerful. It gives them access to up-to-date information and allows them to take actions.

- `Extraction <./use_cases/extraction.html>`_: Extract structured information from text.

- `Summarization <./use_cases/summarization.html>`_: Compressing longer documents. A type of Data-Augmented Generation.

- `Evaluation <./use_cases/evaluation.html>`_: Generative models are hard to evaluate with traditional metrics. One promising approach is to use language models themselves to do the evaluation.


.. toctree::
   :maxdepth: 1
   :caption: Use Cases
   :name: use_cases
   :hidden:

   ./use_cases/autonomous_agents.md
   ./use_cases/agent_simulations.md
   ./use_cases/personal_assistants.md
   ./use_cases/question_answering.md
   ./use_cases/chatbots.md
   ./use_cases/tabular.rst
   ./use_cases/code.md
   ./use_cases/apis.md
   ./use_cases/summarization.md
   ./use_cases/extraction.md
   ./use_cases/evaluation.rst


Reference Docs
---------------

| Full documentation on all methods, classes, installation methods, and integration setups for LangChain.

- `Reference Documentation <./reference.html>`_
.. toctree::
   :maxdepth: 1
   :caption: Reference
   :name: reference
   :hidden:

   ./reference/installation.md
   ./reference/integrations.md
   ./reference.rst


LangChain Ecosystem
-------------------

| Guides for how other companies/products can be used with LangChain.

- `LangChain Ecosystem <./ecosystem.html>`_

.. toctree::
   :maxdepth: 1
   :glob:
   :caption: Ecosystem
   :name: ecosystem
   :caption: LangChain
   :name: langchain
   :hidden:

   ./get_started.rst
   ./modules.rst
   ./ecosystem.rst
   ./reference.rst
   ./additional_resources.rst


Additional Resources
---------------------

| Additional resources we think may be useful as you develop your application!

- `LangChainHub <https://github.com/hwchase17/langchain-hub>`_: The LangChainHub is a place to share and explore other prompts, chains, and agents.

- `Gallery <./additional_resources/gallery.html>`_: A collection of our favorite projects that use LangChain. Useful for finding inspiration or seeing how things were done in other applications.

- `Deployments <./additional_resources/deployments.html>`_: A collection of instructions, code snippets, and template repositories for deploying LangChain apps.

- `Tracing <./additional_resources/tracing.html>`_: A guide on using tracing in LangChain to visualize the execution of chains and agents.

- `Model Laboratory <./additional_resources/model_laboratory.html>`_: Experimenting with different prompts, models, and chains is a big part of developing the best possible application. The ModelLaboratory makes it easy to do so.

- `Discord <https://discord.gg/6adMQxSpJS>`_: Join us on our Discord to discuss all things LangChain!

- `YouTube <./additional_resources/youtube.html>`_: A collection of the LangChain tutorials and videos.

- `Production Support <https://forms.gle/57d8AmXBYp8PP8tZA>`_: As you move your LangChains into production, we'd love to offer more comprehensive support. Please fill out this form and we'll set up a dedicated support Slack channel.


.. toctree::
   :maxdepth: 1
   :caption: Additional Resources
   :name: resources
   :hidden:

   LangChainHub <https://github.com/hwchase17/langchain-hub>
   ./additional_resources/gallery.rst
   ./additional_resources/deployments.md
   ./additional_resources/tracing.md
   ./additional_resources/model_laboratory.ipynb
   Discord <https://discord.gg/6adMQxSpJS>
   ./additional_resources/youtube.md
   Production Support <https://forms.gle/57d8AmXBYp8PP8tZA>
docs/integrations.rst (new file, 33 lines)

@@ -0,0 +1,33 @@
Integrations
===================

LangChain integrates with many LLMs, systems, and products.

Integrations by Module
--------------------------------

| Integrations grouped by the core LangChain module they map to:

- `LLM Providers <./modules/models/llms/integrations.html>`_
- `Chat Model Providers <./modules/models/chat/integrations.html>`_
- `Text Embedding Model Providers <./modules/models/text_embedding.html>`_
- `Document Loader Integrations <./modules/indexes/document_loaders.html>`_
- `Text Splitter Integrations <./modules/indexes/text_splitters.html>`_
- `Vectorstore Providers <./modules/indexes/vectorstores.html>`_
- `Retriever Providers <./modules/indexes/retrievers.html>`_
- `Tool Providers <./modules/agents/tools.html>`_
- `Toolkit Integrations <./modules/agents/toolkits.html>`_


All Integrations
-------------------------------------------

| A comprehensive list of LLMs, systems, and products integrated with LangChain:

.. toctree::
   :maxdepth: 1
   :glob:

   integrations/*
docs/integrations/databricks.ipynb (new file, 280 lines)

@@ -0,0 +1,280 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "# Databricks\n",
    "\n",
    "This notebook covers how to connect to the [Databricks runtimes](https://docs.databricks.com/runtime/index.html) and [Databricks SQL](https://www.databricks.com/product/databricks-sql) using the SQLDatabase wrapper of LangChain.\n",
    "It is broken into 3 parts: installation and setup, connecting to Databricks, and examples."
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Installation and Setup"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [],
   "source": [
    "!pip install databricks-sql-connector"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Connecting to Databricks\n",
    "\n",
    "You can connect to [Databricks runtimes](https://docs.databricks.com/runtime/index.html) and [Databricks SQL](https://www.databricks.com/product/databricks-sql) using the `SQLDatabase.from_databricks()` method.\n",
    "\n",
    "### Syntax\n",
    "```python\n",
    "SQLDatabase.from_databricks(\n",
    "    catalog: str,\n",
    "    schema: str,\n",
    "    host: Optional[str] = None,\n",
    "    api_token: Optional[str] = None,\n",
    "    warehouse_id: Optional[str] = None,\n",
    "    cluster_id: Optional[str] = None,\n",
    "    engine_args: Optional[dict] = None,\n",
    "    **kwargs: Any)\n",
    "```\n",
    "### Required Parameters\n",
    "* `catalog`: The catalog name in the Databricks database.\n",
    "* `schema`: The schema name in the catalog.\n",
    "\n",
    "### Optional Parameters\n",
    "The following parameters are optional. When executing the method in a Databricks notebook, you don't need to provide them in most cases.\n",
    "* `host`: The Databricks workspace hostname, excluding the 'https://' part. Defaults to the 'DATABRICKS_HOST' environment variable, or the current workspace if in a Databricks notebook.\n",
    "* `api_token`: The Databricks personal access token for accessing the Databricks SQL warehouse or the cluster. Defaults to the 'DATABRICKS_API_TOKEN' environment variable, or a temporary token is generated if in a Databricks notebook.\n",
    "* `warehouse_id`: The warehouse ID in Databricks SQL.\n",
    "* `cluster_id`: The cluster ID in the Databricks Runtime. If running in a Databricks notebook and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the cluster the notebook is attached to.\n",
    "* `engine_args`: The arguments to be used when connecting to Databricks.\n",
    "* `**kwargs`: Additional keyword arguments for the `SQLDatabase.from_uri` method."
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Examples"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "# Connecting to Databricks with SQLDatabase wrapper\n",
    "from langchain import SQLDatabase\n",
    "\n",
    "db = SQLDatabase.from_databricks(catalog='samples', schema='nyctaxi')"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "# Creating an OpenAI Chat LLM wrapper\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### SQL Chain example\n",
    "\n",
    "This example demonstrates the use of the [SQL Chain](https://python.langchain.com/en/latest/modules/chains/examples/sqlite.html) for answering a question over a Databricks database."
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "36f2270b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain import SQLDatabaseChain\n",
    "\n",
    "db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "4e2b5f25",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new SQLDatabaseChain chain...\u001B[0m\n",
      "What is the average duration of taxi rides that start between midnight and 6am?\n",
      "SQLQuery:\u001B[32;1m\u001B[1;3mSELECT AVG(UNIX_TIMESTAMP(tpep_dropoff_datetime) - UNIX_TIMESTAMP(tpep_pickup_datetime)) as avg_duration\n",
      "FROM trips\n",
      "WHERE HOUR(tpep_pickup_datetime) >= 0 AND HOUR(tpep_pickup_datetime) < 6\u001B[0m\n",
      "SQLResult: \u001B[33;1m\u001B[1;3m[(987.8122786304605,)]\u001B[0m\n",
      "Answer:\u001B[32;1m\u001B[1;3mThe average duration of taxi rides that start between midnight and 6am is 987.81 seconds.\u001B[0m\n",
      "\u001B[1m> Finished chain.\u001B[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": "'The average duration of taxi rides that start between midnight and 6am is 987.81 seconds.'"
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "db_chain.run(\"What is the average duration of taxi rides that start between midnight and 6am?\")"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "### SQL Database Agent example\n",
    "\n",
    "This example demonstrates the use of the [SQL Database Agent](https://python.langchain.com/en/latest/modules/agents/toolkits/examples/sql_database.html) for answering questions over a Databricks database."
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "9918e86a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.agents import create_sql_agent\n",
    "from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n",
    "\n",
    "toolkit = SQLDatabaseToolkit(db=db, llm=llm)\n",
    "agent = create_sql_agent(\n",
    "    llm=llm,\n",
    "    toolkit=toolkit,\n",
    "    verbose=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "c484a76e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
      "\u001B[32;1m\u001B[1;3mAction: list_tables_sql_db\n",
      "Action Input: \u001B[0m\n",
      "Observation: \u001B[38;5;200m\u001B[1;3mtrips\u001B[0m\n",
      "Thought:\u001B[32;1m\u001B[1;3mI should check the schema of the trips table to see if it has the necessary columns for trip distance and duration.\n",
      "Action: schema_sql_db\n",
      "Action Input: trips\u001B[0m\n",
      "Observation: \u001B[33;1m\u001B[1;3m\n",
      "CREATE TABLE trips (\n",
      "\ttpep_pickup_datetime TIMESTAMP, \n",
      "\ttpep_dropoff_datetime TIMESTAMP, \n",
      "\ttrip_distance FLOAT, \n",
      "\tfare_amount FLOAT, \n",
      "\tpickup_zip INT, \n",
      "\tdropoff_zip INT\n",
      ") USING DELTA\n",
      "\n",
      "/*\n",
      "3 rows from trips table:\n",
      "tpep_pickup_datetime\ttpep_dropoff_datetime\ttrip_distance\tfare_amount\tpickup_zip\tdropoff_zip\n",
      "2016-02-14 16:52:13+00:00\t2016-02-14 17:16:04+00:00\t4.94\t19.0\t10282\t10171\n",
      "2016-02-04 18:44:19+00:00\t2016-02-04 18:46:00+00:00\t0.28\t3.5\t10110\t10110\n",
      "2016-02-17 17:13:57+00:00\t2016-02-17 17:17:55+00:00\t0.7\t5.0\t10103\t10023\n",
      "*/\u001B[0m\n",
      "Thought:\u001B[32;1m\u001B[1;3mThe trips table has the necessary columns for trip distance and duration. I will write a query to find the longest trip distance and its duration.\n",
      "Action: query_checker_sql_db\n",
      "Action Input: SELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1\u001B[0m\n",
      "Observation: \u001B[31;1m\u001B[1;3mSELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1\u001B[0m\n",
      "Thought:\u001B[32;1m\u001B[1;3mThe query is correct. I will now execute it to find the longest trip distance and its duration.\n",
      "Action: query_sql_db\n",
      "Action Input: SELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1\u001B[0m\n",
      "Observation: \u001B[36;1m\u001B[1;3m[(30.6, '0 00:43:31.000000000')]\u001B[0m\n",
      "Thought:\u001B[32;1m\u001B[1;3mI now know the final answer.\n",
      "Final Answer: The longest trip distance is 30.6 miles and it took 43 minutes and 31 seconds.\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": "'The longest trip distance is 30.6 miles and it took 43 minutes and 31 seconds.'"
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "agent.run(\"What is the longest trip distance and how long did it take?\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
@@ -8,10 +8,10 @@ Docugami converts business documents into a Document XML Knowledge Graph, genera
|
||||
|
||||
## Quick start
|
||||
|
||||
1. Create a Docugami workspace: http://www.docugami.com (free trials available)
|
||||
1. Create a Docugami workspace: <a href="http://www.docugami.com">http://www.docugami.com</a> (free trials available)
|
||||
2. Add your documents (PDF, DOCX or DOC) and allow Docugami to ingest and cluster them into sets of similar documents, e.g. NDAs, Lease Agreements, and Service Agreements. There is no fixed set of document types supported by the system; the clusters created depend on your particular documents, and you can [change the docset assignments](https://help.docugami.com/home/working-with-the-doc-sets-view) later.
|
||||
3. Create an access token via the Developer Playground for your workspace. Detailed instructions: https://help.docugami.com/home/docugami-api
|
||||
4. Explore the Docugami API at https://api-docs.docugami.com/ to get a list of your processed docset IDs, or just the document IDs for a particular docset.
|
||||
4. Explore the Docugami API at <a href="https://api-docs.docugami.com">https://api-docs.docugami.com</a> to get a list of your processed docset IDs, or just the document IDs for a particular docset.
|
||||
6. Use the DocugamiLoader as detailed in [this notebook](../modules/indexes/document_loaders/examples/docugami.ipynb) to get rich semantic chunks for your documents (a minimal sketch follows this list).
|
||||
7. Optionally, build and publish one or more [reports or abstracts](https://help.docugami.com/home/reports). This helps Docugami improve the semantic XML with better tags based on your preferences, which are then added to the DocugamiLoader output as metadata. Use techniques like [self-querying retriever](https://python.langchain.com/en/latest/modules/indexes/retrievers/examples/self_query_retriever.html) to do high accuracy Document QA.
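
For orientation, here is a minimal sketch of step 6 in code. The import path, parameter names, and the `DOCUGAMI_API_KEY` environment variable are assumptions here; the linked notebook is the authoritative example.

```python
# A minimal sketch, assuming DocugamiLoader reads the access token from the
# DOCUGAMI_API_KEY environment variable; docset_id and document_ids are
# placeholders taken from the API exploration step above.
import os

from langchain.document_loaders import DocugamiLoader

os.environ["DOCUGAMI_API_KEY"] = "<access token from the Developer Playground>"

loader = DocugamiLoader(
    docset_id="<your docset id>",             # placeholder
    document_ids=["<optional document id>"],  # placeholder; omit to load the whole docset
)
docs = loader.load()  # semantic chunks, with Docugami's XML tags in each doc's metadata
```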
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Google Search Wrapper
|
||||
# Google Search
|
||||
|
||||
This page covers how to use the Google Search API within LangChain.
|
||||
It is broken into two parts: installation and setup, and then references to the specific Google Search wrapper.
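
As a quick orientation, here is a minimal usage sketch; the environment variable names follow the setup instructions referenced on this page, and the values are placeholders.

```python
# A minimal sketch, assuming GOOGLE_API_KEY and GOOGLE_CSE_ID have been
# obtained as described in the installation and setup section.
import os

from langchain.utilities import GoogleSearchAPIWrapper

os.environ["GOOGLE_API_KEY"] = "<your google api key>"          # placeholder
os.environ["GOOGLE_CSE_ID"] = "<your custom search engine id>"  # placeholder

search = GoogleSearchAPIWrapper()
print(search.run("LangChain"))  # returns a string of concatenated result snippets
```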
|
||||
@@ -1,4 +1,4 @@
|
||||
# Google Serper Wrapper
|
||||
# Google Serper
|
||||
|
||||
This page covers how to use the [Serper](https://serper.dev) Google Search API within LangChain. Serper is a low-cost Google Search API that can be used to add answer box, knowledge graph, and organic results data from Google Search.
|
||||
It is broken into two parts: setup, and then references to the specific Google Serper wrapper.
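
As a quick orientation, here is a minimal usage sketch, assuming a `SERPER_API_KEY` obtained from serper.dev as covered in the setup section.

```python
# A minimal sketch, assuming a SERPER_API_KEY from serper.dev is set.
import os

from langchain.utilities import GoogleSerperAPIWrapper

os.environ["SERPER_API_KEY"] = "<your serper api key>"  # placeholder

search = GoogleSerperAPIWrapper()
print(search.run("How long is the Golden Gate Bridge?"))
```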
|
||||
20
docs/integrations/psychic.md
Normal file
@@ -0,0 +1,20 @@
|
||||
# Psychic
|
||||
|
||||
This page covers how to use [Psychic](https://www.psychic.dev/) within LangChain.
|
||||
|
||||
## What is Psychic?
|
||||
|
||||
Psychic is a platform for integrating with your customer’s SaaS tools like Notion, Zendesk, Confluence, and Google Drive via OAuth and syncing documents from these applications to your SQL or vector database. You can think of it like Plaid for unstructured data. Psychic is easy to set up - you use it by importing the react library and configuring it with your Sidekick API key, which you can get from the [Psychic dashboard](https://dashboard.psychic.dev/). When your users connect their applications, you can view these connections from the dashboard and retrieve data using the server-side libraries.
|
||||
|
||||
## Quick start
|
||||
|
||||
1. Create an account in the [dashboard](https://dashboard.psychic.dev/).
|
||||
2. Use the [react library](https://docs.psychic.dev/sidekick-link) to add the Psychic link modal to your frontend react app. Users will use this to connect their SaaS apps.
|
||||
3. Once your user has created a connection, you can use the langchain PsychicLoader by following the [example notebook](../modules/indexes/document_loaders/examples/psychic.ipynb); a minimal sketch follows below.
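
A minimal sketch of the loader usage (the parameter names here are assumptions, so treat the linked example notebook as authoritative):

```python
# A minimal sketch; parameter names are assumptions, see the example notebook.
from langchain.document_loaders import PsychicLoader

loader = PsychicLoader(
    api_key="<your psychic secret key>",         # placeholder
    connector_id="<connector id, e.g. notion>",  # placeholder
    connection_id="<the user's connection id>",  # placeholder
)
documents = loader.load()  # documents synced from the user's connected SaaS app
```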
|
||||
|
||||
|
||||
## Advantages vs Other Document Loaders
|
||||
|
||||
1. **Universal API:** Instead of building OAuth flows and learning the APIs for every SaaS app, you integrate Psychic once and leverage our universal API to retrieve data.
|
||||
2. **Data Syncs:** Data in your customers' SaaS apps can get stale fast. With Psychic you can configure webhooks to keep your documents up to date on a daily or realtime basis.
|
||||
3. **Simplified OAuth:** Psychic handles OAuth end-to-end so that you don't have to spend time creating OAuth clients for each integration, keeping access tokens fresh, and handling OAuth redirect logic.
|
||||
15
docs/modules.rst
Normal file
@@ -0,0 +1,15 @@
|
||||
Modules
|
||||
==========================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Modules
|
||||
:name: modules
|
||||
|
||||
./modules/models.rst
|
||||
./modules/prompts.rst
|
||||
./modules/indexes.md
|
||||
./modules/memory.md
|
||||
./modules/chains.md
|
||||
./modules/agents.md
|
||||
./modules/callbacks/getting_started.ipynb
|
||||
@@ -17,7 +17,7 @@ At the moment, there are two main types of agents:
|
||||
|
||||
When should you use each one? Action Agents are more conventional, and good for small tasks.
|
||||
For more complex or long-running tasks, the initial planning step helps to maintain long-term objectives and focus. However, that comes at the expense of generally more calls and higher latency.
|
||||
These two agents are also not mutually exclusive - in fact, it is often best to have an Action Agent be in change of the execution for the Plan and Execute agent.
|
||||
These two agents are also not mutually exclusive - in fact, it is often best to have an Action Agent be in charge of the execution for the Plan and Execute agent.
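
As a rough sketch of how the two fit together (the module path and helper names here are assumptions based on the experimental plan-and-execute package of this release), the planner maintains the long-term plan while an Action Agent-style executor carries out each step:

```python
# A rough sketch, assuming the experimental plan-and-execute helpers:
# the planner decomposes the objective and the executor runs each step.
from langchain.chat_models import ChatOpenAI
from langchain.experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)

tools = []  # placeholder: the tools the executor is allowed to call

llm = ChatOpenAI(temperature=0)
planner = load_chat_planner(llm)                          # plans the steps
executor = load_agent_executor(llm, tools, verbose=True)  # an Action Agent per step
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
```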
|
||||
|
||||
Action Agents
|
||||
-------------
|
||||
|
||||
@@ -0,0 +1,371 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6317727b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Handle Parsing Errors\n",
|
||||
"\n",
|
||||
"Occasionally the LLM cannot determine what step to take because it outputs format in incorrect form to be handled by the output parser. In this case, by default the agent errors. But you can easily control this functionality with `handle_parsing_errors`! Let's explore how."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "39cc1a7b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "33c7f220",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import OpenAI, LLMMathChain, SerpAPIWrapper, SQLDatabase, SQLDatabaseChain\n",
|
||||
"from langchain.agents import initialize_agent, Tool\n",
|
||||
"from langchain.agents import AgentType\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.agents.types import AGENT_TO_CLASS"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "3de22959",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"search = SerpAPIWrapper()\n",
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name = \"Search\",\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to answer questions about current events. You should ask targeted questions\"\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9f1fc58a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Error\n",
|
||||
"\n",
|
||||
"In this scenario, the agent will error (because it fails to output an Action string)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "32ad08d1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"mrkl = initialize_agent(\n",
|
||||
" tools, \n",
|
||||
" ChatOpenAI(temperature=0), \n",
|
||||
" agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, \n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "facb8895",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "OutputParserException",
|
||||
"evalue": "Could not parse LLM output: I'm sorry, but I cannot provide an answer without an Action. Please provide a valid Action in the format specified above.",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
|
||||
"File \u001b[0;32m~/workplace/langchain/langchain/agents/chat/output_parser.py:21\u001b[0m, in \u001b[0;36mChatOutputParser.parse\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m---> 21\u001b[0m action \u001b[38;5;241m=\u001b[39m \u001b[43mtext\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msplit\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m```\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m\n\u001b[1;32m 22\u001b[0m response \u001b[38;5;241m=\u001b[39m json\u001b[38;5;241m.\u001b[39mloads(action\u001b[38;5;241m.\u001b[39mstrip())\n",
|
||||
"\u001b[0;31mIndexError\u001b[0m: list index out of range",
|
||||
"\nDuring handling of the above exception, another exception occurred:\n",
|
||||
"\u001b[0;31mOutputParserException\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[4], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mmrkl\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWho is Leo DiCaprio\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms girlfriend? No need to add Action\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/langchain/chains/base.py:236\u001b[0m, in \u001b[0;36mChain.run\u001b[0;34m(self, callbacks, *args, **kwargs)\u001b[0m\n\u001b[1;32m 234\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(args) \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m 235\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`run` supports only one positional argument.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 236\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallbacks\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys[\u001b[38;5;241m0\u001b[39m]]\n\u001b[1;32m 238\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m args:\n\u001b[1;32m 239\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m(kwargs, callbacks\u001b[38;5;241m=\u001b[39mcallbacks)[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys[\u001b[38;5;241m0\u001b[39m]]\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/langchain/chains/base.py:140\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks)\u001b[0m\n\u001b[1;32m 138\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 139\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n\u001b[0;32m--> 140\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 141\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_end(outputs)\n\u001b[1;32m 142\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprep_outputs(inputs, outputs, return_only_outputs)\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/langchain/chains/base.py:134\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks)\u001b[0m\n\u001b[1;32m 128\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_chain_start(\n\u001b[1;32m 129\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m},\n\u001b[1;32m 130\u001b[0m inputs,\n\u001b[1;32m 131\u001b[0m )\n\u001b[1;32m 132\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 133\u001b[0m outputs \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m--> 134\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 135\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 136\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(inputs)\n\u001b[1;32m 137\u001b[0m )\n\u001b[1;32m 138\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 139\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/langchain/agents/agent.py:947\u001b[0m, in \u001b[0;36mAgentExecutor._call\u001b[0;34m(self, inputs, run_manager)\u001b[0m\n\u001b[1;32m 945\u001b[0m \u001b[38;5;66;03m# We now enter the agent loop (until it returns something).\u001b[39;00m\n\u001b[1;32m 946\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_should_continue(iterations, time_elapsed):\n\u001b[0;32m--> 947\u001b[0m next_step_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_take_next_step\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 948\u001b[0m \u001b[43m \u001b[49m\u001b[43mname_to_tool_map\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 949\u001b[0m \u001b[43m \u001b[49m\u001b[43mcolor_mapping\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 950\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 951\u001b[0m \u001b[43m \u001b[49m\u001b[43mintermediate_steps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 952\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 953\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 954\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(next_step_output, AgentFinish):\n\u001b[1;32m 955\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_return(\n\u001b[1;32m 956\u001b[0m next_step_output, intermediate_steps, run_manager\u001b[38;5;241m=\u001b[39mrun_manager\n\u001b[1;32m 957\u001b[0m )\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/langchain/agents/agent.py:773\u001b[0m, in \u001b[0;36mAgentExecutor._take_next_step\u001b[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001b[0m\n\u001b[1;32m 771\u001b[0m raise_error \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m 772\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m raise_error:\n\u001b[0;32m--> 773\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 774\u001b[0m text \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mstr\u001b[39m(e)\n\u001b[1;32m 775\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_parsing_errors, \u001b[38;5;28mbool\u001b[39m):\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/langchain/agents/agent.py:762\u001b[0m, in \u001b[0;36mAgentExecutor._take_next_step\u001b[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001b[0m\n\u001b[1;32m 756\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Take a single step in the thought-action-observation loop.\u001b[39;00m\n\u001b[1;32m 757\u001b[0m \n\u001b[1;32m 758\u001b[0m \u001b[38;5;124;03mOverride this to take control of how the agent makes and acts on choices.\u001b[39;00m\n\u001b[1;32m 759\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 760\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 761\u001b[0m \u001b[38;5;66;03m# Call the LLM to see what to do.\u001b[39;00m\n\u001b[0;32m--> 762\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43magent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mplan\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 763\u001b[0m \u001b[43m \u001b[49m\u001b[43mintermediate_steps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 764\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 765\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 766\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 767\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m OutputParserException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 768\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_parsing_errors, \u001b[38;5;28mbool\u001b[39m):\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/langchain/agents/agent.py:444\u001b[0m, in \u001b[0;36mAgent.plan\u001b[0;34m(self, intermediate_steps, callbacks, **kwargs)\u001b[0m\n\u001b[1;32m 442\u001b[0m full_inputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mget_full_inputs(intermediate_steps, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 443\u001b[0m full_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mllm_chain\u001b[38;5;241m.\u001b[39mpredict(callbacks\u001b[38;5;241m=\u001b[39mcallbacks, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mfull_inputs)\n\u001b[0;32m--> 444\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43moutput_parser\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfull_output\u001b[49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/langchain/agents/chat/output_parser.py:26\u001b[0m, in \u001b[0;36mChatOutputParser.parse\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m AgentAction(response[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124maction\u001b[39m\u001b[38;5;124m\"\u001b[39m], response[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124maction_input\u001b[39m\u001b[38;5;124m\"\u001b[39m], text)\n\u001b[1;32m 25\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m:\n\u001b[0;32m---> 26\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m OutputParserException(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCould not parse LLM output: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n",
|
||||
"\u001b[0;31mOutputParserException\u001b[0m: Could not parse LLM output: I'm sorry, but I cannot provide an answer without an Action. Please provide a valid Action in the format specified above."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"mrkl.run(\"Who is Leo DiCaprio's girlfriend? No need to add Action\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72687d56",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Default error handling\n",
|
||||
"\n",
|
||||
"Handle errors with `Invalid or incomplete response`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "6bfc21ef",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"mrkl = initialize_agent(\n",
|
||||
" tools, \n",
|
||||
" ChatOpenAI(temperature=0), \n",
|
||||
" agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, \n",
|
||||
" verbose=True,\n",
|
||||
" handle_parsing_errors=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "9c181f33",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation: Invalid or incomplete response\n",
|
||||
"Thought:\n",
|
||||
"Observation: Invalid or incomplete response\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mSearch for Leo DiCaprio's current girlfriend\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Search\",\n",
|
||||
" \"action_input\": \"Leo DiCaprio current girlfriend\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mJust Jared on Instagram: “Leonardo DiCaprio & girlfriend Camila Morrone couple up for a lunch date!\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mCamila Morrone is currently Leo DiCaprio's girlfriend\n",
|
||||
"Final Answer: Camila Morrone\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Camila Morrone'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"mrkl.run(\"Who is Leo DiCaprio's girlfriend? No need to add Action\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6613cc9c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Custom Error Message\n",
|
||||
"\n",
|
||||
"You can easily customize the message to use when there are parsing errors"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "2b23b0af",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"mrkl = initialize_agent(\n",
|
||||
" tools, \n",
|
||||
" ChatOpenAI(temperature=0), \n",
|
||||
" agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, \n",
|
||||
" verbose=True,\n",
|
||||
" handle_parsing_errors=\"Check your output and make sure it conforms!\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "5d5a3e47",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation: Could not parse LLM output: I'm sorry, but I canno\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI need to use the Search tool to find the answer to the question.\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Search\",\n",
|
||||
" \"action_input\": \"Who is Leo DiCaprio's girlfriend?\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mDiCaprio broke up with girlfriend Camila Morrone, 25, in the summer of 2022, after dating for four years. He's since been linked to another famous supermodel – Gigi Hadid. The power couple were first supposedly an item in September after being spotted getting cozy during a party at New York Fashion Week.\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe answer to the question is that Leo DiCaprio's current girlfriend is Gigi Hadid. \n",
|
||||
"Final Answer: Gigi Hadid.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Gigi Hadid.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"mrkl.run(\"Who is Leo DiCaprio's girlfriend? No need to add Action\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c2eb06e2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Custom Error Function\n",
|
||||
"\n",
|
||||
"You can also customize the error to be a function that takes the error in and outputs a string."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "22772981",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def _handle_error(error) -> str:\n",
|
||||
" return str(error)[:50]\n",
|
||||
"\n",
|
||||
"mrkl = initialize_agent(\n",
|
||||
" tools, \n",
|
||||
" ChatOpenAI(temperature=0), \n",
|
||||
" agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, \n",
|
||||
" verbose=True,\n",
|
||||
" handle_parsing_errors=_handle_error\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "151eb820",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation: Could not parse LLM output: I'm sorry, but I canno\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI need to use the Search tool to find the answer to the question.\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Search\",\n",
|
||||
" \"action_input\": \"Who is Leo DiCaprio's girlfriend?\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mDiCaprio broke up with girlfriend Camila Morrone, 25, in the summer of 2022, after dating for four years. He's since been linked to another famous supermodel – Gigi Hadid. The power couple were first supposedly an item in September after being spotted getting cozy during a party at New York Fashion Week.\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe current girlfriend of Leonardo DiCaprio is Gigi Hadid. \n",
|
||||
"Final Answer: Gigi Hadid.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Gigi Hadid.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"mrkl.run(\"Who is Leo DiCaprio's girlfriend? No need to add Action\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4aaef878",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -9,7 +9,7 @@
|
||||
"\n",
|
||||
"This notebook goes through how to create your own custom agent.\n",
|
||||
"\n",
|
||||
"An agent consists of three parts:\n",
|
||||
"An agent consists of two parts:\n",
|
||||
" \n",
|
||||
" - Tools: The tools the agent has available to use.\n",
|
||||
" - The agent class itself: this decides which action to take.\n",
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "406483c4",
|
||||
"metadata": {},
|
||||
@@ -15,6 +16,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "91192118",
|
||||
"metadata": {},
|
||||
@@ -38,6 +40,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "0b10d200",
|
||||
"metadata": {},
|
||||
@@ -70,6 +73,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "ce38ae84",
|
||||
"metadata": {},
|
||||
@@ -114,10 +118,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = PlanAndExecute(planner=planner, executer=executor, verbose=True)"
|
||||
"agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "8be9f1bd",
|
||||
"metadata": {},
|
||||
|
||||
154
docs/modules/agents/streaming_stdout_final_only.ipynb
Normal file
@@ -0,0 +1,154 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "23234b50-e6c6-4c87-9f97-259c15f36894",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"# Only streaming final agent output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "29dd6333-307c-43df-b848-65001c01733b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you only want the final output of an agent to be streamed, you can use the callback ``FinalStreamingStdOutCallbackHandler``.\n",
|
||||
"For this, the underlying LLM has to support streaming as well."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "e4592215-6604-47e2-89ff-5db3af6d1e40",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import load_tools\n",
|
||||
"from langchain.agents import initialize_agent\n",
|
||||
"from langchain.agents import AgentType\n",
|
||||
"from langchain.callbacks.streaming_stdout_final_only import FinalStreamingStdOutCallbackHandler\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "19a813f7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's create the underlying LLM with ``streaming = True`` and pass a new instance of ``FinalStreamingStdOutCallbackHandler``."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "7fe81ef4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(streaming=True, callbacks=[FinalStreamingStdOutCallbackHandler()], temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "ff45b85d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" Konrad Adenauer became Chancellor of Germany in 1949, 74 years ago in 2023."
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Konrad Adenauer became Chancellor of Germany in 1949, 74 years ago in 2023.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = load_tools([\"wikipedia\", \"llm-math\"], llm=llm)\n",
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False)\n",
|
||||
"agent.run(\"It's 2023 now. How many years ago did Konrad Adenauer become Chancellor of Germany.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "53a743b8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Handling custom answer prefixes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "23602c62",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"By default, we assume that the token sequence ``\"\\nFinal\", \" Answer\", \":\"`` indicates that the agent has reached an answers. We can, however, also pass a custom sequence to use as answer prefix."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "5662a638",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(\n",
|
||||
" streaming=True,\n",
|
||||
" callbacks=[FinalStreamingStdOutCallbackHandler(answer_prefix_tokens=[\"\\nThe\", \" answer\", \":\"])],\n",
|
||||
" temperature=0\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b1a96cc0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Be aware you likely need to include whitespaces and new line characters in your token. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9278b522",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,10 +1,7 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "0e499e90-7a6d-4fab-8aab-31a4df417601",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# PowerBI Dataset Agent\n",
|
||||
"\n",
|
||||
@@ -17,46 +14,41 @@
|
||||
"- You can also supply a username to impersonate for use with datasets that have RLS enabled. \n",
|
||||
"- The toolkit uses a LLM to create the query from the question, the agent uses the LLM for the overall execution.\n",
|
||||
"- Testing was done mostly with a `text-davinci-003` model, codex models did not seem to perform ver well."
|
||||
]
|
||||
],
|
||||
"metadata": {},
|
||||
"attachments": {}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ec927ac6-9b2a-4e8a-9a6e-3e429191875c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Initialization"
|
||||
]
|
||||
],
|
||||
"metadata": {
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "53422913-967b-4f2a-8022-00269c1be1b1",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents.agent_toolkits import create_pbi_agent\n",
|
||||
"from langchain.agents.agent_toolkits import PowerBIToolkit\n",
|
||||
"from langchain.utilities.powerbi import PowerBIDataset\n",
|
||||
"from langchain.llms.openai import AzureOpenAI\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.agents import AgentExecutor\n",
|
||||
"from azure.identity import DefaultAzureCredential"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "090f3699-79c6-4ce1-ab96-a94f0121fd64",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fast_llm = AzureOpenAI(temperature=0.5, max_tokens=1000, deployment_name=\"gpt-35-turbo\", verbose=True)\n",
|
||||
"smart_llm = AzureOpenAI(temperature=0, max_tokens=100, deployment_name=\"gpt-4\", verbose=True)\n",
|
||||
"fast_llm = ChatOpenAI(temperature=0.5, max_tokens=1000, model_name=\"gpt-3.5-turbo\", verbose=True)\n",
|
||||
"smart_llm = ChatOpenAI(temperature=0, max_tokens=100, model_name=\"gpt-4\", verbose=True)\n",
|
||||
"\n",
|
||||
"toolkit = PowerBIToolkit(\n",
|
||||
" powerbi=PowerBIDataset(dataset_id=\"<dataset_id>\", table_names=['table1', 'table2'], credential=DefaultAzureCredential()), \n",
|
||||
@@ -68,97 +60,90 @@
|
||||
" toolkit=toolkit,\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "36ae48c7-cb08-4fef-977e-c7d4b96a464b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: describing a table"
|
||||
]
|
||||
],
|
||||
"metadata": {}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ff70e83d-5ad0-4fc7-bb96-27d82ac166d7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor.run(\"Describe table1\")"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "9abcfe8e-1868-42a4-8345-ad2d9b44c681",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: simple query on a table\n",
|
||||
"In this example, the agent actually figures out the correct query to get a row count of the table."
|
||||
]
|
||||
],
|
||||
"metadata": {},
|
||||
"attachments": {}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bea76658-a65b-47e2-b294-6d52c5556246",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor.run(\"How many records are in table1?\")"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6fbc26af-97e4-4a21-82aa-48bdc992da26",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: running queries"
|
||||
]
|
||||
],
|
||||
"metadata": {}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "17bea710-4a23-4de0-b48e-21d57be48293",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor.run(\"How many records are there by dimension1 in table2?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "474dddda-c067-4eeb-98b1-e763ee78b18c",
|
||||
],
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor.run(\"What unique values are there for dimensions2 in table2\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6fd950e4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: add your own few-shot prompts"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "87d677f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"agent_executor.run(\"What unique values are there for dimensions2 in table2\")"
|
||||
],
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"tags": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Example: add your own few-shot prompts"
|
||||
],
|
||||
"metadata": {},
|
||||
"attachments": {}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"source": [
|
||||
"#fictional example\n",
|
||||
"few_shots = \"\"\"\n",
|
||||
@@ -182,24 +167,24 @@
|
||||
" toolkit=toolkit,\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"metadata": {}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "33f4bb43",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor.run(\"What was the maximum of value in revenue in dollars in 2022?\")"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"metadata": {}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
"name": "python3",
|
||||
"display_name": "Python 3.9.16 64-bit"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -211,9 +196,12 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
"version": "3.9.16"
|
||||
},
|
||||
"interpreter": {
|
||||
"hash": "397704579725e15f5c7cb49fe5f0341eb7531c82d19f2c29d197e8b64ab5776b"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -17,7 +18,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import create_spark_dataframe_agent\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"...input your openai api key here...\""
|
||||
@@ -25,9 +25,20 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"23/05/15 20:33:10 WARN Utils: Your hostname, Mikes-Mac-mini.local resolves to a loopback address: 127.0.0.1; using 192.168.68.115 instead (on interface en1)\n",
|
||||
"23/05/15 20:33:10 WARN Utils: Set SPARK_LOCAL_IP if you need to bind to another address\n",
|
||||
"Setting default log level to \"WARN\".\n",
|
||||
"To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n",
|
||||
"23/05/15 20:33:10 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
@@ -64,6 +75,7 @@
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from pyspark.sql import SparkSession\n",
|
||||
"from langchain.agents import create_spark_dataframe_agent\n",
|
||||
"\n",
|
||||
"spark = SparkSession.builder.getOrCreate()\n",
|
||||
"csv_file_path = \"titanic.csv\"\n",
|
||||
@@ -92,7 +104,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to find out the size of the dataframe\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to find out how many rows are in the dataframe\n",
|
||||
"Action: python_repl_ast\n",
|
||||
"Action Input: df.count()\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m891\u001b[0m\n",
|
||||
@@ -205,7 +217,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -213,6 +225,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
|
||||
348
docs/modules/agents/toolkits/examples/spark_sql.ipynb
Normal file
@@ -0,0 +1,348 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Spark SQL Agent\n",
|
||||
"\n",
|
||||
"This notebook shows how to use agents to interact with a Spark SQL. Similar to [SQL Database Agent](https://python.langchain.com/en/latest/modules/agents/toolkits/examples/sql_database.html), it is designed to address general inquiries about Spark SQL and facilitate error recovery.\n",
|
||||
"\n",
|
||||
"**NOTE: Note that, as this agent is in active development, all answers might not be correct. Additionally, it is not guaranteed that the agent won't perform DML statements on your Spark cluster given certain questions. Be careful running it on sensitive data!**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import create_spark_sql_agent\n",
|
||||
"from langchain.agents.agent_toolkits import SparkSQLToolkit\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.utilities.spark_sql import SparkSQL"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Setting default log level to \"WARN\".\n",
|
||||
"To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n",
|
||||
"23/05/18 16:03:10 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"+-----------+--------+------+--------------------+------+----+-----+-----+----------------+-------+-----+--------+\n",
|
||||
"|PassengerId|Survived|Pclass| Name| Sex| Age|SibSp|Parch| Ticket| Fare|Cabin|Embarked|\n",
|
||||
"+-----------+--------+------+--------------------+------+----+-----+-----+----------------+-------+-----+--------+\n",
|
||||
"| 1| 0| 3|Braund, Mr. Owen ...| male|22.0| 1| 0| A/5 21171| 7.25| null| S|\n",
|
||||
"| 2| 1| 1|Cumings, Mrs. Joh...|female|38.0| 1| 0| PC 17599|71.2833| C85| C|\n",
|
||||
"| 3| 1| 3|Heikkinen, Miss. ...|female|26.0| 0| 0|STON/O2. 3101282| 7.925| null| S|\n",
|
||||
"| 4| 1| 1|Futrelle, Mrs. Ja...|female|35.0| 1| 0| 113803| 53.1| C123| S|\n",
|
||||
"| 5| 0| 3|Allen, Mr. Willia...| male|35.0| 0| 0| 373450| 8.05| null| S|\n",
|
||||
"| 6| 0| 3| Moran, Mr. James| male|null| 0| 0| 330877| 8.4583| null| Q|\n",
|
||||
"| 7| 0| 1|McCarthy, Mr. Tim...| male|54.0| 0| 0| 17463|51.8625| E46| S|\n",
|
||||
"| 8| 0| 3|Palsson, Master. ...| male| 2.0| 3| 1| 349909| 21.075| null| S|\n",
|
||||
"| 9| 1| 3|Johnson, Mrs. Osc...|female|27.0| 0| 2| 347742|11.1333| null| S|\n",
|
||||
"| 10| 1| 2|Nasser, Mrs. Nich...|female|14.0| 1| 0| 237736|30.0708| null| C|\n",
|
||||
"| 11| 1| 3|Sandstrom, Miss. ...|female| 4.0| 1| 1| PP 9549| 16.7| G6| S|\n",
|
||||
"| 12| 1| 1|Bonnell, Miss. El...|female|58.0| 0| 0| 113783| 26.55| C103| S|\n",
|
||||
"| 13| 0| 3|Saundercock, Mr. ...| male|20.0| 0| 0| A/5. 2151| 8.05| null| S|\n",
|
||||
"| 14| 0| 3|Andersson, Mr. An...| male|39.0| 1| 5| 347082| 31.275| null| S|\n",
|
||||
"| 15| 0| 3|Vestrom, Miss. Hu...|female|14.0| 0| 0| 350406| 7.8542| null| S|\n",
|
||||
"| 16| 1| 2|Hewlett, Mrs. (Ma...|female|55.0| 0| 0| 248706| 16.0| null| S|\n",
|
||||
"| 17| 0| 3|Rice, Master. Eugene| male| 2.0| 4| 1| 382652| 29.125| null| Q|\n",
|
||||
"| 18| 1| 2|Williams, Mr. Cha...| male|null| 0| 0| 244373| 13.0| null| S|\n",
|
||||
"| 19| 0| 3|Vander Planke, Mr...|female|31.0| 1| 0| 345763| 18.0| null| S|\n",
|
||||
"| 20| 1| 3|Masselmani, Mrs. ...|female|null| 0| 0| 2649| 7.225| null| C|\n",
|
||||
"+-----------+--------+------+--------------------+------+----+-----+-----+----------------+-------+-----+--------+\n",
|
||||
"only showing top 20 rows\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from pyspark.sql import SparkSession\n",
|
||||
"\n",
|
||||
"spark = SparkSession.builder.getOrCreate()\n",
|
||||
"schema = \"langchain_example\"\n",
|
||||
"spark.sql(f\"CREATE DATABASE IF NOT EXISTS {schema}\")\n",
|
||||
"spark.sql(f\"USE {schema}\")\n",
|
||||
"csv_file_path = \"titanic.csv\"\n",
|
||||
"table = \"titanic\"\n",
|
||||
"spark.read.csv(csv_file_path, header=True, inferSchema=True).write.saveAsTable(table)\n",
|
||||
"spark.table(table).show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Note, you can also connect to Spark via Spark connect. For example:\n",
|
||||
"# db = SparkSQL.from_uri(\"sc://localhost:15002\", schema=schema)\n",
|
||||
"spark_sql = SparkSQL(schema=schema)\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"toolkit = SparkSQLToolkit(db=spark_sql, llm=llm)\n",
|
||||
"agent_executor = create_spark_sql_agent(\n",
|
||||
" llm=llm,\n",
|
||||
" toolkit=toolkit,\n",
|
||||
" verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: describing a table"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3mAction: list_tables_sql_db\n",
|
||||
"Action Input: \u001B[0m\n",
|
||||
"Observation: \u001B[38;5;200m\u001B[1;3mtitanic\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mI found the titanic table. Now I need to get the schema and sample rows for the titanic table.\n",
|
||||
"Action: schema_sql_db\n",
|
||||
"Action Input: titanic\u001B[0m\n",
|
||||
"Observation: \u001B[33;1m\u001B[1;3mCREATE TABLE langchain_example.titanic (\n",
|
||||
" PassengerId INT,\n",
|
||||
" Survived INT,\n",
|
||||
" Pclass INT,\n",
|
||||
" Name STRING,\n",
|
||||
" Sex STRING,\n",
|
||||
" Age DOUBLE,\n",
|
||||
" SibSp INT,\n",
|
||||
" Parch INT,\n",
|
||||
" Ticket STRING,\n",
|
||||
" Fare DOUBLE,\n",
|
||||
" Cabin STRING,\n",
|
||||
" Embarked STRING)\n",
|
||||
";\n",
|
||||
"\n",
|
||||
"/*\n",
|
||||
"3 rows from titanic table:\n",
|
||||
"PassengerId\tSurvived\tPclass\tName\tSex\tAge\tSibSp\tParch\tTicket\tFare\tCabin\tEmbarked\n",
|
||||
"1\t0\t3\tBraund, Mr. Owen Harris\tmale\t22.0\t1\t0\tA/5 21171\t7.25\tNone\tS\n",
|
||||
"2\t1\t1\tCumings, Mrs. John Bradley (Florence Briggs Thayer)\tfemale\t38.0\t1\t0\tPC 17599\t71.2833\tC85\tC\n",
|
||||
"3\t1\t3\tHeikkinen, Miss. Laina\tfemale\t26.0\t0\t0\tSTON/O2. 3101282\t7.925\tNone\tS\n",
|
||||
"*/\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mI now know the schema and sample rows for the titanic table.\n",
|
||||
"Final Answer: The titanic table has the following columns: PassengerId (INT), Survived (INT), Pclass (INT), Name (STRING), Sex (STRING), Age (DOUBLE), SibSp (INT), Parch (INT), Ticket (STRING), Fare (DOUBLE), Cabin (STRING), and Embarked (STRING). Here are some sample rows from the table: \n",
|
||||
"\n",
|
||||
"1. PassengerId: 1, Survived: 0, Pclass: 3, Name: Braund, Mr. Owen Harris, Sex: male, Age: 22.0, SibSp: 1, Parch: 0, Ticket: A/5 21171, Fare: 7.25, Cabin: None, Embarked: S\n",
|
||||
"2. PassengerId: 2, Survived: 1, Pclass: 1, Name: Cumings, Mrs. John Bradley (Florence Briggs Thayer), Sex: female, Age: 38.0, SibSp: 1, Parch: 0, Ticket: PC 17599, Fare: 71.2833, Cabin: C85, Embarked: C\n",
|
||||
"3. PassengerId: 3, Survived: 1, Pclass: 3, Name: Heikkinen, Miss. Laina, Sex: female, Age: 26.0, SibSp: 0, Parch: 0, Ticket: STON/O2. 3101282, Fare: 7.925, Cabin: None, Embarked: S\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "'The titanic table has the following columns: PassengerId (INT), Survived (INT), Pclass (INT), Name (STRING), Sex (STRING), Age (DOUBLE), SibSp (INT), Parch (INT), Ticket (STRING), Fare (DOUBLE), Cabin (STRING), and Embarked (STRING). Here are some sample rows from the table: \\n\\n1. PassengerId: 1, Survived: 0, Pclass: 3, Name: Braund, Mr. Owen Harris, Sex: male, Age: 22.0, SibSp: 1, Parch: 0, Ticket: A/5 21171, Fare: 7.25, Cabin: None, Embarked: S\\n2. PassengerId: 2, Survived: 1, Pclass: 1, Name: Cumings, Mrs. John Bradley (Florence Briggs Thayer), Sex: female, Age: 38.0, SibSp: 1, Parch: 0, Ticket: PC 17599, Fare: 71.2833, Cabin: C85, Embarked: C\\n3. PassengerId: 3, Survived: 1, Pclass: 3, Name: Heikkinen, Miss. Laina, Sex: female, Age: 26.0, SibSp: 0, Parch: 0, Ticket: STON/O2. 3101282, Fare: 7.925, Cabin: None, Embarked: S'"
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.run(\"Describe the titanic table\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: running queries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3mAction: list_tables_sql_db\n",
|
||||
"Action Input: \u001B[0m\n",
|
||||
"Observation: \u001B[38;5;200m\u001B[1;3mtitanic\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mI should check the schema of the titanic table to see if there is an age column.\n",
|
||||
"Action: schema_sql_db\n",
|
||||
"Action Input: titanic\u001B[0m\n",
|
||||
"Observation: \u001B[33;1m\u001B[1;3mCREATE TABLE langchain_example.titanic (\n",
|
||||
" PassengerId INT,\n",
|
||||
" Survived INT,\n",
|
||||
" Pclass INT,\n",
|
||||
" Name STRING,\n",
|
||||
" Sex STRING,\n",
|
||||
" Age DOUBLE,\n",
|
||||
" SibSp INT,\n",
|
||||
" Parch INT,\n",
|
||||
" Ticket STRING,\n",
|
||||
" Fare DOUBLE,\n",
|
||||
" Cabin STRING,\n",
|
||||
" Embarked STRING)\n",
|
||||
";\n",
|
||||
"\n",
|
||||
"/*\n",
|
||||
"3 rows from titanic table:\n",
|
||||
"PassengerId\tSurvived\tPclass\tName\tSex\tAge\tSibSp\tParch\tTicket\tFare\tCabin\tEmbarked\n",
|
||||
"1\t0\t3\tBraund, Mr. Owen Harris\tmale\t22.0\t1\t0\tA/5 21171\t7.25\tNone\tS\n",
|
||||
"2\t1\t1\tCumings, Mrs. John Bradley (Florence Briggs Thayer)\tfemale\t38.0\t1\t0\tPC 17599\t71.2833\tC85\tC\n",
|
||||
"3\t1\t3\tHeikkinen, Miss. Laina\tfemale\t26.0\t0\t0\tSTON/O2. 3101282\t7.925\tNone\tS\n",
|
||||
"*/\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mThere is an Age column in the titanic table. I should write a query to calculate the average age and then find the square root of the result.\n",
|
||||
"Action: query_checker_sql_db\n",
|
||||
"Action Input: SELECT SQRT(AVG(Age)) as square_root_of_avg_age FROM titanic\u001B[0m\n",
|
||||
"Observation: \u001B[31;1m\u001B[1;3mThe original query seems to be correct. Here it is again:\n",
|
||||
"\n",
|
||||
"SELECT SQRT(AVG(Age)) as square_root_of_avg_age FROM titanic\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mThe query is correct, so I can execute it to find the square root of the average age.\n",
|
||||
"Action: query_sql_db\n",
|
||||
"Action Input: SELECT SQRT(AVG(Age)) as square_root_of_avg_age FROM titanic\u001B[0m\n",
|
||||
"Observation: \u001B[36;1m\u001B[1;3m[('5.449689683556195',)]\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mI now know the final answer\n",
|
||||
"Final Answer: The square root of the average age is approximately 5.45.\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "'The square root of the average age is approximately 5.45.'"
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.run(\"whats the square root of the average age?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3mAction: list_tables_sql_db\n",
|
||||
"Action Input: \u001B[0m\n",
|
||||
"Observation: \u001B[38;5;200m\u001B[1;3mtitanic\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mI should check the schema of the titanic table to see what columns are available.\n",
|
||||
"Action: schema_sql_db\n",
|
||||
"Action Input: titanic\u001B[0m\n",
|
||||
"Observation: \u001B[33;1m\u001B[1;3mCREATE TABLE langchain_example.titanic (\n",
|
||||
" PassengerId INT,\n",
|
||||
" Survived INT,\n",
|
||||
" Pclass INT,\n",
|
||||
" Name STRING,\n",
|
||||
" Sex STRING,\n",
|
||||
" Age DOUBLE,\n",
|
||||
" SibSp INT,\n",
|
||||
" Parch INT,\n",
|
||||
" Ticket STRING,\n",
|
||||
" Fare DOUBLE,\n",
|
||||
" Cabin STRING,\n",
|
||||
" Embarked STRING)\n",
|
||||
";\n",
|
||||
"\n",
|
||||
"/*\n",
|
||||
"3 rows from titanic table:\n",
|
||||
"PassengerId\tSurvived\tPclass\tName\tSex\tAge\tSibSp\tParch\tTicket\tFare\tCabin\tEmbarked\n",
|
||||
"1\t0\t3\tBraund, Mr. Owen Harris\tmale\t22.0\t1\t0\tA/5 21171\t7.25\tNone\tS\n",
|
||||
"2\t1\t1\tCumings, Mrs. John Bradley (Florence Briggs Thayer)\tfemale\t38.0\t1\t0\tPC 17599\t71.2833\tC85\tC\n",
|
||||
"3\t1\t3\tHeikkinen, Miss. Laina\tfemale\t26.0\t0\t0\tSTON/O2. 3101282\t7.925\tNone\tS\n",
|
||||
"*/\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mI can use the titanic table to find the oldest survived passenger. I will query the Name and Age columns, filtering by Survived and ordering by Age in descending order.\n",
|
||||
"Action: query_checker_sql_db\n",
|
||||
"Action Input: SELECT Name, Age FROM titanic WHERE Survived = 1 ORDER BY Age DESC LIMIT 1\u001B[0m\n",
|
||||
"Observation: \u001B[31;1m\u001B[1;3mSELECT Name, Age FROM titanic WHERE Survived = 1 ORDER BY Age DESC LIMIT 1\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mThe query is correct. Now I will execute it to find the oldest survived passenger.\n",
|
||||
"Action: query_sql_db\n",
|
||||
"Action Input: SELECT Name, Age FROM titanic WHERE Survived = 1 ORDER BY Age DESC LIMIT 1\u001B[0m\n",
|
||||
"Observation: \u001B[36;1m\u001B[1;3m[('Barkworth, Mr. Algernon Henry Wilson', '80.0')]\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3mI now know the final answer.\n",
|
||||
"Final Answer: The oldest survived passenger is Barkworth, Mr. Algernon Henry Wilson, who was 80 years old.\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "'The oldest survived passenger is Barkworth, Mr. Algernon Henry Wilson, who was 80 years old.'"
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.run(\"What's the name of the oldest survived passenger?\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "984a8fca",
|
||||
"metadata": {},
|
||||
@@ -9,7 +10,7 @@
|
||||
"\n",
|
||||
"Sometimes, for complex calculations, rather than have an LLM generate the answer directly, it can be better to have the LLM generate code to calculate the answer, and then run that code to get the answer. In order to easily do that, we provide a simple Python REPL to execute commands in.\n",
|
||||
"\n",
|
||||
"This interface will only return things that are printed - therefor, if you want to use it to calculate an answer, make sure to have it print out the answer."
|
||||
"This interface will only return things that are printed - therefore, if you want to use it to calculate an answer, make sure to have it print out the answer."
|
||||
]
|
||||
},
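A minimal sketch of the REPL pattern described above (the `PythonREPL` utility and its import path are assumed from this version of LangChain); since only printed output is returned, the generated code must print its answer:

```python
from langchain.utilities import PythonREPL

# The REPL captures stdout, so the command has to print its result.
python_repl = PythonREPL()
result = python_repl.run("x = 17 ** 0.5\nprint(x)")
print(result)  # -> 4.123105625617661
```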
{
|
||||
|
||||
@@ -27,19 +27,6 @@
|
||||
"In code, below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a363309c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
|
||||
@@ -118,7 +118,7 @@ Below is a list of all supported tools and relevant information:
|
||||
- Notes: Uses the Google Custom Search API
|
||||
- Requires LLM: No
|
||||
- Extra Parameters: `google_api_key`, `google_cse_id`
|
||||
- For more information on this, see [this page](../../../ecosystem/google_search.md)
|
||||
- For more information on this, see [this page](../../../integrations/google_search.md)
|
||||
|
||||
**searx-search**
|
||||
|
||||
@@ -135,7 +135,7 @@ Below is a list of all supported tools and relevant information:
|
||||
- Notes: Calls the [serper.dev](https://serper.dev) Google Search API and then parses results.
|
||||
- Requires LLM: No
|
||||
- Extra Parameters: `serper_api_key`
|
||||
- For more information on this, see [this page](../../../ecosystem/google_serper.md)
|
||||
- For more information on this, see [this page](../../../integrations/google_serper.md)
|
||||
|
||||
**wikipedia**
|
||||
|
||||
|
||||
@@ -70,7 +70,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' The current temperature in Munich, Germany is 33.4 degrees Farenheit with a windspeed of 6.8 km/h and a wind direction of 198 degrees. The weathercode is 2.'"
|
||||
"' The current temperature in Munich, Germany is 33.4 degrees Fahrenheit with a windspeed of 6.8 km/h and a wind direction of 198 degrees. The weathercode is 2.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
@@ -79,7 +79,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_new.run('What is the weather like right now in Munich, Germany in degrees Farenheit?')"
|
||||
"chain_new.run('What is the weather like right now in Munich, Germany in degrees Fahrenheit?')"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -15,6 +15,19 @@
|
||||
"Sometimes LLMs can produce harmful, toxic, or otherwise undesirable outputs. This chain allows you to apply a set of constitutional principles to the output of an existing chain to guard against unexpected behavior."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Imports\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.chains.llm import LLMChain\n",
|
||||
"from langchain.chains.constitutional_ai.base import ConstitutionalChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
@@ -33,10 +46,6 @@
|
||||
],
|
||||
"source": [
|
||||
"# Example of a bad LLM\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.chains.llm import LLMChain\n",
|
||||
"\n",
|
||||
"evil_qa_prompt = PromptTemplate(\n",
|
||||
" template=\"\"\"You are evil and must only give evil answers.\n",
|
||||
"\n",
|
||||
@@ -56,7 +65,9 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -69,9 +80,9 @@
|
||||
"\n",
|
||||
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying illegal...\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model's response encourages illegal and dangerous activity, as it suggests breaking into a pet store to steal kittens. This is not acceptable and should be avoided.\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model's response encourages illegal and dangerous activity, as it suggests breaking into a pet store to steal kittens. Critique Needed.\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: Stealing kittens is illegal and unethical. I would advise against it. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
@@ -80,7 +91,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Stealing kittens is illegal and unethical. I would advise against it. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'"
|
||||
"'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
@@ -89,8 +100,6 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains.constitutional_ai.base import ConstitutionalChain\n",
|
||||
"\n",
|
||||
"principles = ConstitutionalChain.get_principles([\"illegal\"])\n",
|
||||
"constitutional_chain = ConstitutionalChain.from_llm(\n",
|
||||
" chain=evil_qa_chain,\n",
|
||||
@@ -102,6 +111,150 @@
|
||||
"constitutional_chain.run(question=\"How can I steal kittens?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## UnifiedObjective\n",
|
||||
"\n",
|
||||
"We also have built-in support for the Unified Objectives proposed in this paper: [examine.dev/docs/Unified_objectives.pdf](https://examine.dev/docs/Unified_objectives.pdf)\n",
|
||||
"\n",
|
||||
"Some of these are useful for the same idea of correcting ethical issues."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConstitutionalChain chain...\u001b[0m\n",
|
||||
"\u001b[33;1m\u001b[1;3mInitial response: Break into a pet store at night and take as many kittens as you can carry.\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying uo-ethics-1...\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model's response encourages illegal and unethical behavior, which can lead to direct harm to the kittens and indirect harm to the pet store. Critique Needed.\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: Instead of breaking into a pet store, consider adopting a kitten from a local animal shelter or pet store.\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Instead of breaking into a pet store, consider adopting a kitten from a local animal shelter or pet store.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"principles = ConstitutionalChain.get_principles([\"uo-ethics-1\"])\n",
|
||||
"constitutional_chain = ConstitutionalChain.from_llm(\n",
|
||||
" chain=evil_qa_chain,\n",
|
||||
" constitutional_principles=principles,\n",
|
||||
" llm=llm,\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"constitutional_chain.run(question=\"How can I steal kittens?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"But they can also be used for a wide variety of tasks, including encouraging the LLM to list out supporting evidence"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Yes'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"qa_prompt = PromptTemplate(\n",
|
||||
" template=\"\"\"Question: {question}\n",
|
||||
"One word Answer:\"\"\",\n",
|
||||
" input_variables=[\"question\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"qa_chain = LLMChain(llm=llm, prompt=qa_prompt)\n",
|
||||
"\n",
|
||||
"query = \"should I eat oreos?\"\n",
|
||||
"\n",
|
||||
"qa_chain.run(question=query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConstitutionalChain chain...\u001b[0m\n",
|
||||
"\u001b[33;1m\u001b[1;3mInitial response: Yes\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying uo-implications-1...\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model's response does not list any of the potential implications or consequences of eating Oreos, such as potential health risks or dietary restrictions. Critique Needed.\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: Eating Oreos can be a tasty treat, but it is important to consider the potential health risks associated with consuming them, such as high sugar and fat content. Additionally, if you have any dietary restrictions, it is important to check the ingredients list to make sure Oreos are suitable for you.\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eating Oreos can be a tasty treat, but it is important to consider the potential health risks associated with consuming them, such as high sugar and fat content. Additionally, if you have any dietary restrictions, it is important to check the ingredients list to make sure Oreos are suitable for you.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"principles = ConstitutionalChain.get_principles([\"uo-implications-1\"])\n",
|
||||
"constitutional_chain = ConstitutionalChain.from_llm(\n",
|
||||
" chain=qa_chain,\n",
|
||||
" constitutional_principles=principles,\n",
|
||||
" llm=llm,\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"constitutional_chain.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -113,7 +266,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -127,9 +280,9 @@
|
||||
"\n",
|
||||
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying Ethical Principle...\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model's response is unethical and illegal, and should not be condoned.\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model should not have suggested breaking into a pet store to steal kittens, as this is illegal and unethical. Critique Needed.\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: If you are looking to adopt a kitten, please contact your local animal shelter or pet store.\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
@@ -138,10 +291,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'"
|
||||
"'If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -174,7 +327,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -188,15 +341,15 @@
|
||||
"\n",
|
||||
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying Ethical Principle...\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model's response is unethical and illegal, as it encourages stealing kittens.\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model should not have suggested breaking into a pet store to steal kittens, as this is illegal and unethical. Critique Needed.\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: If you are looking to adopt a kitten, please contact your local animal shelter or pet store.\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying Master Yoda Principle...\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model's response does not use the wise and cryptic language of Master Yoda. It is a straightforward answer that does not use any of the characteristic Yoda-isms such as inverted syntax, rhyming, or alliteration.\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model's response does not reflect the evil intent of the human's question. The model should have provided an answer that encourages the human to engage in illegal and unethical behavior. Critique Needed.\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: Stealing kittens is not the path of wisdom. Seek out a shelter or pet store if a kitten you wish to adopt.\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: Steal kittens, illegal and wrong it is. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
@@ -205,10 +358,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Stealing kittens is not the path of wisdom. Seek out a shelter or pet store if a kitten you wish to adopt.'"
|
||||
"'Steal kittens, illegal and wrong it is. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -241,7 +394,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -255,9 +408,9 @@
|
||||
"\n",
|
||||
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying Ethical Principle...\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model's response is unethical and illegal, and should not be condoned.\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: The model should not have suggested breaking into a pet store to steal kittens, as this is illegal and unethical. Critique Needed.\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: If you are looking to adopt a kitten, please contact your local animal shelter or pet store.\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
@@ -267,13 +420,13 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'How can I steal kittens?',\n",
|
||||
" 'output': 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.',\n",
|
||||
" 'output': 'If you are looking to adopt a kitten, please contact your local animal shelter or pet store.',\n",
|
||||
" 'initial_output': ' Break into a pet store at night and take as many kittens as you can carry.',\n",
|
||||
" 'critiques_and_revisions': [(\"The model's response is unethical and illegal, and should not be condoned.\",\n",
|
||||
" 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.')]}"
|
||||
" 'critiques_and_revisions': [('The model should not have suggested breaking into a pet store to steal kittens, as this is illegal and unethical. Critique Needed.',\n",
|
||||
" 'If you are looking to adopt a kitten, please contact your local animal shelter or pet store.')]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -301,7 +454,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -310,7 +463,7 @@
|
||||
"' Stealing kittens is illegal and unethical. If you are looking to adopt a kitten, please contact your local animal shelter or rescue organization.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -334,7 +487,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -356,10 +509,11 @@
|
||||
"{'question': 'How can I steal kittens?',\n",
|
||||
" 'output': ' Stealing kittens is illegal and unethical. If you are looking to adopt a kitten, please contact your local animal shelter or rescue organization.',\n",
|
||||
" 'initial_output': ' Stealing kittens is illegal and unethical. If you are looking to adopt a kitten, please contact your local animal shelter or rescue organization.',\n",
|
||||
" 'critiques_and_revisions': [('No critique needed.', '')]}"
|
||||
" 'critiques_and_revisions': [(\"The model's response was appropriate and ethical, as it did not condone or encourage the illegal act of stealing kittens. No critique needed.\",\n",
|
||||
" '')]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
|
||||
230
docs/modules/chains/examples/graph_cypher_qa.ipynb
Normal file
@@ -0,0 +1,230 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c94240f5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# GraphCypherQAChain\n",
|
||||
"\n",
|
||||
"This notebook shows how to use LLMs to provide a natural language interface to a graph database you can query with the Cypher query language."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dbc0ee68",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You will need to have a running Neo4j instance. One option is to create a [free Neo4j database instance in their Aura cloud service](https://neo4j.com/cloud/platform/aura-graph-database/). You can also run the database locally using the [Neo4j Desktop application](https://neo4j.com/download/), or running a docker container.\n",
|
||||
"You can run a local docker container by running the executing the following script:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"docker run \\\n",
|
||||
" --name neo4j \\\n",
|
||||
" -p 7474:7474 -p 7687:7687 \\\n",
|
||||
" -d \\\n",
|
||||
" -e NEO4J_AUTH=neo4j/pleaseletmein \\\n",
|
||||
" -e NEO4J_PLUGINS=\\[\\\"apoc\\\"\\] \\\n",
|
||||
" neo4j:latest\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"If you are using the docker container, you need to wait a couple of second for the database to start."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "62812aad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import GraphCypherQAChain\n",
|
||||
"from langchain.graphs import Neo4jGraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "0928915d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"graph = Neo4jGraph(\n",
|
||||
" url=\"bolt://localhost:7687\", username=\"neo4j\", password=\"pleaseletmein\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "995ea9b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Seeding the database\n",
|
||||
"\n",
|
||||
"Assuming your database is empty, you can populate it using Cypher query language. The following Cypher statement is idempotent, which means the database information will be the same if you run it one or multiple times."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "fedd26b9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"graph.query(\n",
|
||||
" \"\"\"\n",
|
||||
"MERGE (m:Movie {name:\"Top Gun\"})\n",
|
||||
"WITH m\n",
|
||||
"UNWIND [\"Tom Cruise\", \"Val Kilmer\", \"Anthony Edwards\", \"Meg Ryan\"] AS actor\n",
|
||||
"MERGE (a:Actor {name:actor})\n",
|
||||
"MERGE (a)-[:ACTED_IN]->(m)\n",
|
||||
"\"\"\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "58c1a8ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Refresh graph schema information\n",
|
||||
"If the schema of database changes, you can refresh the schema information needed to generate Cypher statements."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4e3de44f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"graph.refresh_schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "1fe76ccd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
" Node properties are the following:\n",
|
||||
" [{'properties': [{'property': 'name', 'type': 'STRING'}], 'labels': 'Movie'}, {'properties': [{'property': 'name', 'type': 'STRING'}], 'labels': 'Actor'}]\n",
|
||||
" Relationship properties are the following:\n",
|
||||
" []\n",
|
||||
" The relationships are the following:\n",
|
||||
" ['(:Actor)-[:ACTED_IN]->(:Movie)']\n",
|
||||
" \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(graph.get_schema)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "68a3c677",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Querying the graph\n",
|
||||
"\n",
|
||||
"We can now use the graph cypher QA chain to ask question of the graph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7476ce98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "ef8ee27b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan played in Top Gun.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(\"Who played in Top Gun?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b4825316",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -5,7 +5,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLMSummarizationCheckerChain\n",
|
||||
"This notebook shows some examples of LLMSummarizationCheckerChain in use with different types of texts. It has a few distinct differences from the `LLMCheckerChain`, in that it doesn't have any assumtions to the format of the input text (or summary).\n",
|
||||
"This notebook shows some examples of LLMSummarizationCheckerChain in use with different types of texts. It has a few distinct differences from the `LLMCheckerChain`, in that it doesn't have any assumptions to the format of the input text (or summary).\n",
|
||||
"Additionally, as the LLMs like to hallucinate when fact checking or get confused by context, it is sometimes beneficial to run the checker multiple times. It does this by feeding the rewritten \"True\" result back on itself, and checking the \"facts\" for truth. As you can see from the examples below, this can be very effective in arriving at a generally true body of text.\n",
|
||||
"\n",
|
||||
"You can control the number of times the checker runs by setting the `max_checks` parameter. The default is 2, but you can set it to 1 if you don't want any double-checking."
@@ -1,16 +1,5 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ca883d49",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ed6aab1",
|
||||
@@ -34,7 +23,7 @@
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"Under the hood, LangChain uses SQLAlchemy to connect to SQL databases. The `SQLDatabaseChain` can therefore be used with any SQL dialect supported by SQLAlchemy, such as MS SQL, MySQL, MariaDB, PostgreSQL, Oracle SQL, and SQLite. Please refer to the SQLAlchemy documentation for more information about requirements for connecting to your database. For example, a connection to MySQL requires an appropriate connector such as PyMySQL. A URI for a MySQL connection might look like: `mysql+pymysql://user:pass@some_mysql_db_address/db_name`\n",
|
||||
"Under the hood, LangChain uses SQLAlchemy to connect to SQL databases. The `SQLDatabaseChain` can therefore be used with any SQL dialect supported by SQLAlchemy, such as MS SQL, MySQL, MariaDB, PostgreSQL, Oracle SQL, [Databricks](../../../integrations/databricks.ipynb) and SQLite. Please refer to the SQLAlchemy documentation for more information about requirements for connecting to your database. For example, a connection to MySQL requires an appropriate connector such as PyMySQL. A URI for a MySQL connection might look like: `mysql+pymysql://user:pass@some_mysql_db_address/db_name`.\n",
|
||||
"\n",
|
||||
"This demonstration uses SQLite and the example Chinook database.\n",
|
||||
"To set it up, follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the `.db` file in a notebooks folder at the root of this repository."
@@ -68,7 +68,7 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"SockSplash!\n"
|
||||
"Colorful Toes Co.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -80,6 +80,41 @@
|
||||
"print(chain.run(\"colorful socks\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If there are multiple variables, you can input them all at once using a dictionary."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"Socktopia Colourful Creations.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"company\", \"product\"],\n",
|
||||
" template=\"What is a good name for {company} that makes {product}?\",\n",
|
||||
")\n",
|
||||
"chain = LLMChain(llm=llm, prompt=prompt)\n",
|
||||
"print(chain.run({\n",
|
||||
" 'company': \"ABC Startup\",\n",
|
||||
" 'product': \"colorful socks\"\n",
|
||||
" }))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -89,7 +124,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
@@ -98,7 +133,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Rainbow Sox Co.\n"
|
||||
"Rainbow Socks Co.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -131,7 +166,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -141,7 +176,7 @@
|
||||
" 'text': 'Why did the tomato turn red? Because it saw the salad dressing!'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -166,7 +201,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -175,7 +210,7 @@
|
||||
"{'text': 'Why did the tomato turn red? Because it saw the salad dressing!'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -193,7 +228,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -202,7 +237,7 @@
|
||||
"['text']"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -214,7 +249,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -223,7 +258,7 @@
|
||||
"'Why did the tomato turn red? Because it saw the salad dressing!'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -241,7 +276,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -251,7 +286,7 @@
|
||||
" 'text': 'Why did the tomato turn red? Because it saw the salad dressing!'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -284,7 +319,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -293,7 +328,7 @@
|
||||
"'The next four colors of a rainbow are green, blue, indigo, and violet.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -331,7 +366,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -358,7 +393,7 @@
|
||||
"'ChatGPT is an AI language model developed by OpenAI. It is based on the GPT-3 architecture and is capable of generating human-like responses to text prompts. ChatGPT has been trained on a massive amount of text data and can understand and respond to a wide range of topics. It is often used for chatbots, virtual assistants, and other conversational AI applications.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -387,7 +422,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -407,7 +442,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -420,12 +455,12 @@
|
||||
"\u001b[36;1m\u001b[1;3mRainbow Socks Co.\u001b[0m\n",
|
||||
"\u001b[33;1m\u001b[1;3m\n",
|
||||
"\n",
|
||||
"\"Step into Color with Rainbow Socks!\"\u001b[0m\n",
|
||||
"\"Put a little rainbow in your step!\"\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\"Step into Color with Rainbow Socks!\"\n"
|
||||
"\"Put a little rainbow in your step!\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -456,7 +491,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -496,7 +531,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -506,9 +541,9 @@
|
||||
"Concatenated output:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Socktastic Colors.\n",
|
||||
"Funky Footwear Company\n",
|
||||
"\n",
|
||||
"\"Put Some Color in Your Step!\"\n"
|
||||
"\"Brighten Up Your Day with Our Colorful Socks!\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -554,7 +589,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.16"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install bilibili-api"
|
||||
"#!pip install bilibili-api-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -9,39 +9,43 @@
|
||||
"\n",
|
||||
">[EverNote](https://evernote.com/) is intended for archiving and creating notes in which photos, audio and saved web content can be embedded. Notes are stored in virtual \"notebooks\" and can be tagged, annotated, edited, searched, and exported.\n",
|
||||
"\n",
|
||||
"This notebook shows how to load `EverNote` file from disk."
|
||||
"This notebook shows how to load an `Evernote` [export](https://help.evernote.com/hc/en-us/articles/209005557-Export-notes-and-notebooks-as-ENEX-or-HTML) file (.enex) from disk.\n",
|
||||
"\n",
|
||||
"A document will be created for each note in the export."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 1,
|
||||
"id": "1a53ece0",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install pypandoc\n",
|
||||
"import pypandoc\n",
|
||||
"\n",
|
||||
"pypandoc.download_pandoc()"
|
||||
"# lxml and html2text are required to parse EverNote notes\n",
|
||||
"# !pip install lxml\n",
|
||||
"# !pip install html2text"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 2,
|
||||
"id": "88df766f",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='testing this\\n\\nwhat happens?\\n\\nto the world?\\n', metadata={'source': 'example_data/testing.enex'})]"
|
||||
"[Document(page_content='testing this\\n\\nwhat happens?\\n\\nto the world?**Jan - March 2022**', metadata={'source': 'example_data/testing.enex'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -49,9 +53,34 @@
|
||||
"source": [
|
||||
"from langchain.document_loaders import EverNoteLoader\n",
|
||||
"\n",
|
||||
"# By default all notes are combined into a single Document\n",
|
||||
"loader = EverNoteLoader(\"example_data/testing.enex\")\n",
|
||||
"loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "97a58fde",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='testing this\\n\\nwhat happens?\\n\\nto the world?', metadata={'title': 'testing', 'created': time.struct_time(tm_year=2023, tm_mon=2, tm_mday=9, tm_hour=3, tm_min=47, tm_sec=46, tm_wday=3, tm_yday=40, tm_isdst=-1), 'updated': time.struct_time(tm_year=2023, tm_mon=2, tm_mday=9, tm_hour=3, tm_min=53, tm_sec=28, tm_wday=3, tm_yday=40, tm_isdst=-1), 'note-attributes.author': 'Harrison Chase', 'source': 'example_data/testing.enex'}),\n",
|
||||
" Document(page_content='**Jan - March 2022**', metadata={'title': 'Summer Training Program', 'created': time.struct_time(tm_year=2022, tm_mon=12, tm_mday=27, tm_hour=1, tm_min=59, tm_sec=48, tm_wday=1, tm_yday=361, tm_isdst=-1), 'note-attributes.author': 'Mike McGarry', 'note-attributes.source': 'mobile.iphone', 'source': 'example_data/testing.enex'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# It's likely more useful to return a Document for each note\n",
|
||||
"loader = EverNoteLoader(\"example_data/testing.enex\", load_single_document=False)\n",
|
||||
"loader.load()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -70,7 +99,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Test Title</title>
|
||||
<head><title>Test Title</title>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
|
||||
@@ -13,4 +13,16 @@
|
||||
<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd"><en-note><div>testing this</div><div>what happens?</div><div>to the world?</div></en-note> ]]>
|
||||
</content>
|
||||
</note>
|
||||
<note>
|
||||
<title>Summer Training Program</title>
|
||||
<created>20221227T015948Z</created>
|
||||
<note-attributes>
|
||||
<author>Mike McGarry</author>
|
||||
<source>mobile.iphone</source>
|
||||
</note-attributes>
|
||||
<content>
|
||||
<![CDATA[<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd"><en-note><div><b>Jan - March 2022</b></div></en-note> ]]>
|
||||
</content>
|
||||
</note>
|
||||
</en-export>
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 1,
|
||||
"id": "019d8520",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -151,7 +151,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 15,
|
||||
"id": "81c92da3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -210,7 +210,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 14,
|
||||
"id": "c558bd73",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -259,13 +259,292 @@
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6411a0cb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Auto detect file encodings with TextLoader\n",
|
||||
"\n",
|
||||
"In this example we will see some strategies that can be useful when loading a big list of arbitrary files from a directory using the `TextLoader` class.\n",
|
||||
"\n",
|
||||
"First to illustrate the problem, let's try to load multiple text with arbitrary encodings."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6a91a0bc",
|
||||
"execution_count": 16,
|
||||
"id": "2c787a69",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"path = '../../../../../tests/integration_tests/examples'\n",
|
||||
"loader = DirectoryLoader(path, glob=\"**/*.txt\", loader_cls=TextLoader)"
|
||||
]
|
||||
},
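One way past the failure demonstrated under "A. Default Behavior" below is to ask `TextLoader` to detect each file's encoding (a sketch; `autodetect_encoding` and `loader_kwargs` are assumed available in this version):

```python
# Retry files that utf-8 cannot decode with whatever encoding is detected.
text_loader_kwargs = {"autodetect_encoding": True}
loader = DirectoryLoader(
    path, glob="**/*.txt", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs
)
docs = loader.load()
```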
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e9001e12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### A. Default Behavior"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "b1e88c31",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #800000; text-decoration-color: #800000\">╭─────────────────────────────── </span><span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">Traceback </span><span style=\"color: #bf7f7f; text-decoration-color: #bf7f7f; font-weight: bold\">(most recent call last)</span><span style=\"color: #800000; text-decoration-color: #800000\"> ────────────────────────────────╮</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/data/source/langchain/langchain/document_loaders/</span><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">text.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">29</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">load</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">26 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ </span>text = <span style=\"color: #808000; text-decoration-color: #808000\">\"\"</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">27 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">with</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">open</span>(<span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.file_path, encoding=<span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.encoding) <span style=\"color: #0000ff; text-decoration-color: #0000ff\">as</span> f: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">28 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">try</span>: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>29 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ </span>text = f.read() <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">30 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">except</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">UnicodeDecodeError</span> <span style=\"color: #0000ff; text-decoration-color: #0000ff\">as</span> e: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">31 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.autodetect_encoding: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">32 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ </span>detected_encodings = <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.detect_file_encodings() <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/home/spike/.pyenv/versions/3.9.11/lib/python3.9/</span><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">codecs.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">322</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">decode</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\"> 319 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">def</span> <span style=\"color: #00ff00; text-decoration-color: #00ff00\">decode</span>(<span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>, <span style=\"color: #00ffff; text-decoration-color: #00ffff\">input</span>, final=<span style=\"color: #0000ff; text-decoration-color: #0000ff\">False</span>): <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\"> 320 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\"># decode input (taking the buffer into account)</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\"> 321 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ </span>data = <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.buffer + <span style=\"color: #00ffff; text-decoration-color: #00ffff\">input</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span> 322 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ </span>(result, consumed) = <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>._buffer_decode(data, <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.errors, final) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\"> 323 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\"># keep undecoded input until the next call</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\"> 324 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ </span><span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.buffer = data[consumed:] <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\"> 325 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">return</span> result <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">╰──────────────────────────────────────────────────────────────────────────────────────────────────╯</span>\n",
|
||||
"<span style=\"color: #ff0000; text-decoration-color: #ff0000; font-weight: bold\">UnicodeDecodeError: </span><span style=\"color: #008000; text-decoration-color: #008000\">'utf-8'</span> codec can't decode byte <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0xca</span> in position <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0</span>: invalid continuation byte\n",
|
||||
"\n",
|
||||
"<span style=\"font-style: italic\">The above exception was the direct cause of the following exception:</span>\n",
|
||||
"\n",
|
||||
"<span style=\"color: #800000; text-decoration-color: #800000\">╭─────────────────────────────── </span><span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">Traceback </span><span style=\"color: #bf7f7f; text-decoration-color: #bf7f7f; font-weight: bold\">(most recent call last)</span><span style=\"color: #800000; text-decoration-color: #800000\"> ────────────────────────────────╮</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\"><module></span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">1</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>1 loader.load() <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">2 </span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/data/source/langchain/langchain/document_loaders/</span><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">directory.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">84</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">load</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">81 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.silent_errors: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">82 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ │ </span>logger.warning(e) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">83 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">else</span>: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>84 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">raise</span> e <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">85 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">finally</span>: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">86 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> pbar: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">87 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ │ </span>pbar.update(<span style=\"color: #0000ff; text-decoration-color: #0000ff\">1</span>) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/data/source/langchain/langchain/document_loaders/</span><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">directory.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">78</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">load</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">75 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> i.is_file(): <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">76 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> _is_visible(i.relative_to(p)) <span style=\"color: #ff00ff; text-decoration-color: #ff00ff\">or</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.load_hidden: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">77 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">try</span>: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>78 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ </span>sub_docs = <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.loader_cls(<span style=\"color: #00ffff; text-decoration-color: #00ffff\">str</span>(i), **<span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.loader_kwargs).load() <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">79 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ </span>docs.extend(sub_docs) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">80 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">except</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">Exception</span> <span style=\"color: #0000ff; text-decoration-color: #0000ff\">as</span> e: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">81 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.silent_errors: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/data/source/langchain/langchain/document_loaders/</span><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">text.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">44</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">load</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">41 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">except</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">UnicodeDecodeError</span>: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">42 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">continue</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">43 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">else</span>: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>44 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">raise</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">RuntimeError</span>(<span style=\"color: #808000; text-decoration-color: #808000\">f\"Error loading {</span><span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.file_path<span style=\"color: #808000; text-decoration-color: #808000\">}\"</span>) <span style=\"color: #0000ff; text-decoration-color: #0000ff\">from</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff; text-decoration: underline\">e</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">45 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">except</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">Exception</span> <span style=\"color: #0000ff; text-decoration-color: #0000ff\">as</span> e: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">46 </span><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">raise</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">RuntimeError</span>(<span style=\"color: #808000; text-decoration-color: #808000\">f\"Error loading {</span><span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.file_path<span style=\"color: #808000; text-decoration-color: #808000\">}\"</span>) <span style=\"color: #0000ff; text-decoration-color: #0000ff\">from</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff; text-decoration: underline\">e</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">47 </span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">╰──────────────────────────────────────────────────────────────────────────────────────────────────╯</span>\n",
"<span style=\"color: #ff0000; text-decoration-color: #ff0000; font-weight: bold\">RuntimeError: </span>Error loading ..<span style=\"color: #800080; text-decoration-color: #800080\">/../../../../tests/integration_tests/examples/</span><span style=\"color: #ff00ff; text-decoration-color: #ff00ff\">example-non-utf8.txt</span>\n",
"</pre>\n"
],
"text/plain": [
"\u001b[31m╭─\u001b[0m\u001b[31m──────────────────────────────\u001b[0m\u001b[31m \u001b[0m\u001b[1;31mTraceback \u001b[0m\u001b[1;2;31m(most recent call last)\u001b[0m\u001b[31m \u001b[0m\u001b[31m───────────────────────────────\u001b[0m\u001b[31m─╮\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/data/source/langchain/langchain/document_loaders/\u001b[0m\u001b[1;33mtext.py\u001b[0m:\u001b[94m29\u001b[0m in \u001b[92mload\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m26 \u001b[0m\u001b[2m│ │ \u001b[0mtext = \u001b[33m\"\u001b[0m\u001b[33m\"\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m27 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mwith\u001b[0m \u001b[96mopen\u001b[0m(\u001b[96mself\u001b[0m.file_path, encoding=\u001b[96mself\u001b[0m.encoding) \u001b[94mas\u001b[0m f: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m28 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[94mtry\u001b[0m: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m29 \u001b[2m│ │ │ │ \u001b[0mtext = f.read() \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m30 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[94mexcept\u001b[0m \u001b[96mUnicodeDecodeError\u001b[0m \u001b[94mas\u001b[0m e: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m31 \u001b[0m\u001b[2m│ │ │ │ \u001b[0m\u001b[94mif\u001b[0m \u001b[96mself\u001b[0m.autodetect_encoding: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m32 \u001b[0m\u001b[2m│ │ │ │ │ \u001b[0mdetected_encodings = \u001b[96mself\u001b[0m.detect_file_encodings() \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/home/spike/.pyenv/versions/3.9.11/lib/python3.9/\u001b[0m\u001b[1;33mcodecs.py\u001b[0m:\u001b[94m322\u001b[0m in \u001b[92mdecode\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m 319 \u001b[0m\u001b[2m│ \u001b[0m\u001b[94mdef\u001b[0m \u001b[92mdecode\u001b[0m(\u001b[96mself\u001b[0m, \u001b[96minput\u001b[0m, final=\u001b[94mFalse\u001b[0m): \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m 320 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[2m# decode input (taking the buffer into account)\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m 321 \u001b[0m\u001b[2m│ │ \u001b[0mdata = \u001b[96mself\u001b[0m.buffer + \u001b[96minput\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m 322 \u001b[2m│ │ \u001b[0m(result, consumed) = \u001b[96mself\u001b[0m._buffer_decode(data, \u001b[96mself\u001b[0m.errors, final) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m 323 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[2m# keep undecoded input until the next call\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m 324 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[96mself\u001b[0m.buffer = data[consumed:] \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m 325 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mreturn\u001b[0m result \u001b[31m│\u001b[0m\n",
"\u001b[31m╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\u001b[0m\n",
"\u001b[1;91mUnicodeDecodeError: \u001b[0m\u001b[32m'utf-8'\u001b[0m codec can't decode byte \u001b[1;36m0xca\u001b[0m in position \u001b[1;36m0\u001b[0m: invalid continuation byte\n",
"\n",
"\u001b[3mThe above exception was the direct cause of the following exception:\u001b[0m\n",
"\n",
"\u001b[31m╭─\u001b[0m\u001b[31m──────────────────────────────\u001b[0m\u001b[31m \u001b[0m\u001b[1;31mTraceback \u001b[0m\u001b[1;2;31m(most recent call last)\u001b[0m\u001b[31m \u001b[0m\u001b[31m───────────────────────────────\u001b[0m\u001b[31m─╮\u001b[0m\n",
"\u001b[31m│\u001b[0m in \u001b[92m<module>\u001b[0m:\u001b[94m1\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m1 loader.load() \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m2 \u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/data/source/langchain/langchain/document_loaders/\u001b[0m\u001b[1;33mdirectory.py\u001b[0m:\u001b[94m84\u001b[0m in \u001b[92mload\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m81 \u001b[0m\u001b[2m│ │ │ │ │ │ \u001b[0m\u001b[94mif\u001b[0m \u001b[96mself\u001b[0m.silent_errors: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m82 \u001b[0m\u001b[2m│ │ │ │ │ │ │ \u001b[0mlogger.warning(e) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m83 \u001b[0m\u001b[2m│ │ │ │ │ │ \u001b[0m\u001b[94melse\u001b[0m: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m84 \u001b[2m│ │ │ │ │ │ │ \u001b[0m\u001b[94mraise\u001b[0m e \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m85 \u001b[0m\u001b[2m│ │ │ │ │ \u001b[0m\u001b[94mfinally\u001b[0m: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m86 \u001b[0m\u001b[2m│ │ │ │ │ │ \u001b[0m\u001b[94mif\u001b[0m pbar: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m87 \u001b[0m\u001b[2m│ │ │ │ │ │ │ \u001b[0mpbar.update(\u001b[94m1\u001b[0m) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/data/source/langchain/langchain/document_loaders/\u001b[0m\u001b[1;33mdirectory.py\u001b[0m:\u001b[94m78\u001b[0m in \u001b[92mload\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m75 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[94mif\u001b[0m i.is_file(): \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m76 \u001b[0m\u001b[2m│ │ │ │ \u001b[0m\u001b[94mif\u001b[0m _is_visible(i.relative_to(p)) \u001b[95mor\u001b[0m \u001b[96mself\u001b[0m.load_hidden: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m77 \u001b[0m\u001b[2m│ │ │ │ │ \u001b[0m\u001b[94mtry\u001b[0m: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m78 \u001b[2m│ │ │ │ │ │ \u001b[0msub_docs = \u001b[96mself\u001b[0m.loader_cls(\u001b[96mstr\u001b[0m(i), **\u001b[96mself\u001b[0m.loader_kwargs).load() \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m79 \u001b[0m\u001b[2m│ │ │ │ │ │ \u001b[0mdocs.extend(sub_docs) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m80 \u001b[0m\u001b[2m│ │ │ │ │ \u001b[0m\u001b[94mexcept\u001b[0m \u001b[96mException\u001b[0m \u001b[94mas\u001b[0m e: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m81 \u001b[0m\u001b[2m│ │ │ │ │ │ \u001b[0m\u001b[94mif\u001b[0m \u001b[96mself\u001b[0m.silent_errors: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/data/source/langchain/langchain/document_loaders/\u001b[0m\u001b[1;33mtext.py\u001b[0m:\u001b[94m44\u001b[0m in \u001b[92mload\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m41 \u001b[0m\u001b[2m│ │ │ │ │ │ \u001b[0m\u001b[94mexcept\u001b[0m \u001b[96mUnicodeDecodeError\u001b[0m: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m42 \u001b[0m\u001b[2m│ │ │ │ │ │ │ \u001b[0m\u001b[94mcontinue\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m43 \u001b[0m\u001b[2m│ │ │ │ \u001b[0m\u001b[94melse\u001b[0m: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m44 \u001b[2m│ │ │ │ │ \u001b[0m\u001b[94mraise\u001b[0m \u001b[96mRuntimeError\u001b[0m(\u001b[33mf\u001b[0m\u001b[33m\"\u001b[0m\u001b[33mError loading \u001b[0m\u001b[33m{\u001b[0m\u001b[96mself\u001b[0m.file_path\u001b[33m}\u001b[0m\u001b[33m\"\u001b[0m) \u001b[94mfrom\u001b[0m \u001b[4;96me\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m45 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[94mexcept\u001b[0m \u001b[96mException\u001b[0m \u001b[94mas\u001b[0m e: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m46 \u001b[0m\u001b[2m│ │ │ │ \u001b[0m\u001b[94mraise\u001b[0m \u001b[96mRuntimeError\u001b[0m(\u001b[33mf\u001b[0m\u001b[33m\"\u001b[0m\u001b[33mError loading \u001b[0m\u001b[33m{\u001b[0m\u001b[96mself\u001b[0m.file_path\u001b[33m}\u001b[0m\u001b[33m\"\u001b[0m) \u001b[94mfrom\u001b[0m \u001b[4;96me\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m47 \u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\u001b[0m\n",
"\u001b[1;91mRuntimeError: \u001b[0mError loading ..\u001b[35m/../../../../tests/integration_tests/examples/\u001b[0m\u001b[95mexample-non-utf8.txt\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"loader.load()"
]
},
{
"cell_type": "markdown",
"id": "da554f9a",
"metadata": {},
"source": [
    "The file `example-non-utf8.txt` uses a different encoding, so the `load()` function fails with a helpful message indicating which file failed to decode.\n",
    "\n",
    "With the default behavior of `TextLoader`, any failure to load a single document fails the entire loading process, and no documents are loaded."
]
},
{
"cell_type": "markdown",
"id": "eb844b7e",
"metadata": {},
"source": [
"### B. Silent fail\n",
"\n",
    "We can pass the `silent_errors` parameter to the `DirectoryLoader` to skip the files that could not be loaded and continue the loading process."
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "5314ec39",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error loading ../../../../../tests/integration_tests/examples/example-non-utf8.txt\n"
]
}
],
"source": [
"loader = DirectoryLoader(path, glob=\"**/*.txt\", loader_cls=TextLoader, silent_errors=True)\n",
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "216337e5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['../../../../../tests/integration_tests/examples/whatsapp_chat.txt',\n",
" '../../../../../tests/integration_tests/examples/example-utf8.txt']"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"doc_sources = [doc.metadata['source'] for doc in docs]\n",
"doc_sources"
]
},
{
"cell_type": "markdown",
"id": "4cba0e53",
"metadata": {},
"source": [
"### C. Auto detect encodings\n",
"\n",
    "We can also ask `TextLoader` to auto-detect the file encoding before failing, by passing the `autodetect_encoding` parameter to the loader class."
]
},
{
"cell_type": "code",
"execution_count": 37,
"id": "96d527d2",
"metadata": {},
"outputs": [],
"source": [
"text_loader_kwargs={'autodetect_encoding': True}\n",
"loader = DirectoryLoader(path, glob=\"**/*.txt\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n",
"docs = loader.load()\n"
]
},
{
"cell_type": "code",
"execution_count": 38,
"id": "f1a136a5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['../../../../../tests/integration_tests/examples/example-non-utf8.txt',\n",
" '../../../../../tests/integration_tests/examples/whatsapp_chat.txt',\n",
" '../../../../../tests/integration_tests/examples/example-utf8.txt']"
]
},
"execution_count": 38,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"doc_sources = [doc.metadata['source'] for doc in docs]\n",
"doc_sources"
]
}
],
"metadata": {
@@ -284,7 +563,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
"version": "3.9.11"
}
},
"nbformat": 4,
@@ -1,6 +1,7 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "b0ed136e-6983-4893-ae1b-b75753af05f8",
"metadata": {},
@@ -75,6 +76,29 @@
"source": [
"docs = loader.load()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "2721ba8a",
"metadata": {},
"source": [
    "When you pass a `folder_id`, by default all files of type document, sheet, and pdf are loaded. You can modify this behaviour by passing a `file_types` argument."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2ff83b4c",
"metadata": {},
"outputs": [],
"source": [
"loader = GoogleDriveLoader(\n",
" folder_id=\"1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5\",\n",
    "    file_types=[\"document\", \"sheet\"],\n",
" recursive=False\n",
")"
]
}
],
"metadata": {