Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-07 01:30:24 +00:00

Comparing commits: dev2049/pg ... v0.0.198 (155 commits)

| SHA1 |
|---|
| 289e9aeb9d |
| d1561b74eb |
| bb7ac9edb5 |
| 010d0bfeea |
| e05997c25e |
| c5bce4a465 |
| 2c9619bc1d |
| 18f5c985d9 |
| a197acfcd3 |
| 18af149e91 |
| 614cff89bc |
| a7227ee01b |
| 232faba796 |
| d7d629911b |
| 6e90406e0f |
| c868a3eef3 |
| 20e9ce8a62 |
| 704d56e241 |
| b934677a81 |
| 2d038b57b2 |
| 0b740c9baa |
| ac3e6e3944 |
| d2270a2261 |
| 1250cd4630 |
| f8cf09a230 |
| e4224a396b |
| 21bd16bb59 |
| 9218684759 |
| d5819a7ca7 |
| 0ca37e613c |
| ca1afa7213 |
| 5f356b9993 |
| d6f5d0c6b1 |
| 62ec10a7f5 |
| 736a1819aa |
| f3e7ac0a2c |
| 3678cba0be |
| 7af186fddf |
| 7cc200766e |
| db7ef635c0 |
| 0eb1bc1a02 |
| 63fcf41bea |
| 2791a753bf |
| a09a0e3511 |
| 0ce8745928 |
| d8ae925425 |
| fe8bbc2da7 |
| 77c286cf02 |
| 3ec6400d70 |
| a6ebffb695 |
| 767fa91eae |
| 5f74db4500 |
| 511c12dd39 |
| 893d20f735 |
| 35cfd25db3 |
| 658f8bdee7 |
| 5518f24ec3 |
| b93638ef1e |
| a1549901ce |
| 78aa59c68b |
| ec0dd6e34a |
| 3294774148 |
| cef79ca579 |
| 106364a45c |
| 9355e3f5f5 |
| f15763518a |
| 137da7e4b6 |
| 9f4b720a63 |
| 76fcd96dae |
| 11fec7d4d1 |
| 0b4a51930c |
| c66755b661 |
| 4d8cda1c3b |
| 3af36943e8 |
| 8ef7274ee6 |
| d9fcc45d05 |
| ce7c11625f |
| 5a207cce8f |
| b3ae6bcd3f |
| 5468528748 |
| 69f4ffb851 |
| 2be4fbb835 |
| 062c3c00a2 |
| 92b87c2fec |
| 3954bcf396 |
| b7999a9bc1 |
| a0d847f636 |
| 217b5cc72d |
| 4092fd21dc |
| 2a4b32dee2 |
| daf3e99b96 |
| b177a29d3f |
| 65111eb2b3 |
| 0cfaa76e45 |
| 2ae2d6cd1d |
| 204a73c1d9 |
| 08e2352f7b |
| f907b62526 |
| 233b52735e |
| 38dabdbb3a |
| 84a46753ab |
| d5b1608216 |
| 25487fa5ee |
| 2dcda8a8ac |
| 98dd6d068a |
| 5124c1e0d9 |
| a47c8618ec |
| 87ad4fc4b2 |
| 92a5f00ffb |
| aea090045b |
| a4c9053d40 |
| 2f2d27fd82 |
| 019eb13681 |
| 450eb91fe2 |
| 0551bc90a5 |
| a0ea6f6b6b |
| 1a9ac3b1f9 |
| 625717daa8 |
| 74f8e603d9 |
| d0d89d39ef |
| b64c39dfe7 |
| 3fb0e4872a |
| 8d9e9e013c |
| 26ec845921 |
| c112d7334d |
| 6c11f94013 |
| 6e25e65085 |
| 8f98592ac9 |
| b9040669a0 |
| 647210a4b9 |
| 8fea0529c1 |
| 6a3ceaa377 |
| 3e45b83065 |
| 33ea606f45 |
| 5af2c51e78 |
| d3bdb8ea6d |
| 1c51d3db0f |
| 475007d63a |
| 92f218207b |
| ad09367a92 |
| 9921f8cc3a |
| 4e71a1702b |
| b201cfaa0f |
| ae3611730a |
| 934319fc28 |
| 44ad9628c9 |
| 1f4abb265a |
| ae2cf1f598 |
| b81f98b8a6 |
| b72401b47b |
| ce6dbe41a9 |
| 95c6ed0568 |
| bc875a9df1 |
| a97e4252e3 |
| 9a7488a5ce |

`.github/ISSUE_TEMPLATE/bug-report.yml` (2 changes, vendored)

```
@@ -46,7 +46,7 @@ body:
- @agola11

Tools / Toolkits
- @vowelparrot
- ...

placeholder: "@Username ..."
```

`.github/PULL_REQUEST_TEMPLATE.md` (2 changes, vendored)

```
@@ -48,7 +48,7 @@ Tag maintainers/contributors who might be interested:
- @agola11

Agents / Tools / Toolkits
- @vowelparrot
- @hwchase17

VectorStores / Retrievers / Memory
- @dev2049
```

README.md

@@ -2,6 +2,7 @@

⚡ Building applications with LLMs through composability ⚡

[](https://github.com/hwchase17/langchain/releases)
[](https://github.com/hwchase17/langchain/actions/workflows/lint.yml)
[](https://github.com/hwchase17/langchain/actions/workflows/test.yml)
[](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml)

@@ -12,6 +13,8 @@

[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/hwchase17/langchain)
[](https://codespaces.new/hwchase17/langchain)
[](https://star-history.com/#hwchase17/langchain)
[](https://libraries.io/github/hwchase17/langchain)
[](https://github.com/hwchase17/langchain/issues)

Looking for the JS/TS version? Check out [LangChain.js](https://github.com/hwchase17/langchainjs).

`docs/_static/js/mendablesearch.js` (1 change, vendored)

```
@@ -37,6 +37,7 @@ document.addEventListener('DOMContentLoaded', () => {
    style: { darkMode: false, accentColor: '#010810' },
    floatingButtonStyle: { color: '#ffffff', backgroundColor: '#010810' },
    anon_key: '82842b36-3ea6-49b2-9fb8-52cfc4bde6bf', // Mendable Search Public ANON key, ok to be public
    cmdShortcutKey: 'j',
    messageSettings: {
      openSourcesInNewTab: false,
      prettySources: true // Prettify the sources displayed now
```

`docs/additional_resources/deploy_llms.rst` (137 lines, new file)

============================
Deploying LLMs in Production
============================

In today's fast-paced technological landscape, the use of Large Language Models (LLMs) is rapidly expanding. As a result, it's crucial for developers to understand how to effectively deploy these models in production environments. LLM interfaces typically fall into two categories:

- **Case 1: Utilizing External LLM Providers (OpenAI, Anthropic, etc.)**

  In this scenario, most of the computational burden is handled by the LLM providers, while LangChain simplifies the implementation of business logic around these services. This approach includes features such as prompt templating, chat message generation, caching, vector embedding database creation, preprocessing, etc.

- **Case 2: Self-hosted Open-Source Models**

  Alternatively, developers can opt to use smaller yet comparably capable self-hosted open-source LLMs. This approach can significantly decrease the costs, latency, and privacy concerns associated with transferring data to external LLM providers.

Regardless of the framework that forms the backbone of your product, deploying LLM applications comes with its own set of challenges. It's vital to understand the trade-offs and key considerations when evaluating serving frameworks.

Outline
=======

This guide aims to provide a comprehensive overview of the requirements for deploying LLMs in a production setting, focusing on:

- `Designing a Robust LLM Application Service <#robust>`_
- `Maintaining Cost-Efficiency <#cost>`_
- `Ensuring Rapid Iteration <#iteration>`_

Understanding these components is crucial when assessing serving systems. LangChain integrates with several open-source projects designed to tackle these issues, providing a robust framework for productionizing your LLM applications. Some notable frameworks include:

- `Ray Serve <../integrations/ray_serve.html>`_
- `BentoML <https://github.com/ssheng/BentoChain>`_
- `Modal <../integrations/modal.html>`_

These links provide further information on each ecosystem, assisting you in finding the best fit for your LLM deployment needs.

Designing a Robust LLM Application Service
===========================================

.. _robust:

When deploying an LLM service in production, it's imperative to provide a seamless user experience free from outages. Achieving 24/7 service availability involves creating and maintaining several sub-systems surrounding your application.

Monitoring
----------

Monitoring forms an integral part of any system running in a production environment. In the context of LLMs, it is essential to monitor both performance and quality metrics.

**Performance Metrics:** These metrics provide insights into the efficiency and capacity of your model. Here are some key examples:

- Queries per second (QPS): the number of queries your model processes in a second, offering insight into its utilization.
- Latency: the delay from when a client sends a request to when they receive a response.
- Tokens per second (TPS): the number of tokens your model can generate in a second.

**Quality Metrics:** These metrics are typically customized to the business use case. For instance, how does the output of your system compare to a baseline, such as a previous version? Although these metrics can be calculated offline, you need to log the necessary data to use them later.

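As an illustration, here is a minimal sketch of recording latency and tokens per second around a model call; the ``log_metric`` helper and the metric names are placeholders for whatever metrics backend you use:

.. code-block:: python

    import time

    def log_metric(name: str, value: float) -> None:
        # Placeholder sink: wire this up to your metrics backend
        # (Prometheus, CloudWatch, ...).
        print(f"{name}={value:.3f}")

    def timed_generate(llm, prompt: str) -> str:
        start = time.perf_counter()
        output = llm(prompt)
        elapsed = time.perf_counter() - start
        log_metric("latency_seconds", elapsed)
        # Rough token count via whitespace split; use a real tokenizer in production.
        log_metric("tokens_per_second", len(output.split()) / elapsed)
        return output
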
Fault tolerance
---------------

Your application may encounter errors such as exceptions in your model inference or business logic code, causing failures and disrupting traffic. Other potential issues could arise from the machine running your application, such as unexpected hardware breakdowns or the loss of spot instances during high-demand periods. One way to mitigate these risks is by increasing redundancy through replica scaling and implementing recovery mechanisms for failed replicas. However, model replicas aren't the only potential points of failure. It's essential to build resilience against the various failures that could occur at any point in your stack.

Zero-downtime upgrades
----------------------

System upgrades are often necessary but can result in service disruptions if not handled correctly. One way to prevent downtime during upgrades is to implement a smooth transition from the old version to the new one. Ideally, the new version of your LLM service is deployed and traffic gradually shifts from the old version to the new one, maintaining a constant QPS throughout the process.

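This traffic shift can be thought of as a weighted routing decision. Below is a toy sketch; ``old_version`` and ``new_version`` stand in for calls to the two deployments, and ``rollout_fraction`` is ramped from 0.0 to 1.0 over the course of the upgrade:

.. code-block:: python

    import random

    def route(request, old_version, new_version, rollout_fraction: float):
        # Send a growing share of traffic to the new deployment;
        # total QPS stays constant throughout the rollout.
        if random.random() < rollout_fraction:
            return new_version(request)
        return old_version(request)
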
Load balancing
--------------

Load balancing, in simple terms, is a technique for distributing work evenly across multiple computers, servers, or other resources to optimize the utilization of the system, maximize throughput, minimize response time, and avoid overloading any single resource. Think of it as a traffic officer directing cars (requests) to different roads (servers) so that no single road becomes too congested.

There are several strategies for load balancing. One common method is the *Round Robin* strategy, where each request is sent to the next server in line, cycling back to the first once every server has received a request. This works well when all servers are equally capable. However, if some servers are more powerful than others, you might use a *Weighted Round Robin* or *Least Connections* strategy, where more requests are sent to the more powerful servers or to those currently handling the fewest active requests. Imagine you're running an LLM chain: if your application becomes popular, you could have hundreds or even thousands of users asking questions at the same time. If one server gets too busy (high load), the load balancer directs new requests to a less busy server. This way, all your users get a timely response and the system remains stable.

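Both strategies are simple to express in code. A framework-agnostic sketch follows; real load balancers such as NGINX or a cloud load balancer implement these policies for you:

.. code-block:: python

    import itertools

    servers = ["server-a", "server-b", "server-c"]

    # Round Robin: hand requests to servers in a fixed rotation.
    rotation = itertools.cycle(servers)

    def pick_round_robin() -> str:
        return next(rotation)

    # Least Connections: track in-flight requests per server and
    # pick the least busy one.
    active_requests = {server: 0 for server in servers}

    def pick_least_connections() -> str:
        return min(active_requests, key=active_requests.get)
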
Maintaining Cost-Efficiency and Scalability
============================================

.. _cost:

Deploying LLM services can be costly, especially when you're handling a large volume of user interactions. Charges by LLM providers are usually based on tokens used, which can make chat systems built on these models expensive to run. However, several strategies can help manage these costs without compromising the quality of the service.

Self-hosting models
-------------------

Several smaller, open-source LLMs are emerging to tackle the issue of reliance on LLM providers. Self-hosting allows you to maintain quality similar to that of LLM-provider models while managing costs. The challenge lies in building a reliable, high-performing LLM serving system on your own machines.

Resource Management and Auto-Scaling
------------------------------------

Computational logic within your application requires precise resource allocation. For instance, if part of your traffic is served by an OpenAI endpoint and another part by a self-hosted model, it's crucial to allocate suitable resources for each. Auto-scaling (adjusting resource allocation based on traffic) can significantly impact the cost of running your application. This strategy requires a balance between cost and responsiveness, ensuring neither resource over-provisioning nor compromised application responsiveness.

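The core of an auto-scaling policy is a small decision rule; systems like the Kubernetes Horizontal Pod Autoscaler or Ray Serve run such a loop for you. A simplified sketch, where ``qps_per_replica`` is the measured capacity of a single replica:

.. code-block:: python

    import math

    def desired_replicas(current_qps: float, qps_per_replica: float,
                         min_replicas: int = 1, max_replicas: int = 10) -> int:
        # Provision just enough replicas for current traffic, within fixed bounds.
        needed = math.ceil(current_qps / qps_per_replica)
        return max(min_replicas, min(max_replicas, needed))
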
Utilizing Spot Instances
------------------------

On platforms like AWS, spot instances offer substantial cost savings, typically priced at about a third of on-demand instances. The trade-off is a higher crash rate, necessitating a robust fault-tolerance mechanism for effective use.

Independent Scaling
-------------------

When self-hosting your models, you should consider independent scaling. For example, if you have two translation models, one fine-tuned for French and another for Spanish, incoming requests might necessitate different scaling requirements for each.

Batching requests
-----------------

In the context of Large Language Models, batching requests can enhance efficiency by better utilizing your GPU resources. GPUs are inherently parallel processors, designed to handle multiple tasks simultaneously. If you send individual requests to the model, the GPU might not be fully utilized, as it is only working on a single task at a time. By batching requests together, you allow the GPU to work on multiple tasks at once, maximizing its utilization and improving inference speed. This not only leads to cost savings but can also improve the overall latency of your LLM service.

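A minimal sketch of dynamic batching: requests accumulate until the batch is full or a timeout expires, then the whole batch goes through the model in a single call. The ``model_generate`` argument is a stand-in for your batched inference entry point:

.. code-block:: python

    import queue
    import time

    def serve_batches(requests: "queue.Queue[str]", model_generate,
                      max_batch_size: int = 8, timeout_s: float = 0.05):
        batch = []
        deadline = time.monotonic() + timeout_s
        while len(batch) < max_batch_size:
            remaining = deadline - time.monotonic()
            if remaining <= 0:
                break
            try:
                batch.append(requests.get(timeout=remaining))
            except queue.Empty:
                break
        # One forward pass over the whole batch keeps the GPU busy.
        return model_generate(batch) if batch else []
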
In summary, managing costs while scaling your LLM services requires a strategic approach. Self-hosting models, managing resources effectively, employing auto-scaling, using spot instances, scaling models independently, and batching requests are key strategies to consider. Open-source libraries such as Ray Serve and BentoML are designed to deal with these complexities.

Ensuring Rapid Iteration
========================

.. _iteration:

The LLM landscape is evolving at an unprecedented pace, with new libraries and model architectures being introduced constantly. Consequently, it's crucial to avoid tying yourself to a solution specific to one particular framework. This is especially relevant in serving, where changes to your infrastructure can be time-consuming, expensive, and risky. Strive for infrastructure that is not locked into any specific machine learning library or framework, but instead offers a general-purpose, scalable serving layer. Here are some aspects where flexibility plays a key role:

Model composition
-----------------

Deploying systems like LangChain demands the ability to piece together different models and connect them via logic. Take the example of building a natural-language SQL query engine. Querying an LLM and obtaining the SQL command is only part of the system. You need to extract metadata from the connected database, construct a prompt for the LLM, run the SQL query on an engine, collect and feed the response back to the LLM as the query runs, and present the results to the user. This demonstrates the need to seamlessly integrate various complex components built in Python into a dynamic chain of logical blocks that can be served together.

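In LangChain terms, a sketch of such a pipeline might look like the following; the database URI and the question are placeholders, and ``SQLDatabaseChain`` bundles the metadata extraction, prompt construction, and query execution steps described above:

.. code-block:: python

    from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

    db = SQLDatabase.from_uri("sqlite:///example.db")  # placeholder URI
    llm = OpenAI(temperature=0)
    chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
    chain.run("How many users signed up this month?")
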
Cloud providers
---------------

Many hosted solutions are restricted to a single cloud provider, which can limit your options in today's multi-cloud world. Depending on where your other infrastructure components are built, you might prefer to stick with your chosen cloud provider.

Infrastructure as Code (IaC)
----------------------------

Rapid iteration also involves the ability to recreate your infrastructure quickly and reliably. This is where Infrastructure as Code (IaC) tools like Terraform, CloudFormation, or Kubernetes YAML files come into play. They allow you to define your infrastructure in code files that can be version-controlled and quickly deployed, enabling faster and more reliable iterations.

CI/CD
-----

In a fast-paced environment, implementing CI/CD pipelines can significantly speed up the iteration process. They help automate the testing and deployment of your LLM applications, reducing the risk of errors and enabling faster feedback and iteration.

@@ -2,191 +2,230 @@

Dependents stats for `hwchase17/langchain`

[](https://github.com/hwchase17/langchain/network/dependents)
[&message=172&color=informational&logo=slickpic)](https://github.com/hwchase17/langchain/network/dependents)
[&message=4980&color=informational&logo=slickpic)](https://github.com/hwchase17/langchain/network/dependents)
[&message=17239&color=informational&logo=slickpic)](https://github.com/hwchase17/langchain/network/dependents)
[](https://github.com/hwchase17/langchain/network/dependents)
[&message=212&color=informational&logo=slickpic)](https://github.com/hwchase17/langchain/network/dependents)
[&message=7272&color=informational&logo=slickpic)](https://github.com/hwchase17/langchain/network/dependents)
[&message=19095&color=informational&logo=slickpic)](https://github.com/hwchase17/langchain/network/dependents)

[update: 2023-05-17; only dependent repositories with Stars > 100]
[update: 2023-06-05; only dependent repositories with Stars > 100]

| Repository | Stars |
| :-------- | -----: |
|[openai/openai-cookbook](https://github.com/openai/openai-cookbook) | 35401 |
|[LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 32861 |
|[microsoft/TaskMatrix](https://github.com/microsoft/TaskMatrix) | 32766 |
|[hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 29560 |
|[reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 22315 |
|[imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 17474 |
|[openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 16923 |
|[mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 16112 |
|[jerryjliu/llama_index](https://github.com/jerryjliu/llama_index) | 15407 |
|[mlflow/mlflow](https://github.com/mlflow/mlflow) | 14345 |
|[GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 10372 |
|[databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 9919 |
|[AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 8177 |
|[logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 6807 |
|[imClumsyPanda/langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM) | 6087 |
|[arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 5292 |
|[e2b-dev/e2b](https://github.com/e2b-dev/e2b) | 4622 |
|[nsarrazin/serge](https://github.com/nsarrazin/serge) | 4076 |
|[madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 3952 |
|[zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 3952 |
|[go-skynet/LocalAI](https://github.com/go-skynet/LocalAI) | 3762 |
|[GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 3388 |
|[mmabrouk/chatgpt-wrapper](https://github.com/mmabrouk/chatgpt-wrapper) | 3243 |
|[zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 3189 |
|[wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 3050 |
|[marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 2930 |
|[gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 2710 |
|[PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 2545 |
|[project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 2479 |
|[whitead/paper-qa](https://github.com/whitead/paper-qa) | 2399 |
|[langgenius/dify](https://github.com/langgenius/dify) | 2344 |
|[GerevAI/gerev](https://github.com/GerevAI/gerev) | 2283 |
|[hwchase17/chat-langchain](https://github.com/hwchase17/chat-langchain) | 2266 |
|[guangzhengli/ChatFiles](https://github.com/guangzhengli/ChatFiles) | 1903 |
|[Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 1884 |
|[OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 1860 |
|[Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 1813 |
|[OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 1571 |
|[IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 1480 |
|[hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 1464 |
|[NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 1419 |
|[Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 1410 |
|[Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1363 |
|[paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1344 |
|[StanGirard/quivr](https://github.com/StanGirard/quivr) | 1330 |
|[lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1318 |
|[vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1286 |
|[agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1156 |
|[h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 1141 |
|[jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1106 |
|[yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 1072 |
|[ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1064 |
|[jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1057 |
|[juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1003 |
|[greshake/llm-security](https://github.com/greshake/llm-security) | 1002 |
|[visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 957 |
|[richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 918 |
|[irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 886 |
|[mmz-001/knowledge_gpt](https://github.com/mmz-001/knowledge_gpt) | 867 |
|[thomas-yanxin/LangChain-ChatGLM-Webui](https://github.com/thomas-yanxin/LangChain-ChatGLM-Webui) | 850 |
|[microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 837 |
|[peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 826 |
|[cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 782 |
|[hashintel/hash](https://github.com/hashintel/hash) | 778 |
|[seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 773 |
|[jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 738 |
|[corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 737 |
|[ai-sidekick/sidekick](https://github.com/ai-sidekick/sidekick) | 717 |
|[rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 703 |
|[poe-platform/api-bot-tutorial](https://github.com/poe-platform/api-bot-tutorial) | 689 |
|[SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 666 |
|[eyurtsev/kor](https://github.com/eyurtsev/kor) | 608 |
|[run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 559 |
|[namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 544 |
|[pieroit/cheshire-cat](https://github.com/pieroit/cheshire-cat) | 520 |
|[griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 514 |
|[getmetal/motorhead](https://github.com/getmetal/motorhead) | 481 |
|[hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 462 |
|[langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 452 |
|[jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 439 |
|[SamurAIGPT/ChatGPT-Developer-Plugins](https://github.com/SamurAIGPT/ChatGPT-Developer-Plugins) | 437 |
|[alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 433 |
|[michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 427 |
|[mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 425 |
|[mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 422 |
|[whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 421 |
|[freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 407 |
|[jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 395 |
|[yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 383 |
|[akshata29/chatpdf](https://github.com/akshata29/chatpdf) | 374 |
|[OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 368 |
|[ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 358 |
|[101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 357 |
|[mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 354 |
|[amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 343 |
|[msoedov/langcorn](https://github.com/msoedov/langcorn) | 334 |
|[showlab/VLog](https://github.com/showlab/VLog) | 330 |
|[continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 324 |
|[steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 323 |
|[daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 320 |
|[xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 308 |
|[StevenGrove/GPT4Tools](https://github.com/StevenGrove/GPT4Tools) | 301 |
|[logan-markewich/llama_index_starter_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 300 |
|[andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 299 |
|[Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 287 |
|[itamargol/openai](https://github.com/itamargol/openai) | 273 |
|[BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 267 |
|[momegas/megabots](https://github.com/momegas/megabots) | 259 |
|[bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 238 |
|[Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 232 |
|[ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 227 |
|[sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 227 |
|[explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 226 |
|[recalign/RecAlign](https://github.com/recalign/RecAlign) | 218 |
|[jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 218 |
|[alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 215 |
|[conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 213 |
|[MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 209 |
|[JohnSnowLabs/nlptest](https://github.com/JohnSnowLabs/nlptest) | 208 |
|[airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 197 |
|[langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 195 |
|[yvann-hub/Robby-chatbot](https://github.com/yvann-hub/Robby-chatbot) | 195 |
|[alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 192 |
|[daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 189 |
|[NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 187 |
|[kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 184 |
|[Anil-matcha/Website-to-Chatbot](https://github.com/Anil-matcha/Website-to-Chatbot) | 183 |
|[plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 180 |
|[OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 166 |
|[benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 166 |
|[jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 161 |
|[hardbyte/qabot](https://github.com/hardbyte/qabot) | 160 |
|[shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 153 |
|[radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 153 |
|[poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 152 |
|[paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 149 |
|[ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 149 |
|[fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 147 |
|[yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 144 |
|[homanp/superagent](https://github.com/homanp/superagent) | 143 |
|[realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 141 |
|[ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 141 |
|[ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 139 |
|[hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 138 |
|[Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 136 |
|[openai/openai-cookbook](https://github.com/openai/openai-cookbook) | 38024 |
|[LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 33609 |
|[microsoft/TaskMatrix](https://github.com/microsoft/TaskMatrix) | 33136 |
|[hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 30032 |
|[imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 28094 |
|[reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 23430 |
|[openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 17942 |
|[jerryjliu/llama_index](https://github.com/jerryjliu/llama_index) | 16697 |
|[mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 16410 |
|[mlflow/mlflow](https://github.com/mlflow/mlflow) | 14517 |
|[GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 10793 |
|[databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 10155 |
|[openai/evals](https://github.com/openai/evals) | 10076 |
|[AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 8619 |
|[logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 8211 |
|[imClumsyPanda/langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM) | 8154 |
|[PromtEngineer/localGPT](https://github.com/PromtEngineer/localGPT) | 6853 |
|[StanGirard/quivr](https://github.com/StanGirard/quivr) | 6830 |
|[PipedreamHQ/pipedream](https://github.com/PipedreamHQ/pipedream) | 6520 |
|[go-skynet/LocalAI](https://github.com/go-skynet/LocalAI) | 6018 |
|[arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 5643 |
|[e2b-dev/e2b](https://github.com/e2b-dev/e2b) | 5075 |
|[langgenius/dify](https://github.com/langgenius/dify) | 4281 |
|[nsarrazin/serge](https://github.com/nsarrazin/serge) | 4228 |
|[zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 4084 |
|[madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 4039 |
|[wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 3871 |
|[GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 3837 |
|[zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 3625 |
|[csunny/DB-GPT](https://github.com/csunny/DB-GPT) | 3545 |
|[gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 3404 |
|[mmabrouk/chatgpt-wrapper](https://github.com/mmabrouk/chatgpt-wrapper) | 3303 |
|[postgresml/postgresml](https://github.com/postgresml/postgresml) | 3052 |
|[marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 3014 |
|[MineDojo/Voyager](https://github.com/MineDojo/Voyager) | 2945 |
|[PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 2761 |
|[project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 2673 |
|[hwchase17/chat-langchain](https://github.com/hwchase17/chat-langchain) | 2589 |
|[whitead/paper-qa](https://github.com/whitead/paper-qa) | 2572 |
|[Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 2366 |
|[GerevAI/gerev](https://github.com/GerevAI/gerev) | 2330 |
|[OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 2289 |
|[ParisNeo/gpt4all-ui](https://github.com/ParisNeo/gpt4all-ui) | 2159 |
|[OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 2158 |
|[guangzhengli/ChatFiles](https://github.com/guangzhengli/ChatFiles) | 2005 |
|[h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 1939 |
|[Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 1845 |
|[OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 1749 |
|[IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 1740 |
|[Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 1628 |
|[hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 1607 |
|[NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 1544 |
|[SamurAIGPT/privateGPT](https://github.com/SamurAIGPT/privateGPT) | 1543 |
|[paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1526 |
|[yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 1485 |
|[Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1402 |
|[vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1387 |
|[Chainlit/chainlit](https://github.com/Chainlit/chainlit) | 1336 |
|[lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1323 |
|[psychic-api/psychic](https://github.com/psychic-api/psychic) | 1248 |
|[agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1208 |
|[jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1193 |
|[thomas-yanxin/LangChain-ChatGLM-Webui](https://github.com/thomas-yanxin/LangChain-ChatGLM-Webui) | 1182 |
|[ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1137 |
|[jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1135 |
|[greshake/llm-security](https://github.com/greshake/llm-security) | 1086 |
|[keephq/keep](https://github.com/keephq/keep) | 1063 |
|[juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1037 |
|[richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 1035 |
|[visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 997 |
|[mmz-001/knowledge_gpt](https://github.com/mmz-001/knowledge_gpt) | 995 |
|[jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 949 |
|[irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 936 |
|[microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 908 |
|[poe-platform/api-bot-tutorial](https://github.com/poe-platform/api-bot-tutorial) | 902 |
|[peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 875 |
|[cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 822 |
|[homanp/superagent](https://github.com/homanp/superagent) | 806 |
|[seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 800 |
|[chatarena/chatarena](https://github.com/chatarena/chatarena) | 796 |
|[hashintel/hash](https://github.com/hashintel/hash) | 795 |
|[SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 786 |
|[rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 770 |
|[corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 769 |
|[101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 755 |
|[noahshinn024/reflexion](https://github.com/noahshinn024/reflexion) | 706 |
|[eyurtsev/kor](https://github.com/eyurtsev/kor) | 695 |
|[cheshire-cat-ai/core](https://github.com/cheshire-cat-ai/core) | 681 |
|[e-johnstonn/BriefGPT](https://github.com/e-johnstonn/BriefGPT) | 656 |
|[run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 635 |
|[griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 583 |
|[namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 555 |
|[getmetal/motorhead](https://github.com/getmetal/motorhead) | 550 |
|[kreneskyp/ix](https://github.com/kreneskyp/ix) | 543 |
|[hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 510 |
|[Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 501 |
|[whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 497 |
|[SamurAIGPT/ChatGPT-Developer-Plugins](https://github.com/SamurAIGPT/ChatGPT-Developer-Plugins) | 496 |
|[microsoft/PodcastCopilot](https://github.com/microsoft/PodcastCopilot) | 492 |
|[debanjum/khoj](https://github.com/debanjum/khoj) | 485 |
|[akshata29/chatpdf](https://github.com/akshata29/chatpdf) | 485 |
|[langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 462 |
|[jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 460 |
|[alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 457 |
|[yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 451 |
|[mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 446 |
|[michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 446 |
|[mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 441 |
|[freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 439 |
|[ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 429 |
|[StevenGrove/GPT4Tools](https://github.com/StevenGrove/GPT4Tools) | 422 |
|[jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 407 |
|[msoedov/langcorn](https://github.com/msoedov/langcorn) | 405 |
|[amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 395 |
|[ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 384 |
|[mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 376 |
|[steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 371 |
|[langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 365 |
|[xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 358 |
|[continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 357 |
|[opentensor/bittensor](https://github.com/opentensor/bittensor) | 347 |
|[showlab/VLog](https://github.com/showlab/VLog) | 345 |
|[daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 345 |
|[logan-markewich/llama_index_starter_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 332 |
|[poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 320 |
|[explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 312 |
|[andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 311 |
|[alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 310 |
|[jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 294 |
|[BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 283 |
|[itamargol/openai](https://github.com/itamargol/openai) | 281 |
|[momegas/megabots](https://github.com/momegas/megabots) | 279 |
|[personoids/personoids-lite](https://github.com/personoids/personoids-lite) | 277 |
|[yvann-hub/Robby-chatbot](https://github.com/yvann-hub/Robby-chatbot) | 267 |
|[Anil-matcha/Website-to-Chatbot](https://github.com/Anil-matcha/Website-to-Chatbot) | 266 |
|[Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 260 |
|[sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 248 |
|[bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 245 |
|[daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 240 |
|[MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 237 |
|[ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 234 |
|[conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 234 |
|[recalign/RecAlign](https://github.com/recalign/RecAlign) | 226 |
|[OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 220 |
|[alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 219 |
|[JohnSnowLabs/nlptest](https://github.com/JohnSnowLabs/nlptest) | 216 |
|[kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 215 |
|[truera/trulens](https://github.com/truera/trulens) | 208 |
|[NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 208 |
|[airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 207 |
|[monarch-initiative/ontogpt](https://github.com/monarch-initiative/ontogpt) | 200 |
|[paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 195 |
|[shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 185 |
|[Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 184 |
|[plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 182 |
|[su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 180 |
|[jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 177 |
|[benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 174 |
|[billxbf/ReWOO](https://github.com/billxbf/ReWOO) | 170 |
|[filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 168 |
|[hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 168 |
|[radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 164 |
|[hardbyte/qabot](https://github.com/hardbyte/qabot) | 164 |
|[gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 158 |
|[plastic-labs/tutor-gpt](https://github.com/plastic-labs/tutor-gpt) | 154 |
|[yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 154 |
|[fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 154 |
|[ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 153 |
|[hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 153 |
|[edreisMD/plugnplai](https://github.com/edreisMD/plugnplai) | 148 |
|[chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 145 |
|[ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 145 |
|[shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 144 |
|[realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 143 |
|[PradipNichite/Youtube-Tutorials](https://github.com/PradipNichite/Youtube-Tutorials) | 140 |
|[gustavz/DataChad](https://github.com/gustavz/DataChad) | 140 |
|[Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 140 |
|[Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 139 |
|[handrew/browserpilot](https://github.com/handrew/browserpilot) | 137 |
|[jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 137 |
|[SamPink/dev-gpt](https://github.com/SamPink/dev-gpt) | 135 |
|[menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 135 |
|[hirokidaichi/wanna](https://github.com/hirokidaichi/wanna) | 135 |
|[Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 134 |
|[jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 130 |
|[Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 130 |
|[filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 128 |
|[handrew/browserpilot](https://github.com/handrew/browserpilot) | 128 |
|[shauryr/S2QA](https://github.com/shauryr/S2QA) | 127 |
|[steamship-core/vercel-examples](https://github.com/steamship-core/vercel-examples) | 127 |
|[yasyf/summ](https://github.com/yasyf/summ) | 127 |
|[gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 126 |
|[jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 125 |
|[ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 124 |
|[Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 124 |
|[hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 124 |
|[menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 123 |
|[peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 123 |
|[chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 123 |
|[petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 115 |
|[su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 113 |
|[eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 113 |
|[steamship-core/vercel-examples](https://github.com/steamship-core/vercel-examples) | 134 |
|[pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 133 |
|[ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 133 |
|[shauryr/S2QA](https://github.com/shauryr/S2QA) | 133 |
|[jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 132 |
|[yuanjie-ai/ChatLLM](https://github.com/yuanjie-ai/ChatLLM) | 132 |
|[yasyf/summ](https://github.com/yasyf/summ) | 132 |
|[WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 130 |
|[peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 127 |
|[Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 126 |
|[vaibkumr/prompt-optimizer](https://github.com/vaibkumr/prompt-optimizer) | 125 |
|[preset-io/promptimize](https://github.com/preset-io/promptimize) | 124 |
|[homanp/vercel-langchain](https://github.com/homanp/vercel-langchain) | 124 |
|[petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 123 |
|[eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 118 |
|[nicknochnack/LangchainDocuments](https://github.com/nicknochnack/LangchainDocuments) | 116 |
|[jiran214/GPT-vup](https://github.com/jiran214/GPT-vup) | 112 |
|[rsaryev/talk-codebase](https://github.com/rsaryev/talk-codebase) | 112 |
|[zenml-io/zenml-projects](https://github.com/zenml-io/zenml-projects) | 112 |
|[pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 111 |
|[shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 109 |
|[WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 108 |
|[davila7/file-gpt](https://github.com/davila7/file-gpt) | 104 |
|[enhancedocs/enhancedocs](https://github.com/enhancedocs/enhancedocs) | 102 |
|[aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 101 |
|[microsoft/azure-openai-in-a-day-workshop](https://github.com/microsoft/azure-openai-in-a-day-workshop) | 112 |
|[davila7/file-gpt](https://github.com/davila7/file-gpt) | 112 |
|[prof-frink-lab/slangchain](https://github.com/prof-frink-lab/slangchain) | 111 |
|[aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 110 |
|[fixie-ai/fixie-examples](https://github.com/fixie-ai/fixie-examples) | 108 |
|[miaoshouai/miaoshouai-assistant](https://github.com/miaoshouai/miaoshouai-assistant) | 105 |
|[flurb18/AgentOoba](https://github.com/flurb18/AgentOoba) | 103 |
|[solana-labs/chatgpt-plugin](https://github.com/solana-labs/chatgpt-plugin) | 102 |
|[Significant-Gravitas/Auto-GPT-Benchmarks](https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks) | 102 |
|[kaarthik108/snowChat](https://github.com/kaarthik108/snowChat) | 100 |

_Generated by [github-dependents-info](https://github.com/nvuillam/github-dependents-info)_

[github-dependents-info --repo hwchase17/langchain --markdownfile dependents.md --minstars 100 --sort stars]
`github-dependents-info --repo hwchase17/langchain --markdownfile dependents.md --minstars 100 --sort stars`

`docs/ecosystem/baseten.md` (25 lines, new file)

# Baseten

Learn how to use LangChain with models deployed on Baseten.

## Installation and setup

- Create a [Baseten](https://baseten.co) account and [API key](https://docs.baseten.co/settings/api-keys).
- Install the Baseten Python client with `pip install baseten`.
- Use your API key to authenticate with `baseten login`.

## Invoking a model

Baseten integrates with LangChain through the LLM module, which provides a standardized and interoperable interface for models that are deployed on your Baseten workspace.

You can deploy foundation models like WizardLM and Alpaca with one click from the [Baseten model library](https://app.baseten.co/explore/), or if you have your own model, [deploy it with this tutorial](https://docs.baseten.co/deploying-models/deploy).

In this example, we'll work with WizardLM. [Deploy WizardLM here](https://app.baseten.co/explore/wizardlm) and follow along with the deployed [model's version ID](https://docs.baseten.co/managing-models/manage).

```python
from langchain.llms import Baseten

wizardlm = Baseten(model="MODEL_VERSION_ID", verbose=True)

wizardlm("What is the difference between a Wizard and a Sorcerer?")
```

@@ -6,6 +6,11 @@ This section covers several options for that. Note that these options are meant

What follows is a list of template GitHub repositories designed to be easily forked and modified to use your chain. This list is far from exhaustive, and we are EXTREMELY open to contributions here.

## [Anyscale](https://www.anyscale.com/model-serving)

Anyscale is a unified compute platform that makes it easy to develop, deploy, and manage scalable LLM applications in production using Ray.
With Anyscale you can scale the most challenging LLM-based workloads and both develop and deploy LLM-based apps on a single compute platform.

## [Streamlit](https://github.com/hwchase17/langchain-streamlit-template)

This repo serves as a template for how to deploy a LangChain app with Streamlit.

@@ -10,7 +10,8 @@

### Tutorials
[LangChain Tutorials](https://www.youtube.com/watch?v=FuqdVNB_8c0&list=PL9V0lbeJ69brU-ojMpU1Y7Ic58Tap0Cw6) by [Edrick](https://www.youtube.com/@edrickdch):
- ⛓ [LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF](https://youtu.be/FuqdVNB_8c0)
- ⛓ [LangChain 101: The Complete Beginner's Guide](https://youtu.be/P3MAbZ2eMUI)

[LangChain Crash Course: Build an AutoGPT app in 25 minutes](https://youtu.be/MlK6SIjcjE8) by [Nicholas Renotte](https://www.youtube.com/@NicholasRenotte)

@@ -176,6 +176,8 @@ Additional Resources

- `Gallery <https://github.com/kyrolabs/awesome-langchain>`_: A collection of great projects that use Langchain, compiled by the folks at `Kyrolabs <https://kyrolabs.com>`_. Useful for finding inspiration and example implementations.

- `Deploying LLMs in Production <./additional_resources/deploy_llms.html>`_: A collection of best practices and tutorials for deploying LLMs in production.

- `Tracing <./additional_resources/tracing.html>`_: A guide on using tracing in LangChain to visualize the execution of chains and agents.

- `Model Laboratory <./additional_resources/model_laboratory.html>`_: Experimenting with different prompts, models, and chains is a big part of developing the best possible application. The ModelLaboratory makes it easy to do so.

@@ -194,6 +196,8 @@ Additional Resources

   :hidden:

   LangChainHub <https://github.com/hwchase17/langchain-hub>
   ./additional_resources/deployments.md
   ./additional_resources/deploy_llms.rst
   Gallery <https://github.com/kyrolabs/awesome-langchain>
   ./additional_resources/tracing.md
   ./additional_resources/model_laboratory.ipynb

@@ -18,7 +18,7 @@ from langchain import Bedrock

## Text Embedding Models

See a [usage example](../modules/models/text_embedding/examples/bedrock.ipynb).
See a [usage example](../modules/models/text_embedding/examples/amazon_bedrock.ipynb).

```python
from langchain.embeddings import BedrockEmbeddings
```

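A minimal usage sketch (assumes AWS credentials for Bedrock are already configured in your environment; the query text is a placeholder):

```python
from langchain.embeddings import BedrockEmbeddings

embeddings = BedrockEmbeddings()  # uses the default boto3 session for credentials
query_vector = embeddings.embed_query("Hello Bedrock")
```
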
`docs/integrations/annoy.md` (18 lines, new file)

# Annoy

> [Annoy](https://github.com/spotify/annoy) (`Approximate Nearest Neighbors Oh Yeah`) is a C++ library with Python bindings to search for points in space that are close to a given query point. It also creates large read-only file-based data structures that are mmapped into memory so that many processes may share the same data.

## Installation and Setup

```bash
pip install annoy
```

## Vectorstore

See a [usage example](../modules/indexes/vectorstores/examples/annoy.ipynb).

```python
from langchain.vectorstores import Annoy
```

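As a quick illustration, a minimal sketch of building and querying an Annoy-backed vectorstore (the toy texts and the choice of `OpenAIEmbeddings` are placeholders):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Annoy

texts = ["harrison worked at kensho", "bears like to eat honey"]  # toy corpus
vectorstore = Annoy.from_texts(texts, OpenAIEmbeddings())
docs = vectorstore.similarity_search("Where did harrison work?", k=1)
```
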
26
docs/integrations/anthropic.md
Normal file
@@ -0,0 +1,26 @@
# Anthropic

>[Anthropic](https://en.wikipedia.org/wiki/Anthropic) is an American artificial intelligence (AI) startup and
> public-benefit corporation, founded by former members of OpenAI. `Anthropic` specializes in developing general AI
> systems and language models, with a company ethos of responsible AI usage.
> `Anthropic` develops a chatbot, named `Claude`. Similar to `ChatGPT`, `Claude` uses a messaging
> interface where users can submit questions or requests and receive highly detailed and relevant responses.

## Installation and Setup

```bash
pip install anthropic
```

See the [setup documentation](https://console.anthropic.com/docs/access).

## Chat Models

See a [usage example](../modules/models/chat/integrations/anthropic.ipynb).

```python
from langchain.chat_models import ChatAnthropic
```
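
A minimal sketch of calling the chat model (assuming `ANTHROPIC_API_KEY` is set in the environment):

```python
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage

# assumes ANTHROPIC_API_KEY is set in the environment
chat = ChatAnthropic()
response = chat([HumanMessage(content="Say hello in one short sentence.")])
print(response.content)
```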
@@ -26,3 +26,11 @@ See a [usage example](../modules/indexes/document_loaders/examples/arxiv.ipynb).
```python
from langchain.document_loaders import ArxivLoader
```

## Retriever

See a [usage example](../modules/indexes/retrievers/examples/arxiv.ipynb).

```python
from langchain.retrievers import ArxivRetriever
```
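
A minimal retriever sketch (assuming the `arxiv` package is installed; the query and metadata fields follow the linked notebook):

```python
from langchain.retrievers import ArxivRetriever

retriever = ArxivRetriever(load_max_docs=2)
docs = retriever.get_relevant_documents("Attention Is All You Need")
print(docs[0].metadata)  # e.g. Published, Title, Authors, Summary
```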
21
docs/integrations/awadb.md
Normal file
@@ -0,0 +1,21 @@
# AwaDB

>[AwaDB](https://github.com/awa-ai/awadb) is an AI-native database for the search and storage of embedding vectors used by LLM applications.

## Installation and Setup

```bash
pip install awadb
```

## VectorStore

There exists a wrapper around AwaDB vector databases, allowing you to use it as a vectorstore,
whether for semantic search or example selection.

```python
from langchain.vectorstores import AwaDB
```

For a more detailed walkthrough of the AwaDB wrapper, see [this notebook](../modules/indexes/vectorstores/examples/awadb.ipynb)
24
docs/integrations/azure_cognitive_search_.md
Normal file
@@ -0,0 +1,24 @@
# Azure Cognitive Search

>[Azure Cognitive Search](https://learn.microsoft.com/en-us/azure/search/search-what-is-azure-search) (formerly known as `Azure Search`) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications.

>Search is foundational to any app that surfaces text to users, where common scenarios include catalog or document search, online retail apps, or data exploration over proprietary content. When you create a search service, you'll work with the following capabilities:
>- A search engine for full text search over a search index containing user-owned content
>- Rich indexing, with lexical analysis and optional AI enrichment for content extraction and transformation
>- Rich query syntax for text search, fuzzy search, autocomplete, geo-search and more
>- Programmability through REST APIs and client libraries in Azure SDKs
>- Azure integration at the data layer, machine learning layer, and AI (Cognitive Services)

## Installation and Setup

See [set up instructions](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal).

## Retriever

See a [usage example](../modules/indexes/retrievers/examples/azure_cognitive_search.ipynb).

```python
from langchain.retrievers import AzureCognitiveSearchRetriever
```
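
A minimal sketch (the environment variable names and `content_key` follow the linked notebook; the service, index, and key values are placeholders):

```python
import os

from langchain.retrievers import AzureCognitiveSearchRetriever

# placeholders; use your own service name, index name, and query key
os.environ["AZURE_COGNITIVE_SEARCH_SERVICE_NAME"] = "<your-service-name>"
os.environ["AZURE_COGNITIVE_SEARCH_INDEX_NAME"] = "<your-index-name>"
os.environ["AZURE_COGNITIVE_SEARCH_API_KEY"] = "<your-api-key>"

retriever = AzureCognitiveSearchRetriever(content_key="content")
docs = retriever.get_relevant_documents("what is langchain?")
```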
@@ -1,7 +1,8 @@
# Beam

>[Beam](https://docs.beam.cloud/introduction) makes it easy to run code on GPUs, deploy scalable web APIs,
> schedule cron jobs, and run massively parallel workloads — without managing any infrastructure.

## Installation and Setup

@@ -9,19 +10,19 @@
- Install the Beam CLI with `curl https://raw.githubusercontent.com/slai-labs/get-beam/main/get-beam.sh -sSfL | sh`
- Register API keys with `beam configure`
- Set the environment variables `BEAM_CLIENT_ID` and `BEAM_CLIENT_SECRET`
- Install the Beam SDK:
```bash
pip install beam-sdk
```

## LLM

There exists a Beam LLM wrapper, which you can access with

```python
from langchain.llms.beam import Beam
```

### Example of the Beam app

This is the environment you'll be developing against once you start the app.
It's also used to define the maximum response length from the model.

@@ -44,7 +45,7 @@ llm = Beam(model_name="gpt2",
           verbose=False)
```

### Deploy the Beam app

Once defined, you can deploy your Beam app by calling your model's `_deploy()` method.

```python
llm._deploy()
```

### Call the Beam app

Once a Beam model is deployed, it can be called by calling your model's `_call()` method.
This returns the GPT2 text response to your prompt.

```python
response = llm._call("Running machine learning on a remote GPU")
```
23
docs/integrations/cassandra.md
Normal file
@@ -0,0 +1,23 @@
# Cassandra

>[Cassandra](https://en.wikipedia.org/wiki/Apache_Cassandra) is a free and open-source, distributed, wide-column
> store, NoSQL database management system designed to handle large amounts of data across many commodity servers,
> providing high availability with no single point of failure. `Cassandra` offers support for clusters spanning
> multiple datacenters, with asynchronous masterless replication allowing low-latency operations for all clients.
> `Cassandra` was designed to implement a combination of `Amazon's Dynamo` distributed storage and replication
> techniques with `Google's Bigtable` data and storage engine model.

## Installation and Setup

```bash
pip install cassandra-driver
```

## Memory

See a [usage example](../modules/memory/examples/cassandra_chat_message_history.ipynb).

```python
from langchain.memory import CassandraChatMessageHistory
```
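
A minimal memory sketch, assuming a local Cassandra node; the connection parameters below mirror the linked notebook and are assumptions, not requirements:

```python
from langchain.memory import CassandraChatMessageHistory

message_history = CassandraChatMessageHistory(
    contact_points=["127.0.0.1"],  # assumed local node
    session_id="test-session",
    port=9042,
    username="cassandra",
    password="cassandra",
    keyspace_name="chat_history",
)
message_history.add_user_message("hi!")
print(message_history.messages)
```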
@@ -1,20 +1,29 @@
# Chroma

>[Chroma](https://docs.trychroma.com/getting-started) is a database for building AI applications with embeddings.

## Installation and Setup

```bash
pip install chromadb
```

## VectorStore

There exists a wrapper around Chroma vector databases, allowing you to use it as a vectorstore,
whether for semantic search or example selection.

To import this vectorstore:
```python
from langchain.vectorstores import Chroma
```

For a more detailed walkthrough of the Chroma wrapper, see [this notebook](../modules/indexes/vectorstores/getting_started.ipynb)

## Retriever

See a [usage example](../modules/indexes/retrievers/examples/chroma_self_query.ipynb).

```python
from langchain.retrievers import SelfQueryRetriever
```
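
As a quick sketch (`FakeEmbeddings` keeps it self-contained; swap in a real embedding model in practice):

```python
from langchain.embeddings import FakeEmbeddings
from langchain.vectorstores import Chroma

# builds an in-memory collection; set persist_directory to save it to disk
db = Chroma.from_texts(["harrison worked at kensho"], FakeEmbeddings(size=10))
print(db.similarity_search("Where did harrison work?", k=1))
```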
52
docs/integrations/clickhouse.md
Normal file
@@ -0,0 +1,52 @@
# ClickHouse

This page covers how to use ClickHouse Vector Search within LangChain.

[ClickHouse](https://clickhouse.com) is an open source real-time OLAP database with full SQL support and a wide range of functions to assist users in writing analytical queries. Some of these functions and data structures perform distance operations between vectors, enabling ClickHouse to be used as a vector database.

Due to the fully parallelized query pipeline, ClickHouse can process vector search operations very quickly, especially when performing exact matching through a linear scan over all rows, delivering processing speed comparable to dedicated vector databases.

High compression levels, tunable through custom compression codecs, enable very large datasets to be stored and queried. ClickHouse is not memory-bound, allowing multi-TB datasets containing embeddings to be queried.

The capabilities for computing the distance between two vectors are just another SQL function and can be effectively combined with more traditional SQL filtering and aggregation capabilities. This allows vectors to be stored and queried alongside metadata, and even rich text, enabling a broad array of use cases and applications.

Finally, experimental ClickHouse capabilities like [Approximate Nearest Neighbour (ANN) indices](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/annindexes) support faster approximate matching of vectors and provide a promising development aimed at further enhancing the vector matching capabilities of ClickHouse.

## Installation
- Install the ClickHouse server by [binary](https://clickhouse.com/docs/en/install) or [docker image](https://hub.docker.com/r/clickhouse/clickhouse-server/)
- Install the Python SDK with `pip install clickhouse-connect`

### Configure the ClickHouse vector index

Customize the `ClickhouseSettings` object with parameters:

```python
from langchain.vectorstores import Clickhouse, ClickhouseSettings
config = ClickhouseSettings(host="<clickhouse-server-host>", port=8123, ...)
index = Clickhouse(embedding_function, config)
index.add_documents(...)
```

## Wrappers

Supported functions:
- `add_texts`
- `add_documents`
- `from_texts`
- `from_documents`
- `similarity_search`
- `asimilarity_search`
- `similarity_search_by_vector`
- `asimilarity_search_by_vector`
- `similarity_search_with_relevance_scores`

### VectorStore

There exists a wrapper around the open source ClickHouse database, allowing you to use it as a vectorstore,
whether for semantic search or similar example retrieval.

To import this vectorstore:
```python
from langchain.vectorstores import Clickhouse
```

For a more detailed walkthrough of the ClickHouse wrapper, see [this notebook](../modules/indexes/vectorstores/examples/clickhouse.ipynb)
@@ -1,25 +1,38 @@
# Cohere

>[Cohere](https://cohere.ai/about) is a Canadian startup that provides natural language processing models
> that help companies improve human-machine interactions.

## Installation and Setup
- Install the Python SDK:
```bash
pip install cohere
```

Get a [Cohere api key](https://dashboard.cohere.ai/) and set it as an environment variable (`COHERE_API_KEY`)

## LLM

See a [usage example](../modules/models/llms/integrations/cohere.ipynb).

```python
from langchain.llms import Cohere
```

## Text Embedding Model

There exists a Cohere Embedding model, which you can access with
```python
from langchain.embeddings import CohereEmbeddings
```
For a more detailed walkthrough of this, see [this notebook](../modules/models/text_embedding/examples/cohere.ipynb)

## Retriever

See a [usage example](../modules/indexes/retrievers/examples/cohere-reranker.ipynb).

```python
from langchain.retrievers.document_compressors import CohereRerank
```
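
A minimal end-to-end sketch (assuming `COHERE_API_KEY` is set in the environment):

```python
from langchain.embeddings import CohereEmbeddings
from langchain.llms import Cohere

# assumes COHERE_API_KEY is set in the environment
llm = Cohere()
print(llm("Say hello in one short sentence."))

embeddings = CohereEmbeddings()
vector = embeddings.embed_query("Hello Cohere")
```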
@@ -1,25 +1,17 @@
# Databerry

>[Databerry](https://databerry.ai) is an [open source](https://github.com/gmpetrov/databerry) document retrieval platform that helps to connect your personal data with Large Language Models.

## Installation and Setup

We need to sign up for Databerry, create a datastore, add some data, and get the datastore API endpoint URL.
We need the [API Key](https://docs.databerry.ai/api-reference/authentication).

## Retriever

See a [usage example](../modules/indexes/retrievers/examples/databerry.ipynb).

```python
from langchain.retrievers import DataberryRetriever

retriever = DataberryRetriever(
    datastore_url="https://api.databerry.ai/query/clg1xg2h80000l708dymr0fxc",
    # api_key="DATABERRY_API_KEY", # optional if datastore is public
    # top_k=10 # optional
)

docs = retriever.get_relevant_documents("What's Databerry?")
```
36
docs/integrations/databricks.md
Normal file
@@ -0,0 +1,36 @@
Databricks
==========

The [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform.

Databricks embraces the LangChain ecosystem in various ways:

1. Databricks connector for the SQLDatabase Chain: SQLDatabase.from_databricks() provides an easy way to query your data on Databricks through LangChain
2. Databricks-managed MLflow integrates with LangChain: Tracking and serving LangChain applications with fewer steps
3. Databricks as an LLM provider: Deploy your fine-tuned LLMs on Databricks via serving endpoints or cluster driver proxy apps, and query them as langchain.llms.Databricks
4. Databricks Dolly: Databricks open-sourced Dolly, which allows for commercial use, and can be accessed through the Hugging Face Hub

The first of these is sketched below, after the list.

Databricks connector for the SQLDatabase Chain
----------------------------------------------
You can connect to [Databricks runtimes](https://docs.databricks.com/runtime/index.html) and [Databricks SQL](https://www.databricks.com/product/databricks-sql) using the SQLDatabase wrapper of LangChain. See the notebook [Connect to Databricks](./databricks/databricks.html) for details.

Databricks-managed MLflow integrates with LangChain
---------------------------------------------------

MLflow is an open source platform to manage the ML lifecycle, including experimentation, reproducibility, deployment, and a central model registry. See the notebook [MLflow Callback Handler](./mlflow_tracking.ipynb) for details about MLflow's integration with LangChain.

Databricks provides a fully managed and hosted version of MLflow integrated with enterprise security features, high availability, and other Databricks workspace features such as experiment and run management and notebook revision capture. MLflow on Databricks offers an integrated experience for tracking and securing machine learning model training runs and running machine learning projects. See the [MLflow guide](https://docs.databricks.com/mlflow/index.html) for more details.

Databricks-managed MLflow makes it more convenient to develop LangChain applications on Databricks. For MLflow tracking, you don't need to set the tracking URI. For MLflow Model Serving, you can save LangChain Chains in the MLflow langchain flavor, and then register and serve the Chain with a few clicks on Databricks, with credentials securely managed by MLflow Model Serving.

Databricks as an LLM provider
-----------------------------

The notebook [Wrap Databricks endpoints as LLMs](../modules/models/llms/integrations/databricks.html) illustrates the method to wrap Databricks endpoints as LLMs in LangChain. It supports two types of endpoints: the serving endpoint, which is recommended for both production and development, and the cluster driver proxy app, which is recommended for interactive development.

Databricks endpoints support Dolly, but are also great for hosting models like MPT-7B or any other models from the Hugging Face ecosystem. Databricks endpoints can also be used with proprietary models like OpenAI to provide a governance layer for enterprises.

Databricks Dolly
----------------

Databricks' Dolly is an instruction-following large language model trained on the Databricks machine learning platform that is licensed for commercial use. The model is available on Hugging Face Hub as databricks/dolly-v2-12b. See the notebook [Hugging Face Hub](../modules/models/llms/integrations/huggingface_hub.html) for instructions to access it through the Hugging Face Hub integration with LangChain.
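
A minimal sketch of the SQLDatabase connector referenced above (it assumes you are running inside a Databricks notebook, so the host and token are picked up automatically, and that the `samples.nyctaxi` data is available, as in the linked notebook):

```python
from langchain import SQLDatabase

# inside a Databricks notebook, host and token are inferred automatically
db = SQLDatabase.from_databricks(catalog="samples", schema="nyctaxi")
print(db.get_usable_table_names())
```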
@@ -58,7 +58,7 @@
    "### Optional Parameters\n",
    "The following parameters are optional. When executing the method in a Databricks notebook, you don't need to provide them in most cases.\n",
    "* `host`: The Databricks workspace hostname, excluding the 'https://' part. Defaults to the 'DATABRICKS_HOST' environment variable or the current workspace if in a Databricks notebook.\n",
    "* `api_token`: The Databricks personal access token for accessing the Databricks SQL warehouse or the cluster. Defaults to the 'DATABRICKS_TOKEN' environment variable, or a temporary one is generated if in a Databricks notebook.\n",
    "* `warehouse_id`: The warehouse ID in the Databricks SQL.\n",
    "* `cluster_id`: The cluster ID in the Databricks Runtime. If running in a Databricks notebook and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the cluster the notebook is attached to.\n",
    "* `engine_args`: The arguments to be used when connecting Databricks.\n",
24
docs/integrations/elasticsearch.md
Normal file
@@ -0,0 +1,24 @@
# Elasticsearch

>[Elasticsearch](https://www.elastic.co/elasticsearch/) is a distributed, RESTful search and analytics engine.
> It provides a distributed, multi-tenant-capable full-text search engine with an HTTP web interface and schema-free
> JSON documents.

## Installation and Setup

```bash
pip install elasticsearch
```

## Retriever

>In information retrieval, [Okapi BM25](https://en.wikipedia.org/wiki/Okapi_BM25) (BM is an abbreviation of best matching) is a ranking function used by search engines to estimate the relevance of documents to a given search query. It is based on the probabilistic retrieval framework developed in the 1970s and 1980s by Stephen E. Robertson, Karen Spärck Jones, and others.

>The name of the actual ranking function is BM25. The fuller name, Okapi BM25, includes the name of the first system to use it, which was the Okapi information retrieval system, implemented at London's City University in the 1980s and 1990s. BM25 and its newer variants, e.g. BM25F (a version of BM25 that can take document structure and anchor text into account), represent TF-IDF-like retrieval functions used in document retrieval.

See a [usage example](../modules/indexes/retrievers/examples/elastic_search_bm25.ipynb).

```python
from langchain.retrievers import ElasticSearchBM25Retriever
```
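
A minimal BM25 retriever sketch (assuming an Elasticsearch instance is reachable at the URL below; the index name is illustrative):

```python
from langchain.retrievers import ElasticSearchBM25Retriever

# assumes Elasticsearch is running locally; the index name is illustrative
retriever = ElasticSearchBM25Retriever.create("http://localhost:9200", "langchain-index")
retriever.add_texts(["foo", "bar", "foo bar"])
docs = retriever.get_relevant_documents("foo")
```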
24
docs/integrations/google_vertex_ai.md
Normal file
@@ -0,0 +1,24 @@
# Google Vertex AI

>[Vertex AI](https://cloud.google.com/vertex-ai/docs/start/introduction-unified-platform) is a machine learning (ML)
> platform that lets you train and deploy ML models and AI applications.
> `Vertex AI` combines data engineering, data science, and ML engineering workflows, enabling your teams to
> collaborate using a common toolset.

## Installation and Setup

```bash
pip install google-cloud-aiplatform
```

See the [setup instructions](../modules/models/chat/integrations/google_vertex_ai_palm.ipynb)

## Chat Models

See a [usage example](../modules/models/chat/integrations/google_vertex_ai_palm.ipynb)

```python
from langchain.chat_models import ChatVertexAI
```
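
A minimal chat sketch (assuming Google Cloud credentials are configured, e.g. via `gcloud auth application-default login`):

```python
from langchain.chat_models import ChatVertexAI
from langchain.schema import HumanMessage

chat = ChatVertexAI()
response = chat([HumanMessage(content="Say hello in one short sentence.")])
print(response.content)
```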
@@ -47,7 +47,7 @@ To use the wrapper for a model hosted on Hugging Face Hub:

```python
from langchain.embeddings import HuggingFaceHubEmbeddings
```
For a more detailed walkthrough of this, see [this notebook](../modules/models/text_embedding/examples/huggingface_hub.ipynb)

### Tokenizer

368
docs/integrations/langchain_decorators.md
Normal file
@@ -0,0 +1,368 @@
# LangChain Decorators ✨

LangChain Decorators is a layer on top of LangChain that provides syntactic sugar 🍭 for writing custom langchain prompts and chains.

For feedback, issues, or contributions, please raise an issue here:
[ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators)

Main principles and benefits:

- more `pythonic` way of writing code
- write multiline prompts that won't break your code flow with indentation
- making use of IDE built-in support for **hinting**, **type checking** and **popup with docs** to quickly peek in the function to see the prompt, the parameters it consumes, etc.
- leverage all the power of the 🦜🔗 LangChain ecosystem
- adding support for **optional parameters**
- easily share parameters between the prompts by binding them to one class

Here is a simple example of code written with **LangChain Decorators ✨**:

``` python

@llm_prompt
def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers")->str:
    """
    Write me a short header for my post about {topic} for {platform} platform.
    It should be for {audience} audience.
    (Max 15 words)
    """
    return

# run it naturally
write_me_short_post(topic="starwars")
# or
write_me_short_post(topic="starwars", platform="reddit")
```

# Quick start
## Installation
```bash
pip install langchain_decorators
```

## Examples

A good way to start is to review the examples here:
- [jupyter notebook](https://github.com/ju-bezdek/langchain-decorators/blob/main/example_notebook.ipynb)
- [colab notebook](https://colab.research.google.com/drive/1no-8WfeP6JaLD9yUtkPgym6x0G9ZYZOG#scrollTo=N4cf__D0E2Yk)

# Defining other parameters
Here we are just marking a function as a prompt with the `llm_prompt` decorator, effectively turning it into an LLMChain that is run instead of the function body.

A standard LLMChain takes many more init parameters than just inputs_variables and prompt; this implementation detail is hidden in the decorator.
Here is how it works:

1. Using **Global settings**:

``` python
# define global settings for all prompts (if not set - chatGPT is the current default)
from langchain_decorators import GlobalSettings

GlobalSettings.define_settings(
    default_llm=ChatOpenAI(temperature=0.0),  # this is the default... you can change it here globally
    default_streaming_llm=ChatOpenAI(temperature=0.0, streaming=True),  # this is the default... it will be used for streaming
)
```

2. Using predefined **prompt types**:

``` python
# You can change the default prompt types
from langchain_decorators import PromptTypes, PromptTypeSettings

PromptTypes.AGENT_REASONING.llm = ChatOpenAI()

# Or you can just define your own ones:
class MyCustomPromptTypes(PromptTypes):
    GPT4 = PromptTypeSettings(llm=ChatOpenAI(model="gpt-4"))

@llm_prompt(prompt_type=MyCustomPromptTypes.GPT4)
def write_a_complicated_code(app_idea:str)->str:
    ...
```

3. Define the settings **directly in the decorator**:

``` python
from langchain.llms import OpenAI

@llm_prompt(
    llm=OpenAI(temperature=0.7),
    stop_tokens=["\nObservation"],
    ...
)
def creative_writer(book_title:str)->str:
    ...
```

## Passing a memory and/or callbacks

To pass any of these, just declare them in the function (or use kwargs to pass anything):

```python

@llm_prompt()
async def write_me_short_post(topic:str, platform:str="twitter", memory:SimpleMemory = None):
    """
    {history_key}
    Write me a short header for my post about {topic} for {platform} platform.
    It should be for {audience} audience.
    (Max 15 words)
    """
    pass

await write_me_short_post(topic="old movies")
```

# Simplified streaming

If we want to leverage streaming:
- we need to define the prompt as an async function
- turn on streaming on the decorator, or we can define a PromptType with streaming on
- capture the stream using StreamingContext

This way we just mark which prompt should be streamed, without needing to tinker with which LLM to use or with creating and distributing a streaming handler into a particular part of our chain... just turn the streaming on/off on the prompt/prompt type...

The streaming will happen only if we call it in a streaming context... there we can define a simple function to handle the stream:

``` python
# this code example is complete and should run as it is

from langchain_decorators import StreamingContext, llm_prompt

# this will mark the prompt for streaming (useful if we want to stream just some prompts in our app... but don't want to pass around and distribute the callback handlers)
# note that only async functions can be streamed (you will get an error if it's not)
@llm_prompt(capture_stream=True)
async def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers"):
    """
    Write me a short header for my post about {topic} for {platform} platform.
    It should be for {audience} audience.
    (Max 15 words)
    """
    pass


# just an arbitrary function to demonstrate the streaming... will be some websockets code in the real world
tokens = []
def capture_stream_func(new_token:str):
    tokens.append(new_token)

# if we want to capture the stream, we need to wrap the execution into StreamingContext...
# this will allow us to capture the stream even if the prompt call is hidden inside a higher level method
# only the prompts marked with capture_stream will be captured here
with StreamingContext(stream_to_stdout=True, callback=capture_stream_func):
    result = await write_me_short_post(topic="old movies")
    print("Stream finished ... we can distinguish tokens thanks to alternating colors")


print("\nWe've captured", len(tokens), "tokens🎉\n")
print("Here is the result:")
print(result)
```

# Prompt declarations
By default, the prompt is the whole function docstring, unless you mark your prompt.

## Documenting your prompt

We can specify which part of our docs is the prompt definition, by specifying a code block with the **<prompt>** language tag:

``` python
@llm_prompt
def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers"):
    """
    Here is a good way to write a prompt as part of a function docstring, with additional documentation for devs.

    It needs to be a code block, marked as a `<prompt>` language
    ```<prompt>
    Write me a short header for my post about {topic} for {platform} platform.
    It should be for {audience} audience.
    (Max 15 words)
    ```

    Now only the code block above will be used as a prompt, and the rest of the docstring will be used as a description for developers.
    (It also has the nice benefit that the IDE (like VS Code) will display the prompt properly (not trying to parse it as markdown, and thus showing new lines properly))
    """
    return
```

## Chat messages prompt

For chat models, it is very useful to define the prompt as a set of message templates... here is how to do it:

``` python
@llm_prompt
def simulate_conversation(human_input:str, agent_role:str="a pirate"):
    """
    ## System message
    - note the `:system` suffix inside the <prompt:_role_> tag

    ```<prompt:system>
    You are a {agent_role} hacker. You must act like one.
    You reply always in code, using python or javascript code block...
    for example:

    ... do not reply with anything else.. just with code - respecting your role.
    ```

    # human message
    (we are using the real roles that are enforced by the LLM - GPT supports system, assistant, user)
    ``` <prompt:user>
    Hello, who are you?
    ```
    a reply:

    ``` <prompt:assistant>
    \``` python <<- escaping the inner code block with \ that should be part of the prompt
    def hello():
        print("Argh... hello you pesky pirate")
    \```
    ```

    we can also add some history using a placeholder
    ```<prompt:placeholder>
    {history}
    ```
    ```<prompt:user>
    {human_input}
    ```

    Now only the code blocks above will be used as a prompt, and the rest of the docstring will be used as a description for developers.
    (It also has the nice benefit that the IDE (like VS Code) will display the prompt properly (not trying to parse it as markdown, and thus showing new lines properly))
    """
    pass
```

The roles here are model-native roles (assistant, user, system for chatGPT).

# Optional sections
- you can define whole sections of your prompt that should be optional
- if any input in the section is missing, the whole section won't be rendered

The syntax for this is as follows:

``` python
@llm_prompt
def prompt_with_optional_partials():
    """
    this text will be rendered always, but

    {? anything inside this block will be rendered only if all the {value}s parameters are not empty (None | "") ?}

    you can also place it in between the words
    this too will be rendered{? , but
    this block will be rendered only if {this_value} and {this_value}
    is not empty?} !
    """
```

# Output parsers

- the llm_prompt decorator natively tries to detect the best output parser based on the output type (if not set, it returns the raw string)
- list, dict and pydantic outputs are also supported natively (automatically)

``` python
# this code example is complete and should run as it is

from langchain_decorators import llm_prompt

@llm_prompt
def write_name_suggestions(company_business:str, count:int)->list:
    """ Write me {count} good name suggestions for company that {company_business}
    """
    pass

write_name_suggestions(company_business="sells cookies", count=5)
```

## More complex structures

For dict / pydantic you need to specify the formatting instructions...
This can be tedious, which is why you can let the output parser generate the instructions for you based on the model (pydantic):

``` python
from langchain_decorators import llm_prompt
from pydantic import BaseModel, Field


class TheOutputStructureWeExpect(BaseModel):
    name:str = Field(description="The name of the company")
    headline:str = Field(description="The description of the company (for landing page)")
    employees:list[str] = Field(description="5-8 fake employee names with their positions")

@llm_prompt()
def fake_company_generator(company_business:str)->TheOutputStructureWeExpect:
    """ Generate a fake company that {company_business}
    {FORMAT_INSTRUCTIONS}
    """
    return

company = fake_company_generator(company_business="sells cookies")

# print the result nicely formatted
print("Company name: ", company.name)
print("company headline: ", company.headline)
print("company employees: ", company.employees)
```

# Binding the prompt to an object

``` python
from pydantic import BaseModel
from langchain_decorators import llm_prompt

class AssistantPersonality(BaseModel):
    assistant_name:str
    assistant_role:str
    field:str

    @property
    def a_property(self):
        return "whatever"

    def hello_world(self, function_kwarg:str=None):
        """
        We can reference any {field} or {a_property} inside our prompt... and combine it with {function_kwarg} in the method
        """

    @llm_prompt
    def introduce_your_self(self)->str:
        """
        ``` <prompt:system>
        You are an assistant named {assistant_name}.
        Your role is to act as {assistant_role}
        ```
        ```<prompt:user>
        Introduce yourself (in less than 20 words)
        ```
        """


personality = AssistantPersonality(assistant_name="John", assistant_role="a pirate")

print(personality.introduce_your_self(personality))
```

# More examples:

- these and a few more examples are also available in the [colab notebook here](https://colab.research.google.com/drive/1no-8WfeP6JaLD9yUtkPgym6x0G9ZYZOG#scrollTo=N4cf__D0E2Yk)
- including the [ReAct Agent re-implementation](https://colab.research.google.com/drive/1no-8WfeP6JaLD9yUtkPgym6x0G9ZYZOG#scrollTo=3bID5fryE2Yp) using purely langchain decorators
@@ -1,20 +1,21 @@
# Momento

>[Momento Cache](https://docs.momentohq.com/) is the world's first truly serverless caching service. It provides instant elasticity, scale-to-zero
> capability, and blazing-fast performance.
> With Momento Cache, you grab the SDK, you get an endpoint, input a few lines into your code, and you're off and running.

This page covers how to use the [Momento](https://gomomento.com) ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Momento wrappers.

## Installation and Setup

- Sign up for a free account [here](https://docs.momentohq.com/getting-started) and get an auth token
- Install the Momento Python SDK with `pip install momento`

## Cache

The Cache wrapper allows [Momento](https://gomomento.com) to be used as a serverless, distributed, low-latency cache for LLM prompts and responses.

#### Standard Cache

The standard cache is the go-to use case for [Momento](https://gomomento.com) users in any environment.

@@ -44,10 +45,10 @@
```python
cache_name = "langchain"
langchain.llm_cache = MomentoCache(cache_client, cache_name)
```

## Memory

Momento can be used as a distributed memory store for LLMs.

### Chat Message History Memory

See [this notebook](../modules/memory/examples/momento_chat_message_history.ipynb) for a walkthrough of how to use Momento as a memory store for chat message history.

@@ -35,7 +35,6 @@ from langchain.llms import AzureOpenAI
For a more detailed walkthrough of the `Azure` wrapper, see [this notebook](../modules/models/llms/integrations/azure_openai_example.ipynb)

## Text Embedding Model

```python
from langchain.embeddings import OpenAIEmbeddings
```
For a more detailed walkthrough of this, see [this notebook](../modules/models/text_embedding/examples/openai.ipynb)

## Chat Model

```python
from langchain.chat_models import ChatOpenAI
```
For a more detailed walkthrough of this, see [this notebook](../modules/models/chat/integrations/openai.ipynb)

## Tokenizer

There are several places you can use the `tiktoken` tokenizer. By default, it is used to count tokens

@@ -71,3 +78,11 @@ See a [usage example](../modules/indexes/document_loaders/examples/chatgpt_loade
```python
from langchain.document_loaders.chatgpt import ChatGPTLoader
```

## Retriever

See a [usage example](../modules/indexes/retrievers/examples/chatgpt-plugin.ipynb).

```python
from langchain.retrievers import ChatGPTPluginRetriever
```
@@ -24,10 +24,6 @@ To import this vectorstore:
```python
from langchain.vectorstores.pgvector import PGVector
```

PGVector embedding size is not autodetected. If you are using ChatGPT or any other embedding with 1536 dimensions, the default is fine. If you are going to use, for example, HuggingFaceEmbeddings, you need to set the environment variable `PGVECTOR_VECTOR_SIZE` to the needed value. In the case of HuggingFaceEmbeddings it would be: `PGVECTOR_VECTOR_SIZE=768`

### Usage

For a more detailed walkthrough of the PGVector Wrapper, see [this notebook](../modules/indexes/vectorstores/examples/pgvector.ipynb)
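
A minimal sketch (the connection string assumes a local Postgres with the pgvector extension; `FakeEmbeddings` stands in for a real embedding model):

```python
from langchain.embeddings import FakeEmbeddings
from langchain.vectorstores.pgvector import PGVector

# assumes a local Postgres with the pgvector extension enabled
CONNECTION_STRING = "postgresql+psycopg2://postgres:postgres@localhost:5432/vectordb"

db = PGVector.from_texts(
    texts=["hello pgvector"],
    embedding=FakeEmbeddings(size=1536),
    connection_string=CONNECTION_STRING,
)
docs = db.similarity_search("hello", k=1)
```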
@@ -4,17 +4,19 @@ This page covers how to use the Pinecone ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Pinecone wrappers.

## Installation and Setup
Install the Python SDK:
```bash
pip install pinecone-client
```

## Vectorstore

There exists a wrapper around Pinecone indexes, allowing you to use it as a vectorstore,
whether for semantic search or example selection.

To import this vectorstore:
```python
from langchain.vectorstores import Pinecone
```

For a more detailed walkthrough of the Pinecone vectorstore, see [this notebook](../modules/indexes/vectorstores/examples/pinecone.ipynb)
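
A minimal sketch of wrapping an existing index (the API key, environment, and index name are placeholders; `FakeEmbeddings` stands in for a real embedding model):

```python
import pinecone
from langchain.embeddings import FakeEmbeddings
from langchain.vectorstores import Pinecone

# placeholders; assumes the index already exists with a matching dimension
pinecone.init(api_key="<your-api-key>", environment="<your-environment>")
index = pinecone.Index("langchain-demo")

vectorstore = Pinecone(index, FakeEmbeddings(size=1536).embed_query, "text")
docs = vectorstore.similarity_search("hello", k=1)
```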
@@ -1,19 +1,23 @@
# Prediction Guard

>[Prediction Guard](https://docs.predictionguard.com/) gives quick and easy access to state-of-the-art open and closed access LLMs, without needing to spend days and weeks figuring out all of the implementation details, managing a bunch of different API specs, and setting up the infrastructure for model deployments.

## Installation and Setup
- Install the Python SDK:
```bash
pip install predictionguard
```
- Get a Prediction Guard access token (as described [here](https://docs.predictionguard.com/)) and set it as an environment variable (`PREDICTIONGUARD_TOKEN`)

## LLM

There exists a Prediction Guard LLM wrapper, which you can access with
```python
from langchain.llms import PredictionGuard
```

### Example
You can provide the name of the Prediction Guard model as an argument when initializing the LLM:
```python
pgllm = PredictionGuard(model="MPT-7B-Instruct")
```

@@ -24,14 +28,12 @@ You can also provide your access token directly as an argument:
```python
pgllm = PredictionGuard(model="MPT-7B-Instruct", token="<your access token>")
```

Also, you can provide an "output" argument that is used to structure/control the output of the LLM:
```python
pgllm = PredictionGuard(model="MPT-7B-Instruct", output={"type": "boolean"})
```

## Example usage

#### Basic usage of the controlled or guarded LLM:
```python
import os
```

@@ -72,7 +74,7 @@
```python
pgllm = PredictionGuard(model="MPT-7B-Instruct",
pgllm(prompt.format(query="What kind of post is this?"))
```

#### Basic LLM Chaining with Prediction Guard:
```python
import os
```

@@ -1,31 +1,35 @@
# PromptLayer

>[PromptLayer](https://docs.promptlayer.com/what-is-promptlayer/wxpF9EZkUwvdkwvVE9XEvC/how-promptlayer-works/dvgGSxNe6nB1jj8mUVbG8r)
> is a devtool that allows you to track, manage, and share your GPT prompt engineering.
> It acts as a middleware between your code and OpenAI's python library, recording all your API requests
> and saving relevant metadata for easy exploration and search in the [PromptLayer](https://www.promptlayer.com) dashboard.

## Installation and Setup

If you want to work with PromptLayer:
- Install the `promptlayer` python library
```bash
pip install promptlayer
```
- Create a PromptLayer account
- Create an api token and set it as an environment variable (`PROMPTLAYER_API_KEY`)

## LLM

There exists a PromptLayer OpenAI LLM wrapper, which you can access with
```python
from langchain.llms import PromptLayerOpenAI
```

### Example

To tag your requests, use the argument `pl_tags` when instantiating the LLM
```python
from langchain.llms import PromptLayerOpenAI
llm = PromptLayerOpenAI(pl_tags=["langchain-requests", "chatbot"])
```

To get the PromptLayer request id, use the argument `return_pl_id` when instantiating the LLM
```python
from langchain.llms import PromptLayerOpenAI
llm = PromptLayerOpenAI(return_pl_id=True)
```

@@ -42,8 +46,14 @@ You can use the PromptLayer request ID to add a prompt, score, or other metadata

This LLM is identical to the [OpenAI LLM](./openai.md), except that
- all your requests will be logged to your PromptLayer account
- you can add `pl_tags` when instantiating to tag your requests on PromptLayer
- you can add `return_pl_id` when instantiating to return a PromptLayer request id to use [while tracking requests](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9).

## Chat Model

```python
from langchain.chat_models import PromptLayerChatOpenAI
```

See a [usage example](../modules/models/chat/integrations/promptlayer_chatopenai.ipynb).

PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](../modules/models/chat/integrations/promptlayer_chatopenai.ipynb) and `PromptLayerOpenAIChat`

233
docs/integrations/ray_serve.ipynb
Normal file
@@ -0,0 +1,233 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Ray Serve\n",
|
||||
"\n",
|
||||
"[Ray Serve](https://docs.ray.io/en/latest/serve/index.html) is a scalable model serving library for building online inference APIs. Serve is particularly well suited for system composition, enabling you to build a complex inference service consisting of multiple chains and business logic all in Python code. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Goal of this notebook\n",
|
||||
"This notebook shows a simple example of how to deploy an OpenAI chain into production. You can extend it to deploy your own self-hosted models where you can easily define amount of hardware resources (GPUs and CPUs) needed to run your model in production efficiently. Read more about available options including autoscaling in the Ray Serve [documentation](https://docs.ray.io/en/latest/serve/getting_started.html).\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup Ray Serve\n",
|
||||
"Install ray with `pip install ray[serve]`. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## General Skeleton"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The general skeleton for deploying a service is the following:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# 0: Import ray serve and request from starlette\n",
|
||||
"from ray import serve\n",
|
||||
"from starlette.requests import Request\n",
|
||||
"\n",
|
||||
"# 1: Define a Ray Serve deployment.\n",
|
||||
"@serve.deployment\n",
|
||||
"class LLMServe:\n",
|
||||
"\n",
|
||||
" def __init__(self) -> None:\n",
|
||||
" # All the initialization code goes here\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
" async def __call__(self, request: Request) -> str:\n",
|
||||
" # You can parse the request here\n",
|
||||
" # and return a response\n",
|
||||
" return \"Hello World\"\n",
|
||||
"\n",
|
||||
"# 2: Bind the model to deployment\n",
|
||||
"deployment = LLMServe.bind()\n",
|
||||
"\n",
|
||||
"# 3: Run the deployment\n",
|
||||
"serve.api.run(deployment)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Shutdown the deployment\n",
|
||||
"serve.api.shutdown()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example of deploying and OpenAI chain with custom prompts"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Get an OpenAI API key from [here](https://platform.openai.com/account/api-keys). By running the following code, you will be asked to provide your API key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain import PromptTemplate, LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from getpass import getpass\n",
|
||||
"OPENAI_API_KEY = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@serve.deployment\n",
|
||||
"class DeployLLM:\n",
|
||||
"\n",
|
||||
" def __init__(self):\n",
|
||||
" # We initialize the LLM, template and the chain here\n",
|
||||
" llm = OpenAI(openai_api_key=OPENAI_API_KEY)\n",
|
||||
" template = \"Question: {question}\\n\\nAnswer: Let's think step by step.\"\n",
|
||||
" prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
" self.chain = LLMChain(llm=llm, prompt=prompt)\n",
|
||||
"\n",
|
||||
" def _run_chain(self, text: str):\n",
|
||||
" return self.chain(text)\n",
|
||||
"\n",
|
||||
" async def __call__(self, request: Request):\n",
|
||||
" # 1. Parse the request\n",
|
||||
" text = request.query_params[\"text\"]\n",
|
||||
" # 2. Run the chain\n",
|
||||
" resp = self._run_chain(text)\n",
|
||||
" # 3. Return the response\n",
|
||||
" return resp[\"text\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can bind the deployment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Bind the model to deployment\n",
|
||||
"deployment = DeployLLM.bind()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can assign the port number and host when we want to run the deployment. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Example port number\n",
|
||||
"PORT_NUMBER = 8282\n",
|
||||
"# Run the deployment\n",
|
||||
"serve.api.run(deployment, port=PORT_NUMBER)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now that service is deployed on port `localhost:8282` we can send a post request to get the results back."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"text = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
|
||||
"response = requests.post(f'http://localhost:{PORT_NUMBER}/?text={text}')\n",
|
||||
"print(response.content.decode())"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "ray",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
17
docs/integrations/roam.md
Normal file
@@ -0,0 +1,17 @@
# Roam

>[Roam](https://roamresearch.com/) is a note-taking tool for networked thought, designed to create a personal knowledge base.

## Installation and Setup

There isn't any special setup for it.

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/roam.ipynb).

```python
from langchain.document_loaders import RoamLoader
```
43
docs/integrations/shaleprotocol.md
Normal file
@@ -0,0 +1,43 @@
# Shale Protocol

[Shale Protocol](https://shaleprotocol.com) provides production-ready inference APIs for open LLMs. It's a Plug & Play API as it's hosted on a highly scalable GPU cloud infrastructure.

Our free tier supports up to 1K daily requests per key as we want to eliminate the barrier for anyone to start building genAI apps with LLMs.

With Shale Protocol, developers/researchers can create apps and explore the capabilities of open LLMs at no cost.

This page covers how the Shale-Serve API can be incorporated with LangChain.

As of June 2023, the API supports Vicuna-13B by default. We are going to support more LLMs such as Falcon-40B in future releases.

## How to

### 1. Find the link to our Discord on https://shaleprotocol.com. Generate an API key through the "Shale Bot" on our Discord. No credit card is required and there are no free trials; it's a forever-free tier with a 1K-requests-per-day limit per API key.

### 2. Use https://shale.live/v1 as the OpenAI API drop-in replacement

For example:
```python
from langchain.llms import OpenAI
from langchain import PromptTemplate, LLMChain

import os
os.environ['OPENAI_API_BASE'] = "https://shale.live/v1"
os.environ['OPENAI_API_KEY'] = "ENTER YOUR API KEY"

llm = OpenAI()

template = """Question: {question}

Answer: Let's think step by step."""

prompt = PromptTemplate(template=template, input_variables=["question"])

llm_chain = LLMChain(prompt=prompt, llm=llm)

question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"

llm_chain.run(question)
```
17 docs/integrations/slack.md Normal file
@@ -0,0 +1,17 @@
# Slack

>[Slack](https://slack.com/) is an instant messaging program.

## Installation and Setup

No special setup is required.

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/slack.ipynb).

```python
from langchain.document_loaders import SlackDirectoryLoader
```
20 docs/integrations/spacy.md Normal file
@@ -0,0 +1,20 @@
# spaCy

>[spaCy](https://spacy.io/) is an open-source software library for advanced natural language processing, written in the programming languages Python and Cython.

## Installation and Setup

```bash
pip install spacy
```

## Text Splitter

See a [usage example](../modules/indexes/text_splitters/examples/spacy.ipynb).

```python
from langchain.text_splitter import SpacyTextSplitter
```
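
A minimal usage sketch (the `chunk_size` value and input text here are illustrative, not from the linked notebook):

```python
from langchain.text_splitter import SpacyTextSplitter

text_splitter = SpacyTextSplitter(chunk_size=1000)

# split a long string into chunks along spaCy sentence boundaries
texts = text_splitter.split_text("Some long document text ...")
```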
15 docs/integrations/spreedly.md Normal file
@@ -0,0 +1,15 @@
# Spreedly

>[Spreedly](https://docs.spreedly.com/) is a service that allows you to securely store credit cards and use them to transact against any number of payment gateways and third party APIs. It does this by simultaneously providing a card tokenization/vault service as well as a gateway and receiver integration service. Payment methods tokenized by Spreedly are stored at `Spreedly`, allowing you to independently store a card and then pass that card to different end points based on your business requirements.

## Installation and Setup

See [setup instructions](../modules/indexes/document_loaders/examples/spreedly.ipynb).

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/spreedly.ipynb).

```python
from langchain.document_loaders import SpreedlyLoader
```
16 docs/integrations/stripe.md Normal file
@@ -0,0 +1,16 @@
# Stripe

>[Stripe](https://stripe.com/en-ca) is an Irish-American financial services and software as a service (SaaS) company. It offers payment-processing software and application programming interfaces for e-commerce websites and mobile applications.

## Installation and Setup

See [setup instructions](../modules/indexes/document_loaders/examples/stripe.ipynb).

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/stripe.ipynb).

```python
from langchain.document_loaders import StripeLoader
```
17 docs/integrations/telegram.md Normal file
@@ -0,0 +1,17 @@
# Telegram

>[Telegram Messenger](https://web.telegram.org/a/) is a globally accessible freemium, cross-platform, encrypted, cloud-based and centralized instant messaging service. The application also provides optional end-to-end encrypted chats and video calling, VoIP, file sharing and several other features.

## Installation and Setup

See [setup instructions](../modules/indexes/document_loaders/examples/telegram.ipynb).

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/telegram.ipynb).

```python
from langchain.document_loaders import TelegramChatFileLoader
from langchain.document_loaders import TelegramChatApiLoader
```
22 docs/integrations/tensorflow_hub.md Normal file
@@ -0,0 +1,22 @@
# TensorFlow Hub

>[TensorFlow Hub](https://www.tensorflow.org/hub) is a repository of trained machine learning models ready for fine-tuning and deployable anywhere.

>[TensorFlow Hub](https://tfhub.dev/) lets you search and discover hundreds of trained, ready-to-deploy machine learning models in one place.

## Installation and Setup

```bash
pip install tensorflow-hub
pip install tensorflow_text
```

## Text Embedding Models

See a [usage example](../modules/models/text_embedding/examples/tensorflowhub.ipynb).

```python
from langchain.embeddings import TensorflowHubEmbeddings
```
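
A minimal usage sketch (the default hub model is used when no model URL is passed; the input strings are illustrative):

```python
from langchain.embeddings import TensorflowHubEmbeddings

embeddings = TensorflowHubEmbeddings()

query_vector = embeddings.embed_query("Hello, world!")             # one embedding vector
doc_vectors = embeddings.embed_documents(["doc one", "doc two"])   # one vector per document
```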
16 docs/integrations/tomarkdown.md Normal file
@@ -0,0 +1,16 @@
# 2Markdown

>The [2markdown](https://2markdown.com/) service transforms website content into structured markdown files.

## Installation and Setup

We need an `API key`. See the [instructions on how to get it](https://2markdown.com/login).

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/tomarkdown.ipynb).

```python
from langchain.document_loaders import ToMarkdownLoader
```
22 docs/integrations/trello.md Normal file
@@ -0,0 +1,22 @@
# Trello

>[Trello](https://www.atlassian.com/software/trello) is a web-based project management and collaboration tool that allows individuals and teams to organize and track their tasks and projects. It provides a visual interface known as a "board" where users can create lists and cards to represent their tasks and activities.
>The `TrelloLoader` allows us to load cards from a `Trello` board.

## Installation and Setup

```bash
pip install py-trello beautifulsoup4
```

See [setup instructions](../modules/indexes/document_loaders/examples/trello.ipynb).

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/trello.ipynb).

```python
from langchain.document_loaders import TrelloLoader
```
21 docs/integrations/twitter.md Normal file
@@ -0,0 +1,21 @@
# Twitter

>[Twitter](https://twitter.com/) is an online social media and social networking service.

## Installation and Setup

```bash
pip install tweepy
```

We must initialize the loader with a `Twitter API` token, and we need to provide the Twitter `username` of the account to load tweets from.

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/twitter.ipynb).

```python
from langchain.document_loaders import TwitterTweetLoader
```
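
A minimal sketch, assuming the `from_bearer_token` constructor shown in the linked notebook (the token, username, and tweet count are illustrative placeholders):

```python
from langchain.document_loaders import TwitterTweetLoader

loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="YOUR_BEARER_TOKEN",  # hypothetical token, for illustration only
    twitter_users=["hwchase17"],
    number_tweets=50,
)
docs = loader.load()
```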
@@ -4,7 +4,7 @@

What is Vectara?

**Vectara Overview:**
- Vectara is developer-first API platform for building conversational search applications
- Vectara is a developer-first API platform for building GenAI applications
- To use Vectara - first [sign up](https://console.vectara.com/signup) and create an account. Then create a corpus and an API key for indexing and searching.
- You can use Vectara's [indexing API](https://docs.vectara.com/docs/indexing-apis/indexing) to add documents into Vectara's index
- You can use Vectara's [Search API](https://docs.vectara.com/docs/search-apis/search) to query Vectara's index (which also supports Hybrid search implicitly).
@@ -13,6 +13,13 @@ What is Vectara?

## Installation and Setup
To use Vectara with LangChain, no special installation steps are required. You just have to provide your customer ID, corpus ID, and an API key created within the Vectara console to enable indexing and searching.

Alternatively, these can be provided as environment variables:
- export `VECTARA_CUSTOMER_ID`="your_customer_id"
- export `VECTARA_CORPUS_ID`="your_corpus_id"
- export `VECTARA_API_KEY`="your-vectara-api-key"

## Usage

### VectorStore

There exists a wrapper around the Vectara platform, allowing you to use it as a vectorstore, whether for semantic search or example selection.
@@ -32,8 +39,21 @@ vectara = Vectara(
```
The customer_id, corpus_id and api_key are optional, and if they are not supplied will be read from the environment variables `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`, respectively.

To query the vectorstore, you can use the `similarity_search` method (or `similarity_search_with_score`), which takes a query string and returns a list of results:
```python
results = vectara.similarity_search("what is LangChain?")
```

For a more detailed walkthrough of the Vectara wrapper, see one of the two example notebooks:
`similarity_search_with_score` also supports the following additional arguments:
- `k`: number of results to return (defaults to 5)
- `lambda_val`: the [lexical matching](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) factor for hybrid search (defaults to 0.025)
- `filter`: a [filter](https://docs.vectara.com/docs/common-use-cases/filtering-by-metadata/filter-overview) to apply to the results (default None)
- `n_sentence_context`: number of sentences to include before/after the actual matching segment when returning results. This defaults to 0 so as to return the exact text segment that matches, but it can be used with other values (e.g., 2 or 3) to return adjacent text segments.

The results are returned as a list of relevant documents, with a relevance score for each document.
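
For example, a scored query using the arguments above might look like this (a sketch; the query string and values are illustrative):

```python
results = vectara.similarity_search_with_score(
    "what is LangChain?",
    k=5,                   # number of results
    lambda_val=0.025,      # lexical matching factor for hybrid search
    filter=None,           # no metadata filter
    n_sentence_context=2,  # include 2 sentences before/after each match
)
for doc, score in results:
    print(score, doc.page_content)
```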

For more detailed examples of using the Vectara wrapper, see one of these two sample notebooks:
* [Chat Over Documents with Vectara](./vectara/vectara_chat.html)
* [Vectara Text Generation](./vectara/vectara_text_generation.html)

@@ -102,21 +102,11 @@
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'langchain.vectorstores.vectara.Vectara'>\n"
]
}
],
"outputs": [],
"source": [
"openai_api_key = os.environ['OPENAI_API_KEY']\n",
"llm = OpenAI(openai_api_key=openai_api_key, temperature=0)\n",
"retriever = VectaraRetriever(vectorstore, alpha=0.025, k=5, filter=None)\n",
"\n",
"print(type(vectorstore))\n",
"retriever = vectorstore.as_retriever(lambda_val=0.025, k=5, filter=None)\n",
"d = retriever.get_relevant_documents('What did the president say about Ketanji Brown Jackson')\n",
"\n",
"qa = ConversationalRetrievalChain.from_llm(llm, retriever, memory=memory)"
@@ -142,7 +132,7 @@
{
"data": {
"text/plain": [
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, and a former federal public defender.\""
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
]
},
"execution_count": 7,
@@ -174,7 +164,7 @@
{
"data": {
"text/plain": [
"' Justice Stephen Breyer.'"
"' Justice Stephen Breyer'"
]
},
"execution_count": 9,
@@ -241,7 +231,7 @@
{
"data": {
"text/plain": [
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, and a former federal public defender.\""
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
]
},
"execution_count": 12,
@@ -286,7 +276,7 @@
{
"data": {
"text/plain": [
"' Justice Stephen Breyer.'"
"' Justice Stephen Breyer'"
]
},
"execution_count": 14,
@@ -344,7 +334,7 @@
{
"data": {
"text/plain": [
"Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice. A former federal public defender.', metadata={'source': '../../modules/state_of_the_union.txt'})"
"Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'})"
]
},
"execution_count": 17,
@@ -392,6 +382,24 @@
"result = qa({\"question\": query, \"chat_history\": chat_history, \"vectordbkwargs\": vectordbkwargs})"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "24ebdaec",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\n"
]
}
],
"source": [
"print(result['answer'])"
]
},
{
"cell_type": "markdown",
"id": "99b96dae",
@@ -459,7 +467,7 @@
{
"data": {
"text/plain": [
"' The president did not mention Ketanji Brown Jackson.'"
"\" The president said that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson, who he described as one of the nation's top legal minds, to continue Justice Breyer's legacy of excellence.\""
]
},
"execution_count": 23,
@@ -538,7 +546,7 @@
{
"data": {
"text/plain": [
"' The president did not mention Ketanji Brown Jackson.\\nSOURCES: ../../modules/state_of_the_union.txt'"
"\" The president said that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson, who he described as one of the nation's top legal minds, and that she will continue Justice Breyer's legacy of excellence.\\nSOURCES: ../../../state_of_the_union.txt\""
]
},
"execution_count": 27,
@@ -598,7 +606,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, and a former federal public defender."
" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence."
]
}
],
@@ -620,7 +628,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
" Justice Stephen Breyer."
" Justice Stephen Breyer"
]
}
],
@@ -681,7 +689,7 @@
{
"data": {
"text/plain": [
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, and a former federal public defender.\""
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
]
},
"execution_count": 33,

@@ -6,7 +6,7 @@
"source": [
"# Vectara Text Generation\n",
"\n",
"This notebook is based on [chat_vector_db](https://github.com/hwchase17/langchain/blob/master/docs/modules/chains/index_examples/question_answering.ipynb) and adapted to Vectara."
"This notebook is based on the [text generation](https://github.com/hwchase17/langchain/blob/master/docs/modules/chains/index_examples/vector_db_text_generation.ipynb) notebook and adapted to Vectara."
]
},
{
@@ -24,6 +24,7 @@
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from langchain.llms import OpenAI\n",
"from langchain.docstore.document import Document\n",
"import requests\n",
@@ -159,7 +160,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[{'text': '\\n\\nEnvironment variables are an essential part of any development workflow. They provide a way to store and access information that is specific to the environment in which the code is running. This can be especially useful when working with different versions of a language or framework, or when running code on different machines.\\n\\nThe Deno CLI tasks extension provides a way to easily manage environment variables when running Deno commands. This extension provides a task definition for allowing you to create tasks that execute the `deno` CLI from within the editor. The template for the Deno CLI tasks has the following interface, which can be configured in a `tasks.json` within your workspace:\\n\\nThe task definition includes the `type` field, which should be set to `deno`, and the `command` field, which is the `deno` command to run (e.g. `run`, `test`, `cache`, etc.). Additionally, you can specify additional arguments to pass on the command line, the current working directory to execute the command, and any environment variables.\\n\\nUsing environment variables with the Deno CLI tasks extension is a great way to ensure that your code is running in the correct environment. For example, if you are running a test suite,'}, {'text': '\\n\\nEnvironment variables are an important part of any programming language, and they can be used to store and access data in a variety of ways. In this blog post, we\\'ll be taking a look at environment variables specifically for the shell.\\n\\nShell variables are similar to environment variables, but they won\\'t be exported to spawned commands. They are defined with the following syntax:\\n\\n```sh\\nVAR_NAME=value\\n```\\n\\nShell variables can be used to store and access data in a variety of ways. For example, you can use them to store values that you want to re-use, but don\\'t want to be available in any spawned processes.\\n\\nFor example, if you wanted to store a value and then use it in a command, you could do something like this:\\n\\n```sh\\nVAR=hello && echo $VAR && deno eval \"console.log(\\'Deno: \\' + Deno.env.get(\\'VAR\\'))\"\\n```\\n\\nThis would output the following:\\n\\n```\\nhello\\nDeno: undefined\\n```\\n\\nAs you can see, the value stored in the shell variable is not available in the spawned process.\\n\\n'}, {'text': '\\n\\nWhen it comes to developing applications, environment variables are an essential part of the process. Environment variables are used to store information that can be used by applications and scripts to customize their behavior. This is especially important when it comes to developing applications with Deno, as there are several environment variables that can impact the behavior of Deno.\\n\\nThe most important environment variable for Deno is `DENO_AUTH_TOKENS`. This environment variable is used to store authentication tokens that are used to access remote resources. This is especially important when it comes to accessing remote APIs or databases. Without the proper authentication tokens, Deno will not be able to access the remote resources.\\n\\nAnother important environment variable for Deno is `DENO_DIR`. This environment variable is used to store the directory where Deno will store its files. This includes the Deno executable, the Deno cache, and the Deno configuration files. By setting this environment variable, you can ensure that Deno will always be able to find the files it needs.\\n\\nFinally, there is the `DENO_PLUGINS` environment variable. 
This environment variable is used to store the list of plugins that Deno will use. This is important for customizing the'}, {'text': '\\n\\nEnvironment variables are a great way to store and access sensitive information in your Deno applications. Deno offers built-in support for environment variables with `Deno.env`, and you can also use a `.env` file to store and access environment variables. In this blog post, we\\'ll explore both of these options and how to use them in your Deno applications.\\n\\n## Built-in `Deno.env`\\n\\nThe Deno runtime offers built-in support for environment variables with [`Deno.env`](https://deno.land/api@v1.25.3?s=Deno.env). `Deno.env` has getter and setter methods. Here is example usage:\\n\\n```ts\\nDeno.env.set(\"FIREBASE_API_KEY\", \"examplekey123\");\\nDeno.env.set(\"FIREBASE_AUTH_DOMAIN\", \"firebasedomain.com\");\\n\\nconsole.log(Deno.env.get(\"FIREBASE_API_KEY\")); // examplekey123\\nconsole.log(Deno.env.get(\"FIREBASE_AUTH_'}]\n"
"[{'text': '\\n\\nEnvironment variables are a powerful tool for managing configuration settings in your applications. They allow you to store and access values from anywhere in your code, making it easier to keep your codebase organized and maintainable.\\n\\nHowever, there are times when you may want to use environment variables specifically for a single command. This is where shell variables come in. Shell variables are similar to environment variables, but they won\\'t be exported to spawned commands. They are defined with the following syntax:\\n\\n```sh\\nVAR_NAME=value\\n```\\n\\nFor example, if you wanted to use a shell variable instead of an environment variable in a command, you could do something like this:\\n\\n```sh\\nVAR=hello && echo $VAR && deno eval \"console.log(\\'Deno: \\' + Deno.env.get(\\'VAR\\'))\"\\n```\\n\\nThis would output the following:\\n\\n```\\nhello\\nDeno: undefined\\n```\\n\\nShell variables can be useful when you want to re-use a value, but don\\'t want it available in any spawned processes.\\n\\nAnother way to use environment variables is through pipelines. Pipelines provide a way to pipe the'}, {'text': '\\n\\nEnvironment variables are a great way to store and access sensitive information in your applications. They are also useful for configuring applications and managing different environments. In Deno, there are two ways to use environment variables: the built-in `Deno.env` and the `.env` file.\\n\\nThe `Deno.env` is a built-in feature of the Deno runtime that allows you to set and get environment variables. It has getter and setter methods that you can use to access and set environment variables. For example, you can set the `FIREBASE_API_KEY` and `FIREBASE_AUTH_DOMAIN` environment variables like this:\\n\\n```ts\\nDeno.env.set(\"FIREBASE_API_KEY\", \"examplekey123\");\\nDeno.env.set(\"FIREBASE_AUTH_DOMAIN\", \"firebasedomain.com\");\\n\\nconsole.log(Deno.env.get(\"FIREBASE_API_KEY\")); // examplekey123\\nconsole.log(Deno.env.get(\"FIREBASE_AUTH_DOMAIN\")); // firebasedomain'}, {'text': \"\\n\\nEnvironment variables are a powerful tool for managing configuration and settings in your applications. They allow you to store and access values that can be used in your code, and they can be set and changed without having to modify your code.\\n\\nIn Deno, environment variables are defined using the `export` command. For example, to set a variable called `VAR_NAME` to the value `value`, you would use the following command:\\n\\n```sh\\nexport VAR_NAME=value\\n```\\n\\nYou can then access the value of the environment variable in your code using the `Deno.env.get()` method. For example, if you wanted to log the value of the `VAR_NAME` variable, you could use the following code:\\n\\n```js\\nconsole.log(Deno.env.get('VAR_NAME'));\\n```\\n\\nYou can also set environment variables for a single command. To do this, you can list the environment variables before the command, like so:\\n\\n```\\nVAR=hello VAR2=bye deno run main.ts\\n```\\n\\nThis will set the environment variables `VAR` and `V\"}, {'text': \"\\n\\nEnvironment variables are a powerful tool for managing settings and configuration in your applications. They can be used to store information such as user preferences, application settings, and even passwords. In this blog post, we'll discuss how to make Deno scripts executable with a hashbang (shebang).\\n\\nA hashbang is a line of code that is placed at the beginning of a script. It tells the system which interpreter to use when running the script. 
In the case of Deno, the hashbang should be `#!/usr/bin/env -S deno run --allow-env`. This tells the system to use the Deno interpreter and to allow the script to access environment variables.\\n\\nOnce the hashbang is in place, you may need to give the script execution permissions. On Linux, this can be done with the command `sudo chmod +x hashbang.ts`. After that, you can execute the script by calling it like any other command: `./hashbang.ts`.\\n\\nIn the example program, we give the context permission to access the environment variables and print the Deno installation path. This is done by using the `Deno.env.get()` function, which returns the value of the specified environment\"}]\n"
]
}
],
21 docs/integrations/vespa.md Normal file
@@ -0,0 +1,21 @@
# Vespa

>[Vespa](https://vespa.ai/) is a fully featured search engine and vector database.
> It supports vector search (ANN), lexical search, and search in structured data, all in the same query.

## Installation and Setup

```bash
pip install pyvespa
```

## Retriever

See a [usage example](../modules/indexes/retrievers/examples/vespa.ipynb).

```python
from langchain.retrievers import VespaRetriever
```
21 docs/integrations/weather.md Normal file
@@ -0,0 +1,21 @@
# Weather

>[OpenWeatherMap](https://openweathermap.org/) is an open source weather service provider.

## Installation and Setup

```bash
pip install pyowm
```

We must set up the `OpenWeatherMap API token`.

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/weather.ipynb).

```python
from langchain.document_loaders import WeatherDataLoader
```
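
A minimal sketch, assuming the `from_params` constructor used in the linked notebook (the place names and API key are illustrative placeholders):

```python
from langchain.document_loaders import WeatherDataLoader

# hypothetical key and places, for illustration only
loader = WeatherDataLoader.from_params(
    places=["chennai", "vellore"],
    openweathermap_api_key="YOUR_API_KEY",
)
docs = loader.load()
```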
18 docs/integrations/whatsapp.md Normal file
@@ -0,0 +1,18 @@
# WhatsApp

>[WhatsApp](https://www.whatsapp.com/) (also called `WhatsApp Messenger`) is a freeware, cross-platform, centralized instant messaging (IM) and voice-over-IP (VoIP) service. It allows users to send text and voice messages, make voice and video calls, and share images, documents, user locations, and other content.

## Installation and Setup

No special setup is required.

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/whatsapp_chat.ipynb).

```python
from langchain.document_loaders import WhatsAppChatLoader
```
28 docs/integrations/wikipedia.md Normal file
@@ -0,0 +1,28 @@
# Wikipedia

>[Wikipedia](https://wikipedia.org/) is a multilingual free online encyclopedia written and maintained by a community of volunteers, known as Wikipedians, through open collaboration and using a wiki-based editing system called MediaWiki. `Wikipedia` is the largest and most-read reference work in history.

## Installation and Setup

```bash
pip install wikipedia
```

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/wikipedia.ipynb).

```python
from langchain.document_loaders import WikipediaLoader
```
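
A minimal sketch (the query and document cap are illustrative):

```python
from langchain.document_loaders import WikipediaLoader

loader = WikipediaLoader(query="LangChain", load_max_docs=2)  # fetch up to 2 matching pages
docs = loader.load()
```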

## Retriever

See a [usage example](../modules/indexes/retrievers/examples/wikipedia.ipynb).

```python
from langchain.retrievers import WikipediaRetriever
```
22 docs/integrations/youtube.md Normal file
@@ -0,0 +1,22 @@
# YouTube

>[YouTube](https://www.youtube.com/) is an online video sharing and social media platform created by Google.
> We download the `YouTube` transcripts and video information.

## Installation and Setup

```bash
pip install youtube-transcript-api
pip install pytube
```

## Document Loader

See a [usage example](../modules/indexes/document_loaders/examples/youtube_transcript.ipynb).

```python
from langchain.document_loaders import YoutubeLoader
from langchain.document_loaders import GoogleApiYoutubeLoader
```
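
A minimal sketch, assuming the `from_youtube_url` constructor shown in the linked notebook (the video URL is an illustrative placeholder):

```python
from langchain.document_loaders import YoutubeLoader

# hypothetical video URL, for illustration only
loader = YoutubeLoader.from_youtube_url(
    "https://www.youtube.com/watch?v=VIDEO_ID",
    add_video_info=True,  # also fetch title, author, etc. (uses pytube)
)
docs = loader.load()  # one Document containing the transcript
```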
28 docs/integrations/zep.md Normal file
@@ -0,0 +1,28 @@
# Zep

>[Zep](https://docs.getzep.com/) - A long-term memory store for LLM applications.

>`Zep` stores, summarizes, embeds, indexes, and enriches conversational AI chat histories, and exposes them via simple, low-latency APIs.
>- Long-term memory persistence, with access to historical messages irrespective of your summarization strategy.
>- Auto-summarization of memory messages based on a configurable message window. A series of summaries are stored, providing flexibility for future summarization strategies.
>- Vector search over memories, with messages automatically embedded on creation.
>- Auto-token counting of memories and summaries, allowing finer-grained control over prompt assembly.
>- Python and JavaScript SDKs.

`Zep` [project](https://github.com/getzep/zep)

## Installation and Setup

```bash
pip install zep_python
```

## Retriever

See a [usage example](../modules/indexes/retrievers/examples/zep_memorystore.ipynb).

```python
from langchain.retrievers import ZepRetriever
```
@@ -1,19 +1,20 @@

# Zilliz

This page covers how to use the Zilliz Cloud ecosystem within LangChain.
Zilliz uses the Milvus integration.
It is broken into two parts: installation and setup, and then references to specific Milvus wrappers.
>[Zilliz Cloud](https://zilliz.com/doc/quick_start) is a fully managed cloud service for `LF AI Milvus®`.

## Installation and Setup
- Install the Python SDK with `pip install pymilvus`
## Wrappers

### VectorStore
Install the Python SDK:
```bash
pip install pymilvus
```

There exists a wrapper around Zilliz indexes, allowing you to use it as a vectorstore,
## Vectorstore

A wrapper around Zilliz indexes allows you to use it as a vectorstore,
whether for semantic search or example selection.

To import this vectorstore:
```python
from langchain.vectorstores import Milvus
```
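
A hedged sketch of pointing the Milvus wrapper at a Zilliz Cloud cluster; the endpoint and credentials in `connection_args` are assumptions for illustration, not verified values:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Milvus

vector_db = Milvus.from_texts(
    ["LangChain supports many vector stores."],
    OpenAIEmbeddings(),
    connection_args={
        "uri": "https://<your-cluster>.zillizcloud.com",  # assumption: your cluster endpoint
        "user": "db_admin",                               # assumption
        "password": "<your-password>",                    # assumption
        "secure": True,
    },
)
docs = vector_db.similarity_search("vector stores")
```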
@@ -5,108 +5,101 @@ Agents
`Conceptual Guide <https://docs.langchain.com/docs/components/agents>`_


Some applications will require not just a predetermined chain of calls to LLMs/other tools,
Some applications require not just a predetermined chain of calls to LLMs/other tools,
but potentially an unknown chain that depends on the user's input.
In these types of chains, there is a “agent” which has access to a suite of tools.
In these types of chains, there is an **agent** which has access to a suite of **tools**.
Depending on the user input, the agent can then decide which, if any, of these tools to call.

At the moment, there are two main types of agents:

1. "Action Agents": these agents decide an action to take and take that action one step at a time
2. "Plan-and-Execute Agents": these agents first decide a plan of actions to take, and then execute those actions one at a time.
1. **Action Agents**: these agents decide the actions to take and execute those actions one action at a time.
2. **Plan-and-Execute Agents**: these agents first decide a plan of actions to take, and then execute those actions one at a time.

When should you use each one? Action Agents are more conventional, and good for small tasks.
For more complex or long running tasks, the initial planning step helps to maintain long term objectives and focus. However, that comes at the expense of generally more calls and higher latency.
These two agents are also not mutually exclusive - in fact, it is often best to have an Action Agent be in charge of the execution for the Plan and Execute agent.
For more complex or long running tasks, the initial planning step helps to maintain long term objectives and focus.
However, that comes at the expense of generally more calls and higher latency.
These two agents are also not mutually exclusive - in fact, it is often best to have an Action Agent be in charge
of the execution for the Plan and Execute agent.

Action Agents
-------------

High level pseudocode of agents looks something like:
High level pseudocode of the Action Agents:

- Some user input is received
- The `agent` decides which `tool` - if any - to use, and what the input to that tool should be
- That `tool` is then called with that `tool input`, and an `observation` is recorded (this is just the output of calling that tool with that tool input)
- That history of `tool`, `tool input`, and `observation` is passed back into the `agent`, and it decides what step to take next
- This is repeated until the `agent` decides it no longer needs to use a `tool`, and then it responds directly to the user.
- The **user input** is received
- The **agent** decides which **tool** - if any - to use, and what the **tool input** should be
- That **tool** is then called with the **tool input**, and an **observation** is recorded (the output of this call)
- That history of **tool**, **tool input**, and **observation** is passed back into the **agent**, and it decides the next step
- This is repeated until the **agent** decides it no longer needs to use a **tool**, and then it responds directly to the user.

The different abstractions involved in agents are as follows:

- Agent: this is where the logic of the application lives. Agents expose an interface that takes in user input along with a list of previous steps the agent has taken, and returns either an `AgentAction` or `AgentFinish`
- `AgentAction` corresponds to the tool to use and the input to that tool
- `AgentFinish` means the agent is done, and has information around what to return to the user
- Tools: these are the actions an agent can take. What tools you give an agent highly depend on what you want the agent to do
- Toolkits: these are groups of tools designed for a specific use case. For example, in order for an agent to interact with a SQL database in the best way it may need access to one tool to execute queries and another tool to inspect tables.
- Agent Executor: this wraps an agent and a list of tools. This is responsible for the loop of running the agent iteratively until the stopping criteria is met.
The different abstractions involved in agents are:

The most important abstraction of the four above to understand is that of the agent.
Although an agent can be defined in whatever way one chooses, the typical way to construct an agent is with:
- **Agent**: this is where the logic of the application lives. Agents expose an interface that takes in user input
  along with a list of previous steps the agent has taken, and returns either an **AgentAction** or **AgentFinish**

- PromptTemplate: this is responsible for taking the user input and previous steps and constructing a prompt to send to the language model
- Language Model: this takes the prompt constructed by the PromptTemplate and returns some output
- Output Parser: this takes the output of the Language Model and parses it into an `AgentAction` or `AgentFinish` object.
- **AgentAction** corresponds to the tool to use and the input to that tool
- **AgentFinish** means the agent is done, and has information around what to return to the user
- **Tools**: these are the actions an agent can take. What tools you give an agent highly depends on what you want the agent to do
- **Toolkits**: these are groups of tools designed for a specific use case. For example, in order for an agent to
  interact with a SQL database in the best way it may need access to one tool to execute queries and another tool to inspect tables.
- **Agent Executor**: this wraps an agent and a list of tools. This is responsible for the loop of running the agent
  iteratively until the stopping criterion is met.
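
As a concrete illustration of this loop, here is a minimal sketch of building and running an Action Agent with the standard helpers (the tool choice is illustrative; the question mirrors the streaming example elsewhere in these docs):

.. code-block:: python

    from langchain.agents import AgentType, initialize_agent, load_tools
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0)
    tools = load_tools(["wikipedia", "llm-math"], llm=llm)  # illustrative tool choice

    # initialize_agent returns an Agent Executor, which runs the decide/act/observe loop
    agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
    agent.run("It's 2023 now. How many years ago did Konrad Adenauer become Chancellor of Germany?")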

- `Getting Started <./agents/getting_started.html>`_: An overview of agents. It covers how to use all things related to agents in an end-to-end manner.


**Agent Construction:**

Although an agent can be constructed in many ways, the typical way to construct an agent is with:

- **PromptTemplate**: this is responsible for taking the user input and previous steps and constructing a prompt
  to send to the language model
- **Language Model**: this takes the prompt constructed by the PromptTemplate and returns some output
- **Output Parser**: this takes the output of the Language Model and parses it into an **AgentAction** or **AgentFinish** object.


**Additional Documentation:**

- `Tools <./agents/tools.html>`_: Different types of **tools** LangChain supports natively. We also cover how to add your own tools.

- `Agents <./agents/agents.html>`_: Different types of **agents** LangChain supports natively. We also cover how to
  modify and create your own agents.

- `Toolkits <./agents/toolkits.html>`_: Various **toolkits** that LangChain supports out of the box, and how to
  create an agent from them.

- `Agent Executor <./agents/agent_executors.html>`_: The **Agent Executor** class, which is responsible for calling
  the agent and tools in a loop. We go over different ways to customize this, and options you can use for more control.


Plan-and-Execute Agents
-----------------------
High level pseudocode of the **Plan-and-Execute Agents**:

- The **user input** is received
- The **planner** lists out the steps to take
- The **executor** goes through the list of steps, executing them

The most typical implementation is to have the planner be a language model, and the executor be an action agent.
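
A minimal sketch of that pairing, using the experimental plan-and-execute helpers (the tool choice is illustrative):

.. code-block:: python

    from langchain.agents import load_tools
    from langchain.chat_models import ChatOpenAI
    from langchain.experimental.plan_and_execute import (
        PlanAndExecute,
        load_agent_executor,
        load_chat_planner,
    )

    model = ChatOpenAI(temperature=0)
    tools = load_tools(["wikipedia", "llm-math"], llm=model)  # illustrative tool choice

    planner = load_chat_planner(model)                          # the planner is a language model
    executor = load_agent_executor(model, tools, verbose=True)  # the executor is an action agent

    agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
    agent.run("It's 2023 now. How many years ago did Konrad Adenauer become Chancellor of Germany?")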

- `Plan-and-Execute Agents <./agents/plan_and_execute.html>`_

In this section of documentation, we first start with a Getting Started notebook to cover how to use all things related to agents in an end-to-end manner.

.. toctree::
   :maxdepth: 1
   :hidden:

   ./agents/getting_started.ipynb


We then split the documentation into the following sections:

**Tools**

In this section we cover the different types of tools LangChain supports natively.
We then cover how to add your own tools.


**Agents**

In this section we cover the different types of agents LangChain supports natively.
We then cover how to modify and create your own agents.


**Toolkits**

In this section we go over the various toolkits that LangChain supports out of the box,
and how to create an agent from them.


**Agent Executor**

In this section we go over the Agent Executor class, which is responsible for calling
the agent and tools in a loop. We go over different ways to customize this, and options you
can use for more control.

**Go Deeper**

.. toctree::
   :maxdepth: 1

   ./agents/tools.rst
   ./agents/agents.rst
   ./agents/toolkits.rst
   ./agents/agent_executors.rst

Plan-and-Execute Agents
-----------------------

High level pseudocode of agents looks something like:

- Some user input is received
- The planner lists out the steps to take
- The executor goes through the list of steps, executing them

The most typical implementation is to have the planner be a language model,
and the executor be an action agent.

**Go Deeper**

.. toctree::
   :maxdepth: 1

   ./agents/plan_and_execute.ipynb

@@ -14,6 +14,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "9b22020a",
"metadata": {},
@@ -139,6 +140,7 @@
"source": []
},
{
"attachments": {},
"cell_type": "markdown",
"id": "c0a6c031",
"metadata": {},
@@ -229,7 +231,7 @@
}
],
"source": [
"agent.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
"agent.run(\"What did biden say about ketanji brown jackson in the state of the union address?\")"
]
},
{
@@ -271,6 +273,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "787a9b5e",
"metadata": {},
@@ -279,6 +282,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "9161ba91",
"metadata": {},
@@ -396,6 +400,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "49a0cbbe",
"metadata": {},
@@ -1,6 +1,7 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "23234b50-e6c6-4c87-9f97-259c15f36894",
"metadata": {
@@ -11,6 +12,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "29dd6333-307c-43df-b848-65001c01733b",
"metadata": {},
@@ -36,6 +38,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "19a813f7",
"metadata": {},
@@ -84,6 +87,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "53a743b8",
"metadata": {},
@@ -92,11 +96,12 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "23602c62",
"metadata": {},
"source": [
"By default, we assume that the token sequence ``\"\\nFinal\", \" Answer\", \":\"`` indicates that the agent has reached an answer. We can, however, also pass a custom sequence to use as the answer prefix."
"By default, we assume that the token sequence ``\"Final\", \"Answer\", \":\"`` indicates that the agent has reached an answer. We can, however, also pass a custom sequence to use as the answer prefix."
]
},
{
@@ -108,26 +113,75 @@
"source": [
"llm = OpenAI(\n",
"    streaming=True,\n",
"    callbacks=[FinalStreamingStdOutCallbackHandler(answer_prefix_tokens=[\"\\nThe\", \" answer\", \":\"])],\n",
"    callbacks=[FinalStreamingStdOutCallbackHandler(answer_prefix_tokens=[\"The\", \"answer\", \":\"])],\n",
"    temperature=0\n",
")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "b1a96cc0",
"metadata": {},
"source": [
"Be aware you likely need to include whitespaces and new line characters in your token. "
"For convenience, the callback automatically strips whitespaces and new line characters when comparing to `answer_prefix_tokens`. I.e., if `answer_prefix_tokens = [\"The\", \" answer\", \":\"]` then both `[\"\\nThe\", \" answer\", \":\"]` and `[\"The\", \" answer\", \":\"]` would be recognized as the answer prefix."
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "9278b522",
"metadata": {},
"source": [
"If you don't know the tokenized version of your answer prefix, you can determine it with the following code:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9278b522",
"id": "2f8f0640",
"metadata": {},
"outputs": [],
"source": []
"source": [
"from langchain.callbacks.base import BaseCallbackHandler\n",
"\n",
"class MyCallbackHandler(BaseCallbackHandler):\n",
"    def on_llm_new_token(self, token, **kwargs) -> None:\n",
"        # print every token on a new line\n",
"        print(f\"#{token}#\")\n",
"\n",
"llm = OpenAI(streaming=True, callbacks=[MyCallbackHandler()])\n",
"tools = load_tools([\"wikipedia\", \"llm-math\"], llm=llm)\n",
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False)\n",
"agent.run(\"It's 2023 now. How many years ago did Konrad Adenauer become Chancellor of Germany?\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "61190e58",
"metadata": {},
"source": [
"### Also streaming the answer prefixes"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "1255776f",
"metadata": {},
"source": [
"When the parameter `stream_prefix = True` is set, the answer prefix itself will also be streamed. This can be useful when the answer prefix itself is part of the answer. For example, when your answer is a JSON object like\n",
"\n",
"`\n",
"{\n",
"    \"action\": \"Final answer\",\n",
"    \"action_input\": \"Konrad Adenauer became Chancellor 74 years ago.\"\n",
"}\n",
"`\n",
"\n",
"and you want not only the action_input but the entire JSON to be streamed."
]
}
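,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A hedged sketch (not part of the original notebook): enabling the `stream_prefix` parameter described above looks like this."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# a minimal sketch, assuming the stream_prefix parameter described above\n",
"llm = OpenAI(\n",
"    streaming=True,\n",
"    callbacks=[FinalStreamingStdOutCallbackHandler(stream_prefix=True)],\n",
"    temperature=0\n",
")"
]
}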
],
"metadata": {
@@ -1,6 +1,7 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "18ada398-dce6-4049-9b56-fc0ede63da9c",
"metadata": {},
@@ -11,6 +12,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "eecb683b-3a46-4b9d-81a3-7caefbfec1a1",
"metadata": {},
@@ -88,6 +90,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "f4814175-964d-42f1-aa9d-22801ce1e912",
"metadata": {},
@@ -123,6 +126,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "8a38ad10",
"metadata": {},
@@ -165,7 +169,7 @@
}
],
"source": [
"agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
"agent_executor.run(\"What did biden say about ketanji brown jackson in the state of the union address?\")"
]
},
{
@@ -203,10 +207,11 @@
}
],
"source": [
"agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address? List the source.\")"
"agent_executor.run(\"What did biden say about ketanji brown jackson in the state of the union address? List the source.\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "7ca07707",
"metadata": {},
@@ -255,6 +260,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "71680984-edaf-4a63-90f5-94edbd263550",
"metadata": {},
@@ -299,7 +305,7 @@
}
],
"source": [
"agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
"agent_executor.run(\"What did biden say about ketanji brown jackson in the state of the union address?\")"
]
},
{
86 docs/modules/agents/tools/examples/pubmed.ipynb Normal file
@@ -0,0 +1,86 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "64f20f38",
"metadata": {},
"source": [
"# PubMed Tool\n",
"\n",
"This notebook goes over how to use PubMed as a tool.\n",
"\n",
"PubMed® comprises more than 35 million citations for biomedical literature from MEDLINE, life science journals, and online books. Citations may include links to full text content from PubMed Central and publisher web sites."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c80b9273",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools import PubmedQueryRun"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "f203c965",
"metadata": {},
"outputs": [],
"source": [
"tool = PubmedQueryRun()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "baee7a2a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Published: <Year>2023</Year><Month>May</Month><Day>31</Day>\\nTitle: Dermatology in the wake of an AI revolution: who gets a say?\\nSummary: \\n\\nPublished: <Year>2023</Year><Month>May</Month><Day>30</Day>\\nTitle: What is ChatGPT and what do we do with it? Implications of the age of AI for nursing and midwifery practice and education: An editorial.\\nSummary: \\n\\nPublished: <Year>2023</Year><Month>Jun</Month><Day>02</Day>\\nTitle: The Impact of ChatGPT on the Nursing Profession: Revolutionizing Patient Care and Education.\\nSummary: The nursing field has undergone notable changes over time and is projected to undergo further modifications in the future, owing to the advent of sophisticated technologies and growing healthcare needs. The advent of ChatGPT, an AI-powered language model, is expected to exert a significant influence on the nursing profession, specifically in the domains of patient care and instruction. The present article delves into the ramifications of ChatGPT within the nursing domain and accentuates its capacity and constraints to transform the discipline.'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.run(\"chatgpt\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "965903ba",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -160,3 +160,9 @@ Below is a list of all supported tools and relevant information:
- Notes: A connection to the OpenWeatherMap API (https://api.openweathermap.org), specifically the `/data/2.5/weather` endpoint.
- Requires LLM: No
- Extra Parameters: `openweathermap_api_key` (your API key to access this endpoint)

**sleep**

- Tool Name: Sleep
- Tool Description: Make agent sleep for some time.
- Requires LLM: No
175 docs/modules/callbacks/filecallbackhandler.ipynb Normal file
@@ -0,0 +1,175 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "63b87b91",
"metadata": {},
"source": [
"# Logging to file\n",
"This example shows how to print logs to a file. It shows how to use the `FileCallbackHandler`, which does the same thing as [`StdOutCallbackHandler`](https://python.langchain.com/en/latest/modules/callbacks/getting_started.html#using-an-existing-handler), but instead writes the output to a file. It also uses the `loguru` library to log other outputs that are not captured by the handler."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "6cb156cc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3m1 + 2 = \u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2023-06-01 18:36:38.929\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36m__main__\u001b[0m:\u001b[36m<module>\u001b[0m:\u001b[36m20\u001b[0m - \u001b[1m\n",
"\n",
"3\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
}
],
"source": [
"from loguru import logger\n",
"\n",
"from langchain.callbacks import FileCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain.llms import OpenAI\n",
"from langchain.prompts import PromptTemplate\n",
"\n",
"logfile = 'output.log'\n",
"\n",
"logger.add(logfile, colorize=True, enqueue=True)\n",
"handler = FileCallbackHandler(logfile)\n",
"\n",
"llm = OpenAI()\n",
"prompt = PromptTemplate.from_template(\"1 + {number} = \")\n",
"\n",
"# this chain will both print to stdout (because verbose=True) and write to 'output.log'\n",
"# if verbose=False, the FileCallbackHandler will still write to 'output.log'\n",
"chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler], verbose=True)\n",
"answer = chain.run(number=2)\n",
"logger.info(answer)"
]
},
{
"cell_type": "markdown",
"id": "9c50d54f",
"metadata": {},
"source": [
"Now we can open the file `output.log` to see that the output has been captured."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "aa32dc0a",
"metadata": {},
"outputs": [],
"source": [
"!pip install ansi2html > /dev/null"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "4af00719",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">\n",
"<html>\n",
"<head>\n",
"<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n",
"<title></title>\n",
"<style type=\"text/css\">\n",
".ansi2html-content { display: inline; white-space: pre-wrap; word-wrap: break-word; }\n",
".body_foreground { color: #AAAAAA; }\n",
".body_background { background-color: #000000; }\n",
".inv_foreground { color: #000000; }\n",
".inv_background { background-color: #AAAAAA; }\n",
".ansi1 { font-weight: bold; }\n",
".ansi3 { font-style: italic; }\n",
".ansi32 { color: #00aa00; }\n",
".ansi36 { color: #00aaaa; }\n",
"</style>\n",
"</head>\n",
"<body class=\"body_foreground body_background\" style=\"font-size: normal;\" >\n",
"<pre class=\"ansi2html-content\">\n",
"\n",
"\n",
"<span class=\"ansi1\">> Entering new LLMChain chain...</span>\n",
"Prompt after formatting:\n",
"<span class=\"ansi1 ansi32\"></span><span class=\"ansi1 ansi3 ansi32\">1 + 2 = </span>\n",
"\n",
"<span class=\"ansi1\">> Finished chain.</span>\n",
"<span class=\"ansi32\">2023-06-01 18:36:38.929</span> | <span class=\"ansi1\">INFO </span> | <span class=\"ansi36\">__main__</span>:<span class=\"ansi36\"><module></span>:<span class=\"ansi36\">20</span> - <span class=\"ansi1\">\n",
"\n",
"3</span>\n",
"\n",
"</pre>\n",
"</body>\n",
"\n",
"</html>\n"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from IPython.display import display, HTML\n",
"from ansi2html import Ansi2HTMLConverter\n",
"\n",
"with open('output.log', 'r') as f:\n",
"    content = f.read()\n",
"\n",
"conv = Ansi2HTMLConverter()\n",
"html = conv.convert(content, full=True)\n",
"\n",
"display(HTML(html))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -6,14 +6,13 @@ Chains


Using an LLM in isolation is fine for some simple applications,
but many more complex ones require chaining LLMs - either with each other or with other experts.
LangChain provides a standard interface for Chains, as well as some common implementations of chains for ease of use.
but more complex applications require chaining LLMs - either with each other or with other experts.
LangChain provides a standard interface for **Chains**, as well as several common implementations of chains.

The following sections of documentation are provided:

- `Getting Started <./chains/getting_started.html>`_: An overview of chains.

- `Getting Started <./chains/getting_started.html>`_: A getting started guide for chains, to get you up and running quickly.

- `How-To Guides <./chains/how_to_guides.html>`_: A collection of how-to guides. These highlight how to use various types of chains.
- `How-To Guides <./chains/how_to_guides.html>`_: How-to guides about various types of chains.

- `Reference <../reference/modules/chains.html>`_: API reference documentation for all Chain classes.

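To make the chain interface concrete before the guides below, here is a minimal sketch of the simplest chain, an `LLMChain` (it assumes an OpenAI API key is available in the environment):

```python
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

# A chain bundles a prompt template with an LLM into one callable unit.
llm = OpenAI(temperature=0)
prompt = PromptTemplate.from_template(
    "What is a good name for a company that makes {product}?"
)
chain = LLMChain(llm=llm, prompt=prompt)

print(chain.run(product="colorful socks"))
```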
@@ -177,7 +177,7 @@
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Tom Cruise'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -185,7 +185,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan played in Top Gun.'"
|
||||
"'Val Kilmer, Anthony Edwards, Meg Ryan, and Tom Cruise played in Top Gun.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
@@ -197,10 +197,180 @@
|
||||
"chain.run(\"Who played in Top Gun?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2d28c4df",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Limit the number of results\n",
|
||||
"You can limit the number of results from the Cypher QA Chain using the `top_k` parameter.\n",
|
||||
"The default is 10."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "df230946",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, top_k=2\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "3f1600ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Val Kilmer and Anthony Edwards played in Top Gun.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(\"Who played in Top Gun?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "88c16206",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Return intermediate results\n",
|
||||
"You can return intermediate steps from the Cypher QA Chain using the `return_intermediate_steps` parameter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "e412f36b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, return_intermediate_steps=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "4f4699dc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Tom Cruise'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"Intermediate steps: [{'query': \"MATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\\nRETURN a.name\"}, {'context': [{'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Tom Cruise'}]}]\n",
|
||||
"Final answer: Val Kilmer, Anthony Edwards, Meg Ryan, and Tom Cruise played in Top Gun.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result = chain(\"Who played in Top Gun?\")\n",
|
||||
"print(f\"Intermediate steps: {result['intermediate_steps']}\")\n",
|
||||
"print(f\"Final answer: {result['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d6e1b054",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Return direct results\n",
|
||||
"You can return direct results from the Cypher QA Chain using the `return_direct` parameter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "2d3acf10",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, return_direct=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "b0a9d143",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'a.name': 'Val Kilmer'},\n",
|
||||
" {'a.name': 'Anthony Edwards'},\n",
|
||||
" {'a.name': 'Meg Ryan'},\n",
|
||||
" {'a.name': 'Tom Cruise'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(\"Who played in Top Gun?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b4825316",
|
||||
"id": "74d0a36f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
@@ -222,7 +392,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.8.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
270
docs/modules/chains/examples/graph_nebula_qa.ipynb
Normal file
@@ -0,0 +1,270 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "c94240f5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# NebulaGraphQAChain\n",
|
||||
"\n",
|
||||
"This notebook shows how to use LLMs to provide a natural language interface to NebulaGraph database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "dbc0ee68",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You will need to have a running NebulaGraph cluster, for which you can run a containerized cluster by running the following script:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"curl -fsSL nebula-up.siwei.io/install.sh | bash\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Other options are:\n",
|
||||
"- Install as a [Docker Desktop Extension](https://www.docker.com/blog/distributed-cloud-native-graph-database-nebulagraph-docker-extension/). See [here](https://docs.nebula-graph.io/3.5.0/2.quick-start/1.quick-start-workflow/)\n",
|
||||
"- NebulaGraph Cloud Service. See [here](https://www.nebula-graph.io/cloud)\n",
|
||||
"- Deploy from package, source code, or via Kubernetes. See [here](https://docs.nebula-graph.io/)\n",
|
||||
"\n",
|
||||
"Once the cluster is running, we could create the SPACE and SCHEMA for the database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c82f4141",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install ipython-ngql\n",
|
||||
"%load_ext ngql\n",
|
||||
"\n",
|
||||
"# connect ngql jupyter extension to nebulagraph\n",
|
||||
"%ngql --address 127.0.0.1 --port 9669 --user root --password nebula\n",
|
||||
"# create a new space\n",
|
||||
"%ngql CREATE SPACE IF NOT EXISTS langchain(partition_num=1, replica_factor=1, vid_type=fixed_string(128));\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "eda0809a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Wait for a few seconds for the space to be created.\n",
|
||||
"%ngql USE langchain;"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "119fe35c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create the schema, for full dataset, refer [here](https://www.siwei.io/en/nebulagraph-etl-dbt/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5aa796ee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%ngql\n",
|
||||
"CREATE TAG IF NOT EXISTS movie(name string);\n",
|
||||
"CREATE TAG IF NOT EXISTS person(name string, birthdate string);\n",
|
||||
"CREATE EDGE IF NOT EXISTS acted_in();\n",
|
||||
"CREATE TAG INDEX IF NOT EXISTS person_index ON person(name(128));\n",
|
||||
"CREATE TAG INDEX IF NOT EXISTS movie_index ON movie(name(128));"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "66e4799a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Wait for schema creation to complete, then we can insert some data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "d8eea530",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"UsageError: Cell magic `%%ngql` not found.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%ngql\n",
|
||||
"INSERT VERTEX person(name, birthdate) VALUES \"Al Pacino\":(\"Al Pacino\", \"1940-04-25\");\n",
|
||||
"INSERT VERTEX movie(name) VALUES \"The Godfather II\":(\"The Godfather II\");\n",
|
||||
"INSERT VERTEX movie(name) VALUES \"The Godfather Coda: The Death of Michael Corleone\":(\"The Godfather Coda: The Death of Michael Corleone\");\n",
|
||||
"INSERT EDGE acted_in() VALUES \"Al Pacino\"->\"The Godfather II\":();\n",
|
||||
"INSERT EDGE acted_in() VALUES \"Al Pacino\"->\"The Godfather Coda: The Death of Michael Corleone\":();"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "62812aad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import NebulaGraphQAChain\n",
|
||||
"from langchain.graphs import NebulaGraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "0928915d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"graph = NebulaGraph(\n",
|
||||
" space=\"langchain\",\n",
|
||||
" username=\"root\",\n",
|
||||
" password=\"nebula\",\n",
|
||||
" address=\"127.0.0.1\",\n",
|
||||
" port=9669,\n",
|
||||
" session_pool_size=30,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "58c1a8ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Refresh graph schema information\n",
|
||||
"\n",
|
||||
"If the schema of database changes, you can refresh the schema information needed to generate nGQL statements."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4e3de44f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# graph.refresh_schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "1fe76ccd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Node properties: [{'tag': 'movie', 'properties': [('name', 'string')]}, {'tag': 'person', 'properties': [('name', 'string'), ('birthdate', 'string')]}]\n",
|
||||
"Edge properties: [{'edge': 'acted_in', 'properties': []}]\n",
|
||||
"Relationships: ['(:person)-[:acted_in]->(:movie)']\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(graph.get_schema)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "68a3c677",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Querying the graph\n",
|
||||
"\n",
|
||||
"We can now use the graph cypher QA chain to ask question of the graph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "7476ce98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = NebulaGraphQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True\n",
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "ef8ee27b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new NebulaGraphQAChain chain...\u001b[0m\n",
|
||||
"Generated nGQL:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (p:`person`)-[:acted_in]->(m:`movie`) WHERE m.`movie`.`name` == 'The Godfather II'\n",
|
||||
"RETURN p.`person`.`name`\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m{'p.person.name': ['Al Pacino']}\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Al Pacino played in The Godfather II.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(\"Who played in The Godfather II?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -21,7 +21,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"id": "e9db25f3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -318,6 +318,141 @@
|
||||
"chain({\"input_documents\": docs}, return_only_outputs=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "b882e209",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## The custom `MapReduceChain`\n",
|
||||
"\n",
|
||||
"**Multi input prompt**\n",
|
||||
"\n",
|
||||
"You can also use prompt with multi input. In this example, we will use a MapReduce chain to answer specifc question about our code."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "f7ad9ee2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n",
|
||||
"from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n",
|
||||
"\n",
|
||||
"map_template_string = \"\"\"Give the following python code information, generate a description that explains what the code does and also mention the time complexity.\n",
|
||||
"Code:\n",
|
||||
"{code}\n",
|
||||
"\n",
|
||||
"Return the the description in the following format:\n",
|
||||
"name of the function: description of the function\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"reduce_template_string = \"\"\"Give the following following python fuctions name and their descritpion, answer the following question\n",
|
||||
"{code_description}\n",
|
||||
"Question: {question}\n",
|
||||
"Answer:\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"MAP_PROMPT = PromptTemplate(input_variables=[\"code\"], template=map_template_string)\n",
|
||||
"REDUCE_PROMPT = PromptTemplate(input_variables=[\"code_description\", \"question\"], template=reduce_template_string)\n",
|
||||
"\n",
|
||||
"llm = OpenAI()\n",
|
||||
"\n",
|
||||
"map_llm_chain = LLMChain(llm=llm, prompt=MAP_PROMPT)\n",
|
||||
"reduce_llm_chain = LLMChain(llm=llm, prompt=REDUCE_PROMPT)\n",
|
||||
"\n",
|
||||
"generative_result_reduce_chain = StuffDocumentsChain(\n",
|
||||
" llm_chain=reduce_llm_chain,\n",
|
||||
" document_variable_name=\"code_description\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"combine_documents = MapReduceDocumentsChain(\n",
|
||||
" llm_chain=map_llm_chain,\n",
|
||||
" combine_document_chain=generative_result_reduce_chain,\n",
|
||||
" document_variable_name=\"code\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"map_reduce = MapReduceChain(\n",
|
||||
" combine_documents_chain=combine_documents,\n",
|
||||
" text_splitter=CharacterTextSplitter(separator=\"\\n##\\n\", chunk_size=100, chunk_overlap=0),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "0d4caccb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"code = \"\"\"\n",
|
||||
"def bubblesort(list):\n",
|
||||
" for iter_num in range(len(list)-1,0,-1):\n",
|
||||
" for idx in range(iter_num):\n",
|
||||
" if list[idx]>list[idx+1]:\n",
|
||||
" temp = list[idx]\n",
|
||||
" list[idx] = list[idx+1]\n",
|
||||
" list[idx+1] = temp\n",
|
||||
" return list\n",
|
||||
"##\n",
|
||||
"def insertion_sort(InputList):\n",
|
||||
" for i in range(1, len(InputList)):\n",
|
||||
" j = i-1\n",
|
||||
" nxt_element = InputList[i]\n",
|
||||
" while (InputList[j] > nxt_element) and (j >= 0):\n",
|
||||
" InputList[j+1] = InputList[j]\n",
|
||||
" j=j-1\n",
|
||||
" InputList[j+1] = nxt_element\n",
|
||||
" return InputList\n",
|
||||
"##\n",
|
||||
"def shellSort(input_list):\n",
|
||||
" gap = len(input_list) // 2\n",
|
||||
" while gap > 0:\n",
|
||||
" for i in range(gap, len(input_list)):\n",
|
||||
" temp = input_list[i]\n",
|
||||
" j = i\n",
|
||||
" while j >= gap and input_list[j - gap] > temp:\n",
|
||||
" input_list[j] = input_list[j - gap]\n",
|
||||
" j = j-gap\n",
|
||||
" input_list[j] = temp\n",
|
||||
" gap = gap//2\n",
|
||||
" return input_list\n",
|
||||
"\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "d5a9a35b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Created a chunk of size 247, which is longer than the specified 100\n",
|
||||
"Created a chunk of size 267, which is longer than the specified 100\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'shellSort has a better time complexity than both bubblesort and insertion_sort, as it has a time complexity of O(n^2), while the other two have a time complexity of O(n^2).'"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"map_reduce.run(input_text=code, question=\"Which function has a better time complexity?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f61350f9",
|
||||
@@ -470,7 +605,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.8.16"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -5,53 +5,41 @@ Indexes

`Conceptual Guide <https://docs.langchain.com/docs/components/indexing>`_


Indexes refer to ways to structure documents so that LLMs can best interact with them.
This module contains utility functions for working with documents, different types of indexes, and then examples for using those indexes in chains.
**Indexes** refer to ways to structure documents so that LLMs can best interact with them.

The most common way that indexes are used in chains is in a "retrieval" step.
This step refers to taking a user's query and returning the most relevant documents.
We draw this distinction because (1) an index can be used for other things besides retrieval, and (2) retrieval can use other logic besides an index to find relevant documents.
We therefore have a concept of a "Retriever" interface - this is the interface that most chains work with.
We draw this distinction because (1) an index can be used for other things besides retrieval, and
(2) retrieval can use other logic besides an index to find relevant documents.
We therefore have a concept of a **Retriever** interface - this is the interface that most chains work with.

Most of the time when we talk about indexes and retrieval we are talking about indexing and retrieving
unstructured data (like text documents).
For interacting with structured data (SQL tables, etc) or APIs, please see the corresponding use case
sections for links to relevant functionality.


- `Getting Started <./indexes/getting_started.html>`_: An overview of the indexes.


Index Types
---------------------

- `Document Loaders <./indexes/document_loaders.html>`_: How to load documents from a variety of sources.

- `Text Splitters <./indexes/text_splitters.html>`_: An overview and different types of the **Text Splitters**.

- `VectorStores <./indexes/vectorstores.html>`_: An overview and different types of the **Vector Stores**.

- `Retrievers <./indexes/retrievers.html>`_: An overview and different types of the **Retrievers**.

Most of the time when we talk about indexes and retrieval we are talking about indexing and retrieving unstructured data (like text documents).
For interacting with structured data (SQL tables, etc) or APIs, please see the corresponding use case sections for links to relevant functionality.
The primary index and retrieval types supported by LangChain are currently centered around vector databases, and therefore
much of the functionality dives deep into those topics.

For an overview of everything related to this, please see the notebook below for getting started:

.. toctree::
   :maxdepth: 1
   :hidden:

   ./indexes/getting_started.ipynb

We then provide a deep dive on the four main components.

**Document Loaders**

How to load documents from a variety of sources.

**Text Splitters**

An overview of the abstractions and implementations around splitting text.


**VectorStores**

An overview of VectorStores and the many integrations LangChain provides.


**Retrievers**

An overview of Retrievers and the implementations LangChain provides.

Go Deeper
---------


.. toctree::
   :maxdepth: 1

   ./indexes/document_loaders.rst
   ./indexes/text_splitters.rst
   ./indexes/vectorstores.rst

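To make the **Retriever** interface concrete, here is a minimal sketch (it assumes FAISS is installed and an OpenAI API key is set; any VectorStore exposes the same `as_retriever` entry point):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Index a couple of texts, then query them through the Retriever interface.
vectorstore = FAISS.from_texts(
    ["LangChain indexes structure documents for LLMs.",
     "Retrievers return the documents most relevant to a query."],
    OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()
docs = retriever.get_relevant_documents("What do retrievers do?")
print(docs[0].page_content)
```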
@@ -30,12 +30,15 @@ For detailed instructions on how to get set up with Unstructured, see installati
|
||||
:maxdepth: 1
|
||||
:glob:
|
||||
|
||||
./document_loaders/examples/airtable.ipynb
|
||||
./document_loaders/examples/audio.ipynb
|
||||
./document_loaders/examples/conll-u.ipynb
|
||||
./document_loaders/examples/copypaste.ipynb
|
||||
./document_loaders/examples/csv.ipynb
|
||||
./document_loaders/examples/email.ipynb
|
||||
./document_loaders/examples/epub.ipynb
|
||||
./document_loaders/examples/evernote.ipynb
|
||||
./document_loaders/examples/excel.ipynb
|
||||
./document_loaders/examples/facebook_chat.ipynb
|
||||
./document_loaders/examples/file_directory.ipynb
|
||||
./document_loaders/examples/html.ipynb
|
||||
@@ -114,6 +117,7 @@ We need access tokens and sometime other parameters to get access to these datas
|
||||
./document_loaders/examples/discord_loader.ipynb
|
||||
./document_loaders/examples/docugami.ipynb
|
||||
./document_loaders/examples/duckdb.ipynb
|
||||
./document_loaders/examples/fauna.ipynb
|
||||
./document_loaders/examples/figma.ipynb
|
||||
./document_loaders/examples/gitbook.ipynb
|
||||
./document_loaders/examples/git.ipynb
|
||||
@@ -135,6 +139,7 @@ We need access tokens and sometime other parameters to get access to these datas
|
||||
./document_loaders/examples/reddit.ipynb
|
||||
./document_loaders/examples/roam.ipynb
|
||||
./document_loaders/examples/slack.ipynb
|
||||
./document_loaders/examples/snowflake.ipynb
|
||||
./document_loaders/examples/spreedly.ipynb
|
||||
./document_loaders/examples/stripe.ipynb
|
||||
./document_loaders/examples/tomarkdown.ipynb
|
||||
|
||||
142
docs/modules/indexes/document_loaders/examples/airtable.ipynb
Normal file
@@ -0,0 +1,142 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7ae421e6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airtable"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "98aea00d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install pyairtable"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "592483eb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import AirtableLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "637e1205",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"* Get your API key [here](https://support.airtable.com/docs/creating-and-using-api-keys-and-access-tokens).\n",
|
||||
"* Get ID of your base [here](https://airtable.com/developers/web/api/introduction).\n",
|
||||
"* Get your table ID from the table url as shown [here](https://www.highviewapps.com/kb/where-can-i-find-the-airtable-base-id-and-table-id/#:~:text=Both%20the%20Airtable%20Base%20ID,URL%20that%20begins%20with%20tbl)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c12a7aff",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"api_key=\"xxx\"\n",
|
||||
"base_id=\"xxx\"\n",
|
||||
"table_id=\"xxx\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "ccddd5a6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = AirtableLoader(api_key,table_id,base_id)\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae76c25c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Returns each table row as `dict`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "7abec7ce",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"3"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "403c95da",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'id': 'recF3GbGZCuh9sXIQ',\n",
|
||||
" 'createdTime': '2023-06-09T04:47:21.000Z',\n",
|
||||
" 'fields': {'Priority': 'High',\n",
|
||||
" 'Status': 'In progress',\n",
|
||||
" 'Name': 'Document Splitters'}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"eval(docs[0].page_content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
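A side note on the `eval(docs[0].page_content)` call above: since `page_content` is simply the string form of the record `dict`, `ast.literal_eval` is a safer way to parse it back (a sketch, assuming the content remains a plain Python literal):

```python
import ast

# literal_eval parses Python literals only and never executes code.
record = ast.literal_eval(docs[0].page_content)
print(record["fields"]["Name"])
```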
97
docs/modules/indexes/document_loaders/examples/audio.ipynb
Normal file
File diff suppressed because one or more lines are too long
@@ -1,15 +1,18 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"source": [
|
||||
"# Confluence\n",
|
||||
"\n",
|
||||
">[Confluence](https://www.atlassian.com/software/confluence) is a wiki collaboration platform that saves and organizes all of the project-related material. `Confluence` is a knowledge base that primarily handles content management activities. \n",
|
||||
"\n",
|
||||
"A loader for `Confluence` pages currently supports both `username/api_key` and `Oauth2 login`.\n",
|
||||
"See [instructions](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/).\n",
|
||||
"A loader for `Confluence` pages.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This currently supports `username/api_key`, `Oauth2 login`. Additionally, on-prem installations also support `token` authentication. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Specify a list `page_id`-s and/or `space_key` to load in the corresponding pages into Document objects, if both are specified the union of both sets will be returned.\n",
|
||||
@@ -20,9 +23,17 @@
|
||||
"Hint: `space_key` and `page_id` can both be found in the URL of a page in Confluence - https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Before using ConfluenceLoader make sure you have the latest version of the atlassian-python-api package installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
@@ -31,6 +42,29 @@
|
||||
"#!pip install atlassian-python-api"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Examples"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Username and Password or Username and API Token (Atlassian Cloud only)\n",
|
||||
"\n",
|
||||
"This example authenticates using either a username and password or, if you're connecting to an Atlassian Cloud hosted version of Confluence, a username and an API Token.\n",
|
||||
"You can generate an API token at: https://id.atlassian.com/manage-profile/security/api-tokens.\n",
|
||||
"\n",
|
||||
"The `limit` parameter specifies how many documents will be retrieved in a single call, not how many documents will be retrieved in total.\n",
|
||||
"By default the code will return up to 1000 documents in 50 documents batches. To control the total number of documents use the `max_pages` parameter. \n",
|
||||
"Plese note the maximum value for the `limit` parameter in the atlassian-python-api package is currently 100. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -46,6 +80,34 @@
|
||||
")\n",
|
||||
"documents = loader.load(space_key=\"SPACE\", include_attachments=True, limit=50)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Personal Access Token (Server/On-Prem only)\n",
|
||||
"\n",
|
||||
"This method is valid for the Data Center/Server on-prem edition only.\n",
|
||||
"For more information on how to generate a Personal Access Token (PAT) check the official Confluence documentation at: https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html.\n",
|
||||
"When using a PAT you provide only the token value, you cannot provide a username. \n",
|
||||
"Please note that ConfluenceLoader will run under the permissions of the user that generated the PAT and will only be able to load documents for which said user has access to. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import ConfluenceLoader\n",
|
||||
"\n",
|
||||
"loader = ConfluenceLoader(\n",
|
||||
" url=\"https://yoursite.atlassian.com/wiki\",\n",
|
||||
" token=\"12345\"\n",
|
||||
")\n",
|
||||
"documents = loader.load(space_key=\"SPACE\", include_attachments=True, limit=50, max_pages=50)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -64,7 +126,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.13"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -29,7 +29,6 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
@@ -45,7 +44,6 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
@@ -76,7 +74,6 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
@@ -96,7 +93,6 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
@@ -152,6 +148,211 @@
|
||||
"source": [
|
||||
"print(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `UnstructuredCSVLoader`\n",
|
||||
"\n",
|
||||
"You can also load the table using the `UnstructuredCSVLoader`. One advantage of using `UnstructuredCSVLoader` is that if you use it in `\"elements\"` mode, an HTML representation of the table will be available in the metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.csv_loader import UnstructuredCSVLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredCSVLoader(file_path='example_data/mlb_teams_2012.csv', mode=\"elements\")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <td>Nationals</td>\n",
|
||||
" <td>81.34</td>\n",
|
||||
" <td>98</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Reds</td>\n",
|
||||
" <td>82.20</td>\n",
|
||||
" <td>97</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Yankees</td>\n",
|
||||
" <td>197.96</td>\n",
|
||||
" <td>95</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Giants</td>\n",
|
||||
" <td>117.62</td>\n",
|
||||
" <td>94</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Braves</td>\n",
|
||||
" <td>83.31</td>\n",
|
||||
" <td>94</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Athletics</td>\n",
|
||||
" <td>55.37</td>\n",
|
||||
" <td>94</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Rangers</td>\n",
|
||||
" <td>120.51</td>\n",
|
||||
" <td>93</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Orioles</td>\n",
|
||||
" <td>81.43</td>\n",
|
||||
" <td>93</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Rays</td>\n",
|
||||
" <td>64.17</td>\n",
|
||||
" <td>90</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Angels</td>\n",
|
||||
" <td>154.49</td>\n",
|
||||
" <td>89</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Tigers</td>\n",
|
||||
" <td>132.30</td>\n",
|
||||
" <td>88</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Cardinals</td>\n",
|
||||
" <td>110.30</td>\n",
|
||||
" <td>88</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Dodgers</td>\n",
|
||||
" <td>95.14</td>\n",
|
||||
" <td>86</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>White Sox</td>\n",
|
||||
" <td>96.92</td>\n",
|
||||
" <td>85</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Brewers</td>\n",
|
||||
" <td>97.65</td>\n",
|
||||
" <td>83</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Phillies</td>\n",
|
||||
" <td>174.54</td>\n",
|
||||
" <td>81</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Diamondbacks</td>\n",
|
||||
" <td>74.28</td>\n",
|
||||
" <td>81</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Pirates</td>\n",
|
||||
" <td>63.43</td>\n",
|
||||
" <td>79</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Padres</td>\n",
|
||||
" <td>55.24</td>\n",
|
||||
" <td>76</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Mariners</td>\n",
|
||||
" <td>81.97</td>\n",
|
||||
" <td>75</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Mets</td>\n",
|
||||
" <td>93.35</td>\n",
|
||||
" <td>74</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Blue Jays</td>\n",
|
||||
" <td>75.48</td>\n",
|
||||
" <td>73</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Royals</td>\n",
|
||||
" <td>60.91</td>\n",
|
||||
" <td>72</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Marlins</td>\n",
|
||||
" <td>118.07</td>\n",
|
||||
" <td>69</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Red Sox</td>\n",
|
||||
" <td>173.18</td>\n",
|
||||
" <td>69</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Indians</td>\n",
|
||||
" <td>78.43</td>\n",
|
||||
" <td>68</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Twins</td>\n",
|
||||
" <td>94.08</td>\n",
|
||||
" <td>66</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Rockies</td>\n",
|
||||
" <td>78.06</td>\n",
|
||||
" <td>64</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Cubs</td>\n",
|
||||
" <td>88.19</td>\n",
|
||||
" <td>61</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <td>Astros</td>\n",
|
||||
" <td>60.65</td>\n",
|
||||
" <td>55</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].metadata[\"text_as_html\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -170,7 +371,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.8.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<factbook>
|
||||
<country>
|
||||
<name>United States</name>
|
||||
<capital>Washington, DC</capital>
|
||||
<leader>Joe Biden</leader>
|
||||
<sport>Baseball</sport>
|
||||
</country>
|
||||
<country>
|
||||
<name>Canada</name>
|
||||
<capital>Ottawa</capital>
|
||||
<leader>Justin Trudeau</leader>
|
||||
<sport>Hockey</sport>
|
||||
</country>
|
||||
<country>
|
||||
<name>France</name>
|
||||
<capital>Paris</capital>
|
||||
<leader>Emmanuel Macron</leader>
|
||||
<sport>Soccer</sport>
|
||||
</country>
|
||||
<country>
|
||||
<name>Trinidad & Tobado</name>
|
||||
<capital>Port of Spain</capital>
|
||||
<leader>Keith Rowley</leader>
|
||||
<sport>Track & Field</sport>
|
||||
</country>
|
||||
</factbook>
|
||||
Binary file not shown.
79
docs/modules/indexes/document_loaders/examples/excel.ipynb
Normal file
@@ -0,0 +1,79 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "22a849cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Microsoft Excel\n",
|
||||
"\n",
|
||||
"The `UnstructuredExcelLoader` is used to load `Microsoft Excel` files. The loader works with both `.xlsx` and `.xls` files. The page content will be the raw text of the Excel file. If you use the loader in `\"elements\"` mode, an HTML representation of the Excel file will be available in the document metadata under the `text_as_html` key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "e6616e3a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import UnstructuredExcelLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a654e4d9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='\\n \\n \\n Team\\n Location\\n Stanley Cups\\n \\n \\n Blues\\n STL\\n 1\\n \\n \\n Flyers\\n PHI\\n 2\\n \\n \\n Maple Leafs\\n TOR\\n 13\\n \\n \\n', metadata={'source': 'example_data/stanley-cups.xlsx', 'filename': 'stanley-cups.xlsx', 'file_directory': 'example_data', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'page_number': 1, 'page_name': 'Stanley Cups', 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n <tbody>\\n <tr>\\n <td>Team</td>\\n <td>Location</td>\\n <td>Stanley Cups</td>\\n </tr>\\n <tr>\\n <td>Blues</td>\\n <td>STL</td>\\n <td>1</td>\\n </tr>\\n <tr>\\n <td>Flyers</td>\\n <td>PHI</td>\\n <td>2</td>\\n </tr>\\n <tr>\\n <td>Maple Leafs</td>\\n <td>TOR</td>\\n <td>13</td>\\n </tr>\\n </tbody>\\n</table>', 'category': 'Table'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = UnstructuredExcelLoader(\n",
|
||||
" \"example_data/stanley-cups.xlsx\",\n",
|
||||
" mode=\"elements\"\n",
|
||||
")\n",
|
||||
"docs = loader.load()\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
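Since the HTML rendering of the sheet lands in `docs[0].metadata["text_as_html"]` when `mode="elements"` is used, you can also display it inline — a small sketch reusing IPython's display utilities:

```python
from IPython.display import HTML, display

# Render the table extracted above; assumes the loader ran in "elements" mode.
display(HTML(docs[0].metadata["text_as_html"]))
```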
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9ab94bde",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
84
docs/modules/indexes/document_loaders/examples/fauna.ipynb
Normal file
@@ -0,0 +1,84 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Fauna\n",
|
||||
"\n",
|
||||
">[Fauna](https://fauna.com/) is a Document Database.\n",
|
||||
"\n",
|
||||
"Query `Fauna` documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install fauna"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query data example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.fauna import FaunaLoader\n",
|
||||
"\n",
|
||||
"secret = \"<enter-valid-fauna-secret>\"\n",
|
||||
"query = \"Item.all()\" # Fauna query. Assumes that the collection is called \"Item\"\n",
|
||||
"field = \"text\" # The field that contains the page content. Assumes that the field is called \"text\"\n",
|
||||
"\n",
|
||||
"loader = FaunaLoader(query, field, secret)\n",
|
||||
"docs = loader.lazy_load()\n",
|
||||
"\n",
|
||||
"for value in docs:\n",
|
||||
" print(value)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Query with Pagination\n",
|
||||
"You get a `after` value if there are more data. You can get values after the curcor by passing in the `after` string in query. \n",
|
||||
"\n",
|
||||
"To learn more following [this link](https://fqlx-beta--fauna-docs.netlify.app/fqlx/beta/reference/schema_entities/set/static-paginate)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"\"\"\n",
|
||||
"Item.paginate(\"hs+DzoPOg ... aY1hOohozrV7A\")\n",
|
||||
"Item.all()\n",
|
||||
"\"\"\"\n",
|
||||
"loader = FaunaLoader(query, field, secret)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -22,6 +22,16 @@
|
||||
"Load .docx using `Docx2txt` into a document."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "7b80ea891",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install docx2txt "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
|
||||
@@ -146,6 +146,73 @@
|
||||
"documents[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Add custom scraping rules\n",
|
||||
"\n",
|
||||
"The `SitemapLoader` uses `beautifulsoup4` for the scraping process, and it scrapes every element on the page by default. The `SitemapLoader` constructor accepts a custom scraping function. This feature can be helpful to tailor the scraping process to your specific needs; for example, you might want to avoid scraping headers or navigation elements.\n",
|
||||
"\n",
|
||||
" The following example shows how to develop and use a custom function to avoid navigation and header elements."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Import the `beautifulsoup4` library and define the custom function."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install beautifulsoup4"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"\n",
|
||||
"def remove_nav_and_header_elements(content: BeautifulSoup) -> str:\n",
|
||||
" # Find all 'nav' and 'header' elements in the BeautifulSoup object\n",
|
||||
" nav_elements = content.find_all('nav')\n",
|
||||
" header_elements = content.find_all('header')\n",
|
||||
"\n",
|
||||
" # Remove each 'nav' and 'header' element from the BeautifulSoup object\n",
|
||||
" for element in nav_elements + header_elements:\n",
|
||||
" element.decompose()\n",
|
||||
"\n",
|
||||
" return str(content.get_text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Add your custom function to the `SitemapLoader` object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = SitemapLoader(\n",
|
||||
" \"https://langchain.readthedocs.io/sitemap.xml\",\n",
|
||||
" filter_urls=[\"https://python.langchain.com/en/latest/\"],\n",
|
||||
" parsing_function=remove_nav_and_header_elements\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"source": [
|
||||
"# Optionally set your Slack URL. This will give you proper URLs in the docs sources.\n",
|
||||
"SLACK_WORKSPACE_URL = \"https://xxx.slack.com\"\n",
|
||||
"LOCAL_ZIPFILE = \"\" # Paste the local paty to your Slack zip file here.\n",
|
||||
"LOCAL_ZIPFILE = \"\" # Paste the local path to your Slack zip file here.\n",
|
||||
"\n",
|
||||
"loader = SlackDirectoryLoader(LOCAL_ZIPFILE, SLACK_WORKSPACE_URL)"
|
||||
]
|
||||
|
||||
@@ -0,0 +1,98 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Snowflake\n",
|
||||
"\n",
|
||||
"This notebooks goes over how to load documents from Snowflake"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install snowflake-connector-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import settings as s\n",
|
||||
"from langchain.document_loaders import SnowflakeLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"QUERY = \"select text, survey_id from CLOUD_DATA_SOLUTIONS.HAPPY_OR_NOT.OPEN_FEEDBACK limit 10\"\n",
|
||||
"snowflake_loader = SnowflakeLoader(\n",
|
||||
" query=QUERY,\n",
|
||||
" user=s.SNOWFLAKE_USER,\n",
|
||||
" password=s.SNOWFLAKE_PASS,\n",
|
||||
" account=s.SNOWFLAKE_ACCOUNT,\n",
|
||||
" warehouse=s.SNOWFLAKE_WAREHOUSE,\n",
|
||||
" role=s.SNOWFLAKE_ROLE,\n",
|
||||
" database=s.SNOWFLAKE_DATABASE,\n",
|
||||
" schema=s.SNOWFLAKE_SCHEMA\n",
|
||||
")\n",
|
||||
"snowflake_documents = snowflake_loader.load()\n",
|
||||
"print(snowflake_documents)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from snowflakeLoader import SnowflakeLoader\n",
|
||||
"import settings as s\n",
|
||||
"QUERY = \"select text, survey_id as source from CLOUD_DATA_SOLUTIONS.HAPPY_OR_NOT.OPEN_FEEDBACK limit 10\"\n",
|
||||
"snowflake_loader = SnowflakeLoader(\n",
|
||||
" query=QUERY,\n",
|
||||
" user=s.SNOWFLAKE_USER,\n",
|
||||
" password=s.SNOWFLAKE_PASS,\n",
|
||||
" account=s.SNOWFLAKE_ACCOUNT,\n",
|
||||
" warehouse=s.SNOWFLAKE_WAREHOUSE,\n",
|
||||
" role=s.SNOWFLAKE_ROLE,\n",
|
||||
" database=s.SNOWFLAKE_DATABASE,\n",
|
||||
" schema=s.SNOWFLAKE_SCHEMA,\n",
|
||||
" metadata_columns=['source']\n",
|
||||
")\n",
|
||||
"snowflake_documents = snowflake_loader.load()\n",
|
||||
"print(snowflake_documents)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
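The cells above import a local `settings` module for credentials. A minimal sketch of such a module, to make the example self-contained (the attribute names simply mirror the ones used above; the values are placeholders, not part of the loader API):

# settings.py (hypothetical) -- keeps Snowflake credentials out of the notebook
SNOWFLAKE_USER = "your_user"
SNOWFLAKE_PASS = "your_password"
SNOWFLAKE_ACCOUNT = "your_account_identifier"
SNOWFLAKE_WAREHOUSE = "your_warehouse"
SNOWFLAKE_ROLE = "your_role"
SNOWFLAKE_DATABASE = "your_database"
SNOWFLAKE_SCHEMA = "your_schema"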
|
||||
@@ -54,7 +54,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "3e64cac2",
|
||||
"metadata": {},
|
||||
@@ -117,7 +116,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.13"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -171,7 +171,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.10.6"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### WhatsApp Chat\n",
|
||||
"# WhatsApp Chat\n",
|
||||
"\n",
|
||||
">[WhatsApp](https://www.whatsapp.com/) (also called `WhatsApp Messenger`) is a freeware, cross-platform, centralized instant messaging (IM) and voice-over-IP (VoIP) service. It allows users to send text and voice messages, make voice and video calls, and share images, documents, user locations, and other content.\n",
|
||||
"\n",
|
||||
|
||||
78 docs/modules/indexes/document_loaders/examples/xml.ipynb Normal file
@@ -0,0 +1,78 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "22a849cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# XML\n",
|
||||
"\n",
|
||||
"The `UnstructuredXMLLoader` is used to load `XML` files. The loader works with `.xml` files. The page content will be the text extracted from the XML tags."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "e6616e3a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import UnstructuredXMLLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a654e4d9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='United States\\n\\nWashington, DC\\n\\nJoe Biden\\n\\nBaseball\\n\\nCanada\\n\\nOttawa\\n\\nJustin Trudeau\\n\\nHockey\\n\\nFrance\\n\\nParis\\n\\nEmmanuel Macron\\n\\nSoccer\\n\\nTrinidad & Tobado\\n\\nPort of Spain\\n\\nKeith Rowley\\n\\nTrack & Field', metadata={'source': 'example_data/factbook.xml'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = UnstructuredXMLLoader(\n",
|
||||
" \"example_data/factbook.xml\",\n",
|
||||
")\n",
|
||||
"docs = loader.load()\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a54342bb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
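For context, a minimal sketch of the kind of file the loader consumes (a hypothetical stand-in for `example_data/factbook.xml`, whose real contents are not shown in the notebook; requires the `unstructured` package):

# Create a tiny XML file and load it (sketch; file name is arbitrary)
xml = "<countries><country><name>Canada</name><capital>Ottawa</capital></country></countries>"
with open("factbook_demo.xml", "w") as f:
    f.write(xml)

from langchain.document_loaders import UnstructuredXMLLoader
docs = UnstructuredXMLLoader("factbook_demo.xml").load()
print(docs[0].page_content)  # text extracted from the tags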
|
||||
@@ -0,0 +1,296 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e48afb8d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Loading documents from a YouTube url\n",
|
||||
"\n",
|
||||
"Building chat or QA applications on YouTube videos is a topic of high interest.\n",
|
||||
"\n",
|
||||
"Below we show how to easily go from a YouTube url to text to chat!\n",
|
||||
"\n",
|
||||
"We wil use the `OpenAIWhisperParser`, which will use the OpenAI Whisper API to transcribe audio to text.\n",
|
||||
"\n",
|
||||
"Note: You will need to have an `OPENAI_API_KEY` supplied."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "5f34e934",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.generic import GenericLoader\n",
|
||||
"from langchain.document_loaders.parsers import OpenAIWhisperParser\n",
|
||||
"from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "85fc12bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will use `yt_dlp` to download audio for YouTube urls.\n",
|
||||
"\n",
|
||||
"We will use `pydub` to split downloaded audio files (such that we adhere to Whisper API's 25MB file size limit)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fb5a6606",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install yt_dlp\n",
|
||||
"! pip install pydub"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b0e119f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### YouTube url to text\n",
|
||||
"\n",
|
||||
"Use `YoutubeAudioLoader` to fetch / download the audio files.\n",
|
||||
"\n",
|
||||
"Then, ues `OpenAIWhisperParser()` to transcribe them to text.\n",
|
||||
"\n",
|
||||
"Let's take the first lecture of Andrej Karpathy's YouTube course as an example! "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "23e1e134",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[youtube] Extracting URL: https://youtu.be/kCc8FmEb1nY\n",
|
||||
"[youtube] kCc8FmEb1nY: Downloading webpage\n",
|
||||
"[youtube] kCc8FmEb1nY: Downloading android player API JSON\n",
|
||||
"[info] kCc8FmEb1nY: Downloading 1 format(s): 140\n",
|
||||
"[dashsegments] Total fragments: 11\n",
|
||||
"[download] Destination: /Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/Let's build GPT: from scratch, in code, spelled out..m4a\n",
|
||||
"[download] 100% of 107.73MiB in 00:00:18 at 5.92MiB/s \n",
|
||||
"[FixupM4a] Correcting container of \"/Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/Let's build GPT: from scratch, in code, spelled out..m4a\"\n",
|
||||
"[ExtractAudio] Not converting audio /Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/Let's build GPT: from scratch, in code, spelled out..m4a; file is already in target format m4a\n",
|
||||
"[youtube] Extracting URL: https://youtu.be/VMj-3S1tku0\n",
|
||||
"[youtube] VMj-3S1tku0: Downloading webpage\n",
|
||||
"[youtube] VMj-3S1tku0: Downloading android player API JSON\n",
|
||||
"[info] VMj-3S1tku0: Downloading 1 format(s): 140\n",
|
||||
"[download] /Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/The spelled-out intro to neural networks and backpropagation: building micrograd.m4a has already been downloaded\n",
|
||||
"[download] 100% of 134.98MiB\n",
|
||||
"[ExtractAudio] Not converting audio /Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/The spelled-out intro to neural networks and backpropagation: building micrograd.m4a; file is already in target format m4a\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Two Karpathy lecture videos\n",
|
||||
"urls = [\"https://youtu.be/kCc8FmEb1nY\",\n",
|
||||
" \"https://youtu.be/VMj-3S1tku0\"]\n",
|
||||
"\n",
|
||||
"# Directory to save audio files \n",
|
||||
"save_dir = \"~/Downloads/YouTube\"\n",
|
||||
"\n",
|
||||
"# Transcribe the videos to text\n",
|
||||
"loader = GenericLoader(YoutubeAudioLoader(urls,save_dir),OpenAIWhisperParser())\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "72a94fd8",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Hello, my name is Andrej and I've been training deep neural networks for a bit more than a decade. And in this lecture I'd like to show you what neural network training looks like under the hood. So in particular we are going to start with a blank Jupyter notebook and by the end of this lecture we will define and train a neural net and you'll get to see everything that goes on under the hood and exactly sort of how that works on an intuitive level. Now specifically what I would like to do is I w\""
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Returns a list of Documents, which can be easily viewed or parsed\n",
|
||||
"docs[0].page_content[0:500]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "93be6b49",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Building a chat app from YouTube video\n",
|
||||
"\n",
|
||||
"Given `Documents`, we can easily enable chat / question+answering."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "1823f042",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.vectorstores import FAISS\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "7257cda1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Combine doc\n",
|
||||
"combined_docs = [doc.page_content for doc in docs]\n",
|
||||
"text = \" \".join(combined_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "147c0c55",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Split them\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)\n",
|
||||
"splits = text_splitter.split_text(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "f3556703",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Build an index\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"vectordb = FAISS.from_texts(splits,embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "beaa99db",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Build a QA chain\n",
|
||||
"qa_chain = RetrievalQA.from_chain_type(llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0),\n",
|
||||
" chain_type=\"stuff\",\n",
|
||||
" retriever=vectordb.as_retriever())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "f2239a62",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"We need to zero out the gradient before backprop at each step because the backward pass accumulates gradients in the grad attribute of each parameter. If we don't reset the grad to zero before each backward pass, the gradients will accumulate and add up, leading to incorrect updates and slower convergence. By resetting the grad to zero before each backward pass, we ensure that the gradients are calculated correctly and that the optimization process works as intended.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Ask a question!\n",
|
||||
"query = \"Why do we need to zero out the gradient before backprop at each step?\"\n",
|
||||
"qa_chain.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "a8d01098",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'In the context of transformers, an encoder is a component that reads in a sequence of input tokens and generates a sequence of hidden representations. On the other hand, a decoder is a component that takes in a sequence of hidden representations and generates a sequence of output tokens. The main difference between the two is that the encoder is used to encode the input sequence into a fixed-length representation, while the decoder is used to decode the fixed-length representation into an output sequence. In machine translation, for example, the encoder reads in the source language sentence and generates a fixed-length representation, which is then used by the decoder to generate the target language sentence.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What is the difference between an encoder and decoder?\"\n",
|
||||
"qa_chain.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "fe1e77dd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'For any token, x is the input vector that contains the private information of that token, k and q are the key and query vectors respectively, which are produced by forwarding linear modules on x, and v is the vector that is calculated by propagating the same linear module on x again. The key vector represents what the token contains, and the query vector represents what the token is looking for. The vector v is the information that the token will communicate to other tokens if it finds them interesting, and it gets aggregated for the purposes of the self-attention mechanism.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"For any token, what are x, k, v, and q?\"\n",
|
||||
"qa_chain.run(query)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
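Transcription and embedding cost time and API calls, so it can be worth persisting the FAISS index between sessions. A minimal sketch using FAISS's local save/load (the directory name is an assumption):

# Persist the index built above to disk...
vectordb.save_local("karpathy_lectures_index")

# ...and restore it later without re-transcribing or re-embedding
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
vectordb = FAISS.load_local("karpathy_lectures_index", OpenAIEmbeddings())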
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "df770c72",
|
||||
"metadata": {},
|
||||
@@ -55,11 +56,12 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6b278a1b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Add video info"
|
||||
"### Add video info"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -79,20 +81,36 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = YoutubeLoader.from_youtube_url(\"https://www.youtube.com/watch?v=QsYGlZkevEg\", add_video_info=True)"
|
||||
"loader = YoutubeLoader.from_youtube_url(\"https://www.youtube.com/watch?v=QsYGlZkevEg\", add_video_info=True)\n",
|
||||
"loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "fc417e31",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Add language preferences\n",
|
||||
"\n",
|
||||
"Language param : It's a list of language codes in a descending priority, `en` by default.\n",
|
||||
"\n",
|
||||
"translation param : It's a translate preference when the youtube does'nt have your select language, `en` by default."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "97b98e92",
|
||||
"id": "08510625",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = YoutubeLoader.from_youtube_url(\"https://www.youtube.com/watch?v=QsYGlZkevEg\", add_video_info=True, language=['en','id'], translation='en')\n",
|
||||
"loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "65796cc5",
|
||||
"metadata": {},
|
||||
|
||||
@@ -0,0 +1,90 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AWS Kendra\n",
|
||||
"\n",
|
||||
"> AWS Kendra is an intelligent search service provided by Amazon Web Services (AWS). It utilizes advanced natural language processing (NLP) and machine learning algorithms to enable powerful search capabilities across various data sources within an organization. Kendra is designed to help users find the information they need quickly and accurately, improving productivity and decision-making.\n",
|
||||
"\n",
|
||||
"> With Kendra, users can search across a wide range of content types, including documents, FAQs, knowledge bases, manuals, and websites. It supports multiple languages and can understand complex queries, synonyms, and contextual meanings to provide highly relevant search results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using the AWS Kendra Index Retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install boto3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import boto3\n",
|
||||
"from langchain.retrievers import AwsKendraIndexRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create New Retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"kclient = boto3.client('kendra', region_name=\"us-east-1\")\n",
|
||||
"\n",
|
||||
"retriever = AwsKendraIndexRetriever(\n",
|
||||
" kclient=kclient,\n",
|
||||
" kendraindex=\"kendraindex\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now you can use retrieved documents from AWS Kendra Index"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"what is langchain\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
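The retriever can also be dropped into a chain; a minimal sketch (an assumed usage, not from the notebook) wiring it into `RetrievalQA`:

from langchain.chains import RetrievalQA
from langchain.llms import OpenAI

# Use the Kendra retriever defined above as the document source for QA
qa = RetrievalQA.from_chain_type(
    llm=OpenAI(temperature=0),
    chain_type="stuff",
    retriever=retriever,
)
qa.run("what is langchain")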
|
||||
@@ -5,7 +5,16 @@
|
||||
"id": "1edb9e6b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Azure Cognitive Search Retriever\n",
|
||||
"# Azure Cognitive Search\n",
|
||||
"\n",
|
||||
">[Azure Cognitive Search](https://learn.microsoft.com/en-us/azure/search/search-what-is-azure-search) (formerly known as `Azure Search`) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications.\n",
|
||||
"\n",
|
||||
">Search is foundational to any app that surfaces text to users, where common scenarios include catalog or document search, online retail apps, or data exploration over proprietary content. When you create a search service, you'll work with the following capabilities:\n",
|
||||
">- A search engine for full text search over a search index containing user-owned content\n",
|
||||
">- Rich indexing, with lexical analysis and optional AI enrichment for content extraction and transformation\n",
|
||||
">- Rich query syntax for text search, fuzzy search, autocomplete, geo-search and more\n",
|
||||
">- Programmability through REST APIs and client libraries in Azure SDKs\n",
|
||||
">- Azure integration at the data layer, machine learning layer, and AI (Cognitive Services)\n",
|
||||
"\n",
|
||||
"This notebook shows how to use Azure Cognitive Search (ACS) within LangChain."
|
||||
]
|
||||
@@ -120,7 +129,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
121 docs/modules/indexes/retrievers/examples/merger_retriever.ipynb Normal file
@@ -0,0 +1,121 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "fc0db1bc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LOTR (Merger Retriever)\n",
|
||||
"\n",
|
||||
"Lord of the Retrievers, also known as MergerRetriever, takes a list of retrievers as input and merges the results of their get_relevant_documents() methods into a single list. The merged results will be a list of documents that are relevant to the query and that have been ranked by the different retrievers.\n",
|
||||
"\n",
|
||||
"The MergerRetriever class can be used to improve the accuracy of document retrieval in a number of ways. First, it can combine the results of multiple retrievers, which can help to reduce the risk of bias in the results. Second, it can rank the results of the different retrievers, which can help to ensure that the most relevant documents are returned first."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9fbcc58f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import chromadb\n",
|
||||
"from langchain.retrievers.merger_retriever import MergerRetriever\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.embeddings import HuggingFaceEmbeddings\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.document_transformers import EmbeddingsRedundantFilter\n",
|
||||
"from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n",
|
||||
"from langchain.retrievers import ContextualCompressionRetriever\n",
|
||||
"\n",
|
||||
"# Get 3 diff embeddings.\n",
|
||||
"all_mini = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
|
||||
"multi_qa_mini = HuggingFaceEmbeddings(model_name=\"multi-qa-MiniLM-L6-dot-v1\")\n",
|
||||
"filter_embeddings = OpenAIEmbeddings()\n",
|
||||
"\n",
|
||||
"ABS_PATH = os.path.dirname(os.path.abspath(__file__))\n",
|
||||
"DB_DIR = os.path.join(ABS_PATH, \"db\")\n",
|
||||
"\n",
|
||||
"# Instantiate 2 diff cromadb indexs, each one with a diff embedding.\n",
|
||||
"client_settings = chromadb.config.Settings(\n",
|
||||
" chroma_db_impl=\"duckdb+parquet\",\n",
|
||||
" persist_directory=DB_DIR,\n",
|
||||
" anonymized_telemetry=False,\n",
|
||||
")\n",
|
||||
"db_all = Chroma(\n",
|
||||
" collection_name=\"project_store_all\",\n",
|
||||
" persist_directory=DB_DIR,\n",
|
||||
" client_settings=client_settings,\n",
|
||||
" embedding_function=all_mini,\n",
|
||||
")\n",
|
||||
"db_multi_qa = Chroma(\n",
|
||||
" collection_name=\"project_store_multi\",\n",
|
||||
" persist_directory=DB_DIR,\n",
|
||||
" client_settings=client_settings,\n",
|
||||
" embedding_function=multi_qa_mini,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Define 2 diff retrievers with 2 diff embeddings and diff search type.\n",
|
||||
"retriever_all = db_all.as_retriever(\n",
|
||||
" search_type=\"similarity\", search_kwargs={\"k\": 5, \"include_metadata\": True}\n",
|
||||
")\n",
|
||||
"retriever_multi_qa = db_multi_qa.as_retriever(\n",
|
||||
" search_type=\"mmr\", search_kwargs={\"k\": 5, \"include_metadata\": True}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# The Lord of the Retrievers will hold the ouput of boths retrievers and can be used as any other \n",
|
||||
"# retriever on different types of chains.\n",
|
||||
"lotr = MergerRetriever(retrievers=[retriever_all, retriever_multi_qa])\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "c152339d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Remove redundant results from the merged retrievers."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "039faea6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"# We can remove redundant results from both retrievers using yet another embedding. \n",
|
||||
"# Using multiples embeddings in diff steps could help reduce biases.\n",
|
||||
"filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings)\n",
|
||||
"pipeline = DocumentCompressorPipeline(transformers=[filter])\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=pipeline, base_retriever=lotr\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
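A short usage sketch (the query text and printing are illustrative, not from the notebook): the compression retriever behaves like any other retriever.

docs = compression_retriever.get_relevant_documents("What does the project store contain?")
for d in docs:
    print(d.metadata, d.page_content[:80])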
|
||||
80 docs/modules/indexes/retrievers/examples/pubmed.ipynb Normal file
@@ -0,0 +1,80 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3df0dcf8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# PubMed Retriever\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use PubMed as a retriever\n",
|
||||
"\n",
|
||||
"PubMed® comprises more than 35 million citations for biomedical literature from MEDLINE, life science journals, and online books. Citations may include links to full text content from PubMed Central and publisher web sites."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "aecaff63",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import PubMedRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f2f7e8d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = PubMedRetriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "ed115aa1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='', metadata={'uid': '37268021', 'title': 'Dermatology in the wake of an AI revolution: who gets a say?', 'pub_date': '<Year>2023</Year><Month>May</Month><Day>31</Day>'}),\n",
|
||||
" Document(page_content='', metadata={'uid': '37267643', 'title': 'What is ChatGPT and what do we do with it? Implications of the age of AI for nursing and midwifery practice and education: An editorial.', 'pub_date': '<Year>2023</Year><Month>May</Month><Day>30</Day>'}),\n",
|
||||
" Document(page_content='The nursing field has undergone notable changes over time and is projected to undergo further modifications in the future, owing to the advent of sophisticated technologies and growing healthcare needs. The advent of ChatGPT, an AI-powered language model, is expected to exert a significant influence on the nursing profession, specifically in the domains of patient care and instruction. The present article delves into the ramifications of ChatGPT within the nursing domain and accentuates its capacity and constraints to transform the discipline.', metadata={'uid': '37266721', 'title': 'The Impact of ChatGPT on the Nursing Profession: Revolutionizing Patient Care and Education.', 'pub_date': '<Year>2023</Year><Month>Jun</Month><Day>02</Day>'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"chatgpt\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
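If you want fewer or more results, the retriever exposes a result cap; a minimal sketch (the `top_k_results` parameter name is taken from the underlying PubMed wrapper and should be treated as an assumption here):

retriever = PubMedRetriever(top_k_results=3)  # cap the number of returned documents (assumed parameter)
retriever.get_relevant_documents("chatgpt")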
|
||||
@@ -99,13 +99,14 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "2d958271",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity Score Threshold Retrieval\n",
|
||||
"\n",
|
||||
"You can also a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold"
|
||||
"You can also use a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "9fc6205b",
|
||||
"metadata": {},
|
||||
@@ -14,7 +13,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "51489529-5dcd-4b86-bda6-de0a39d8ffd1",
|
||||
"metadata": {},
|
||||
@@ -23,7 +21,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "1435c804-069d-4ade-9a7b-006b97b767c1",
|
||||
"metadata": {},
|
||||
@@ -44,7 +41,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6c15470b-a16b-4e0d-bc6a-6998bafbb5a4",
|
||||
"metadata": {},
|
||||
@@ -58,7 +54,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "ae3c3d16",
|
||||
"metadata": {},
|
||||
@@ -67,7 +62,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6fafb73b-d6ec-4822-b161-edf0aaf5224a",
|
||||
"metadata": {},
|
||||
@@ -151,7 +145,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "2670363b-3806-4c7e-b14d-90a4d5d2a200",
|
||||
"metadata": {},
|
||||
@@ -273,7 +266,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -2,21 +2,15 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Zep Memory\n",
|
||||
"# Zep\n",
|
||||
"\n",
|
||||
"## Retriever Example\n",
|
||||
">[Zep](https://docs.getzep.com/) - A long-term memory store for LLM applications.\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to search historical chat message histories using the [Zep Long-term Memory Store](https://getzep.github.io/).\n",
|
||||
"More on `Zep`:\n",
|
||||
"\n",
|
||||
"We'll demonstrate:\n",
|
||||
"\n",
|
||||
"1. Adding conversation history to the Zep memory store.\n",
|
||||
"2. Vector search over the conversation history.\n",
|
||||
"\n",
|
||||
"More on Zep:\n",
|
||||
"\n",
|
||||
"Zep stores, summarizes, embeds, indexes, and enriches conversational AI chat histories, and exposes them via simple, low-latency APIs.\n",
|
||||
"`Zep` stores, summarizes, embeds, indexes, and enriches conversational AI chat histories, and exposes them via simple, low-latency APIs.\n",
|
||||
"\n",
|
||||
"Key Features:\n",
|
||||
"\n",
|
||||
@@ -28,15 +22,37 @@
|
||||
"\n",
|
||||
"Zep's Go Extractor model is easily extensible, with a simple, clean interface available to build new enrichment functionality, such as summarizers, entity extractors, embedders, and more.\n",
|
||||
"\n",
|
||||
"Zep project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
"`Zep` project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retriever Example\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to search historical chat message histories using the [Zep Long-term Memory Store](https://getzep.github.io/).\n",
|
||||
"\n",
|
||||
"We'll demonstrate:\n",
|
||||
"\n",
|
||||
"1. Adding conversation history to the Zep memory store.\n",
|
||||
"2. Vector search over the conversation history.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:27.863217Z",
|
||||
"start_time": "2023-05-25T15:03:25.690273Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory.chat_message_histories import ZepChatMessageHistory\n",
|
||||
@@ -45,29 +61,30 @@
|
||||
"\n",
|
||||
"# Set this to your Zep server URL\n",
|
||||
"ZEP_API_URL = \"http://localhost:8000\""
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:27.863217Z",
|
||||
"start_time": "2023-05-25T15:03:25.690273Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Initialize the Zep Chat Message History Class and add a chat message history to the memory store\n",
|
||||
"\n",
|
||||
"**NOTE:** Unlike other Retrievers, the content returned by the Zep Retriever is session/user specific. A `session_id` is required when instantiating the Retriever."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:29.118416Z",
|
||||
"start_time": "2023-05-25T15:03:29.022464Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"session_id = str(uuid4()) # This is a unique identifier for the user/session\n",
|
||||
@@ -77,18 +94,21 @@
|
||||
" session_id=session_id,\n",
|
||||
" url=ZEP_API_URL,\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:29.118416Z",
|
||||
"start_time": "2023-05-25T15:03:29.022464Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:30.271181Z",
|
||||
"start_time": "2023-05-25T15:03:30.180442Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Preload some messages into the memory. The default message window is 12 messages. We want to push beyond this to demonstrate auto-summarization.\n",
|
||||
@@ -157,35 +177,42 @@
|
||||
" if msg[\"role\"] == \"human\"\n",
|
||||
" else AIMessage(content=msg[\"content\"])\n",
|
||||
" )\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:30.271181Z",
|
||||
"start_time": "2023-05-25T15:03:30.180442Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Use the Zep Retriever to vector search over the Zep memory\n",
|
||||
"\n",
|
||||
"Zep provides native vector search over historical conversation memory. Embedding happens automatically.\n",
|
||||
"\n",
|
||||
"NOTE: Embedding of messages occurs asynchronously, so the first query may not return results. Subsequent queries will return results as the embeddings are generated."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:32.979155Z",
|
||||
"start_time": "2023-05-25T15:03:32.590310Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "[Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759001673780126, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}),\n Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\", metadata={'score': 0.7602262941130749, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}),\n Document(page_content='Who were her contemporaries?', metadata={'score': 0.757553366415519, 'uuid': '41f9c41a-a205-41e1-b48b-a0a4cd943fc8', 'created_at': '2023-05-25T15:03:30.243995Z', 'role': 'human', 'token_count': 8}),\n Document(page_content='Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American science fiction author.', metadata={'score': 0.7546211059317948, 'uuid': '34678311-0098-4f1a-8fd4-5615ac692deb', 'created_at': '2023-05-25T15:03:30.231427Z', 'role': 'ai', 'token_count': 31}),\n Document(page_content='Which books of hers were made into movies?', metadata={'score': 0.7496714959247069, 'uuid': '18046c3a-9666-4d3e-b4f0-43d1394732b7', 'created_at': '2023-05-25T15:03:30.236837Z', 'role': 'human', 'token_count': 11})]"
|
||||
"text/plain": [
|
||||
"[Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759001673780126, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}),\n",
|
||||
" Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\", metadata={'score': 0.7602262941130749, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}),\n",
|
||||
" Document(page_content='Who were her contemporaries?', metadata={'score': 0.757553366415519, 'uuid': '41f9c41a-a205-41e1-b48b-a0a4cd943fc8', 'created_at': '2023-05-25T15:03:30.243995Z', 'role': 'human', 'token_count': 8}),\n",
|
||||
" Document(page_content='Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American science fiction author.', metadata={'score': 0.7546211059317948, 'uuid': '34678311-0098-4f1a-8fd4-5615ac692deb', 'created_at': '2023-05-25T15:03:30.231427Z', 'role': 'ai', 'token_count': 31}),\n",
|
||||
" Document(page_content='Which books of hers were made into movies?', metadata={'score': 0.7496714959247069, 'uuid': '18046c3a-9666-4d3e-b4f0-43d1394732b7', 'created_at': '2023-05-25T15:03:30.236837Z', 'role': 'human', 'token_count': 11})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
@@ -202,31 +229,38 @@
|
||||
")\n",
|
||||
"\n",
|
||||
"await zep_retriever.aget_relevant_documents(\"Who wrote Parable of the Sower?\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:32.979155Z",
|
||||
"start_time": "2023-05-25T15:03:32.590310Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also use the Zep sync API to retrieve results:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:34.713354Z",
|
||||
"start_time": "2023-05-25T15:03:34.577974Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "[Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.', metadata={'score': 0.8897321402776546, 'uuid': '1c09603a-52c1-40d7-9d69-29f26256029c', 'created_at': '2023-05-25T15:03:30.268257Z', 'role': 'ai', 'token_count': 56}),\n Document(page_content=\"Write a short synopsis of Butler's book, Parable of the Sower. What is it about?\", metadata={'score': 0.8857628682610436, 'uuid': 'f6706e8c-6c91-452f-8c1b-9559fd924657', 'created_at': '2023-05-25T15:03:30.265302Z', 'role': 'human', 'token_count': 23}),\n Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759670375149477, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}),\n Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\", metadata={'score': 0.7602854653476563, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}),\n Document(page_content='You might want to read Ursula K. Le Guin or Joanna Russ.', metadata={'score': 0.7595293992240313, 'uuid': 'f22f2498-6118-4c74-8718-aa89ccd7e3d6', 'created_at': '2023-05-25T15:03:30.261198Z', 'role': 'ai', 'token_count': 18})]"
|
||||
"text/plain": [
|
||||
"[Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.', metadata={'score': 0.8897321402776546, 'uuid': '1c09603a-52c1-40d7-9d69-29f26256029c', 'created_at': '2023-05-25T15:03:30.268257Z', 'role': 'ai', 'token_count': 56}),\n",
|
||||
" Document(page_content=\"Write a short synopsis of Butler's book, Parable of the Sower. What is it about?\", metadata={'score': 0.8857628682610436, 'uuid': 'f6706e8c-6c91-452f-8c1b-9559fd924657', 'created_at': '2023-05-25T15:03:30.265302Z', 'role': 'human', 'token_count': 23}),\n",
|
||||
" Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759670375149477, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}),\n",
|
||||
" Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\", metadata={'score': 0.7602854653476563, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}),\n",
|
||||
" Document(page_content='You might want to read Ursula K. Le Guin or Joanna Russ.', metadata={'score': 0.7595293992240313, 'uuid': 'f22f2498-6118-4c74-8718-aa89ccd7e3d6', 'created_at': '2023-05-25T15:03:30.261198Z', 'role': 'ai', 'token_count': 18})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
@@ -235,48 +269,44 @@
|
||||
],
|
||||
"source": [
|
||||
"zep_retriever.get_relevant_documents(\"Who wrote Parable of the Sower?\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:34.713354Z",
|
||||
"start_time": "2023-05-25T15:03:34.577974Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"outputs": [],
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-18T20:09:21.298710Z",
|
||||
"start_time": "2023-05-18T20:09:21.297169Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
"nbformat_minor": 4
|
||||
}
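Since message embedding happens asynchronously (see the note above), a first query right after writing history may come back empty. A minimal polling sketch (the retry count and sleep interval are arbitrary choices):

import time

docs = []
for _ in range(5):
    docs = zep_retriever.get_relevant_documents("Who wrote Parable of the Sower?")
    if docs:
        break
    time.sleep(1)  # wait for the async embedder to catch up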
|
||||
|
||||
@@ -0,0 +1,131 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "73dbcdb9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SentenceTransformersTokenTextSplitter\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `SentenceTransformersTokenTextSplitter` text splitter.\n",
|
||||
"\n",
|
||||
"Language models have a token limit. You should not exceed the token limit. When you split your text into chunks it is therefore a good idea to count the number of tokens. There are many tokenizers. When you count tokens in your text you should use the same tokenizer as used in the language model. \n",
|
||||
"\n",
|
||||
"The `SentenceTransformersTokenTextSplitter` is a specialized text splitter for use with the sentence-transformer models. The default behaviour is to split the text into chunks that fit the token window of the sentence transformer model that you would like to use."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "9dd5419e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import SentenceTransformersTokenTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b43e5d54",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"splitter = SentenceTransformersTokenTextSplitter(chunk_overlap=0)\n",
|
||||
"text = \"Lorem \""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "1df84cb4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"count_start_and_stop_tokens = 2\n",
|
||||
"text_token_count = splitter.count_tokens(text=text) - count_start_and_stop_tokens\n",
|
||||
"print(text_token_count)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "d7ad2213",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"tokens in text to split: 514\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"token_multiplier = splitter.maximum_tokens_per_chunk // text_token_count + 1\n",
|
||||
"\n",
|
||||
"# `text_to_split` does not fit in a single chunk\n",
|
||||
"text_to_split = text * token_multiplier\n",
|
||||
"\n",
|
||||
"print(f\"tokens in text to split: {splitter.count_tokens(text=text_to_split)}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "818aea04",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"lorem\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text_chunks = splitter.split_text(text=text_to_split)\n",
|
||||
"\n",
|
||||
"print(text_chunks[1])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e9ba4f23",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
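By default the splitter sizes chunks for `all-MiniLM-L6-v2`. A minimal sketch of targeting a different sentence-transformer (the exact model is an illustrative choice; `text_to_split` is the variable from the cells above):

splitter = SentenceTransformersTokenTextSplitter(
    model_name="sentence-transformers/multi-qa-MiniLM-L6-dot-v1",  # pick the model whose token window you must fit
    chunk_overlap=0,
)
chunks = splitter.split_text(text=text_to_split)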
|
||||
@@ -12,7 +12,8 @@
|
||||
"\n",
|
||||
"- `length_function`: how the length of chunks is calculated. Defaults to just counting number of characters, but it's pretty common to pass a token counter here.\n",
|
||||
"- `chunk_size`: the maximum size of your chunks (as measured by the length function).\n",
|
||||
"- `chunk_overlap`: the maximum overlap between chunks. It can be nice to have some overlap to maintain some continuity between chunks (eg do a sliding window)."
|
||||
"- `chunk_overlap`: the maximum overlap between chunks. It can be nice to have some overlap to maintain some continuity between chunks (eg do a sliding window).\n",
|
||||
"- `add_start_index` : wether to include the starting position of each chunk within the original document in the metadata. "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -49,6 +50,7 @@
|
||||
" chunk_size = 100,\n",
|
||||
" chunk_overlap = 20,\n",
|
||||
" length_function = len,\n",
|
||||
" add_start_index = True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -62,8 +64,8 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and' lookup_str='' metadata={} lookup_index=0\n",
|
||||
"page_content='of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.' lookup_str='' metadata={} lookup_index=0\n"
|
||||
"page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and' metadata={'start_index': 0}\n",
|
||||
"page_content='of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.' metadata={'start_index': 82}\n"
|
||||
]
|
||||
}
|
||||
],
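The `start_index` metadata makes it possible to map a chunk back to its position in the source; a minimal sketch (the variable names `texts` and `state_of_the_union` follow the notebook's earlier cells and are assumptions here):

chunk = texts[1]
start = chunk.metadata["start_index"]
# The chunk's content is exactly the slice of the original text starting there
assert state_of_the_union[start : start + len(chunk.page_content)] == chunk.page_content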
|
||||
@@ -90,7 +92,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.9.16"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
194 docs/modules/indexes/vectorstores/examples/awadb.ipynb Normal file
@@ -0,0 +1,194 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "833c4789",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AwaDB\n",
|
||||
"[AwaDB](https://github.com/awa-ai/awadb) is an AI Native database for the search and storage of embedding vectors used by LLM Applications.\n",
|
||||
"This notebook shows how to use functionality related to the AwaDB."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "252930ea",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install awadb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f2b71a47",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import AwaDB\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "49be0bac",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader('../../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size= 100, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "18714278",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = AwaDB.from_documents(docs)\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = db.similarity_search(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "62b7a4c5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a9b4be48",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "87fec6b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity search with score"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17231924",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The returned distance score is between 0-1. 0 is dissimilar, 1 is the most similar"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f40ddae1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = db.similarity_search_with_score(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f0045583",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(docs[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8c2da99d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"(Document(page_content='And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'}), 0.561813814013747)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0b49fb59",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Restore the table created and added data before"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1bfa6e25",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"AwaDB automatically persists added document data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2a0f3b35",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you can restore the table you created and added before, you can just do this as below:"
|
||||
]
|
||||
},
{
"cell_type": "code",
"execution_count": null,
"id": "1fd4b5b0",
"metadata": {},
"outputs": [],
"source": [
"import awadb\n",
"\n",
"awadb_client = awadb.Client()\n",
"ret = awadb_client.Load('langchain_awadb')\n",
"if ret:\n",
"    print('awadb load table success')\n",
"else:\n",
"    print('awadb load table failed')"
]
},
{
"cell_type": "markdown",
"id": "5ae9a9dd",
"metadata": {},
"source": [
"awadb load table success"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
245
docs/modules/indexes/vectorstores/examples/azuresearch.ipynb
Normal file
@@ -0,0 +1,245 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure Cognitive Search"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Azure Cognitive Search SDK"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install --index-url=https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/ azure-search-documents==11.4.0a20230509004\n",
"!pip install azure-identity"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import required libraries"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"import openai\n",
"from dotenv import load_dotenv\n",
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.schema import BaseRetriever\n",
"from langchain.vectorstores.azuresearch import AzureSearch"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configure OpenAI settings\n",
"Configure the OpenAI settings to use Azure OpenAI or OpenAI."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables from a .env file using load_dotenv():\n",
"load_dotenv()\n",
"\n",
"openai.api_type = \"azure\"\n",
"openai.api_base = \"YOUR_OPENAI_ENDPOINT\"\n",
"openai.api_version = \"2023-05-15\"\n",
"openai.api_key = \"YOUR_OPENAI_API_KEY\"\n",
"model: str = \"text-embedding-ada-002\""
]
},
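{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell above targets Azure OpenAI. If you use the non-Azure OpenAI API instead, the standard defaults below apply; a minimal sketch (kept commented out so only one configuration is active):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Alternative: non-Azure OpenAI configuration (sketch; uncomment to use)\n",
"# openai.api_type = \"open_ai\"\n",
"# openai.api_base = \"https://api.openai.com/v1\"\n",
"# openai.api_version = None\n",
"# openai.api_key = \"YOUR_OPENAI_API_KEY\""
]
},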
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Configure vector store settings\n",
" \n",
"Set up the vector store settings (replace the placeholders with your endpoint and admin key):"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"vector_store_address: str = \"YOUR_AZURE_SEARCH_ENDPOINT\"\n",
"vector_store_password: str = \"YOUR_AZURE_SEARCH_ADMIN_KEY\"\n",
"index_name: str = \"langchain-vector-demo\""
]
},
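{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"If you prefer not to hardcode these values, you can read them from environment variables; a minimal sketch (the variable names AZURE_SEARCH_ENDPOINT and AZURE_SEARCH_ADMIN_KEY are illustrative, not a fixed convention):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optionally override the placeholders from the environment (variable names are illustrative)\n",
"vector_store_address = os.environ.get(\"AZURE_SEARCH_ENDPOINT\", vector_store_address)\n",
"vector_store_password = os.environ.get(\"AZURE_SEARCH_ADMIN_KEY\", vector_store_password)"
]
},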
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create embeddings and vector store instances\n",
" \n",
"Create instances of the OpenAIEmbeddings and AzureSearch classes:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1)\n",
"vector_store: AzureSearch = AzureSearch(\n",
"    azure_search_endpoint=vector_store_address,\n",
"    azure_search_key=vector_store_password,\n",
"    index_name=index_name,\n",
"    embedding_function=embeddings.embed_query,\n",
")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Insert text and embeddings into vector store\n",
" \n",
"Load documents, split them, and add them to the vector store:"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"loader = TextLoader('../../../state_of_the_union.txt', encoding='utf-8')\n",
"\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"vector_store.add_documents(documents=docs)"
]
},
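{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Besides add_documents(), the vector store also supports the base add_texts() method, which takes raw strings plus optional per-text metadata; a minimal sketch (the texts and metadata below are made up for illustration):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# add_texts() accepts raw strings and optional metadata dicts (illustrative data)\n",
"vector_store.add_texts(\n",
"    texts=[\"Example sentence one.\", \"Example sentence two.\"],\n",
"    metadatas=[{\"source\": \"example\"}, {\"source\": \"example\"}],\n",
")"
]
},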
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Perform a vector similarity search\n",
" \n",
"Execute a pure vector similarity search using the similarity_search() method:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
]
}
],
"source": [
"# Perform a similarity search\n",
"docs = vector_store.similarity_search(\n",
"    query=\"What did the president say about Ketanji Brown Jackson\",\n",
"    k=3,\n",
"    search_type=\"similarity\",\n",
")\n",
"print(docs[0].page_content)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Perform a Hybrid Search\n",
"\n",
"Execute a hybrid search using the similarity_search() method; hybrid is the default search type, so no search_type argument is needed:"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
]
}
],
"source": [
"# Perform a hybrid search\n",
"docs = vector_store.similarity_search(\n",
"    query=\"What did the president say about Ketanji Brown Jackson\",\n",
"    k=3,\n",
")\n",
"print(docs[0].page_content)"
]
}
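,
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also request the search type explicitly rather than relying on the default; a minimal sketch (assuming 'hybrid' is an accepted search_type value, mirroring the 'similarity' value used above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Explicitly request hybrid search instead of relying on the default (sketch)\n",
"docs = vector_store.similarity_search(\n",
"    query=\"What did the president say about Ketanji Brown Jackson\",\n",
"    k=3,\n",
"    search_type=\"hybrid\",\n",
")\n",
"print(docs[0].page_content)"
]
}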
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.13 ('.venv': venv)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "645053d6307d413a1a75681b5ebb6449bb2babba4bcb0bf65a1ddc3dbefb108a"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -151,6 +151,15 @@
"## Similarity search with score"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "346347d7",
"metadata": {},
"source": [
"The returned distance score is a cosine distance, so a lower score is better."
]
},
{
"cell_type": "code",
"execution_count": 10,
399
docs/modules/indexes/vectorstores/examples/clickhouse.ipynb
Normal file
@@ -0,0 +1,399 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "683953b3",
"metadata": {},
"source": [
"# ClickHouse Vector Search\n",
"\n",
"> [ClickHouse](https://clickhouse.com/) is the fastest and most resource-efficient open-source database for real-time apps and analytics, with full SQL support and a wide range of functions to assist users in writing analytical queries. Recently added data structures and distance search functions (like `L2Distance`), as well as [approximate nearest neighbor search indexes](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/annindexes), enable ClickHouse to be used as a high-performance, scalable vector database to store and search vectors with SQL.\n",
"\n",
"This notebook shows how to use functionality related to the `ClickHouse` vector search."
]
},
{
"cell_type": "markdown",
"id": "43ead5d5-2c1f-4dce-a69a-cb00e4f9d6f0",
"metadata": {},
"source": [
"## Setting up environments"
]
},
{
"cell_type": "markdown",
"id": "b2c434bc",
"metadata": {},
"source": [
"Set up a local ClickHouse server with Docker (optional):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "249a7751",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:43:43.035606Z",
"start_time": "2023-06-03T08:43:42.618531Z"
}
},
"outputs": [],
"source": [
"! docker run -d -p 8123:8123 -p 9000:9000 --name langchain-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:23.4.2.11"
]
},
{
"cell_type": "markdown",
"id": "7bd3c1c0",
"metadata": {},
"source": [
"Set up the ClickHouse client driver:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9d614bf8",
"metadata": {},
"outputs": [],
"source": [
"!pip install clickhouse-connect"
]
},
{
"cell_type": "markdown",
"id": "15a1d477-9cdb-4d82-b019-96951ecb2b72",
"metadata": {},
"source": [
"We want to use OpenAIEmbeddings, so we have to get the OpenAI API key."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "91003ea5-0c8c-436c-a5de-aaeaeef2f458",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:49:35.383673Z",
"start_time": "2023-06-03T08:49:33.984547Z"
}
},
"outputs": [],
"source": [
"import os\n",
"import getpass\n",
"\n",
"# Prompt for the key only if it is not already set (avoids a KeyError on a missing variable)\n",
"if 'OPENAI_API_KEY' not in os.environ:\n",
"    os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "aac9563e",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:33:31.554934Z",
"start_time": "2023-06-03T08:33:31.549590Z"
},
"tags": []
},
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Clickhouse, ClickhouseSettings"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "a3c3999a",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:33:32.527387Z",
"start_time": "2023-06-03T08:33:32.501312Z"
},
"tags": []
},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"loader = TextLoader('../../../state_of_the_union.txt')\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "6e104aee",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:33:35.503823Z",
"start_time": "2023-06-03T08:33:33.745832Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Inserting data...: 100%|██████████| 42/42 [00:00<00:00, 2801.49it/s]\n"
]
}
],
"source": [
"for d in docs:\n",
"    d.metadata = {'some': 'metadata'}\n",
"settings = ClickhouseSettings(table=\"clickhouse_vector_search_example\")\n",
"docsearch = Clickhouse.from_documents(docs, embeddings, config=settings)\n",
"\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = docsearch.similarity_search(query)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "9c608226",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
]
}
],
"source": [
"print(docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "e3a8b105",
"metadata": {},
"source": [
"## Get connection info and data schema"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "69996818",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:28:58.252991Z",
"start_time": "2023-06-03T08:28:58.197560Z"
},
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[92m\u001b[1mdefault.clickhouse_vector_search_example @ localhost:8123\u001b[0m\n",
"\n",
"\u001b[1musername: None\u001b[0m\n",
"\n",
"Table Schema:\n",
"---------------------------------------------------\n",
"|\u001b[94mid \u001b[0m|\u001b[96mNullable(String) \u001b[0m|\n",
"|\u001b[94mdocument \u001b[0m|\u001b[96mNullable(String) \u001b[0m|\n",
"|\u001b[94membedding \u001b[0m|\u001b[96mArray(Float32) \u001b[0m|\n",
"|\u001b[94mmetadata \u001b[0m|\u001b[96mObject('json') \u001b[0m|\n",
"|\u001b[94muuid \u001b[0m|\u001b[96mUUID \u001b[0m|\n",
"---------------------------------------------------\n",
"\n"
]
}
],
"source": [
"print(str(docsearch))"
]
},
{
"cell_type": "markdown",
"id": "324ac147",
"metadata": {},
"source": [
"### ClickHouse table schema"
]
},
{
"cell_type": "markdown",
"id": "b5bd7c5b",
"metadata": {},
"source": [
"> The ClickHouse table will be created automatically if it does not exist. Advanced users can pre-create the table with optimized settings. For a distributed ClickHouse cluster with sharding, the table engine should be configured as `Distributed`."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "54f4f561",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clickhouse Table DDL:\n",
"\n",
"CREATE TABLE IF NOT EXISTS default.clickhouse_vector_search_example(\n",
"    id Nullable(String),\n",
"    document Nullable(String),\n",
"    embedding Array(Float32),\n",
"    metadata JSON,\n",
"    uuid UUID DEFAULT generateUUIDv4(),\n",
"    CONSTRAINT cons_vec_len CHECK length(embedding) = 1536,\n",
"    INDEX vec_idx embedding TYPE annoy(100,'L2Distance') GRANULARITY 1000\n",
") ENGINE = MergeTree ORDER BY uuid SETTINGS index_granularity = 8192\n"
]
}
],
"source": [
"print(f\"Clickhouse Table DDL:\\n\\n{docsearch.schema}\")"
]
},
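{
"cell_type": "markdown",
"id": "7de1a2b3",
"metadata": {},
"source": [
"You can also point the store at a custom database/table name through `ClickhouseSettings` before any data is inserted; a minimal sketch (the values are illustrative, and the `database` field is an assumption to verify against your installed version):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7de1a2b4",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: custom database/table via ClickhouseSettings (values are illustrative)\n",
"custom_settings = ClickhouseSettings(\n",
"    database='default',  # assumed field; verify against your version\n",
"    table='my_custom_table',\n",
")\n",
"# docsearch = Clickhouse.from_documents(docs, embeddings, config=custom_settings)"
]
},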
{
"cell_type": "markdown",
"id": "f59360c0",
"metadata": {},
"source": [
"## Filtering\n",
"\n",
"You have direct access to the ClickHouse SQL WHERE statement: you can write a `WHERE` clause following standard SQL.\n",
"\n",
"**NOTE**: Please be aware of SQL injection; this interface must not be called directly by the end user.\n",
"\n",
"If you have customized your `column_map` in your settings, you can search with a filter like this:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "232055f6",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:29:36.680805Z",
"start_time": "2023-06-03T08:29:34.963676Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Inserting data...: 100%|██████████| 42/42 [00:00<00:00, 6939.56it/s]\n"
]
}
],
"source": [
"from langchain.vectorstores import Clickhouse, ClickhouseSettings\n",
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader('../../../state_of_the_union.txt')\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"\n",
"for i, d in enumerate(docs):\n",
"    d.metadata = {'doc_id': i}\n",
"\n",
"docsearch = Clickhouse.from_documents(docs, embeddings)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "ddbcee77",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:29:43.487436Z",
"start_time": "2023-06-03T08:29:43.040831Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0.6779101415357189 {'doc_id': 0} Madam Speaker, Madam...\n",
"0.6997970363474885 {'doc_id': 8} And so many families...\n",
"0.7044504914336727 {'doc_id': 1} Groups of citizens b...\n",
"0.7053558702165094 {'doc_id': 6} And I’m taking robus...\n"
]
}
],
"source": [
"meta = docsearch.metadata_column\n",
"output = docsearch.similarity_search_with_relevance_scores(\n",
"    'What did the president say about Ketanji Brown Jackson?',\n",
"    k=4,\n",
"    where_str=f\"{meta}.doc_id<10\",\n",
")\n",
"for d, dist in output:\n",
"    print(dist, d.metadata, d.page_content[:20] + '...')"
]
},
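{
"cell_type": "markdown",
"id": "9ab12c34",
"metadata": {},
"source": [
"Any standard SQL predicate works in `where_str`, so conditions can be combined; a minimal sketch (the predicate below is illustrative):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9ab12c35",
"metadata": {},
"outputs": [],
"source": [
"# Combine conditions in where_str with plain SQL (illustrative predicate)\n",
"output = docsearch.similarity_search_with_relevance_scores(\n",
"    'What did the president say about Ketanji Brown Jackson?',\n",
"    k=4,\n",
"    where_str=f\"{meta}.doc_id >= 5 AND {meta}.doc_id < 20\",\n",
")\n",
"for d, dist in output:\n",
"    print(dist, d.metadata, d.page_content[:20] + '...')"
]
},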
{
"cell_type": "markdown",
"id": "a359ed74",
"metadata": {},
"source": [
"## Deleting your data"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "fb6a9d36",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:30:24.822384Z",
"start_time": "2023-06-03T08:30:24.798571Z"
}
},
"outputs": [],
"source": [
"docsearch.drop()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Some files were not shown because too many files have changed in this diff.