Compare commits


35 Commits

Author SHA1 Message Date
Harrison Chase
d22dd95dfa cr 2023-12-04 11:39:22 -08:00
Harrison Chase
310e946124 integrations start 2023-12-03 08:43:58 -08:00
Eugene Yurtsev
943aa01c14 Improve indexing performance for Postgres (remote database) for refresh for async API (#14132)
This PR speeds up the indexing API on the async path by batching the UID
updates in the SQL record manager (which may be remote).
2023-12-01 12:10:07 -05:00
William FH
528fc76d6a Update Prompt Format Error (#14044)
The number of times I try to format a string (especially in LCEL) is
embarrassingly high. I think this may be more actionable than the default
error message. Now I get nice, helpful errors:


```
KeyError: "Input to ChatPromptTemplate is missing variable 'input'.  Expected: ['input'] Received: ['dialogue']"
```
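
A minimal way to reproduce this (illustrative variable names):

```python
from langchain.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template("Summarize this: {input}")
# Passing the wrong variable name now raises the descriptive KeyError above.
prompt.invoke({"dialogue": "hi"})
```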
2023-12-01 09:06:35 -08:00
William FH
71c2e184b4 [Nits] Evaluation - Some Rendering Improvements (#14097)
- Improve rendering of aggregate results at the end
- flatten reference if present
2023-12-01 09:06:07 -08:00
Bob Lin
f15859bd86 docs[patch]: Update discord.ipynb (#14099)
### Description

Now, if `example` on a Message is False, it is not displayed. This PR updates
the outputs in this document accordingly.

```python
In [22]: m = HumanMessage(content="Text")

In [23]: m
Out[23]: HumanMessage(content='Text')

In [24]: m = HumanMessage(content="Text", example=True)

In [25]: m
Out[25]: HumanMessage(content='Text', example=True)
```

### Twitter handle

[lin_bob57617](https://twitter.com/lin_bob57617)
2023-12-01 08:54:31 -08:00
Lance Martin
b07a5a9509 Template for Ollama + Multi-query retriever (#14092) 2023-12-01 08:53:17 -08:00
Bob Lin
75312c3694 docs[patch]: Update facebook.ipynb (#14102)
### Description

OpenAI version 1.0.0 and later no longer supports camelCase usage, so [the
APIs](https://github.com/openai/openai-python/blob/main/api.md#finetuning)
need to be updated.
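
For illustration, a sketch of the shift (the file ID is a placeholder; the pre-1.0 call is shown only as a comment):

```python
from openai import OpenAI

client = OpenAI()
# Pre-1.0 (camelCase, no longer works): openai.FineTune.create(...)
# Post-1.0 client-style call for the fine-tuning API:
job = client.fine_tuning.jobs.create(
    training_file="file-abc123",  # placeholder file ID
    model="gpt-3.5-turbo",
)
```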

### Twitter handle

[lin_bob57617](https://twitter.com/lin_bob57617)
2023-12-01 08:49:56 -08:00
Erick Friis
a3ae8e0a41 templates[patch]: opensearch readme update (#14103) 2023-12-01 08:48:00 -08:00
Ean Yang
ac1c8634a8 docs[patch]: Update invalid guides link (#14106) 2023-12-01 08:47:38 -08:00
Mark Scannell
9b0e46dcf0 Improve indexing performance for Postgres (remote database) for refresh (#14126)
**Description:** By combining the document timestamp refreshes into a single
call to update(), this enables batching of multiple documents into a single
SQL statement. This is important for non-local databases, where tens of
milliseconds of round-trip time per statement have a huge impact on
performance when issuing document-by-document SQL statements.
**Issue:** #11935 
**Dependencies:** None
**Tag maintainer:** @eyurtsev
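
Conceptually, a hedged sketch of the batched path using the public record-manager API (the DSN is a placeholder):

```python
from langchain.indexes import SQLRecordManager

# Placeholder DSN for a remote Postgres instance.
record_manager = SQLRecordManager(
    namespace="my_docs",
    db_url="postgresql+psycopg2://user:pass@remote-host/db",
)
record_manager.create_schema()

keys = ["doc-1", "doc-2", "doc-3"]
# One update() call refreshes all timestamps in a single batched SQL
# statement, instead of one round-trip per document.
record_manager.update(keys)
```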
2023-12-01 11:36:02 -05:00
Erick Friis
b161f302ff docs[patch]: local docs build <5s (#14096) 2023-11-30 17:39:30 -08:00
Hubert Yuan
80ed588733 docs[patch]: Update metaphor_search.ipynb (#14093)
- **Description:** Touch-up of the documentation page for the Metaphor
Search Tool integration. Removes documentation for the old built-in tool
wrapper.

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
2023-11-30 16:34:05 -08:00
Jacob Lee
3328507f11 langchain[patch], experimental[minor]: Adds OllamaFunctions wrapper (#13330)
CC @baskaryan @hwchase17 @jmorganca 

Having a bit of trouble importing `langchain_experimental` from a
notebook, will figure it out tomorrow

~Ah and also is blocked by #13226~
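
For reference, a hedged sketch of the wrapper in use (model name and function schema are illustrative):

```python
from langchain_experimental.llms.ollama_functions import OllamaFunctions

model = OllamaFunctions(model="llama2")
# Bind an OpenAI-functions-style schema so the local model returns
# structured function calls.
model = model.bind(
    functions=[
        {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "City name"}
                },
                "required": ["location"],
            },
        }
    ],
    function_call={"name": "get_current_weather"},
)
print(model.invoke("What's the weather in Toronto?"))
```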

---------

Co-authored-by: Lance Martin <lance@langchain.dev>
Co-authored-by: Bagatur <baskaryan@gmail.com>
2023-11-30 16:13:57 -08:00
Bagatur
4063bf144a langchain[patch]: release 0.0.344 (#14095) 2023-11-30 15:57:11 -08:00
Bagatur
efce352d6b core[patch]: release 0.0.8 (#14086) 2023-11-30 15:12:06 -08:00
Harutaka Kawamura
0d08a692a3 langchain[minor]: Migrate mlflow and databricks classes to deployments APIs. (#13699)
## Description

Related to https://github.com/mlflow/mlflow/pull/10420. MLflow AI
gateway will be deprecated and replaced by the `mlflow.deployments`
module. Happy to split this PR if it's too large.

```
pip install git+https://github.com/langchain-ai/langchain.git@refs/pull/13699/merge#subdirectory=libs/langchain
```

## Dependencies

Install mlflow from https://github.com/mlflow/mlflow/pull/10420:

```
pip install git+https://github.com/mlflow/mlflow.git@refs/pull/10420/merge
```

## Testing plan

The following code works fine on local and databricks:

<details><summary>Click</summary>
<p>

```python
"""
Setup
-----
mlflow deployments start-server --config-path examples/gateway/openai/config.yaml
databricks secrets create-scope <scope>
databricks secrets put-secret <scope> openai-api-key --string-value $OPENAI_API_KEY

Run
---
python /path/to/this/file.py secrets/<scope>/openai-api-key
"""
from langchain.chat_models import ChatMlflow, ChatDatabricks
from langchain.embeddings import MlflowEmbeddings, DatabricksEmbeddings
from langchain.llms import Databricks, Mlflow
from langchain.schema.messages import HumanMessage
from langchain.chains.loading import load_chain
from mlflow.deployments import get_deploy_client
import uuid
import sys
import tempfile
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

###############################
# MLflow
###############################
chat = ChatMlflow(
    target_uri="http://127.0.0.1:5000", endpoint="chat", params={"temperature": 0.1}
)
print(chat([HumanMessage(content="hello")]))

embeddings = MlflowEmbeddings(target_uri="http://127.0.0.1:5000", endpoint="embeddings")
print(embeddings.embed_query("hello")[:3])
print(embeddings.embed_documents(["hello", "world"])[0][:3])

llm = Mlflow(
    target_uri="http://127.0.0.1:5000",
    endpoint="completions",
    params={"temperature": 0.1},
)
print(llm("I am"))

llm_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        input_variables=["adjective"],
        template="Tell me a {adjective} joke",
    ),
)
print(llm_chain.run(adjective="funny"))

# serialization/deserialization
with tempfile.TemporaryDirectory() as tmpdir:
    print(tmpdir)
    path = f"{tmpdir}/llm.yaml"
    llm_chain.save(path)
    loaded_chain = load_chain(path)
    print(loaded_chain("funny"))

###############################
# Databricks
###############################
secret = sys.argv[1]
client = get_deploy_client("databricks")

# External - chat
name = f"chat-{uuid.uuid4()}"
client.create_endpoint(
    name=name,
    config={
        "served_entities": [
            {
                "name": "test",
                "external_model": {
                    "name": "gpt-4",
                    "provider": "openai",
                    "task": "llm/v1/chat",
                    "openai_config": {
                        "openai_api_key": "{{" + secret + "}}",
                    },
                },
            }
        ],
    },
)
try:
    chat = ChatDatabricks(
        target_uri="databricks", endpoint=name, params={"temperature": 0.1}
    )
    print(chat([HumanMessage(content="hello")]))
finally:
    client.delete_endpoint(endpoint=name)

# External - embeddings
name = f"embeddings-{uuid.uuid4()}"
client.create_endpoint(
    name=name,
    config={
        "served_entities": [
            {
                "name": "test",
                "external_model": {
                    "name": "text-embedding-ada-002",
                    "provider": "openai",
                    "task": "llm/v1/embeddings",
                    "openai_config": {
                        "openai_api_key": "{{" + secret + "}}",
                    },
                },
            }
        ],
    },
)
try:
    embeddings = DatabricksEmbeddings(target_uri="databricks", endpoint=name)
    print(embeddings.embed_query("hello")[:3])
    print(embeddings.embed_documents(["hello", "world"])[0][:3])
finally:
    client.delete_endpoint(endpoint=name)

# External - completions
name = f"completions-{uuid.uuid4()}"
client.create_endpoint(
    name=name,
    config={
        "served_entities": [
            {
                "name": "test",
                "external_model": {
                    "name": "gpt-3.5-turbo-instruct",
                    "provider": "openai",
                    "task": "llm/v1/completions",
                    "openai_config": {
                        "openai_api_key": "{{" + secret + "}}",
                    },
                },
            }
        ],
    },
)
try:
    llm = Databricks(
        endpoint_name=name,
        model_kwargs={"temperature": 0.1},
    )
    print(llm("I am"))
finally:
    client.delete_endpoint(endpoint=name)


# Foundation model - chat
chat = ChatDatabricks(
    endpoint="databricks-llama-2-70b-chat", params={"temperature": 0.1}
)
print(chat([HumanMessage(content="hello")]))

# Foundation model - embeddings
embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
print(embeddings.embed_query("hello")[:3])

# Foundation model - completions
llm = Databricks(
    endpoint_name="databricks-mpt-7b-instruct", model_kwargs={"temperature": 0.1}
)
print(llm("hello"))
llm_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        input_variables=["adjective"],
        template="Tell me a {adjective} joke",
    ),
)
print(llm_chain.run(adjective="funny"))

# serialization/deserialization
with tempfile.TemporaryDirectory() as tmpdir:
    print(tmpdir)
    path = f"{tmpdir}/llm.yaml"
    llm_chain.save(path)
    loaded_chain = load_chain(path)
    print(loaded_chain("funny"))

```

Output:

```
content='Hello! How can I assist you today?'
[-0.025058426, -0.01938856, -0.027781019]
[-0.025058426, -0.01938856, -0.027781019]
sorry, but I cannot continue the sentence as it is incomplete. Can you please provide more information or context?
Sure, here's a classic one for you:

Why don't scientists trust atoms?

Because they make up everything!
/var/folders/dz/cd_nvlf14g9g__n3ph0d_0pm0000gp/T/tmpx_4no6ad
{'adjective': 'funny', 'text': "Sure, here's a classic one for you:\n\nWhy don't scientists trust atoms?\n\nBecause they make up everything!"}
content='Hello! How can I assist you today?'
[-0.025058426, -0.01938856, -0.027781019]
[-0.025058426, -0.01938856, -0.027781019]
 a 23 year old female and I am currently studying for my master's degree
content="\nHello! It's nice to meet you. Is there something I can help you with or would you like to chat for a bit?"
[0.051055908203125, 0.007221221923828125, 0.003879547119140625]
[0.051055908203125, 0.007221221923828125, 0.003879547119140625]

hello back
 Well, I don't really know many jokes, but I do know this funny story...
/var/folders/dz/cd_nvlf14g9g__n3ph0d_0pm0000gp/T/tmp7_ds72ex
{'adjective': 'funny', 'text': " Well, I don't really know many jokes, but I do know this funny story..."}
```

</p>
</details>

The existing workflow doesn't break:

<details><summary>click</summary>
<p>

```python
import uuid

import mlflow
from mlflow.models import ModelSignature
from mlflow.types.schema import ColSpec, Schema


class MyModel(mlflow.pyfunc.PythonModel):
    def predict(self, context, model_input):
        return str(uuid.uuid4())


with mlflow.start_run():
    mlflow.pyfunc.log_model(
        "model",
        python_model=MyModel(),
        pip_requirements=["mlflow==2.8.1", "cloudpickle<3"],
        signature=ModelSignature(
            inputs=Schema(
                [
                    ColSpec("string", "prompt"),
                    ColSpec("string", "stop"),
                ]
            ),
            outputs=Schema(
                [
                    ColSpec(name=None, type="string"),
                ]
            ),
        ),
        registered_model_name=f"lang-{uuid.uuid4()}",
    )

# Manually create a serving endpoint with the registered model and run
from langchain.llms import Databricks

llm = Databricks(endpoint_name="<name>")
llm("hello")  # 9d0b2491-3d13-487c-bc02-1287f06ecae7
```

</p>
</details> 

## Follow-up tasks

(This PR is too large. I'll file a separate one for follow-up tasks.)

- Update `docs/docs/integrations/providers/mlflow_ai_gateway.mdx` and
`docs/docs/integrations/providers/databricks.md`.

---------

Signed-off-by: harupy <17039389+harupy@users.noreply.github.com>
Co-authored-by: Bagatur <baskaryan@gmail.com>
2023-11-30 15:06:58 -08:00
Tyler Hutcherson
dc31714ec5 templates[patch]: Rag redis template dependency update (#13614)
- **Description:** Update RAG Redis template readme and dependencies.
2023-11-30 12:22:13 -08:00
Jeremy Naccache
a14cf87576 core[patch]: Add **kwargs to LangChain's dumps() to allow passing of json.dumps() parameters (#10628)

In LangChain's `dumps()` function, I've added a `**kwargs` parameter. This
allows users to pass additional parameters to the underlying `json.dumps()`
call, providing greater flexibility and control over JSON serialization.

Many parameters available in `json.dumps()` can be useful or even
necessary in specific situations. For example, when using an Agent with
`return_intermediate_steps` set to `True`, the output is a list of
AgentAction objects. These objects can't be serialized without using
LangChain's `dumps()` function.

The issue arises when using the Agent with a language other than English,
which may contain non-ASCII characters like 'é'. The default behavior of
`json.dumps()` sets `ensure_ascii` to `True`, converting
`{"name": "José"}` into `{"name": "Jos\u00e9"}`. This can make the
output hard to read, especially for intermediate steps in agent logs.

By allowing users to pass additional parameters to `json.dumps()` via
LangChain's `dumps()`, we can solve this problem. For instance, users can
set `ensure_ascii=False` to preserve the original characters.

This update also enables users to pass other useful `json.dumps()`
parameters like `sort_keys`, providing even more flexibility.

The implementation takes into account edge cases where a user might pass a
"default" parameter, which `dumps()` already defines, or an "indent"
parameter, which is also preset when `pretty=True`.

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
2023-11-30 08:52:24 -08:00
Erick Friis
8078caf764 templates[patch]: rag-google-cloud-sdp readme (#14043) 2023-11-30 08:17:51 -08:00
Yong woo Song
f4d520ccb5 Fix .env file path in integration_test README.md (#14028)

### Description
Hello, 

The [integration_test
README](https://github.com/langchain-ai/langchain/tree/master/libs/langchain/tests)
indicated incorrect paths for the `.env.example` and `.env` files.

`tests/.env.example` ->`tests/integration_tests/.env.example`

While it’s a minor error, it could **potentially lead to confusion** for
the document’s readers, so I’ve made the necessary corrections.

Thank you! ☺️

### Related Issue
- https://github.com/langchain-ai/langchain/pull/2806
2023-11-29 22:14:28 -05:00
Rohan Dey
41a4c06a94 Added support for a Pandas DataFrame OutputParser (#13257)
**Description:**

Added support for a Pandas DataFrame OutputParser with format
instructions, along with unit tests and a demo notebook. Namely, we've
added the ability to request data from a DataFrame, have the LLM parse
the request, and then use that request to retrieve a well-formatted
response.

Within LangChain, it seamlessly integrates with language models like
OpenAI's `text-davinci-003`, facilitating streamlined interaction using
the format instructions (just like the other output parsers).

This parser structures its requests as
`<operation/column/row>[<optional_array_params>]`. The instructions
detail permissible operations, valid columns, and array formats,
ensuring clarity and adherence to the required format.

For example:

- When the LLM receives the input: "Retrieve the mean of `num_legs` from
rows 1 to 3."
- The provided format instructions guide the LLM to structure the
request as: "mean:num_legs[1..3]".

The parser processes this formatted request, leveraging the LLM's
understanding to extract the mean of `num_legs` from rows 1 to 3 within
the Pandas DataFrame.

This integration allows users to communicate requests naturally, with
the LLM transforming these instructions into structured commands
understood by the `PandasDataFrameOutputParser`. The format instructions
act as a bridge between natural language queries and precise DataFrame
operations, optimizing communication and data retrieval.
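
A short, hedged sketch of the flow (the parser and its request grammar come from this PR; the expected-output comment is illustrative):

```python
import pandas as pd

from langchain.output_parsers import PandasDataFrameOutputParser

df = pd.DataFrame({"num_legs": [2, 4, 8, 0], "num_wings": [2, 0, 0, 0]})
parser = PandasDataFrameOutputParser(dataframe=df)

# These instructions are injected into the prompt so the LLM emits
# requests like "mean:num_legs[1..3]".
print(parser.get_format_instructions())

# Parsing the example request from the description above.
print(parser.parse("mean:num_legs[1..3]"))  # e.g. {'mean': 4.0}
```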

**Issue:**

- https://github.com/langchain-ai/langchain/issues/11532

**Dependencies:**

No additional dependencies :)

**Tag maintainer:**

@baskaryan 

**Twitter handle:**

No need. :)

---------

Co-authored-by: Wasee Alam <waseealam@protonmail.com>
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
2023-11-29 22:08:50 -05:00
Masanori Taniguchi
235bdb9fa7 Support Vald secure connection (#13269)
**Description:**
Previously, only insecure gRPC connections to Vald were supported; this adds
support for secure connections.
In addition, gRPC metadata can be added to Vald requests to enable
authentication with a token.

2023-11-29 22:07:29 -05:00
Nico Puhlmann
54355b651a Update index.mdx (#13285)
grammar correction


Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
2023-11-29 22:06:33 -05:00
sudranga
d1d693b2a7 Fix issue where response_if_no_docs_found is not implemented on async… (#13297)
`response_if_no_docs_found` was not implemented in
ConversationalRetrievalChain's async code paths. This implements it and adds
test cases.

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
2023-11-29 22:06:13 -05:00
AthulVincent
67c55cb5b0 Implemented MongoDB Atlas Self-Query Retriever (#13321)
# Description 
This PR implements Self-Query Retriever for MongoDB Atlas vector store.

I've implemented the comparators and operators that are supported by
MongoDB Atlas vector store according to the section titled "Atlas Vector
Search Pre-Filter" from
https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/.

Namely:
```
allowed_comparators = [
      Comparator.EQ,
      Comparator.NE,
      Comparator.GT,
      Comparator.GTE,
      Comparator.LT,
      Comparator.LTE,
      Comparator.IN,
      Comparator.NIN,
  ]

"""Subset of allowed logical operators."""
allowed_operators = [
    Operator.AND,
    Operator.OR
]
```
Translations from comparators/operators to MongoDB Atlas filter operators
(you can find the syntax in the "Atlas Vector Search Pre-Filter" section of
the previous link) are done using the following dictionary:
```
map_dict = {
            Operator.AND: "$and",
            Operator.OR: "$or",
            Comparator.EQ: "$eq",
            Comparator.NE: "$ne",
            Comparator.GTE: "$gte",
            Comparator.LTE: "$lte",
            Comparator.LT: "$lt",
            Comparator.GT: "$gt",
            Comparator.IN: "$in",
            Comparator.NIN: "$nin",
        }
```

In visit_structured_query(), the filters are passed as "pre_filter" rather
than "filter" as in the MongoDB link above, since LangChain's implementation
of the MongoDB Atlas vector store
(libs/langchain/langchain/vectorstores/mongodb_atlas.py) sets the "filter"
key in _similarity_search_with_score() to the value of the "pre_filter"
argument:
```
params["filter"] = pre_filter
```
Test cases and documentation have also been added.
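
A hedged sketch of wiring this up (the connection string, namespace, index name, and metadata fields are placeholders):

```python
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.vectorstores import MongoDBAtlasVectorSearch

vectorstore = MongoDBAtlasVectorSearch.from_connection_string(
    "mongodb+srv://<user>:<password>@<cluster>/",  # placeholder URI
    "my_db.my_collection",                         # placeholder namespace
    OpenAIEmbeddings(),
    index_name="default",
)

metadata_field_info = [
    AttributeInfo(name="year", description="Release year", type="integer"),
    AttributeInfo(name="genre", description="Film genre", type="string"),
]

retriever = SelfQueryRetriever.from_llm(
    llm=ChatOpenAI(temperature=0),
    vectorstore=vectorstore,
    document_contents="Brief summaries of movies",
    metadata_field_info=metadata_field_info,
)

# "after 1990" is translated into a pre_filter like {"year": {"$gt": 1990}}.
docs = retriever.get_relevant_documents("comedies made after 1990")
```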

# Issue
#11616 

# Dependencies
No new dependencies have been added.

# Documentation
I have created the notebook mongodb_atlas_self_query.ipynb outlining the
steps to get the self-query mechanism working.

I worked closely with [@Farhan-Faisal](https://github.com/Farhan-Faisal)
on this PR.

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
2023-11-29 22:05:06 -05:00
Josef Zoller
c2e3963da4 Merriam-Webster Dictionary Tool (#12044)
# Description

We implemented a simple tool for accessing the Merriam-Webster
Collegiate Dictionary API
(https://dictionaryapi.com/products/api-collegiate-dictionary).

Here's a simple usage example:

```py
from langchain.llms import OpenAI
from langchain.agents import load_tools, initialize_agent, AgentType

llm = OpenAI()
tools = load_tools(["serpapi", "merriam-webster"], llm=llm) # Serp API gives our agent access to Google
agent = initialize_agent(
  tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is the english word for the german word Himbeere? Define that word.")
```

Sample output:

```
> Entering new AgentExecutor chain...
 I need to find the english word for Himbeere and then get the definition of that word.
Action: Search
Action Input: "English word for Himbeere"
Observation: {'type': 'translation_result'}
Thought: Now I have the english word, I can look up the definition.
Action: MerriamWebster
Action Input: raspberry
Observation: Definitions of 'raspberry':

1. rasp-ber-ry, noun: any of various usually black or red edible berries that are aggregate fruits consisting of numerous small drupes on a fleshy receptacle and that are usually rounder and smaller than the closely related blackberries
2. rasp-ber-ry, noun: a perennial plant (genus Rubus) of the rose family that bears raspberries
3. rasp-ber-ry, noun: a sound of contempt made by protruding the tongue between the lips and expelling air forcibly to produce a vibration; broadly : an expression of disapproval or contempt
4. black raspberry, noun: a raspberry (Rubus occidentalis) of eastern North America that has a purplish-black fruit and is the source of several cultivated varieties —called also blackcap

Thought: I now know the final answer.
Final Answer: Raspberry is an english word for Himbeere and it is defined as any of various usually black or red edible berries that are aggregate fruits consisting of numerous small drupes on a fleshy receptacle and that are usually rounder and smaller than the closely related blackberries.

> Finished chain.
```

# Issue

This closes #12039.

# Dependencies

We added no extra dependencies.


---------

Co-authored-by: Lara <63805048+larkgz@users.noreply.github.com>
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
2023-11-29 20:28:29 -05:00
Mohammad Mohtashim
f3dd4a10cf DROP BOX Loader Documentation Update (#14047)
- **Description:** Update the documentation for the Dropbox loader, and make
the messages more verbose when loading PDF files, since people were getting
confused.
  - **Issue:** #13952
  - **Tag maintainer:** @baskaryan, @eyurtsev, @hwchase17

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
2023-11-29 17:25:35 -08:00
Cheng (William) Huang
a00db4b28f Add multi-input Reddit search tool (#13893)
- **Description:** Added a tool called RedditSearchRun and an
accompanying API wrapper, which searches Reddit for posts with support
for time filtering, post sorting, query string and subreddit filtering.
  - **Issue:** #13891 
  - **Dependencies:** `praw` module is used to search Reddit
- **Tag maintainer:** @baskaryan , and any of the other maintainers if
needed
  - **Twitter handle:** None.

  Hello,

This is our first PR and we hope that our changes will be helpful to the
community. We have run `make format`, `make lint` and `make test`
locally before submitting the PR. To our knowledge, our changes do not
introduce any new errors.

Our PR integrates the `praw` package which is already used by
RedditPostsLoader in LangChain. Nonetheless, we have added integration
tests and edited unit tests to test our changes. An example notebook is
also provided. These changes were put together by me, @Anika2000,
@CharlesXu123, and @Jeremy-Cheng-stack

Thank you in advance to the maintainers for their time.
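
A hedged usage sketch based on the example notebook (credentials are placeholders):

```python
from langchain.tools.reddit_search.tool import RedditSearchRun
from langchain.utilities.reddit_search import RedditSearchAPIWrapper

search = RedditSearchRun(
    api_wrapper=RedditSearchAPIWrapper(
        reddit_client_id="<client-id>",
        reddit_client_secret="<client-secret>",
        reddit_user_agent="<user-agent>",
    )
)

# Newest posts from r/python mentioning "langchain", from the past week.
print(
    search.run(
        {
            "query": "langchain",
            "sort": "new",
            "time_filter": "week",
            "subreddit": "python",
            "limit": "5",
        }
    )
)
```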

---------

Co-authored-by: What-Is-A-Username <49571870+What-Is-A-Username@users.noreply.github.com>
Co-authored-by: Anika2000 <anika.sultana@mail.utoronto.ca>
Co-authored-by: Jeremy Cheng <81793294+Jeremy-Cheng-stack@users.noreply.github.com>
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
2023-11-29 20:16:40 -05:00
Jawad Arshad
00a6e8962c langchain[minor]: Add serpapi tools (#13934)
- **Description:** Added some more of the endpoints supported by SerpApi
that are not yet supported in LangChain, like Google Trends, Google Finance,
Google Jobs, and Google Lens.
- **Issue:** [Add support for many of the querying endpoints with
serpapi #11811](https://github.com/langchain-ai/langchain/issues/11811)
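
For example, a hedged sketch of the Google Trends tool (requires a SerpApi key; import paths follow this PR):

```python
import os

from langchain.tools.google_trends import GoogleTrendsQueryRun
from langchain.utilities.google_trends import GoogleTrendsAPIWrapper

os.environ["SERPAPI_API_KEY"] = "<your-serpapi-key>"  # placeholder

tool = GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper())
print(tool.run("coffee"))  # summary of interest-over-time results
```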

---------

Co-authored-by: zushenglu <58179949+zushenglu@users.noreply.github.com>
Co-authored-by: Erick Friis <erick@langchain.dev>
Co-authored-by: Ian Xu <ian.xu@mail.utoronto.ca>
Co-authored-by: zushenglu <zushenglu1809@gmail.com>
Co-authored-by: KevinT928 <96837880+KevinT928@users.noreply.github.com>
Co-authored-by: Bagatur <baskaryan@gmail.com>
2023-11-29 14:02:57 -08:00
h3l
dbaeb163aa langchain[minor]: add volcengine endpoint as LLM (#13942)
- **Description:** Volc Engine MaaS is an enterprise-grade large-model
service platform designed for developers. You can visit its homepage at
https://www.volcengine.com/docs/82379/1099455 for details. This change makes
it easy for developers to integrate with the platform quickly.
  - **Issue:** None
  - **Dependencies:** volcengine
  - **Tag maintainer:** @baskaryan 
  - **Twitter handle:** @he1v3tica
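
A hedged usage sketch (the class and credential parameter names follow the integration docs added here; the keys are placeholders):

```python
from langchain.llms import VolcEngineMaasLLM

llm = VolcEngineMaasLLM(
    volc_engine_maas_ak="<access-key>",  # placeholder credentials
    volc_engine_maas_sk="<secret-key>",
)
print(llm("hello"))
```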

---------

Co-authored-by: lvzhong <lvzhong@bytedance.com>
2023-11-29 13:16:42 -08:00
Mohammad Ahmad
1600ebe6c7 langchain[patch]: Mask API key for ForeFrontAI LLM (#14013)
- **Description:** Mask API key for ForeFrontAI LLM and associated unit
tests
  - **Issue:** https://github.com/langchain-ai/langchain/issues/12165
  - **Dependencies:** N/A
  - **Tag maintainer:** @eyurtsev 
  - **Twitter handle:** `__mmahmad__`

I made the API key non-optional since linting required adding validation
for None, but the key is required per documentation:
https://python.langchain.com/docs/integrations/llms/forefrontai
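
Hedged illustration of the behavior (endpoint and key are placeholders):

```python
from langchain.llms import ForefrontAI

llm = ForefrontAI(
    forefrontai_api_key="my-secret-key",
    endpoint_url="https://your-model.forefront.link",  # placeholder endpoint
)
# The key is now stored as a SecretStr, so it is masked when printed.
print(llm.forefrontai_api_key)  # **********
```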
2023-11-29 13:12:19 -08:00
yoch
a0e859df51 langchain[patch]: fix cohere reranker init #12899 (#14029)
- **Description:** use post field validation for `CohereRerank`
  - **Issue:** #12899 and #13058
  - **Dependencies:** 
  - **Tag maintainer:** @baskaryan

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
2023-11-29 12:57:06 -08:00
123-fake-st
9bd6e9df36 update pdf document loaders' metadata source to url for online pdf (#13274)
- **Description:** Update 5 pdf document loaders in
`langchain.document_loaders.pdf`, to store a url in the metadata
(instead of a temporary, local file path) if the user provides a web
path to a pdf: `PyPDFium2Loader`, `PDFMinerLoader`,
`PDFMinerPDFasHTMLLoader`, `PyMuPDFLoader`, and `PDFPlumberLoader` were
updated.
- The updates follow the approach used to update `PyPDFLoader` for the
same behavior in #12092
- The `PyMuPDFLoader` changes required additional work in updating
`langchain.document_loaders.parsers.pdf.PyMuPDFParser` to be able to
process either an `io.BufferedReader` (from local pdf) or `io.BytesIO`
(from online pdf)
- The `PDFMinerPDFasHTMLLoader` change used a simpler approach since the
metadata is assigned by the loader and not the parser
  - **Issue:** Fixes #7034
  - **Dependencies:** None


```python
# PyPDFium2Loader example:
# old behavior
>>> from langchain.document_loaders import PyPDFium2Loader
>>> loader = PyPDFium2Loader('https://arxiv.org/pdf/1706.03762.pdf')
>>> docs = loader.load()
>>> docs[0].metadata
{'source': '/var/folders/7z/d5dt407n673drh1f5cm8spj40000gn/T/tmpm5oqa92f/tmp.pdf', 'page': 0}

# new behavior
>>> from langchain.document_loaders import PyPDFium2Loader
>>> loader = PyPDFium2Loader('https://arxiv.org/pdf/1706.03762.pdf')
>>> docs = loader.load()
>>> docs[0].metadata
{'source': 'https://arxiv.org/pdf/1706.03762.pdf', 'page': 0}
```
2023-11-29 15:07:46 -05:00
Toshish Jawale
6f64cb5078 Remove deprecated param and flexibility for prompt (#13310)
- **Description:** Updated to remove the deprecated parameter penalty_alpha,
and to use the string variation of the prompt rather than a JSON object, for
better flexibility.
  - **Dependencies:** N/A
  - **Tag maintainer:** @eyurtsev
  - **Twitter handle:** @symbldotai

---------

Co-authored-by: toshishjawale <toshish@symbl.ai>
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
2023-11-29 14:48:25 -05:00
1608 changed files with 128384 additions and 116553 deletions

View File

@@ -9,10 +9,10 @@ SCRIPT_DIR="$(cd "$(dirname "$0")"; pwd)"
 cd "${SCRIPT_DIR}"
 mkdir -p ../_dist
-cp -r . ../_dist
+rsync -ruv . ../_dist
 cd ../_dist
 poetry run python scripts/model_feat_table.py
-poetry run nbdoc_build --srcdir docs
+poetry run nbdoc_build --srcdir docs --pause 0
 cp ../cookbook/README.md src/pages/cookbook.mdx
 cp ../.github/CONTRIBUTING.md docs/contributing.md
 wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md

View File

@@ -1,888 +0,0 @@
{
"cells": [
{
"cell_type": "raw",
"id": "366a0e68-fd67-4fe5-a292-5c33733339ea",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 0\n",
"title: Get started\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "f331037f-be3f-4782-856f-d55dab952488",
"metadata": {},
"source": [
"LCEL makes it easy to build complex chains from basic components, and supports out of the box functionality such as streaming, parallelism, and logging."
]
},
{
"cell_type": "markdown",
"id": "9a9acd2e",
"metadata": {},
"source": [
"## Basic example: prompt + model + output parser\n",
"\n",
"The most basic and common use case is chaining a prompt template and a model together. To see how this works, let's create a chain that takes a topic and generates a joke:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b6c5518-85eb-43af-afd8-d3ff4643c389",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
"output_parser = StrOutputParser()\n",
"\n",
"chain = prompt | model | output_parser\n",
"\n",
"chain.invoke({\"topic\": \"ice cream\"})"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "ae8ca065-8479-4083-b593-5b5823ffc91a",
"metadata": {},
"source": [
"Notice this line, where we piece together the different components into a single chain\n",
"\n",
"```python\n",
"chain = prompt | model | output_parser\n",
"```\n",
"\n",
"The `|` symbol is similar to a unix pipe operator, creating a chain in which the output of each component is fed as input into the next component.\n",
"\n",
"In this chain the user input is passed to the prompt template, then the prompt template output is passed to the model, then the model output is passed to the output parser. Let's take a look at each component individually to really understand what's going on. \n",
"\n",
"### 1. Prompt\n",
"\n",
"`prompt` is a `BasePromptTemplate`, which means it takes in a dictionary of template variables and produces a `PromptValue`. A `PromptValue` is a wrapper around a completed prompt that can be passed to either an `LLM` (which takes a string as input) or `ChatModel` (which takes a sequence of messages as input). It can work with either language model type because it defines logic both for producing `BaseMessage`s and for producing a string."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "15b85a8f-0d79-49da-9132-b4554d7283e5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptValue(messages=[HumanMessage(content='Tell me a short joke about ice cream')])"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt_value = prompt.invoke({\"topic\": \"ice cream\"})\n",
"prompt_value"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d0ca55ee-1b96-4e1f-bddb-bb3b12d5e54b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='Tell me a short joke about ice cream')]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt_value.to_messages()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d5b345ba-48e4-4fda-873b-c92685237c52",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Human: Tell me a short joke about ice cream'"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt_value.to_string()"
]
},
{
"cell_type": "markdown",
"id": "1619c4b7-38f8-4ba4-bf46-ef6ffa92a6d6",
"metadata": {},
"source": [
"### 2. Model\n",
"\n",
"The `PromptValue` is then passed to `model`. In this case our `model` is a `ChatModel`, meaning it will output a `BaseMessage`."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "5f99f50c-8091-4bd6-9602-6b7504575ef0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Why did the ice cream go to therapy? \\n\\nBecause it was feeling a little rocky road!')"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"message = model.invoke(prompt_value)\n",
"message"
]
},
{
"cell_type": "markdown",
"id": "b774231e-29d4-4f22-8c7e-8fd20b756d0d",
"metadata": {},
"source": [
"If our `model` was an `LLM`, it would output a string."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "7d851773-25f9-4173-bb91-c1e94b61967e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n\\nWhy did the ice cream go to therapy?\\n\\nBecause it was feeling a little soft serve.'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.llms import OpenAI\n",
"\n",
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
"llm.invoke(prompt_value)"
]
},
{
"cell_type": "markdown",
"id": "71d18c82-e9aa-4e5a-acda-d211aac20f1d",
"metadata": {},
"source": [
"### 3. Output parser\n",
"\n",
"And lastly we pass our `model` output to the `output_parser`, which is a `BaseOutputParser` meaning it takes either a string or a \n",
"`BaseMessage` as input. The `StrOutputParser` specifically simple converts any input into a string."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "a3a0f4f3-6fa6-42de-bfaf-0bd8f3fdbd19",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Why did the ice cream go to therapy? \\n\\nBecause it was feeling a little rocky road!'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"output_parser.invoke(message)"
]
},
{
"cell_type": "markdown",
"id": "5b258fd5-22ab-4069-862f-e64c4be6c9a8",
"metadata": {},
"source": [
"## Why use LCEL\n",
"\n",
"To understand the value of LCEL, let's see what we'd have to do to achieve similar functionality without it in this simple use case.\n",
"\n",
"### Without LCEL\n",
"\n",
"We could recreate our above functionality without LCEL or LangChain at all by doing something like this:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e628905c-430e-4e4a-9d7c-c91d2f42052e",
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"\n",
"\n",
"def manual_chain(topic: str) -> str:\n",
" prompt_value = f\"Tell me a short joke about {topic}\"\n",
" client = openai.OpenAI()\n",
" response = client.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\", messages=[{\"role\": \"user\", \"content\": prompt_value}]\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "markdown",
"id": "3c0b0513-77b8-4371-a20e-3e487cec7e7f",
"metadata": {},
"source": [
"#### Stream\n",
"\n",
"If we want to stream results instead, we'll need to change our function:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f2cc6dc-d70a-4c13-9258-452f14290da6",
"metadata": {},
"outputs": [],
"source": [
"from typing import Iterator\n",
"\n",
"\n",
"def manual_chain_stream(topic: str) -> Iterator[str]:\n",
" prompt_value = f\"Tell me a short joke about {topic}\"\n",
" client = openai.OpenAI()\n",
" stream = client.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
" messages=[{\"role\": \"user\", \"content\": prompt_value}],\n",
" stream=True,\n",
" )\n",
" for response in stream:\n",
" content = response.choices[0].delta.content\n",
" if content is not None:\n",
" yield content"
]
},
{
"cell_type": "markdown",
"id": "b9b41e78-ddeb-44d0-a58b-a0ea0c99a761",
"metadata": {},
"source": [
"#### Batch\n",
"\n",
"If we want to run on a batch of inputs in parallel, we'll again need a new function:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b492f13-73a6-48ed-8d4f-9ad634da9988",
"metadata": {},
"outputs": [],
"source": [
"from concurrent.futures import ThreadPoolExecutor\n",
"\n",
"\n",
"def manual_chain_batch(topics: list) -> list:\n",
" with ThreadPoolExecutor(max_workers=5) as executor:\n",
" return list(executor.map(manual_chain, topics))"
]
},
{
"cell_type": "markdown",
"id": "cc5ba36f-eec1-4fc1-8cfe-fa242a7f7809",
"metadata": {},
"source": [
"#### Async\n",
"\n",
"If you needed an asynchronous version:"
]
},
{
"cell_type": "code",
"execution_count": 47,
"id": "eabe6621-e815-41e3-9c9d-5aa561a69835",
"metadata": {},
"outputs": [],
"source": [
"async def manual_chain_async(topic: str) -> str:\n",
" prompt_value = f\"Tell me a short joke about {topic}\"\n",
" client = openai.AsyncOpenAI()\n",
" response = await client.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\", messages=[{\"role\": \"user\", \"content\": prompt_value}]\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "markdown",
"id": "f6888245-1ebe-4768-a53b-e1fef6a8b379",
"metadata": {},
"source": [
"#### LLM instead of chat model\n",
"\n",
"If we want to use a completion endpoint instead of a chat endpoint: "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9aca946b-acaa-4f7e-a3d0-ad8e3225e7f2",
"metadata": {},
"outputs": [],
"source": [
"def manual_chain_completion(topic: str) -> str:\n",
" prompt_value = f\"Tell me a short joke about {topic}\"\n",
" client = openai.OpenAI()\n",
" response = client.completions.create(\n",
" model=\"gpt-3.5-turbo-instruct\",\n",
" prompt=prompt_value,\n",
" )\n",
" return response.choices[0].text"
]
},
{
"cell_type": "markdown",
"id": "ca115eaf-59ef-45c1-aac1-e8b0ce7db250",
"metadata": {},
"source": [
"#### Different model provider\n",
"\n",
"If we want to use Anthropic instead of OpenAI: "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cde2ceb0-f65e-487b-9a32-137b0e9d79d5",
"metadata": {},
"outputs": [],
"source": [
"import anthropic\n",
"\n",
"\n",
"def manual_chain_anthropic(topic: str) -> str:\n",
" prompt_value = f\"Human:\\n\\nTell me a short joke about {topic}\\n\\nAssistant:\"\n",
" client = anthropic.Anthropic()\n",
" response = client.completions.create(\n",
" model=\"claude-2\",\n",
" prompt=prompt_value,\n",
" max_tokens_to_sample=256,\n",
" )\n",
" return response.completion"
]
},
{
"cell_type": "markdown",
"id": "370dd4d7-b825-40c4-ae3c-2693cba2f22a",
"metadata": {},
"source": [
"#### Logging\n",
"\n",
"If we want to log our intermediate results (we'll `print` here for illustrative purposes):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "383a3c51-926d-48c6-b9ae-42bf8f14ecc8",
"metadata": {},
"outputs": [],
"source": [
"def manual_chain_anthropic_logging(topic: str) -> str:\n",
" print(f\"Input: {topic}\")\n",
" prompt_value = f\"Human:\\n\\nTell me a short joke about {topic}\\n\\nAssistant:\"\n",
" print(f\"Formatted prompt: {prompt_value}\")\n",
" client = anthropic.Anthropic()\n",
" response = client.completions.create(\n",
" model=\"claude-2\",\n",
" prompt=prompt_value,\n",
" max_tokens_to_sample=256,\n",
" )\n",
" print(f\"Output: {response.completion}\")\n",
" return response.completion"
]
},
{
"cell_type": "markdown",
"id": "e25ce3c5-27a7-4954-9f0e-b94313597135",
"metadata": {},
"source": [
"#### Fallbacks\n",
"\n",
"If you wanted to add retry or fallback logic:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2e49d512-bc83-4c5f-b56e-934b8343b0fe",
"metadata": {},
"outputs": [],
"source": [
"def manual_chain_with_fallback(topic: str) -> str:\n",
" try:\n",
" return manual_chain(topic)\n",
" except Exception:\n",
" return manual_chain_anthropic(topic)"
]
},
{
"cell_type": "markdown",
"id": "f7ef59b5-2ce3-479e-a7ac-79e1e2f30e9c",
"metadata": {},
"source": [
"### With LCEL\n",
"\n",
"Now let's take a look at how all of this work with LCEL. We'll use our chain from before (and for ease of use take in a string instead of a dict):"
]
},
{
"cell_type": "code",
"execution_count": 48,
"id": "dc0de76a-daf5-4ec0-ba7f-c63225821591",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
"output_parser = StrOutputParser()\n",
"\n",
"chain = {\"topic\": RunnablePassthrough()} | prompt | model | output_parser"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b0d85dda-d63c-459f-99ec-5d6d669b5b0c",
"metadata": {},
"outputs": [],
"source": [
"chain.invoke(\"ice cream\")"
]
},
{
"cell_type": "markdown",
"id": "0c9eb899-e7c8-4ab5-aecd-d305cd716082",
"metadata": {},
"source": [
"#### Streaming"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "71f15ae5-8353-4fe6-b506-73c67ec9c27d",
"metadata": {},
"outputs": [],
"source": [
"for chunk in chain.stream(\"ice cream\"):\n",
" print(chunk, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "2eff0ae2-f2ca-4463-bacb-634fc788b5bb",
"metadata": {},
"source": [
"#### Batch"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dcf9f4a7-5ded-47fb-9057-adb04ed3382e",
"metadata": {},
"outputs": [],
"source": [
"chain.batch([\"ice cream\", \"spaghetti\", \"dumplings\"])"
]
},
{
"cell_type": "markdown",
"id": "82c49198-3ac3-4805-b898-063c45ce89fb",
"metadata": {},
"source": [
"#### Async\n",
"```python\n",
"chain.ainvoke(\"ice cream)\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "c184ca63-e74d-478c-980c-2c19b459cccd",
"metadata": {},
"source": [
"#### LLM instead of chat model"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9f18118e-e901-42ec-a4a0-75d011bec10e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"\n",
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
"llm_chain = {\"topic\": RunnablePassthrough()} | prompt | llm | output_parser\n",
"llm_chain.invoke(\"ice cream\")"
]
},
{
"cell_type": "markdown",
"id": "a5de0201-3980-4f78-b89e-c8c59f1c4e7d",
"metadata": {},
"source": [
"If we wanted, we could even make the choice of chat model or llm runtime configurable"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "937fa94a-b019-450b-bec5-b6e3443fa903",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import ConfigurableField\n",
"\n",
"configurable_model = model.configurable_alternatives(\n",
" ConfigurableField(id=\"model\"), default_key=\"chat_openai\", openai=llm\n",
")\n",
"configurable_chain = {\"topic\": RunnablePassthrough()} | prompt | llm | output_parser\n",
"configurable_chain.invoke(\"ice cream\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2187eb0b-e86b-4845-a2b3-2355781e1b8a",
"metadata": {},
"outputs": [],
"source": [
"configurable_chain.invoke(\"ice cream\", config={\"configurable\": {\"model\": \"openai\"}})"
]
},
{
"cell_type": "markdown",
"id": "e900a52e-f858-4604-9413-7fa7cb04a8a5",
"metadata": {},
"source": [
"#### Different model provider\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "983b323c-f573-452a-8f81-98eb8d6906f9",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatAnthropic\n",
"\n",
"anthropic = ChatAnthropic(model=\"claude-2\")\n",
"anthropic_chain = {\"topic\": RunnablePassthrough()} | prompt | anthropic | output_parser\n",
"anthropic_chain.invoke(\"ice cream\")"
]
},
{
"cell_type": "markdown",
"id": "9c5e16de-a8db-4689-aeef-b2e76d9071cd",
"metadata": {},
"source": [
"#### Logging\n",
"\n",
"By turning on LangSmith, every step of every chain is automatically logged. We set these environment variables:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d6204f21-d2e7-4ac6-871f-b60b34e5bd36",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"LANGCHAIN_API_KEY\"] = \"...\"\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "4842ec53-b58a-4689-97da-32ed17003981",
"metadata": {},
"source": [
"And then get a trace of every chain run: {trace}"
]
},
{
"cell_type": "markdown",
"id": "4274f4bd-3a78-4a28-a531-28ea7ac1efae",
"metadata": {},
"source": [
"#### Fallbacks"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d0d8a0f-66eb-4c35-9529-74bec44ce4b8",
"metadata": {},
"outputs": [],
"source": [
"fallback_chain = chain.with_fallbacks([anthropic_chain])"
]
},
{
"cell_type": "markdown",
"id": "f58af836-26bd-4eab-97a0-76dd56d53430",
"metadata": {},
"source": [
"### With vs without LCEL"
]
},
{
"cell_type": "markdown",
"id": "9fb3d71d-8c69-4dc4-81b7-95cd46b271c2",
"metadata": {},
"source": [
"Our full code **with LCEL** looks like:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "715c469a-545e-434e-bd6e-99745dd880a7",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"from langchain.chat_models import ChatAnthropic, ChatOpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"os.environ[\"LANGCHAIN_API_KEY\"] = \"...\"\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",
"\n",
"chat_openai = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
"openai = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
"anthropic = ChatAnthropic(model=\"claude-2\")\n",
"model = chat_openai.with_fallbacks([anthropic]).configurable_alternatives(\n",
" ConfigurableField(id=\"model\"),\n",
" default_key=\"chat_openai\",\n",
" openai=openai,\n",
" anthropic=anthropic,\n",
")\n",
"\n",
"chain = {\"topic\": RunnablePassthrough()} | prompt | model | StrOutputParser()"
]
},
{
"cell_type": "markdown",
"id": "0a925003-4a1f-406f-87f2-1fd8965b9f87",
"metadata": {},
"source": [
"Our code **without LCEL** might look something like:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a25837c5-829b-42a3-92b4-7e25831350c6",
"metadata": {},
"outputs": [],
"source": [
"from concurrent.futures import ThreadPoolExecutor\n",
"from typing import Iterator, List, Tuple\n",
"\n",
"import openai\n",
"\n",
"prompt_template = \"Tell me a short joke about {topic}\"\n",
"\n",
"\n",
"def manual_chain(topic: str, *, model: str = \"chat_openai\") -> str:\n",
" print(f\"Input: {topic}\")\n",
" prompt_value = prompt_template.format(topic=topic)\n",
"\n",
" if model == \"chat_openai\":\n",
" print(f\"Full prompt: {prompt_value}\")\n",
" response = openai.OpenAI().chat.completions.create(\n",
" model=\"gpt-3.5-turbo\", messages=[{\"role\": \"user\", \"content\": prompt_value}]\n",
" )\n",
" output = response.choices[0].message.content\n",
" elif model == \"openai\":\n",
" print(f\"Full prompt: {prompt_value}\")\n",
" response = openai.OpenAI().completions.create(\n",
" model=\"gpt-3.5-turbo-instruct\",\n",
" prompt=prompt_value,\n",
" )\n",
" output = response.choices[0].text\n",
" elif model == \"anthropic\":\n",
" prompt_value = f\"Human:\\n\\n{prompt_value}\\n\\nAssistant:\"\n",
" print(f\"Full prompt: {prompt_value}\")\n",
" response = anthropic.Anthropic().completions.create(\n",
" model=\"claude-2\",\n",
" prompt=prompt_value,\n",
" max_tokens_to_sample=256,\n",
" )\n",
" output = response.completion\n",
" else:\n",
" raise ValueError(\n",
" f\"Invalid model {model}. Should be one of chat_openai, openai, anthropic.\"\n",
" )\n",
" print(f\"Output: {output}\")\n",
" return output\n",
"\n",
"\n",
"def manual_chain_with_fallbacks(\n",
" topic: str, *, model: str = \"chat_openai\", fallbacks: Tuple[str] = (\"anthropic\",)\n",
") -> str:\n",
" for fallback in fallbacks:\n",
" try:\n",
" return manual_chain(topic, model=model)\n",
" except Exception as e:\n",
" print(f\"Error {e}\")\n",
" model = fallback\n",
" raise e\n",
"\n",
"\n",
"def manual_chain_batch(\n",
" topics: List[str],\n",
" *,\n",
" model: str = \"chat_openai\",\n",
" fallbacks: Tuple[str] = (\"anthropic\",),\n",
") -> List[str]:\n",
" models = [model] * len(topics)\n",
" fallbacks_list = [fallbacks] * len(topics)\n",
" with ThreadPoolExecutor(max_workers=5) as executor:\n",
" return list(\n",
" executor.map(manual_chain_with_fallbacks, topics, models, fallbacks_list)\n",
" )\n",
"\n",
"\n",
"def manual_chain_stream(topic: str, *, model: str = \"chat_openai\") -> Iterator[str]:\n",
" print(f\"Input: {topic}\")\n",
" prompt_value = prompt_template.format(topic=topic)\n",
"\n",
" if model == \"chat_openai\":\n",
" print(f\"Full prompt: {prompt_value}\")\n",
" stream = openai.OpenAI().chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
" messages=[{\"role\": \"user\", \"content\": prompt_value}],\n",
" stream=True,\n",
" )\n",
" for response in stream:\n",
" content = response.choices[0].delta.content\n",
" if content is not None:\n",
" yield content\n",
" elif model == \"openai\":\n",
" print(f\"Full prompt: {prompt_value}\")\n",
" stream = openai.OpenAI().completions.create(\n",
" model=\"gpt-3.5-turbo-instruct\", prompt=prompt_value, stream=True\n",
" )\n",
" for response in stream:\n",
" yield response.choices[0].text\n",
" elif model == \"anthropic\":\n",
" prompt_value = f\"Human:\\n\\n{prompt_value}\\n\\nAssistant:\"\n",
" print(f\"Full prompt: {prompt_value}\")\n",
" stream = anthropic.Anthropic().completions.create(\n",
" model=\"claude-2\", prompt=prompt_value, max_tokens_to_sample=256, stream=True\n",
" )\n",
" for response in stream:\n",
" yield response.completion\n",
" else:\n",
" raise ValueError(\n",
" f\"Invalid model {model}. Should be one of chat_openai, openai, anthropic.\"\n",
" )\n",
"\n",
"\n",
"async def manual_chain_async(topic: str, *, model: str = \"chat_openai\") -> str:\n",
" # You get the idea :)\n",
" ...\n",
"\n",
"\n",
"async def manual_chain_async_batch(\n",
" topics: List[str], *, model: str = \"chat_openai\"\n",
") -> List[str]:\n",
" ...\n",
"\n",
"\n",
"async def manual_chain_async_stream(\n",
" topic: str, *, model: str = \"chat_openai\"\n",
") -> Iterator[str]:\n",
" ...\n",
"\n",
"\n",
"def manual_chain_stream_with_fallbacks(\n",
" topic: str, *, model: str = \"chat_openai\", fallbacks: Tuple[str] = (\"anthropic\",)\n",
") -> Iterator[str]:\n",
" ..."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv",
"language": "python",
"name": "poetry-venv"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -20,7 +20,7 @@ Whenever your LCEL chains have steps that can be executed in parallel (eg if you
Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale. Were currently working on adding streaming support for retries/fallbacks, so you can get the added reliability without any latency cost.
**Access intermediate results**
For more complex chains its often very useful to access the results of intermediate steps even before the final output is produced. This can be used let end-users know something is happening, or even just to debug your chain. You can stream intermediate results, and its available on every [LangServe](/docs/langserve) server.
For more complex chains its often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know something is happening, or even just to debug your chain. You can stream intermediate results, and its available on every [LangServe](/docs/langserve) server.
**Input and output schemas**
Input and output schemas give every LCEL chain Pydantic and JSONSchema schemas inferred from the structure of your chain. This can be used for validation of inputs and outputs, and is an integral part of LangServe.
@@ -30,4 +30,4 @@ As your chains get more and more complex, it becomes increasingly important to u
With LCEL, **all** steps are automatically logged to [LangSmith](/docs/langsmith/) for maximum observability and debuggability.
**Seamless LangServe deployment integration**
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
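For instance, a rough sketch assuming `chain` is an LCEL runnable and the `langserve` package is installed; `add_routes` exposes `/invoke`, `/batch`, and `/stream` endpoints for the chain:

```python
from fastapi import FastAPI
from langserve import add_routes

app = FastAPI()
# Serves the chain under /my-chain/invoke, /my-chain/batch, /my-chain/stream.
add_routes(app, chain, path="/my-chain")
```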

View File

@@ -6,7 +6,7 @@
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"sidebar_position: 0\n",
"title: Interface\n",
"---"
]

View File

@@ -79,7 +79,7 @@ Walkthroughs and techniques for common end-to-end use cases, like:
### [Integrations](/docs/integrations/providers/)
LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](/docs/integrations/providers/).
### [Guides](/docs/guides/adapters/openai)
### [Guides](/docs/guides/guides/debugging)
Best practices for developing with LangChain.
### [API reference](https://api.python.langchain.com)

View File

@@ -119,6 +119,159 @@
"chat_model(messages)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Extraction\n",
" \n",
"Update your version of Ollama and supply the [`format`](https://github.com/jmorganca/ollama/blob/main/docs/api.md#json-mode) flag.\n",
"\n",
"We can enforce the model to produce JSON.\n",
"\n",
"**Note:** You can also try out the experimental [OllamaFunctions](https://python.langchain.com/docs/integrations/chat/ollama_functions) wrapper for convenience."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.chat_models import ChatOllama\n",
"\n",
"chat_model = ChatOllama(\n",
" model=\"llama2\",\n",
" format=\"json\",\n",
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Sure! Here's a JSON response with the colors of the sky at different times of the day:\n",
" Begriffe und Abkürzungen:\n",
"\n",
"* `time`: The time of day (in 24-hour format)\n",
"* `sky_color`: The color of the sky at that time (as a hex code)\n",
"\n",
"Here are the colors of the sky at different times of the day:\n",
"```json\n",
"[\n",
" {\n",
" \"time\": \"6am\",\n",
" \"sky_color\": \"#0080c0\"\n",
" },\n",
" {\n",
" \"time\": \"9am\",\n",
" \"sky_color\": \"#3498db\"\n",
" },\n",
" {\n",
" \"time\": \"12pm\",\n",
" \"sky_color\": \"#ef7c00\"\n",
" },\n",
" {\n",
" \"time\": \"3pm\",\n",
" \"sky_color\": \"#9564b6\"\n",
" },\n",
" {\n",
" \"time\": \"6pm\",\n",
" \"sky_color\": \"#e78ac3\"\n",
" },\n",
" {\n",
" \"time\": \"9pm\",\n",
" \"sky_color\": \"#5f006a\"\n",
" }\n",
"]\n",
"```\n",
"In this response, the `time` property is a string in 24-hour format, representing the time of day. The `sky_color` property is a hex code representing the color of the sky at that time. For example, at 6am, the sky is blue (#0080c0), while at 9pm, it's dark blue (#5f006a)."
]
}
],
"source": [
"from langchain.schema import HumanMessage\n",
"\n",
"messages = [\n",
" HumanMessage(\n",
" content=\"What color is the sky at different times of the day? Respond using JSON\"\n",
" )\n",
"]\n",
"\n",
"chat_model_response = chat_model(messages)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Sure! Based on the JSON schema you provided, here's the information we can gather about a person named John who is 35 years old and loves pizza:\n",
"\n",
"**Name:** John\n",
"\n",
"**Age:** 35 (integer)\n",
"\n",
"**Favorite food:** Pizza (string)\n",
"\n",
"So, the JSON object for John would look like this:\n",
"```json\n",
"{\n",
" \"name\": \"John\",\n",
" \"age\": 35,\n",
" \"fav_food\": \"pizza\"\n",
"}\n",
"```\n",
"Note that we cannot provide additional information about John beyond what is specified in the schema. For example, we do not have any information about his gender, occupation, or address, as those fields are not included in the schema."
]
}
],
"source": [
"import json\n",
"\n",
"from langchain.schema import HumanMessage\n",
"\n",
"json_schema = {\n",
" \"title\": \"Person\",\n",
" \"description\": \"Identifying information about a person.\",\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"name\": {\"title\": \"Name\", \"description\": \"The person's name\", \"type\": \"string\"},\n",
" \"age\": {\"title\": \"Age\", \"description\": \"The person's age\", \"type\": \"integer\"},\n",
" \"fav_food\": {\n",
" \"title\": \"Fav Food\",\n",
" \"description\": \"The person's favorite food\",\n",
" \"type\": \"string\",\n",
" },\n",
" },\n",
" \"required\": [\"name\", \"age\"],\n",
"}\n",
"\n",
"messages = [\n",
" HumanMessage(\n",
" content=\"Please tell me about a person using the following JSON schema:\"\n",
" ),\n",
" HumanMessage(content=json.dumps(json_schema, indent=2)),\n",
" HumanMessage(\n",
" content=\"Now, considering the schema, tell me about a person named John who is 35 years old and loves pizza.\"\n",
" ),\n",
"]\n",
"\n",
"chat_model_response = chat_model(messages)"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -375,5 +528,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

View File

@@ -0,0 +1,171 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Ollama Functions\n",
"\n",
"This notebook shows how to use an experimental wrapper around Ollama that gives it the same API as OpenAI Functions.\n",
"\n",
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use Mistral.\n",
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
"\n",
"## Setup\n",
"\n",
"Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance.\n",
"\n",
"## Usage\n",
"\n",
"You can initialize OllamaFunctions in a similar way to how you'd initialize a standard ChatOllama instance:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_experimental.llms.ollama_functions import OllamaFunctions\n",
"\n",
"model = OllamaFunctions(model=\"mistral\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can then bind functions defined with JSON Schema parameters and a `function_call` parameter to force the model to call the given function:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"model = model.bind(\n",
" functions=[\n",
" {\n",
" \"name\": \"get_current_weather\",\n",
" \"description\": \"Get the current weather in a given location\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"location\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city and state, \" \"e.g. San Francisco, CA\",\n",
" },\n",
" \"unit\": {\n",
" \"type\": \"string\",\n",
" \"enum\": [\"celsius\", \"fahrenheit\"],\n",
" },\n",
" },\n",
" \"required\": [\"location\"],\n",
" },\n",
" }\n",
" ],\n",
" function_call={\"name\": \"get_current_weather\"},\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Calling a function with this model then results in JSON output matching the provided schema:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_weather', 'arguments': '{\"location\": \"Boston, MA\", \"unit\": \"celsius\"}'}})"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.schema import HumanMessage\n",
"\n",
"model.invoke(\"what is the weather in Boston?\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using for extraction\n",
"\n",
"One useful thing you can do with function calling here is extracting properties from a given input in a structured format:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'Alex', 'height': 5, 'hair_color': 'blonde'},\n",
" {'name': 'Claudia', 'height': 6, 'hair_color': 'brunette'}]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.chains import create_extraction_chain\n",
"\n",
"# Schema\n",
"schema = {\n",
" \"properties\": {\n",
" \"name\": {\"type\": \"string\"},\n",
" \"height\": {\"type\": \"integer\"},\n",
" \"hair_color\": {\"type\": \"string\"},\n",
" },\n",
" \"required\": [\"name\", \"height\"],\n",
"}\n",
"\n",
"# Input\n",
"input = \"\"\"Alex is 5 feet tall. Claudia is 1 feet taller than Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\"\"\"\n",
"\n",
"# Run chain\n",
"llm = OllamaFunctions(model=\"mistral\", temperature=0)\n",
"chain = create_extraction_chain(schema, llm)\n",
"chain.run(input)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,177 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "404758628c7b20f6",
"metadata": {
"collapsed": false
},
"source": [
"# Volc Engine Maas\n",
"\n",
"This notebook provides you with a guide on how to get started with volc engine maas chat models."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2cd2ebd9d023c4d3",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Install the package\n",
"!pip install volcengine"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "51e7f967cb78f5b7",
"metadata": {
"ExecuteTime": {
"end_time": "2023-11-27T10:43:37.131292Z",
"start_time": "2023-11-27T10:43:37.127250Z"
},
"collapsed": false
},
"outputs": [],
"source": [
"from langchain.chat_models import VolcEngineMaasChat\n",
"from langchain.schema import HumanMessage"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "139667d44689f9e0",
"metadata": {
"ExecuteTime": {
"end_time": "2023-11-27T10:43:49.911867Z",
"start_time": "2023-11-27T10:43:49.908329Z"
},
"collapsed": false
},
"outputs": [],
"source": [
"chat = VolcEngineMaasChat(volc_engine_maas_ak=\"your ak\", volc_engine_maas_sk=\"your sk\")"
]
},
{
"cell_type": "markdown",
"id": "e84ebc4feedcc739",
"metadata": {
"collapsed": false
},
"source": [
"or you can set access_key and secret_key in your environment variables\n",
"```bash\n",
"export VOLC_ACCESSKEY=YOUR_AK\n",
"export VOLC_SECRETKEY=YOUR_SK\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "35da18414ad17aa0",
"metadata": {
"ExecuteTime": {
"end_time": "2023-11-27T10:43:53.101852Z",
"start_time": "2023-11-27T10:43:51.741041Z"
},
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": "AIMessage(content='好的,这是一个笑话:\\n\\n为什么鸟儿不会玩电脑游戏\\n\\n因为它们没有翅膀')"
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat([HumanMessage(content=\"给我讲个笑话\")])"
]
},
{
"cell_type": "markdown",
"id": "a55e5a9ed80ec49e",
"metadata": {
"collapsed": false
},
"source": [
"# volc engine maas chat with stream"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "b4e4049980ac68ef",
"metadata": {
"ExecuteTime": {
"end_time": "2023-11-27T10:43:55.120405Z",
"start_time": "2023-11-27T10:43:55.114707Z"
},
"collapsed": false
},
"outputs": [],
"source": [
"chat = VolcEngineMaasChat(\n",
" volc_engine_maas_ak=\"your ak\",\n",
" volc_engine_maas_sk=\"your sk\",\n",
" streaming=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "fe709a4ffb5c811d",
"metadata": {
"ExecuteTime": {
"end_time": "2023-11-27T10:43:58.775294Z",
"start_time": "2023-11-27T10:43:56.799401Z"
},
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": "AIMessage(content='好的,这是一个笑话:\\n\\n三岁的女儿说她会造句了妈妈让她用“年轻”造句女儿说“妈妈减肥一年轻了好几斤”。')"
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat([HumanMessage(content=\"给我讲个笑话\")])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -30,7 +30,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Overwriting discord_chats.txt\n"
"Writing discord_chats.txt\n"
]
}
],
@@ -240,14 +240,14 @@
{
"data": {
"text/plain": [
"[{'messages': [AIMessage(content='Love music! Do you like jazz?', additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': '08/15/2023 11:10 AM\\n'}]}, example=False),\n",
" HumanMessage(content='Yes! Jazz is fantastic. Ever heard this one?\\nWebsite\\nListen to classic jazz track...', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': '08/15/2023 9:27 PM\\n'}]}, example=False),\n",
" AIMessage(content='Indeed! Great choice. 🎷', additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': 'Yesterday at 5:03 AM\\n'}]}, example=False),\n",
" HumanMessage(content='Thanks! How about some virtual sightseeing?\\nWebsite\\nVirtual tour of famous landmarks...', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': 'Yesterday at 5:23 AM\\n'}]}, example=False),\n",
" AIMessage(content=\"Sounds fun! Let's explore.\", additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': 'Today at 2:38 PM\\n'}]}, example=False),\n",
" HumanMessage(content='Enjoy the tour! See you around.', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': 'Today at 2:56 PM\\n'}]}, example=False),\n",
" AIMessage(content='Thank you! Goodbye! 👋', additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': 'Today at 3:00 PM\\n'}]}, example=False),\n",
" HumanMessage(content='Farewell! Happy exploring.', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': 'Today at 3:02 PM\\n'}]}, example=False)]}]"
"[{'messages': [AIMessage(content='Love music! Do you like jazz?', additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': '08/15/2023 11:10 AM\\n'}]}),\n",
" HumanMessage(content='Yes! Jazz is fantastic. Ever heard this one?\\nWebsite\\nListen to classic jazz track...', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': '08/15/2023 9:27 PM\\n'}]}),\n",
" AIMessage(content='Indeed! Great choice. 🎷', additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': 'Yesterday at 5:03 AM\\n'}]}),\n",
" HumanMessage(content='Thanks! How about some virtual sightseeing?\\nWebsite\\nVirtual tour of famous landmarks...', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': 'Yesterday at 5:23 AM\\n'}]}),\n",
" AIMessage(content=\"Sounds fun! Let's explore.\", additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': 'Today at 2:38 PM\\n'}]}),\n",
" HumanMessage(content='Enjoy the tour! See you around.', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': 'Today at 2:56 PM\\n'}]}),\n",
" AIMessage(content='Thank you! Goodbye! 👋', additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': 'Today at 3:00 PM\\n'}]}),\n",
" HumanMessage(content='Farewell! Happy exploring.', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': 'Today at 3:02 PM\\n'}]})]}]"
]
},
"execution_count": 5,
@@ -279,7 +279,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Thank you! Have a wonderful day! 🌟"
"Thank you! Have a great day!"
]
}
],
@@ -317,7 +317,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
"version": "3.11.5"
}
},
"nbformat": 4,

View File

@@ -32,7 +32,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "647f2158-a42e-4634-b283-b8492caf542a",
"metadata": {},
"outputs": [
@@ -91,7 +91,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"id": "a0869bc6",
"metadata": {},
"outputs": [],
@@ -114,7 +114,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 4,
"id": "f61ee277",
"metadata": {},
"outputs": [],
@@ -126,19 +126,19 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 5,
"id": "ec466ad7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content=\"Hi Hermione! How's your summer going so far?\", additional_kwargs={'sender': 'Harry Potter'}, example=False),\n",
" HumanMessage(content=\"Harry! Lovely to hear from you. My summer is going well, though I do miss everyone. I'm spending most of my time going through my books and researching fascinating new topics. How about you?\", additional_kwargs={'sender': 'Hermione Granger'}, example=False),\n",
" HumanMessage(content=\"I miss you all too. The Dursleys are being their usual unpleasant selves but I'm getting by. At least I can practice some spells in my room without them knowing. Let me know if you find anything good in your researching!\", additional_kwargs={'sender': 'Harry Potter'}, example=False)]"
"[HumanMessage(content=\"Hi Hermione! How's your summer going so far?\", additional_kwargs={'sender': 'Harry Potter'}),\n",
" HumanMessage(content=\"Harry! Lovely to hear from you. My summer is going well, though I do miss everyone. I'm spending most of my time going through my books and researching fascinating new topics. How about you?\", additional_kwargs={'sender': 'Hermione Granger'}),\n",
" HumanMessage(content=\"I miss you all too. The Dursleys are being their usual unpleasant selves but I'm getting by. At least I can practice some spells in my room without them knowing. Let me know if you find anything good in your researching!\", additional_kwargs={'sender': 'Harry Potter'})]"
]
},
"execution_count": 9,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -150,7 +150,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 6,
"id": "8a3ee473",
"metadata": {},
"outputs": [],
@@ -162,7 +162,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 7,
"id": "9f41e122",
"metadata": {},
"outputs": [
@@ -172,7 +172,7 @@
"9"
]
},
"execution_count": 12,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -196,7 +196,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 8,
"id": "5a78030d-b757-4bbe-8a6c-841056f46df7",
"metadata": {},
"outputs": [],
@@ -209,7 +209,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 9,
"id": "ff35b028-78bf-4c5b-9ec6-939fe67de7f7",
"metadata": {},
"outputs": [],
@@ -220,19 +220,19 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 10,
"id": "4b11906e-a496-4d01-9f0d-1938c14147bf",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[AIMessage(content=\"Professor Snape, I was hoping I could speak with you for a moment about something that's been concerning me lately.\", additional_kwargs={'sender': 'Harry Potter'}, example=False),\n",
" HumanMessage(content=\"What is it, Potter? I'm quite busy at the moment.\", additional_kwargs={'sender': 'Severus Snape'}, example=False),\n",
" AIMessage(content=\"I apologize for the interruption, sir. I'll be brief. I've noticed some strange activity around the school grounds at night. I saw a cloaked figure lurking near the Forbidden Forest last night. I'm worried someone may be plotting something sinister.\", additional_kwargs={'sender': 'Harry Potter'}, example=False)]"
"[AIMessage(content=\"Professor Snape, I was hoping I could speak with you for a moment about something that's been concerning me lately.\", additional_kwargs={'sender': 'Harry Potter'}),\n",
" HumanMessage(content=\"What is it, Potter? I'm quite busy at the moment.\", additional_kwargs={'sender': 'Severus Snape'}),\n",
" AIMessage(content=\"I apologize for the interruption, sir. I'll be brief. I've noticed some strange activity around the school grounds at night. I saw a cloaked figure lurking near the Forbidden Forest last night. I'm worried someone may be plotting something sinister.\", additional_kwargs={'sender': 'Harry Potter'})]"
]
},
"execution_count": 19,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -253,7 +253,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 11,
"id": "21372331",
"metadata": {},
"outputs": [],
@@ -263,7 +263,7 @@
},
{
"cell_type": "code",
"execution_count": 38,
"execution_count": 12,
"id": "92c5ae7a",
"metadata": {},
"outputs": [
@@ -282,7 +282,7 @@
},
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 13,
"id": "dfcbd181",
"metadata": {
"scrolled": true
@@ -299,7 +299,7 @@
" 'content': \"I apologize for the interruption, sir. I'll be brief. I've noticed some strange activity around the school grounds at night. I saw a cloaked figure lurking near the Forbidden Forest last night. I'm worried someone may be plotting something sinister.\"}]"
]
},
"execution_count": 33,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -321,7 +321,7 @@
},
{
"cell_type": "code",
"execution_count": 42,
"execution_count": 14,
"id": "13cd290a-b1e9-4686-bb5e-d99de8b8612b",
"metadata": {},
"outputs": [
@@ -331,7 +331,7 @@
"100"
]
},
"execution_count": 42,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -364,7 +364,7 @@
},
{
"cell_type": "code",
"execution_count": 43,
"execution_count": 15,
"id": "95ce3f63-3c80-44b2-9060-534ad74e16fa",
"metadata": {},
"outputs": [],
@@ -374,7 +374,7 @@
},
{
"cell_type": "code",
"execution_count": 58,
"execution_count": 16,
"id": "ab9e28eb",
"metadata": {},
"outputs": [
@@ -382,7 +382,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"File file-zCyNBeg4snpbBL7VkvsuhCz8 ready afer 30.55 seconds.\n"
"File file-ULumAXLEFw3vB6bb9uy6DNVC ready after 0.00 seconds.\n"
]
}
],
@@ -399,16 +399,16 @@
" my_file.write((json.dumps({\"messages\": m}) + \"\\n\").encode(\"utf-8\"))\n",
"\n",
"my_file.seek(0)\n",
"training_file = openai.File.create(file=my_file, purpose=\"fine-tune\")\n",
"training_file = openai.files.create(file=my_file, purpose=\"fine-tune\")\n",
"\n",
"# OpenAI audits each training file for compliance reasons.\n",
"# This make take a few minutes\n",
"status = openai.File.retrieve(training_file.id).status\n",
"status = openai.files.retrieve(training_file.id).status\n",
"start_time = time.time()\n",
"while status != \"processed\":\n",
" print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
" time.sleep(5)\n",
" status = openai.File.retrieve(training_file.id).status\n",
" status = openai.files.retrieve(training_file.id).status\n",
"print(f\"File {training_file.id} ready after {time.time() - start_time:.2f} seconds.\")"
]
},
@@ -422,12 +422,12 @@
},
{
"cell_type": "code",
"execution_count": 59,
"execution_count": 17,
"id": "3f451425",
"metadata": {},
"outputs": [],
"source": [
"job = openai.FineTuningJob.create(\n",
"job = openai.fine_tuning.jobs.create(\n",
" training_file=training_file.id,\n",
" model=\"gpt-3.5-turbo\",\n",
")"
@@ -443,7 +443,7 @@
},
{
"cell_type": "code",
"execution_count": 60,
"execution_count": 18,
"id": "bac1637a-c087-4523-ade1-c47f9bf4c6f4",
"metadata": {},
"outputs": [
@@ -451,23 +451,23 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Status=[running]... 908.87s\r"
"Status=[running]... 874.29s. 56.93s\r"
]
}
],
"source": [
"status = openai.FineTuningJob.retrieve(job.id).status\n",
"status = openai.fine_tuning.jobs.retrieve(job.id).status\n",
"start_time = time.time()\n",
"while status != \"succeeded\":\n",
" print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
" time.sleep(5)\n",
" job = openai.FineTuningJob.retrieve(job.id)\n",
" job = openai.fine_tuning.jobs.retrieve(job.id)\n",
" status = job.status"
]
},
{
"cell_type": "code",
"execution_count": 66,
"execution_count": 19,
"id": "535895e1-bc69-40e5-82ed-e24ed2baeeee",
"metadata": {},
"outputs": [
@@ -475,7 +475,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"ft:gpt-3.5-turbo-0613:personal::7rDwkaOq\n"
"ft:gpt-3.5-turbo-0613:personal::8QnAzWMr\n"
]
}
],
@@ -495,7 +495,7 @@
},
{
"cell_type": "code",
"execution_count": 67,
"execution_count": 20,
"id": "3925d60d",
"metadata": {},
"outputs": [],
@@ -510,7 +510,7 @@
},
{
"cell_type": "code",
"execution_count": 69,
"execution_count": 21,
"id": "7190cf2e-ab34-4ceb-bdad-45f24f069c29",
"metadata": {},
"outputs": [],
@@ -529,7 +529,7 @@
},
{
"cell_type": "code",
"execution_count": 72,
"execution_count": 22,
"id": "f02057e9-f914-40b1-9c9d-9432ff594b98",
"metadata": {},
"outputs": [
@@ -537,7 +537,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"The usual - Potions, Transfiguration, Defense Against the Dark Arts. What about you?"
"I'm taking Charms, Defense Against the Dark Arts, Herbology, Potions, Transfiguration, and Ancient Runes. How about you?"
]
}
],
@@ -545,14 +545,6 @@
"for tok in chain.stream({\"input\": \"What classes are you taking?\"}):\n",
" print(tok, end=\"\", flush=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "35331503-3cc6-4d64-955e-64afe6b5fef3",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -571,7 +563,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.5"
}
},
"nbformat": 4,

View File

@@ -15,7 +15,7 @@
"1. Create a Dropbox app.\n",
"2. Give the app these scope permissions: `files.metadata.read` and `files.content.read`.\n",
"3. Generate access token: https://www.dropbox.com/developers/apps/create.\n",
"4. `pip install dropbox` (requires `pip install unstructured` for PDF filetype).\n",
"4. `pip install dropbox` (requires `pip install \"unstructured[pdf]\"` for PDF filetype).\n",
"\n",
"## Instructions\n",
"\n",

View File

@@ -0,0 +1,124 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "404758628c7b20f6",
"metadata": {
"collapsed": false
},
"source": [
"# Volc Engine Maas\n",
"\n",
"This notebook provides you with a guide on how to get started with Volc Engine's MaaS llm models."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "946db204b33c2ef7",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Install the package\n",
"!pip install volcengine"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "51e7f967cb78f5b7",
"metadata": {
"ExecuteTime": {
"end_time": "2023-11-27T10:40:26.897649Z",
"start_time": "2023-11-27T10:40:26.552589Z"
},
"collapsed": false
},
"outputs": [],
"source": [
"from langchain.llms import VolcEngineMaasLLM\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "139667d44689f9e0",
"metadata": {
"ExecuteTime": {
"end_time": "2023-11-27T10:40:27.938517Z",
"start_time": "2023-11-27T10:40:27.861324Z"
},
"collapsed": false
},
"outputs": [],
"source": [
"llm = VolcEngineMaasLLM(volc_engine_maas_ak=\"your ak\", volc_engine_maas_sk=\"your sk\")"
]
},
{
"cell_type": "markdown",
"id": "e84ebc4feedcc739",
"metadata": {
"collapsed": false
},
"source": [
"or you can set access_key and secret_key in your environment variables\n",
"```bash\n",
"export VOLC_ACCESSKEY=YOUR_AK\n",
"export VOLC_SECRETKEY=YOUR_SK\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "35da18414ad17aa0",
"metadata": {
"ExecuteTime": {
"end_time": "2023-11-27T10:41:35.528526Z",
"start_time": "2023-11-27T10:41:32.562238Z"
},
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": "'好的,下面是一个笑话:\\n\\n大学暑假我配了隐形眼镜回家给爷爷说我现在配了隐形眼镜。\\n爷爷让我给他看看于是我用小镊子夹了一片给爷爷看。\\n爷爷看完便准备出门边走还边说“真高级啊还真是隐形眼镜”\\n等爷爷出去后我才发现我刚没夹起来'"
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain = PromptTemplate.from_template(\"给我讲个笑话\") | llm | StrOutputParser()\n",
"chain.invoke({})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,321 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# MongoDB Atlas\n",
"\n",
"[MongoDB Atlas](https://www.mongodb.com/) is a document database that can be \n",
"used as a vector databse.\n",
"\n",
"In the walkthrough, we'll demo the `SelfQueryRetriever` with a `MongoDB Atlas` vector store."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Creating a MongoDB Atlas vectorstore\n",
"First we'll want to create a MongoDB Atlas VectorStore and seed it with some data. We've created a small demo set of documents that contain summaries of movies.\n",
"\n",
"NOTE: The self-query retriever requires you to have `lark` installed (`pip install lark`). We also need the `pymongo` package."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"#!pip install lark pymongo"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"OPENAI_API_KEY = \"Use your OpenAI key\"\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.schema import Document\n",
"from langchain.vectorstores import MongoDBAtlasVectorSearch\n",
"from pymongo import MongoClient\n",
"\n",
"CONNECTION_STRING = \"Use your MongoDB Atlas connection string\"\n",
"DB_NAME = \"Name of your MongoDB Atlas database\"\n",
"COLLECTION_NAME = \"Name of your collection in the database\"\n",
"INDEX_NAME = \"Name of a search index defined on the collection\"\n",
"\n",
"MongoClient = MongoClient(CONNECTION_STRING)\n",
"collection = MongoClient[DB_NAME][COLLECTION_NAME]\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"docs = [\n",
" Document(\n",
" page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n",
" metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"action\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n",
" metadata={\"year\": 2010, \"genre\": \"thriller\", \"rating\": 8.2},\n",
" ),\n",
" Document(\n",
" page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n",
" metadata={\"year\": 2019, \"rating\": 8.3, \"genre\": \"drama\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Three men walk into the Zone, three men walk out of the Zone\",\n",
" metadata={\"year\": 1979, \"rating\": 9.9, \"genre\": \"science fiction\"},\n",
" ),\n",
" Document(\n",
" page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n",
" metadata={\"year\": 2006, \"genre\": \"thriller\", \"rating\": 9.0},\n",
" ),\n",
" Document(\n",
" page_content=\"Toys come alive and have a blast doing so\",\n",
" metadata={\"year\": 1995, \"genre\": \"animated\", \"rating\": 9.3},\n",
" ),\n",
"]\n",
"\n",
"vectorstore = MongoDBAtlasVectorSearch.from_documents(\n",
" docs,\n",
" embeddings,\n",
" collection=collection,\n",
" index_name=INDEX_NAME,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, let's create a vector search index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector. Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-search/field-types/knn-vector) to get more details on how to define an Atlas Vector Search index.\n",
"You can name the index `{COLLECTION_NAME}` and create the index on the namespace `{DB_NAME}.{COLLECTION_NAME}`. Finally, write the following definition in the JSON editor on MongoDB Atlas:\n",
"\n",
"```json\n",
"{\n",
" \"mappings\": {\n",
" \"dynamic\": true,\n",
" \"fields\": {\n",
" \"embedding\": {\n",
" \"dimensions\": 1536,\n",
" \"similarity\": \"cosine\",\n",
" \"type\": \"knnVector\"\n",
" },\n",
" \"genre\": {\n",
" \"type\": \"token\"\n",
" },\n",
" \"ratings\": {\n",
" \"type\": \"number\"\n",
" },\n",
" \"year\": {\n",
" \"type\": \"number\"\n",
" }\n",
" }\n",
" }\n",
"}\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Creating our self-querying retriever\n",
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.query_constructor.base import AttributeInfo\n",
"from langchain.llms import OpenAI\n",
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
"\n",
"metadata_field_info = [\n",
" AttributeInfo(\n",
" name=\"genre\",\n",
" description=\"The genre of the movie\",\n",
" type=\"string\",\n",
" ),\n",
" AttributeInfo(\n",
" name=\"year\",\n",
" description=\"The year the movie was released\",\n",
" type=\"integer\",\n",
" ),\n",
" AttributeInfo(\n",
" name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n",
" ),\n",
"]\n",
"document_content_description = \"Brief summary of a movie\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(temperature=0)\n",
"retriever = SelfQueryRetriever.from_llm(\n",
" llm, vectorstore, document_content_description, metadata_field_info, verbose=True\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Testing it out\n",
"And now we can try actually using our retriever!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This example only specifies a relevant query\n",
"retriever.get_relevant_documents(\"What are some movies about dinosaurs\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This example specifies a filter\n",
"retriever.get_relevant_documents(\"What are some highly rated movies (above 9)?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This example only specifies a query and a filter\n",
"retriever.get_relevant_documents(\n",
" \"I want to watch a movie about toys rated higher than 9\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This example specifies a composite filter\n",
"retriever.get_relevant_documents(\n",
" \"What's a highly rated (above or equal 9) thriller film?\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This example specifies a query and composite filter\n",
"retriever.get_relevant_documents(\n",
" \"What's a movie after 1990 but before 2005 that's all about dinosaurs, \\\n",
" and preferably has a lot of action\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Filter k\n",
"\n",
"We can also use the self query retriever to specify `k`: the number of documents to fetch.\n",
"\n",
"We can do this by passing `enable_limit=True` to the constructor."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"retriever = SelfQueryRetriever.from_llm(\n",
" llm,\n",
" vectorstore,\n",
" document_content_description,\n",
" metadata_field_info,\n",
" verbose=True,\n",
" enable_limit=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This example only specifies a relevant query\n",
"retriever.get_relevant_documents(\"What are two movies about dinosaurs?\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,112 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Google Finance\n",
"\n",
"This notebook goes over how to use the Google Finance Tool to get information from the Google Finance page\n",
"\n",
"To get an SerpApi key key, sign up at: https://serpapi.com/users/sign_up.\n",
"\n",
"Then install google-search-results with the command: \n",
"\n",
"pip install google-search-results\n",
"\n",
"Then set the environment variable SERPAPI_API_KEY to your SerpApi key\n",
"\n",
"Or pass the key in as a argument to the wrapper serp_api_key=\"your secret key\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Use the Tool"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install google-search-results"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"from langchain.tools.google_finance import GoogleFinanceQueryRun\n",
"from langchain.utilities.google_finance import GoogleFinanceAPIWrapper\n",
"\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"\"\n",
"tool = GoogleFinanceQueryRun(api_wrapper=GoogleFinanceAPIWrapper())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tool.run(\"Google\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Using it with Langchain"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain.llms import OpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
"os.environ[\"SERP_API_KEY\"] = \"\"\n",
"llm = OpenAI()\n",
"tools = load_tools([\"google-scholar\", \"google-finance\"], llm=llm)\n",
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")\n",
"agent.run(\"what is google's stock\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,109 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Google Trends\n",
"\n",
"This notebook goes over how to use the Google Trends Tool to fetch trends information.\n",
"\n",
"First, you need to sign up for an `SerpApi key` key at: https://serpapi.com/users/sign_up.\n",
"\n",
"Then you must install `google-search-results` with the command:\n",
"\n",
"`pip install google-search-results`\n",
"\n",
"Then you will need to set the environment variable `SERPAPI_API_KEY` to your `SerpApi key`\n",
"\n",
"[Alternatively you can pass the key in as a argument to the wrapper `serp_api_key=\"your secret key\"`]\n",
"\n",
"## Use the Tool"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: google-search-results in c:\\python311\\lib\\site-packages (2.4.2)\n",
"Requirement already satisfied: requests in c:\\python311\\lib\\site-packages (from google-search-results) (2.31.0)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in c:\\python311\\lib\\site-packages (from requests->google-search-results) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in c:\\python311\\lib\\site-packages (from requests->google-search-results) (3.4)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\python311\\lib\\site-packages (from requests->google-search-results) (2.1.0)\n",
"Requirement already satisfied: certifi>=2017.4.17 in c:\\python311\\lib\\site-packages (from requests->google-search-results) (2023.7.22)\n"
]
}
],
"source": [
"!pip install google-search-results"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"from langchain.tools.google_trends import GoogleTrendsQueryRun\n",
"from langchain.utilities.google_trends import GoogleTrendsAPIWrapper\n",
"\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"\"\n",
"tool = GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper())"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Query: Water\\nDate From: Nov 20, 2022\\nDate To: Nov 11, 2023\\nMin Value: 72\\nMax Value: 100\\nAverage Value: 84.25490196078431\\nPrecent Change: 5.555555555555555%\\nTrend values: 72, 72, 74, 77, 86, 80, 82, 88, 79, 79, 85, 82, 81, 84, 83, 77, 80, 85, 82, 80, 88, 84, 82, 84, 83, 85, 92, 92, 100, 92, 100, 96, 94, 95, 94, 98, 96, 84, 86, 84, 85, 83, 83, 76, 81, 85, 78, 77, 81, 75, 76\\nRising Related Queries: avatar way of water, avatar the way of water, owala water bottle, air up water bottle, lake mead water level\\nTop Related Queries: water park, water bottle, water heater, water filter, water tank, water bill, water world, avatar way of water, avatar the way of water, coconut water, deep water, water cycle, water dispenser, water purifier, water pollution, distilled water, hot water heater, water cooler, sparkling water, american water, micellar water, density of water, tankless water heater, tonic water, water jug'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.run(\"Water\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.16 ('langchain')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "15e58ce194949b77a891bd4339ce3d86a9bd138e905926019517993f97db9e6c"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,262 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Reddit Search \n",
"\n",
"In this notebook, we learn how the Reddit search tool works. \n",
"First make sure that you have installed praw with the command below: "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "shellscript"
}
},
"outputs": [],
"source": [
"!pip install praw"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then you need to set you need to set up the proper API keys and environment variables. You would need to create a Reddit user account and get credentials. So, create a Reddit user account by going to https://www.reddit.com and signing up. \n",
"Then get your credentials by going to https://www.reddit.com/prefs/apps and creating an app. \n",
"You should have your client_id and secret from creating the app. Now, you can paste those strings in client_id and client_secret variable. \n",
"Note: You can put any string for user_agent "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"client_id = \"\"\n",
"client_secret = \"\"\n",
"user_agent = \"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.reddit_search.tool import RedditSearchRun\n",
"from langchain.utilities.reddit_search import RedditSearchAPIWrapper\n",
"\n",
"search = RedditSearchRun(\n",
" api_wrapper=RedditSearchAPIWrapper(\n",
" reddit_client_id=client_id,\n",
" reddit_client_secret=client_secret,\n",
" reddit_user_agent=user_agent,\n",
" )\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can then set your queries for example, what subreddit you want to query, how many posts you want to be returned, how you would like the result to be sorted etc."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.reddit_search.tool import RedditSearchSchema\n",
"\n",
"search_params = RedditSearchSchema(\n",
" query=\"beginner\", sort=\"new\", time_filter=\"week\", subreddit=\"python\", limit=\"2\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally run the search and get your results"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"result = search.run(tool_input=search_params.dict())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(result)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here is an example of printing the result. \n",
"Note: You may get different output depending on the newest post in the subreddit but the formatting should be similar."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"> Searching r/python found 2 posts:\n",
"> Post Title: 'Setup Github Copilot in Visual Studio Code'\n",
"> User: Feisty-Recording-715\n",
"> Subreddit: r/Python:\n",
"> Text body: 🛠️ This tutorial is perfect for beginners looking to strengthen their understanding of version control or for experienced developers seeking a quick reference for GitHub setup in Visual Studio Code.\n",
">\n",
">🎓 By the end of this video, you'll be equipped with the skills to confidently manage your codebase, collaborate with others, and contribute to open-source projects on GitHub.\n",
">\n",
">\n",
">Video link: https://youtu.be/IdT1BhrSfdo?si=mV7xVpiyuhlD8Zrw\n",
">\n",
">Your feedback is welcome\n",
"> Post URL: https://www.reddit.com/r/Python/comments/1823wr7/setup_github_copilot_in_visual_studio_code/\n",
"> Post Category: N/A.\n",
"> Score: 0\n",
">\n",
">Post Title: 'A Chinese Checkers game made with pygame and PySide6, with custom bots support'\n",
">User: HenryChess\n",
">Subreddit: r/Python:\n",
"> Text body: GitHub link: https://github.com/henrychess/pygame-chinese-checkers\n",
">\n",
">I'm not sure if this counts as beginner or intermediate. I think I'm still in the beginner zone, so I flair it as beginner.\n",
">\n",
">This is a Chinese Checkers (aka Sternhalma) game for 2 to 3 players. The bots I wrote are easy to beat, as they're mainly for debugging the game logic part of the code. However, you can write up your own custom bots. There is a guide at the github page.\n",
"> Post URL: https://www.reddit.com/r/Python/comments/181xq0u/a_chinese_checkers_game_made_with_pygame_and/\n",
"> Post Category: N/A.\n",
" > Score: 1\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using tool with an agent chain\n",
"\n",
"Reddit search functionality is also provided as a multi-input tool. In this example, we adapt [existing code from the docs](https://python.langchain.com/docs/modules/agents/how_to/sharedmemory_for_tools), and use ChatOpenAI to create an agent chain with memory. This agent chain is able to pull information from Reddit and use these posts to respond to subsequent input. \n",
"\n",
"To run the example, add your reddit API access information and also get an OpenAI key from the [OpenAI API](https://help.openai.com/en/articles/4936850-where-do-i-find-my-api-key)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Adapted code from https://python.langchain.com/docs/modules/agents/how_to/sharedmemory_for_tools\n",
"\n",
"from langchain.agents import AgentExecutor, StructuredChatAgent, Tool\n",
"from langchain.chains import LLMChain\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.tools.reddit_search.tool import RedditSearchRun\n",
"from langchain.utilities.reddit_search import RedditSearchAPIWrapper\n",
"\n",
"# Provide keys for Reddit\n",
"client_id = \"\"\n",
"client_secret = \"\"\n",
"user_agent = \"\"\n",
"# Provide key for OpenAI\n",
"openai_api_key = \"\"\n",
"\n",
"template = \"\"\"This is a conversation between a human and a bot:\n",
"\n",
"{chat_history}\n",
"\n",
"Write a summary of the conversation for {input}:\n",
"\"\"\"\n",
"\n",
"prompt = PromptTemplate(input_variables=[\"input\", \"chat_history\"], template=template)\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"\n",
"prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
"suffix = \"\"\"Begin!\"\n",
"\n",
"{chat_history}\n",
"Question: {input}\n",
"{agent_scratchpad}\"\"\"\n",
"\n",
"tools = [\n",
" RedditSearchRun(\n",
" api_wrapper=RedditSearchAPIWrapper(\n",
" reddit_client_id=client_id,\n",
" reddit_client_secret=client_secret,\n",
" reddit_user_agent=user_agent,\n",
" )\n",
" )\n",
"]\n",
"\n",
"prompt = StructuredChatAgent.create_prompt(\n",
" prefix=prefix,\n",
" tools=tools,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")\n",
"\n",
"llm = ChatOpenAI(temperature=0, openai_api_key=openai_api_key)\n",
"\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
"agent = StructuredChatAgent(llm_chain=llm_chain, verbose=True, tools=tools)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, verbose=True, memory=memory, tools=tools\n",
")\n",
"\n",
"# Answering the first prompt requires usage of the Reddit search tool.\n",
"agent_chain.run(input=\"What is the newest post on r/langchain for the week?\")\n",
"# Answering the subsequent prompt uses memory.\n",
"agent_chain.run(input=\"Who is the author of the post?\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.11.5 64-bit ('langchaindev')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
},
"vscode": {
"interpreter": {
"hash": "3929050b09828356c9f5ebaf862d05c053d8228eddbc70f990c168e54dd824ba"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -149,6 +149,156 @@
"source": [
"db.max_marginal_relevance_search(query, k=2, fetch_k=10)"
]
},
{
"cell_type": "markdown",
"id": "7dc7ce16-35af-49b7-8009-7eaadb7abbcb",
"metadata": {},
"source": [
"## Example of using secure connection\n",
"In order to run this notebook, it is necessary to run a Vald cluster with secure connection.\n",
"\n",
"Here is an example of a Vald cluster with the following configuration using [Athenz](https://github.com/AthenZ/athenz) authentication.\n",
"\n",
"ingress(TLS) -> [authorization-proxy](https://github.com/AthenZ/authorization-proxy)(Check athenz-role-auth in grpc metadata) -> vald-lb-gateway"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6894c02d-7a86-4600-bab1-f7e9cce79333",
"metadata": {},
"outputs": [],
"source": [
"import grpc\n",
"\n",
"with open(\"test_root_cacert.crt\", \"rb\") as root:\n",
" credentials = grpc.ssl_channel_credentials(root_certificates=root.read())\n",
"\n",
"# Refresh is required for server use\n",
"with open(\".ztoken\", \"rb\") as ztoken:\n",
" token = ztoken.read().strip()\n",
"\n",
"metadata = [(b\"athenz-role-auth\", token)]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cc15c20b-485d-435e-a2ec-c7dcb9db40b5",
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain.embeddings import HuggingFaceEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Vald\n",
"\n",
"raw_documents = TextLoader(\"state_of_the_union.txt\").load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"documents = text_splitter.split_documents(raw_documents)\n",
"embeddings = HuggingFaceEmbeddings()\n",
"\n",
"db = Vald.from_documents(\n",
" documents,\n",
" embeddings,\n",
" host=\"localhost\",\n",
" port=443,\n",
" grpc_use_secure=True,\n",
" grpc_credentials=credentials,\n",
" grpc_metadata=metadata,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "069b96c6-6db2-46ce-a820-24e8933156a0",
"metadata": {},
"outputs": [],
"source": [
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = db.similarity_search(query, grpc_metadata=metadata)\n",
"docs[0].page_content"
]
},
{
"cell_type": "markdown",
"id": "8327accb-6776-4a20-a325-b5da92e3a049",
"metadata": {},
"source": [
"### Similarity search by vector"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0ab2a97-83e4-490d-81a5-8aaa032d8811",
"metadata": {},
"outputs": [],
"source": [
"embedding_vector = embeddings.embed_query(query)\n",
"docs = db.similarity_search_by_vector(embedding_vector, grpc_metadata=metadata)\n",
"docs[0].page_content"
]
},
{
"cell_type": "markdown",
"id": "f3f987bd-512e-4e29-acb3-e110e74b51a2",
"metadata": {},
"source": [
"### Similarity search with score"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "88dd39bc-8764-4a8c-ac89-06e2341aefa6",
"metadata": {},
"outputs": [],
"source": [
"docs_and_scores = db.similarity_search_with_score(query, grpc_metadata=metadata)\n",
"docs_and_scores[0]"
]
},
{
"cell_type": "markdown",
"id": "fef1bd41-484e-4845-88a9-c7f504068db0",
"metadata": {},
"source": [
"### Maximal Marginal Relevance Search (MMR)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6cf08477-87b0-41ac-9536-52dec1c5d67f",
"metadata": {},
"outputs": [],
"source": [
"retriever = db.as_retriever(\n",
" search_kwargs={\"search_type\": \"mmr\", \"grpc_metadata\": metadata}\n",
")\n",
"retriever.get_relevant_documents(query, grpc_metadata=metadata)"
]
},
{
"cell_type": "markdown",
"id": "f994fa57-53e4-4fe6-9418-59a5136c6fe8",
"metadata": {},
"source": [
"Or:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2111ce42-07c7-4ccc-bdbf-459165e3a410",
"metadata": {},
"outputs": [],
"source": [
"db.max_marginal_relevance_search(query, k=2, fetch_k=10, grpc_metadata=metadata)"
]
}
],
"metadata": {
@@ -167,7 +317,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.4"
}
},
"nbformat": 4,

View File

@@ -0,0 +1,229 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Pandas DataFrame Parser\n",
"\n",
"A Pandas DataFrame is a popular data structure in the Python programming language, commonly used for data manipulation and analysis. It provides a comprehensive set of tools for working with structured data, making it a versatile option for tasks such as data cleaning, transformation, and analysis.\n",
"\n",
"This output parser allows users to specify an arbitrary Pandas DataFrame and query LLMs for data in the form of a formatted dictionary that extracts data from the corresponding DataFrame. Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate a well-formed query as per the defined format instructions.\n",
"\n",
"Use Pandas' DataFrame object to declare the DataFrame you wish to perform queries on."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pprint\n",
"from typing import Any, Dict\n",
"\n",
"import pandas as pd\n",
"from langchain.llms import OpenAI\n",
"from langchain.output_parsers import PandasDataFrameOutputParser\n",
"from langchain.prompts import PromptTemplate"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_name = \"text-davinci-003\"\n",
"temperature = 0.5\n",
"model = OpenAI(model_name=model_name, temperature=temperature)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Solely for documentation purposes.\n",
"def format_parser_output(parser_output: Dict[str, Any]) -> None:\n",
" for key in parser_output.keys():\n",
" parser_output[key] = parser_output[key].to_dict()\n",
" return pprint.PrettyPrinter(width=4, compact=True).pprint(parser_output)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Define your desired Pandas DataFrame.\n",
"df = pd.DataFrame(\n",
" {\n",
" \"num_legs\": [2, 4, 8, 0],\n",
" \"num_wings\": [2, 0, 0, 0],\n",
" \"num_specimen_seen\": [10, 2, 1, 8],\n",
" }\n",
")\n",
"\n",
"# Set up a parser + inject instructions into the prompt template.\n",
"parser = PandasDataFrameOutputParser(dataframe=df)"
]
},
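{
"cell_type": "markdown",
"metadata": {},
"source": [
"To see exactly what the parser will inject into the prompt, you can print its format instructions. (The output is omitted here, since the exact wording may vary between LangChain versions.)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Inspect the instructions that will fill the {format_instructions} slot below.\n",
"print(parser.get_format_instructions())"
]
},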
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"LLM Output: column:num_wings\n",
"{'num_wings': {0: 2,\n",
" 1: 0,\n",
" 2: 0,\n",
" 3: 0}}\n"
]
}
],
"source": [
"# Here's an example of a column operation being performed.\n",
"df_query = \"Retrieve the num_wings column.\"\n",
"\n",
"# Set up the prompt.\n",
"prompt = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
" input_variables=[\"query\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()},\n",
")\n",
"\n",
"_input = prompt.format_prompt(query=df_query)\n",
"output = model(_input.to_string())\n",
"print(\"LLM Output:\", output)\n",
"parser_output = parser.parse(output)\n",
"\n",
"format_parser_output(parser_output)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"LLM Output: row:1\n",
"{'1': {'num_legs': 4,\n",
" 'num_specimen_seen': 2,\n",
" 'num_wings': 0}}\n"
]
}
],
"source": [
"# Here's an example of a row operation being performed.\n",
"df_query = \"Retrieve the first row.\"\n",
"\n",
"# Set up the prompt.\n",
"prompt = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
" input_variables=[\"query\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()},\n",
")\n",
"\n",
"_input = prompt.format_prompt(query=df_query)\n",
"output = model(_input.to_string())\n",
"print(\"LLM Output:\", output)\n",
"parser_output = parser.parse(output)\n",
"\n",
"format_parser_output(parser_output)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"LLM Output: mean:num_legs[1..3]\n"
]
},
{
"data": {
"text/plain": [
"{'mean': 4.0}"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Here's an example of a random Pandas DataFrame operation limiting the number of rows\n",
"df_query = \"Retrieve the average of the num_legs column from rows 1 to 3.\"\n",
"\n",
"# Set up the prompt.\n",
"prompt = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
" input_variables=[\"query\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()},\n",
")\n",
"\n",
"_input = prompt.format_prompt(query=df_query)\n",
"output = model(_input.to_string())\n",
"print(\"LLM Output:\", output)\n",
"parser.parse(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Here's an example of a poorly formatted query\n",
"df_query = \"Retrieve the mean of the num_fingers column.\"\n",
"\n",
"# Set up the prompt.\n",
"prompt = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
" input_variables=[\"query\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()},\n",
")\n",
"\n",
"_input = prompt.format_prompt(query=df_query)\n",
"output = model(_input.to_string()) # Expected Output: \"Invalid column: num_fingers\".\n",
"print(\"LLM Output:\", output)\n",
"parser.parse(output) # Expected Output: Will raise an OutputParserException."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -53,5 +53,5 @@ python3.11 scripts/copy_templates.py
cp ../cookbook/README.md src/pages/cookbook.mdx
cp ../.github/CONTRIBUTING.md docs/contributing.md
wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md
-nbdoc_build --srcdir docs
+nbdoc_build --srcdir docs --pause 0
python3.11 scripts/generate_api_reference_links.py

View File

@@ -13,12 +13,15 @@ def default(obj: Any) -> Any:
         return to_json_not_implemented(obj)
-def dumps(obj: Any, *, pretty: bool = False) -> str:
+def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
     """Return a json string representation of an object."""
+    if "default" in kwargs:
+        raise ValueError("`default` should not be passed to dumps")
     if pretty:
-        return json.dumps(obj, default=default, indent=2)
+        indent = kwargs.pop("indent", 2)
+        return json.dumps(obj, default=default, indent=indent, **kwargs)
     else:
-        return json.dumps(obj, default=default)
+        return json.dumps(obj, default=default, **kwargs)
 def dumpd(obj: Any) -> Dict[str, Any]:

View File

@@ -67,13 +67,27 @@ class BasePromptTemplate(RunnableSerializable[Dict, PromptValue], ABC):
**{k: (self.input_types.get(k, str), None) for k in self.input_variables},
)
+    def _format_prompt_with_error_handling(self, inner_input: Dict) -> PromptValue:
+        try:
+            input_dict = {key: inner_input[key] for key in self.input_variables}
+        except TypeError as e:
+            raise TypeError(
+                f"Expected mapping type as input to {self.__class__.__name__}. "
+                f"Received {type(inner_input)}."
+            ) from e
+        except KeyError as e:
+            raise KeyError(
+                f"Input to {self.__class__.__name__} is missing variable {e}. "
+                f" Expected: {self.input_variables}"
+                f" Received: {list(inner_input.keys())}"
+            ) from e
+        return self.format_prompt(**input_dict)
     def invoke(
         self, input: Dict, config: Optional[RunnableConfig] = None
     ) -> PromptValue:
         return self._call_with_config(
-            lambda inner_input: self.format_prompt(
-                **{key: inner_input[key] for key in self.input_variables}
-            ),
+            self._format_prompt_with_error_handling,
             input,
             config,
             run_type="prompt",

View File

@@ -22,6 +22,7 @@ from langchain_core.utils.utils import (
raise_for_status_with_text,
xor_args,
)
from langchain_core.utils.env import get_from_dict_or_env, get_from_env
__all__ = [
"StrictFormatter",
@@ -39,4 +40,6 @@ __all__ = [
"xor_args",
"try_load_from_hub",
"build_extra_kwargs",
"get_from_dict_or_env",
"get_from_env",
]

View File

@@ -1,6 +1,7 @@
from __future__ import annotations
import os
from typing import Any, Dict, Optional
def env_var_is_set(env_var: str) -> bool:
@@ -18,3 +19,26 @@ def env_var_is_set(env_var: str) -> bool:
"false",
"False",
)
def get_from_dict_or_env(
data: Dict[str, Any], key: str, env_key: str, default: Optional[str] = None
) -> str:
"""Get a value from a dictionary or an environment variable."""
if key in data and data[key]:
return data[key]
else:
return get_from_env(key, env_key, default=default)
def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
"""Get a value from a dictionary or an environment variable."""
if env_key in os.environ and os.environ[env_key]:
return os.environ[env_key]
elif default is not None:
return default
else:
raise ValueError(
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
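# Usage sketch (hypothetical key names): prefer an explicit value in the dict,
# fall back to the environment, and raise a descriptive ValueError otherwise:
#   api_key = get_from_dict_or_env(values, "my_api_key", "MY_API_KEY")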

View File

@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-core"
version = "0.0.7"
version = "0.0.8"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"

View File

@@ -0,0 +1,141 @@
import json
from typing import Any, Dict, List, Optional
from langchain.chat_models.ollama import ChatOllama
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.prompts import SystemMessagePromptTemplate
from langchain_experimental.pydantic_v1 import root_validator
DEFAULT_SYSTEM_TEMPLATE = """You have access to the following tools:
{tools}
You must always select one of the above tools and respond with only a JSON object matching the following schema:
{{
"tool": <name of the selected tool>,
"tool_input": <parameters for the selected tool, matching the tool's JSON schema>
}}
""" # noqa: E501
DEFAULT_RESPONSE_FUNCTION = {
"name": "__conversational_response",
"description": (
"Respond conversationally if no other tools should be called for a given query."
),
"parameters": {
"type": "object",
"properties": {
"response": {
"type": "string",
"description": "Conversational response to the user.",
},
},
"required": ["response"],
},
}
class OllamaFunctions(BaseChatModel):
llm: ChatOllama
tool_system_prompt_template: str
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
values["llm"] = values.get("llm") or ChatOllama(**values, format="json")
values["tool_system_prompt_template"] = (
values.get("tool_system_prompt_template") or DEFAULT_SYSTEM_TEMPLATE
)
return values
@property
def model(self) -> BaseChatModel:
"""For backwards compatibility."""
return self.llm
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
functions = kwargs.get("functions", [])
if "function_call" in kwargs:
functions = [
fn for fn in functions if fn["name"] == kwargs["function_call"]["name"]
]
if not functions:
raise ValueError(
'If "function_call" is specified, you must also pass a matching \
function in "functions".'
)
del kwargs["function_call"]
elif not functions:
functions.append(DEFAULT_RESPONSE_FUNCTION)
system_message_prompt_template = SystemMessagePromptTemplate.from_template(
self.tool_system_prompt_template
)
system_message = system_message_prompt_template.format(
tools=json.dumps(functions, indent=2)
)
if "functions" in kwargs:
del kwargs["functions"]
response_message = self.llm.predict_messages(
[system_message] + messages, stop=stop, callbacks=run_manager, **kwargs
)
chat_generation_content = response_message.content
if not isinstance(chat_generation_content, str):
raise ValueError("OllamaFunctions does not support non-string output.")
try:
parsed_chat_result = json.loads(chat_generation_content)
except json.JSONDecodeError:
raise ValueError(
f'"{self.llm.model}" did not respond with valid JSON. Please try again.'
)
called_tool_name = parsed_chat_result["tool"]
called_tool_arguments = parsed_chat_result["tool_input"]
called_tool = next(
(fn for fn in functions if fn["name"] == called_tool_name), None
)
if called_tool is None:
raise ValueError(
f"Failed to parse a function call from {self.llm.model} \
output: {chat_generation_content}"
)
if called_tool["name"] == DEFAULT_RESPONSE_FUNCTION["name"]:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content=called_tool_arguments["response"],
)
)
]
)
response_message_with_functions = AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": called_tool_name,
"arguments": json.dumps(called_tool_arguments)
if called_tool_arguments
else "",
},
},
)
return ChatResult(
generations=[ChatGeneration(message=response_message_with_functions)]
)
@property
def _llm_type(self) -> str:
return "ollama_functions"

View File

@@ -1642,7 +1642,7 @@ files = [
[[package]]
name = "langchain"
version = "0.0.342"
version = "0.0.343"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -1665,14 +1665,14 @@ SQLAlchemy = ">=1.4,<3"
tenacity = "^8.1.0"
[package.extras]
all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amadeus (>=8.1.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.9,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=4,<5)", "deeplake (>=3.8.3,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "librosa (>=0.10.0.post2,<0.11.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=1.2.4,<2.0.0)", "momento (>=1.13.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<4)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "python-arango (>=7.5.9,<8.0.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.6.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amadeus (>=8.1.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.9,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=4,<5)", "deeplake (>=3.8.3,<4.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "librosa (>=0.10.0.post2,<0.11.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=1.2.4,<2.0.0)", "momento (>=1.13.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<4)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "python-arango (>=7.5.9,<8.0.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.6.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"]
clarifai = ["clarifai (>=9.1.0)"]
cli = ["typer (>=0.9.0,<0.10.0)"]
cohere = ["cohere (>=4,<5)"]
docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
embeddings = ["sentence-transformers (>=2,<3)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.6.0,<0.7.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.6.0,<0.7.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
javascript = ["esprima (>=4.0.1,<5.0.0)"]
llms = ["clarifai (>=9.1.0)", "cohere (>=4,<5)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
openai = ["openai (<2)", "tiktoken (>=0.3.2,<0.6.0)"]
@@ -4931,4 +4931,4 @@ extended-testing = ["faker", "presidio-analyzer", "presidio-anonymizer", "senten
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "42fe69b21c0dba3a3550be5a8ad127db8aa77c1d357ac5e39b3b426561c8dc0b"
content-hash = "5ea902253757caa8e1708b13096e13bdd16931a20c4ea0e402af5dfe1c8a30ac"

View File

@@ -10,6 +10,7 @@ repository = "https://github.com/langchain-ai/langchain"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = ">=0.0.7,<0.1"
langchain = ">=0.0.342,<0.1"
presidio-anonymizer = {version = "^2.2.33", optional = true}
presidio-analyzer = {version = "^2.2.33", optional = true}

View File

@@ -0,0 +1,49 @@
"""Test OllamaFunctions"""
import unittest
from langchain.chat_models.ollama import ChatOllama
from langchain_experimental.llms.ollama_functions import OllamaFunctions
class TestOllamaFunctions(unittest.TestCase):
"""
Test OllamaFunctions
"""
def test_default_ollama_functions(self) -> None:
base_model = OllamaFunctions(model="mistral")
self.assertIsInstance(base_model.model, ChatOllama)
# bind functions
model = base_model.bind(
functions=[
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, "
"e.g. San Francisco, CA",
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
},
},
"required": ["location"],
},
}
],
function_call={"name": "get_current_weather"},
)
res = model.invoke("What's the weather in San Francisco?")
function_call = res.additional_kwargs.get("function_call")
assert function_call
self.assertEqual(function_call.get("name"), "get_current_weather")

View File

@@ -0,0 +1,264 @@
from __future__ import annotations
import importlib
from typing import (
Any,
AsyncIterator,
Dict,
Iterable,
List,
Mapping,
Sequence,
Union,
overload,
)
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from typing_extensions import Literal
async def aenumerate(
iterable: AsyncIterator[Any], start: int = 0
) -> AsyncIterator[tuple[int, Any]]:
"""Async version of enumerate function."""
i = start
async for x in iterable:
yield i, x
i += 1
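# Example (sketch): pair each streamed chunk with its index.
#   async for i, chunk in aenumerate(model.astream(messages)):
#       ...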
def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
"""Convert a dictionary to a LangChain message.
Args:
_dict: The dictionary.
Returns:
The LangChain message.
"""
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
# Fix for Azure, where content can be missing;
# OpenAI also returns None content for tool invocations.
content = _dict.get("content", "") or ""
additional_kwargs: Dict = {}
if _dict.get("function_call"):
additional_kwargs["function_call"] = dict(_dict["function_call"])
if _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = _dict["tool_calls"]
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
elif role == "tool":
return ToolMessage(content=_dict["content"], tool_call_id=_dict["tool_call_id"])
else:
return ChatMessage(content=_dict["content"], role=role)
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
"""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
if "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
# If tool calls only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"content": message.content,
"tool_call_id": message.tool_call_id,
}
else:
raise TypeError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
def convert_openai_messages(messages: Sequence[Dict[str, Any]]) -> List[BaseMessage]:
"""Convert dictionaries representing OpenAI messages to LangChain format.
Args:
messages: List of dictionaries representing OpenAI messages
Returns:
List of LangChain BaseMessage objects.
"""
return [convert_dict_to_message(m) for m in messages]
def _convert_message_chunk_to_delta(chunk: BaseMessageChunk, i: int) -> Dict[str, Any]:
_dict: Dict[str, Any] = {}
if isinstance(chunk, AIMessageChunk):
if i == 0:
# Only shows up in the first chunk
_dict["role"] = "assistant"
if "function_call" in chunk.additional_kwargs:
_dict["function_call"] = chunk.additional_kwargs["function_call"]
# If the first chunk is a function call, the content is None,
# not an empty string and not missing.
if i == 0:
_dict["content"] = None
else:
_dict["content"] = chunk.content
else:
raise ValueError(f"Got unexpected streaming chunk type: {type(chunk)}")
# This only happens at the end of streams, and OpenAI returns as empty dict
if _dict == {"content": ""}:
_dict = {}
return {"choices": [{"delta": _dict}]}
class ChatCompletion:
"""Chat completion."""
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> dict:
...
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> Iterable:
...
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[dict, Iterable]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = model_config.invoke(converted_messages)
return {"choices": [{"message": convert_message_to_dict(result)}]}
else:
return (
_convert_message_chunk_to_delta(c, i)
for i, c in enumerate(model_config.stream(converted_messages))
)
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> dict:
...
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> AsyncIterator:
...
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[dict, AsyncIterator]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = await model_config.ainvoke(converted_messages)
return {"choices": [{"message": convert_message_to_dict(result)}]}
else:
return (
_convert_message_chunk_to_delta(c, i)
async for i, c in aenumerate(model_config.astream(converted_messages))
)
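# Usage sketch, mirroring the openai-python 0.x call shape (assumes OpenAI
# credentials are configured; the model name is illustrative):
#   result = ChatCompletion.create(
#       [{"role": "user", "content": "Hello"}],
#       provider="ChatOpenAI",
#       model="gpt-3.5-turbo",
#   )
#   result["choices"][0]["message"]["content"]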
def _has_assistant_message(session: ChatSession) -> bool:
"""Check if chat session has an assistant message."""
return any([isinstance(m, AIMessage) for m in session["messages"]])
def convert_messages_for_finetuning(
sessions: Iterable[ChatSession],
) -> List[List[dict]]:
"""Convert messages to a list of lists of dictionaries for fine-tuning.
Args:
sessions: The chat sessions.
Returns:
The list of lists of dictionaries.
"""
return [
[convert_message_to_dict(s) for s in session["messages"]]
for session in sessions
if _has_assistant_message(session)
]

View File

@@ -0,0 +1,115 @@
"""Agent toolkits contain integrations with various resources and services.
LangChain has a large ecosystem of integrations with various external resources
like local and remote file systems, APIs and databases.
These integrations allow developers to create versatile applications that combine the
power of LLMs with the ability to access, interact with and manipulate external
resources.
When developing an application, developers should inspect the capabilities and
permissions of the tools that underlie the given agent toolkit, and determine
whether permissions of the given toolkit are appropriate for the application.
See [Security](https://python.langchain.com/docs/security) for more information.
"""
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
from langchain_integrations.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
#from langchain_integrations.agent_toolkits.amadeus.toolkit import AmadeusToolkit
from langchain_integrations.agent_toolkits.azure_cognitive_services import (
AzureCognitiveServicesToolkit,
)
#from langchain_integrations.agent_toolkits.conversational_retrieval.openai_functions import (
# create_conversational_retrieval_agent,
#)
from langchain_integrations.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
from langchain_integrations.agent_toolkits.gmail.toolkit import GmailToolkit
from langchain_integrations.agent_toolkits.jira.toolkit import JiraToolkit
#from langchain_integrations.agent_toolkits.json.base import create_json_agent
#from langchain_integrations.agent_toolkits.json.toolkit import JsonToolkit
from langchain_integrations.agent_toolkits.multion.toolkit import MultionToolkit
#from langchain_integrations.agent_toolkits.nla.toolkit import NLAToolkit
from langchain_integrations.agent_toolkits.office365.toolkit import O365Toolkit
#from langchain_integrations.agent_toolkits.openapi.base import create_openapi_agent
#from langchain_integrations.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain_integrations.agent_toolkits.playwright.toolkit import PlayWrightBrowserToolkit
#from langchain_integrations.agent_toolkits.powerbi.base import create_pbi_agent
#from langchain_integrations.agent_toolkits.powerbi.chat_base import create_pbi_chat_agent
#from langchain_integrations.agent_toolkits.powerbi.toolkit import PowerBIToolkit
#from langchain_integrations.agent_toolkits.spark_sql.base import create_spark_sql_agent
#from langchain_integrations.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain_integrations.agent_toolkits.sql.base import create_sql_agent
from langchain_integrations.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_integrations.agent_toolkits.vectorstore.base import (
create_vectorstore_agent,
create_vectorstore_router_agent,
)
from langchain_integrations.agent_toolkits.vectorstore.toolkit import (
VectorStoreInfo,
VectorStoreRouterToolkit,
VectorStoreToolkit,
)
from langchain_integrations.agent_toolkits.zapier.toolkit import ZapierToolkit
from langchain_integrations.tools.retriever import create_retriever_tool
DEPRECATED_AGENTS = [
"create_csv_agent",
"create_pandas_dataframe_agent",
"create_xorbits_agent",
"create_python_agent",
"create_spark_dataframe_agent",
]
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name in DEPRECATED_AGENTS:
relative_path = as_import_path(Path(__file__).parent, suffix=name)
old_path = "langchain." + relative_path
new_path = "langchain_experimental." + relative_path
raise ImportError(
f"{name} has been moved to langchain experimental. "
"See https://github.com/langchain-ai/langchain/discussions/11680"
"for more information.\n"
f"Please update your import statement from: `{old_path}` to `{new_path}`."
)
raise AttributeError(f"{name} does not exist")
__all__ = [
"AINetworkToolkit",
"AmadeusToolkit",
"AzureCognitiveServicesToolkit",
"FileManagementToolkit",
"GmailToolkit",
"JiraToolkit",
"JsonToolkit",
"MultionToolkit",
"NLAToolkit",
"O365Toolkit",
"OpenAPIToolkit",
"PlayWrightBrowserToolkit",
"PowerBIToolkit",
"SQLDatabaseToolkit",
"SparkSQLToolkit",
"VectorStoreInfo",
"VectorStoreRouterToolkit",
"VectorStoreToolkit",
"ZapierToolkit",
"create_json_agent",
"create_openapi_agent",
"create_pbi_agent",
"create_pbi_chat_agent",
"create_spark_sql_agent",
"create_sql_agent",
"create_vectorstore_agent",
"create_vectorstore_router_agent",
"create_conversational_retrieval_agent",
"create_retriever_tool",
]

View File

@@ -0,0 +1 @@
"""AINetwork toolkit."""

View File

@@ -0,0 +1,53 @@
from __future__ import annotations
from typing import TYPE_CHECKING, List, Literal, Optional
from langchain_core.pydantic_v1 import root_validator
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.ainetwork.app import AINAppOps
from langchain_integrations.tools.ainetwork.owner import AINOwnerOps
from langchain_integrations.tools.ainetwork.rule import AINRuleOps
from langchain_integrations.tools.ainetwork.transfer import AINTransfer
from langchain_integrations.tools.ainetwork.utils import authenticate
from langchain_integrations.tools.ainetwork.value import AINValueOps
if TYPE_CHECKING:
from ain.ain import Ain
class AINetworkToolkit(BaseToolkit):
"""Toolkit for interacting with AINetwork Blockchain.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, or deleting
data associated with this service.
See https://python.langchain.com/docs/security for more information.
"""
network: Optional[Literal["mainnet", "testnet"]] = "testnet"
interface: Optional[Ain] = None
@root_validator(pre=True)
def set_interface(cls, values: dict) -> dict:
if not values.get("interface"):
values["interface"] = authenticate(network=values.get("network", "testnet"))
return values
class Config:
"""Pydantic config."""
validate_all = True
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
AINAppOps(),
AINOwnerOps(),
AINRuleOps(),
AINTransfer(),
AINValueOps(),
]

View File

@@ -0,0 +1,32 @@
from __future__ import annotations
from typing import TYPE_CHECKING, List
from langchain_core.pydantic_v1 import Field
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.amadeus.closest_airport import AmadeusClosestAirport
from langchain_integrations.tools.amadeus.flight_search import AmadeusFlightSearch
from langchain_integrations.tools.amadeus.utils import authenticate
if TYPE_CHECKING:
from amadeus import Client
class AmadeusToolkit(BaseToolkit):
"""Toolkit for interacting with Amadeus which offers APIs for travel search."""
client: Client = Field(default_factory=authenticate)
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
AmadeusClosestAirport(),
AmadeusFlightSearch(),
]

View File

@@ -0,0 +1,33 @@
from __future__ import annotations
import sys
from typing import List
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools.azure_cognitive_services import (
AzureCogsFormRecognizerTool,
AzureCogsImageAnalysisTool,
AzureCogsSpeech2TextTool,
AzureCogsText2SpeechTool,
AzureCogsTextAnalyticsHealthTool,
)
from langchain_core.tools import BaseTool
class AzureCognitiveServicesToolkit(BaseToolkit):
"""Toolkit for Azure Cognitive Services."""
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = [
AzureCogsFormRecognizerTool(),
AzureCogsSpeech2TextTool(),
AzureCogsText2SpeechTool(),
AzureCogsTextAnalyticsHealthTool(),
]
# TODO: Remove check once azure-ai-vision supports MacOS.
if sys.platform.startswith("linux") or sys.platform.startswith("win"):
tools.append(AzureCogsImageAnalysisTool())
return tools

View File

@@ -0,0 +1,15 @@
"""Toolkits for agents."""
from abc import ABC, abstractmethod
from typing import List
from langchain_core.pydantic_v1 import BaseModel
from langchain_integrations.tools import BaseTool
class BaseToolkit(BaseModel, ABC):
"""Base Toolkit representing a collection of related tools."""
@abstractmethod
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""

View File

@@ -0,0 +1,108 @@
from typing import Dict, List
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.clickup.prompt import (
CLICKUP_FOLDER_CREATE_PROMPT,
CLICKUP_GET_ALL_TEAMS_PROMPT,
CLICKUP_GET_FOLDERS_PROMPT,
CLICKUP_GET_LIST_PROMPT,
CLICKUP_GET_SPACES_PROMPT,
CLICKUP_GET_TASK_ATTRIBUTE_PROMPT,
CLICKUP_GET_TASK_PROMPT,
CLICKUP_LIST_CREATE_PROMPT,
CLICKUP_TASK_CREATE_PROMPT,
CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT,
CLICKUP_UPDATE_TASK_PROMPT,
)
from langchain_integrations.tools.clickup.tool import ClickupAction
from langchain_integrations.utilities.clickup import ClickupAPIWrapper
class ClickupToolkit(BaseToolkit):
"""Clickup Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, or deleting
data associated with this service.
See https://python.langchain.com/docs/security for more information.
"""
tools: List[BaseTool] = []
@classmethod
def from_clickup_api_wrapper(
cls, clickup_api_wrapper: ClickupAPIWrapper
) -> "ClickupToolkit":
operations: List[Dict] = [
{
"mode": "get_task",
"name": "Get task",
"description": CLICKUP_GET_TASK_PROMPT,
},
{
"mode": "get_task_attribute",
"name": "Get task attribute",
"description": CLICKUP_GET_TASK_ATTRIBUTE_PROMPT,
},
{
"mode": "get_teams",
"name": "Get Teams",
"description": CLICKUP_GET_ALL_TEAMS_PROMPT,
},
{
"mode": "create_task",
"name": "Create Task",
"description": CLICKUP_TASK_CREATE_PROMPT,
},
{
"mode": "create_list",
"name": "Create List",
"description": CLICKUP_LIST_CREATE_PROMPT,
},
{
"mode": "create_folder",
"name": "Create Folder",
"description": CLICKUP_FOLDER_CREATE_PROMPT,
},
{
"mode": "get_list",
"name": "Get all lists in the space",
"description": CLICKUP_GET_LIST_PROMPT,
},
{
"mode": "get_folders",
"name": "Get all folders in the workspace",
"description": CLICKUP_GET_FOLDERS_PROMPT,
},
{
"mode": "get_spaces",
"name": "Get all spaces in the workspace",
"description": CLICKUP_GET_SPACES_PROMPT,
},
{
"mode": "update_task",
"name": "Update task",
"description": CLICKUP_UPDATE_TASK_PROMPT,
},
{
"mode": "update_task_assignees",
"name": "Update task assignees",
"description": CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT,
},
]
tools = [
ClickupAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=clickup_api_wrapper,
)
for action in operations
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools

View File

@@ -0,0 +1,88 @@
from typing import Any, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain_integrations.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain_openai.chat_model import ChatOpenAI
from langchain_integrations.memory.token_buffer import ConversationTokenBufferMemory
from langchain_core.tools import BaseTool
def _get_default_system_message() -> SystemMessage:
return SystemMessage(
content=(
"Do your best to answer the questions. "
"Feel free to use any tools available to look up "
"relevant information, only if necessary"
)
)
def create_conversational_retrieval_agent(
llm: BaseLanguageModel,
tools: List[BaseTool],
remember_intermediate_steps: bool = True,
memory_key: str = "chat_history",
system_message: Optional[SystemMessage] = None,
verbose: bool = False,
max_token_limit: int = 2000,
**kwargs: Any,
) -> AgentExecutor:
"""A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
verbose: Whether the final AgentExecutor should be verbose.
Defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
"""
if not isinstance(llm, ChatOpenAI):
raise ValueError("Only supported with ChatOpenAI models.")
if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(
memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
)
else:
memory = ConversationTokenBufferMemory(
memory_key=memory_key,
return_messages=True,
output_key="output",
llm=llm,
max_token_limit=max_token_limit,
)
_system_message = system_message or _get_default_system_message()
prompt = OpenAIFunctionsAgent.create_prompt(
system_message=_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
return AgentExecutor(
agent=agent,
tools=tools,
memory=memory,
verbose=verbose,
return_intermediate_steps=remember_intermediate_steps,
**kwargs,
)
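# Usage sketch (assumes an OpenAI key is configured and `my_tools` is a list of
# BaseTool instances, e.g. a retriever tool):
#   llm = ChatOpenAI(temperature=0)
#   executor = create_conversational_retrieval_agent(llm, my_tools, verbose=True)
#   executor({"input": "What did the report say about revenue?"})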

View File

@@ -0,0 +1,3 @@
from langchain_integrations.tools.retriever import create_retriever_tool
__all__ = ["create_retriever_tool"]

View File

@@ -0,0 +1,22 @@
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
here = as_import_path(Path(__file__).parent)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise AttributeError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)

View File

@@ -0,0 +1,7 @@
"""Local file management toolkit."""
from langchain_integrations.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
__all__ = ["FileManagementToolkit"]

View File

@@ -0,0 +1,81 @@
from __future__ import annotations
from typing import List, Optional
from langchain_core.pydantic_v1 import root_validator
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.file_management.copy import CopyFileTool
from langchain_integrations.tools.file_management.delete import DeleteFileTool
from langchain_integrations.tools.file_management.file_search import FileSearchTool
from langchain_integrations.tools.file_management.list_dir import ListDirectoryTool
from langchain_integrations.tools.file_management.move import MoveFileTool
from langchain_integrations.tools.file_management.read import ReadFileTool
from langchain_integrations.tools.file_management.write import WriteFileTool
_FILE_TOOLS = {
# "Type[Runnable[Any, Any]]" has no attribute "__fields__" [attr-defined]
tool_cls.__fields__["name"].default: tool_cls # type: ignore[attr-defined]
for tool_cls in [
CopyFileTool,
DeleteFileTool,
FileSearchTool,
MoveFileTool,
ReadFileTool,
WriteFileTool,
ListDirectoryTool,
]
}
class FileManagementToolkit(BaseToolkit):
"""Toolkit for interacting with local files.
*Security Notice*: This toolkit provides methods to interact with local files.
If providing this toolkit to an agent on an LLM, ensure you scope
the agent's permissions to only include the necessary permissions
to perform the desired operations.
By **default** the agent will have access to all files within
the root dir and will be able to copy, delete, move, read, write,
and list files in that directory.
Consider the following:
- Limit access to particular directories using `root_dir`.
- Use filesystem permissions to restrict access and permissions to only
the files and directories required by the agent.
- Limit the tools available to the agent to only the file operations
necessary for the agent's intended use.
- Sandbox the agent by running it in a container.
See https://python.langchain.com/docs/security for more information.
"""
root_dir: Optional[str] = None
"""If specified, all file operations are made relative to root_dir."""
selected_tools: Optional[List[str]] = None
"""If provided, only provide the selected tools. Defaults to all."""
@root_validator
def validate_tools(cls, values: dict) -> dict:
selected_tools = values.get("selected_tools") or []
for tool_name in selected_tools:
if tool_name not in _FILE_TOOLS:
raise ValueError(
f"File Tool of name {tool_name} not supported."
f" Permitted tools: {list(_FILE_TOOLS)}"
)
return values
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
allowed_tools = self.selected_tools or _FILE_TOOLS.keys()
tools: List[BaseTool] = []
for tool in allowed_tools:
tool_cls = _FILE_TOOLS[tool]
tools.append(tool_cls(root_dir=self.root_dir)) # type: ignore
return tools
__all__ = ["FileManagementToolkit"]

View File

@@ -0,0 +1 @@
"""GitHub Toolkit."""

View File

@@ -0,0 +1,94 @@
"""GitHub Toolkit."""
from typing import Dict, List
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.github.prompt import (
COMMENT_ON_ISSUE_PROMPT,
CREATE_FILE_PROMPT,
CREATE_PULL_REQUEST_PROMPT,
DELETE_FILE_PROMPT,
GET_ISSUE_PROMPT,
GET_ISSUES_PROMPT,
READ_FILE_PROMPT,
UPDATE_FILE_PROMPT,
)
from langchain_integrations.tools.github.tool import GitHubAction
from langchain_integrations.utilities.github import GitHubAPIWrapper
class GitHubToolkit(BaseToolkit):
"""GitHub Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, updating, or
reading underlying data.
For example, this toolkit can be used to create issues, pull requests,
and comments on GitHub.
See [Security](https://python.langchain.com/docs/security) for more information.
"""
tools: List[BaseTool] = []
@classmethod
def from_github_api_wrapper(
cls, github_api_wrapper: GitHubAPIWrapper
) -> "GitHubToolkit":
operations: List[Dict] = [
{
"mode": "get_issues",
"name": "Get Issues",
"description": GET_ISSUES_PROMPT,
},
{
"mode": "get_issue",
"name": "Get Issue",
"description": GET_ISSUE_PROMPT,
},
{
"mode": "comment_on_issue",
"name": "Comment on Issue",
"description": COMMENT_ON_ISSUE_PROMPT,
},
{
"mode": "create_pull_request",
"name": "Create Pull Request",
"description": CREATE_PULL_REQUEST_PROMPT,
},
{
"mode": "create_file",
"name": "Create File",
"description": CREATE_FILE_PROMPT,
},
{
"mode": "read_file",
"name": "Read File",
"description": READ_FILE_PROMPT,
},
{
"mode": "update_file",
"name": "Update File",
"description": UPDATE_FILE_PROMPT,
},
{
"mode": "delete_file",
"name": "Delete File",
"description": DELETE_FILE_PROMPT,
},
]
tools = [
GitHubAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=github_api_wrapper,
)
for action in operations
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
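# Usage sketch (assumes GitHub App credentials are configured in the
# environment as required by GitHubAPIWrapper):
#   github = GitHubAPIWrapper()
#   toolkit = GitHubToolkit.from_github_api_wrapper(github)
#   tools = toolkit.get_tools()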

View File

@@ -0,0 +1 @@
"""GitLab Toolkit."""

View File

@@ -0,0 +1,94 @@
"""GitHub Toolkit."""
from typing import Dict, List
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.gitlab.prompt import (
COMMENT_ON_ISSUE_PROMPT,
CREATE_FILE_PROMPT,
CREATE_PULL_REQUEST_PROMPT,
DELETE_FILE_PROMPT,
GET_ISSUE_PROMPT,
GET_ISSUES_PROMPT,
READ_FILE_PROMPT,
UPDATE_FILE_PROMPT,
)
from langchain_integrations.tools.gitlab.tool import GitLabAction
from langchain_integrations.utilities.gitlab import GitLabAPIWrapper
class GitLabToolkit(BaseToolkit):
"""GitLab Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, updating, or
reading underlying data.
For example, this toolkit can be used to create issues, pull requests,
and comments on GitLab.
See https://python.langchain.com/docs/security for more information.
"""
tools: List[BaseTool] = []
@classmethod
def from_gitlab_api_wrapper(
cls, gitlab_api_wrapper: GitLabAPIWrapper
) -> "GitLabToolkit":
operations: List[Dict] = [
{
"mode": "get_issues",
"name": "Get Issues",
"description": GET_ISSUES_PROMPT,
},
{
"mode": "get_issue",
"name": "Get Issue",
"description": GET_ISSUE_PROMPT,
},
{
"mode": "comment_on_issue",
"name": "Comment on Issue",
"description": COMMENT_ON_ISSUE_PROMPT,
},
{
"mode": "create_pull_request",
"name": "Create Pull Request",
"description": CREATE_PULL_REQUEST_PROMPT,
},
{
"mode": "create_file",
"name": "Create File",
"description": CREATE_FILE_PROMPT,
},
{
"mode": "read_file",
"name": "Read File",
"description": READ_FILE_PROMPT,
},
{
"mode": "update_file",
"name": "Update File",
"description": UPDATE_FILE_PROMPT,
},
{
"mode": "delete_file",
"name": "Delete File",
"description": DELETE_FILE_PROMPT,
},
]
tools = [
GitLabAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=gitlab_api_wrapper,
)
for action in operations
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools

View File

@@ -0,0 +1 @@
"""Gmail toolkit."""

View File

@@ -0,0 +1,58 @@
from __future__ import annotations
from typing import TYPE_CHECKING, List
from langchain_core.pydantic_v1 import Field
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.gmail.create_draft import GmailCreateDraft
from langchain_integrations.tools.gmail.get_message import GmailGetMessage
from langchain_integrations.tools.gmail.get_thread import GmailGetThread
from langchain_integrations.tools.gmail.search import GmailSearch
from langchain_integrations.tools.gmail.send_message import GmailSendMessage
from langchain_integrations.tools.gmail.utils import build_resource_service
if TYPE_CHECKING:
# This is for linting and IDE typehints
from googleapiclient.discovery import Resource
else:
try:
# We do this so pydantic can resolve the types when instantiating
from googleapiclient.discovery import Resource
except ImportError:
pass
SCOPES = ["https://mail.google.com/"]
class GmailToolkit(BaseToolkit):
"""Toolkit for interacting with Gmail.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, or deleting
data associated with this service.
For example, this toolkit can be used to send emails on behalf of the
associated account.
See https://python.langchain.com/docs/security for more information.
"""
api_resource: Resource = Field(default_factory=build_resource_service)
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
GmailCreateDraft(api_resource=self.api_resource),
GmailSendMessage(api_resource=self.api_resource),
GmailSearch(api_resource=self.api_resource),
GmailGetMessage(api_resource=self.api_resource),
GmailGetThread(api_resource=self.api_resource),
]
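# Usage sketch (assumes Gmail API credentials are available locally so that
# build_resource_service() can authenticate):
#   toolkit = GmailToolkit()
#   tools = toolkit.get_tools()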

View File

@@ -0,0 +1 @@
"""Jira Toolkit."""

View File

@@ -0,0 +1,70 @@
from typing import Dict, List
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.jira.prompt import (
JIRA_CATCH_ALL_PROMPT,
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
JIRA_GET_ALL_PROJECTS_PROMPT,
JIRA_ISSUE_CREATE_PROMPT,
JIRA_JQL_PROMPT,
)
from langchain_integrations.tools.jira.tool import JiraAction
from langchain_integrations.utilities.jira import JiraAPIWrapper
class JiraToolkit(BaseToolkit):
"""Jira Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, updating, or
reading underlying data.
See https://python.langchain.com/docs/security for more information.
"""
tools: List[BaseTool] = []
@classmethod
def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
operations: List[Dict] = [
{
"mode": "jql",
"name": "JQL Query",
"description": JIRA_JQL_PROMPT,
},
{
"mode": "get_projects",
"name": "Get Projects",
"description": JIRA_GET_ALL_PROJECTS_PROMPT,
},
{
"mode": "create_issue",
"name": "Create Issue",
"description": JIRA_ISSUE_CREATE_PROMPT,
},
{
"mode": "other",
"name": "Catch all Jira API call",
"description": JIRA_CATCH_ALL_PROMPT,
},
{
"mode": "create_page",
"name": "Create confluence page",
"description": JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
},
]
tools = [
JiraAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=jira_api_wrapper,
)
for action in operations
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools

View File

@@ -0,0 +1 @@
"""Json agent."""

View File

@@ -0,0 +1,49 @@
"""Json agent."""
from typing import Any, Dict, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain_integrations.agent_toolkits.json.toolkit import JsonToolkit
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
def create_json_agent(
llm: BaseLanguageModel,
toolkit: JsonToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = JSON_PREFIX,
suffix: str = JSON_SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct a json agent from an LLM and tools."""
tools = toolkit.get_tools()
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
)
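A minimal sketch wiring `create_json_agent` together; the `JsonSpec` field names and the `OpenAI` import path are assumptions, the latter mirroring the import used elsewhere in this changeset:

```python
from langchain_integrations.agent_toolkits.json.base import create_json_agent
from langchain_integrations.agent_toolkits.json.toolkit import JsonToolkit
from langchain_integrations.tools.json.tool import JsonSpec
from langchain_openai.llm import OpenAI  # import path as used elsewhere in this PR

data = {"info": {"title": "Demo API"}, "paths": {"/pets": {"get": {}}}}
spec = JsonSpec(dict_=data, max_value_length=4000)  # field names assumed

agent_executor = create_json_agent(
    llm=OpenAI(temperature=0),
    toolkit=JsonToolkit(spec=spec),
    verbose=True,
)
agent_executor.run("What is the title of this API?")
```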

View File

@@ -0,0 +1,25 @@
# flake8: noqa
JSON_PREFIX = """You are an agent designed to interact with JSON.
Your goal is to return a final answer by interacting with the JSON.
You have access to the following tools which help you learn more about the JSON you are interacting with.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
Do not make up any information that is not contained in the JSON.
Your input to the tools should be in the form of `data["key"][0]` where `data` is the JSON blob you are interacting with, and the syntax used is Python.
You should only use keys that you know for a fact exist. You must validate that a key exists by seeing it previously when calling `json_spec_list_keys`.
If you have not seen a key in one of those responses, you cannot use it.
You should only add one key at a time to the path. You cannot add multiple keys at once.
If you encounter a "KeyError", go back to the previous key, look at the available keys, and try again.
If the question does not seem to be related to the JSON, just return "I don't know" as the answer.
Always begin your interaction with the `json_spec_list_keys` tool with input "data" to see what keys exist in the JSON.
Note that sometimes the value at a given path is large. In this case, you will get an error "Value is a large dictionary, should explore its keys directly".
In this case, you should ALWAYS follow up by using the `json_spec_list_keys` tool to see what keys exist at that path.
Do not simply refer the user to the JSON or a section of the JSON, as this is not a valid answer. Keep digging until you find the answer and explicitly return it.
"""
JSON_SUFFIX = """Begin!"
Question: {input}
Thought: I should look at the keys that exist in data to see what I have access to
{agent_scratchpad}"""

View File

@@ -0,0 +1,20 @@
from __future__ import annotations
from typing import List
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.json.tool import JsonGetValueTool, JsonListKeysTool, JsonSpec
class JsonToolkit(BaseToolkit):
"""Toolkit for interacting with a JSON spec."""
spec: JsonSpec
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
JsonListKeysTool(spec=self.spec),
JsonGetValueTool(spec=self.spec),
]

View File

@@ -0,0 +1 @@
"""MultiOn Toolkit."""

View File

@@ -0,0 +1,33 @@
"""MultiOn agent."""
from __future__ import annotations
from typing import List
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.multion.close_session import MultionCloseSession
from langchain_integrations.tools.multion.create_session import MultionCreateSession
from langchain_integrations.tools.multion.update_session import MultionUpdateSession
class MultionToolkit(BaseToolkit):
"""Toolkit for interacting with the Browser Agent.
**Security Note**: This toolkit contains tools that interact with the
user's browser via the multion API which grants an agent
access to the user's browser.
Please review the documentation for the multion API to understand
the security implications of using this toolkit.
See https://python.langchain.com/docs/security for more information.
"""
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [MultionCreateSession(), MultionUpdateSession(), MultionCloseSession()]
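Since the tools take no constructor arguments, usage is a one-liner; a sketch (module path assumed, and the MultiOn client itself is assumed to be configured separately):

```python
from langchain_integrations.agent_toolkits.multion.toolkit import MultionToolkit  # path assumed

toolkit = MultionToolkit()
tools = toolkit.get_tools()  # create / update / close MultiOn browser sessions
```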

View File

@@ -0,0 +1,55 @@
"""Tool for interacting with a single API with natural language definition."""
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_integrations.agents.tools import Tool
from langchain_integrations.chains.api.openapi.chain import OpenAPIEndpointChain
from langchain_integrations.tools.openapi.utils.api_models import APIOperation
from langchain_integrations.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_integrations.utilities.requests import Requests
class NLATool(Tool):
"""Natural Language API Tool."""
@classmethod
def from_open_api_endpoint_chain(
cls, chain: OpenAPIEndpointChain, api_title: str
) -> "NLATool":
"""Convert an endpoint chain to an API endpoint tool."""
expanded_name = (
f'{api_title.replace(" ", "_")}.{chain.api_operation.operation_id}'
)
description = (
f"I'm an AI from {api_title}. Instruct what you want,"
" and I'll assist via an API with description:"
f" {chain.api_operation.description}"
)
return cls(name=expanded_name, func=chain.run, description=description)
@classmethod
def from_llm_and_method(
cls,
llm: BaseLanguageModel,
path: str,
method: str,
spec: OpenAPISpec,
requests: Optional[Requests] = None,
verbose: bool = False,
return_intermediate_steps: bool = False,
**kwargs: Any,
) -> "NLATool":
"""Instantiate the tool from the specified path and method."""
api_operation = APIOperation.from_openapi_spec(spec, path, method)
chain = OpenAPIEndpointChain.from_api_operation(
api_operation,
llm,
requests=requests,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
**kwargs,
)
return cls.from_open_api_endpoint_chain(chain, spec.info.title)

View File

@@ -0,0 +1,127 @@
from __future__ import annotations
from typing import Any, List, Optional, Sequence
from langchain_core.language_models import BaseLanguageModel
from langchain_core.pydantic_v1 import Field
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.agent_toolkits.nla.tool import NLATool
from langchain_core.tools import BaseTool
from langchain_integrations.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_integrations.tools.plugin import AIPlugin
from langchain_integrations.utilities.requests import Requests
class NLAToolkit(BaseToolkit):
"""Natural Language API Toolkit.
*Security Note*: This toolkit creates tools that enable making calls
to an Open API compliant API.
The tools created by this toolkit may be able to make GET, POST,
PATCH, PUT, DELETE requests to any of the exposed endpoints on
the API.
Control access to who can use this toolkit.
See https://python.langchain.com/docs/security for more information.
"""
nla_tools: Sequence[NLATool] = Field(...)
"""List of API Endpoint Tools."""
def get_tools(self) -> List[BaseTool]:
"""Get the tools for all the API operations."""
return list(self.nla_tools)
@staticmethod
def _get_http_operation_tools(
llm: BaseLanguageModel,
spec: OpenAPISpec,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> List[NLATool]:
"""Get the tools for all the API operations."""
if not spec.paths:
return []
http_operation_tools = []
for path in spec.paths:
for method in spec.get_methods_for_path(path):
endpoint_tool = NLATool.from_llm_and_method(
llm=llm,
path=path,
method=method,
spec=spec,
requests=requests,
verbose=verbose,
**kwargs,
)
http_operation_tools.append(endpoint_tool)
return http_operation_tools
@classmethod
def from_llm_and_spec(
cls,
llm: BaseLanguageModel,
spec: OpenAPISpec,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> NLAToolkit:
"""Instantiate the toolkit by creating tools for each operation."""
http_operation_tools = cls._get_http_operation_tools(
llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
)
return cls(nla_tools=http_operation_tools)
@classmethod
def from_llm_and_url(
cls,
llm: BaseLanguageModel,
open_api_url: str,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> NLAToolkit:
"""Instantiate the toolkit from an OpenAPI Spec URL"""
spec = OpenAPISpec.from_url(open_api_url)
return cls.from_llm_and_spec(
llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
)
@classmethod
def from_llm_and_ai_plugin(
cls,
llm: BaseLanguageModel,
ai_plugin: AIPlugin,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> NLAToolkit:
"""Instantiate the toolkit from an OpenAPI Spec URL"""
spec = OpenAPISpec.from_url(ai_plugin.api.url)
# TODO: Merge optional Auth information with the `requests` argument
return cls.from_llm_and_spec(
llm=llm,
spec=spec,
requests=requests,
verbose=verbose,
**kwargs,
)
@classmethod
def from_llm_and_ai_plugin_url(
cls,
llm: BaseLanguageModel,
ai_plugin_url: str,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> NLAToolkit:
"""Instantiate the toolkit from an OpenAPI Spec URL"""
plugin = AIPlugin.from_url(ai_plugin_url)
return cls.from_llm_and_ai_plugin(
llm=llm, ai_plugin=plugin, requests=requests, verbose=verbose, **kwargs
)
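A sketch of the most common entry point, `from_llm_and_url`; the spec URL is a placeholder and the import paths are assumptions:

```python
from langchain_integrations.agent_toolkits.nla.toolkit import NLAToolkit  # path assumed
from langchain_openai.llm import OpenAI

llm = OpenAI(temperature=0)
# Builds one NLATool per GET/POST/... operation exposed by the spec.
toolkit = NLAToolkit.from_llm_and_url(llm, "https://example.com/openapi.yaml")
print(len(toolkit.get_tools()))
```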

View File

@@ -0,0 +1 @@
"""Office365 toolkit."""

View File

@@ -0,0 +1,51 @@
from __future__ import annotations
from typing import TYPE_CHECKING, List
from langchain_core.pydantic_v1 import Field
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.office365.create_draft_message import O365CreateDraftMessage
from langchain_integrations.tools.office365.events_search import O365SearchEvents
from langchain_integrations.tools.office365.messages_search import O365SearchEmails
from langchain_integrations.tools.office365.send_event import O365SendEvent
from langchain_integrations.tools.office365.send_message import O365SendMessage
from langchain_integrations.tools.office365.utils import authenticate
if TYPE_CHECKING:
from O365 import Account
class O365Toolkit(BaseToolkit):
"""Toolkit for interacting with Office 365.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, or deleting
data associated with this service.
For example, this toolkit can be used to search through emails and events,
send messages and event invites, and create draft messages.
Please make sure that the permissions given by this toolkit
are appropriate for your use case.
See https://python.langchain.com/docs/security for more information.
"""
account: Account = Field(default_factory=authenticate)
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
O365SearchEvents(),
O365CreateDraftMessage(),
O365SearchEmails(),
O365SendEvent(),
O365SendMessage(),
]
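A hedged sketch (module path assumed; `authenticate()` is assumed to drive the Office 365 OAuth flow, e.g. from client credentials in the environment):

```python
from langchain_integrations.agent_toolkits.office365.toolkit import O365Toolkit  # path assumed

toolkit = O365Toolkit()  # account defaults to authenticate()
print([tool.name for tool in toolkit.get_tools()])
```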

View File

@@ -0,0 +1 @@
"""OpenAPI spec agent."""

View File

@@ -0,0 +1,73 @@
"""OpenAPI spec agent."""
from typing import Any, Dict, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.openapi.prompt import (
OPENAPI_PREFIX,
OPENAPI_SUFFIX,
)
from langchain_integrations.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
def create_openapi_agent(
llm: BaseLanguageModel,
toolkit: OpenAPIToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = OPENAPI_PREFIX,
suffix: str = OPENAPI_SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None,
early_stopping_method: str = "force",
verbose: bool = False,
return_intermediate_steps: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct an OpenAPI agent from an LLM and tools.
*Security Note*: When creating an OpenAPI agent, check the permissions
and capabilities of the underlying toolkit.
For example, the default implementation of OpenAPIToolkit
uses the RequestsToolkit, which contains tools to make arbitrary
network requests against any URL (e.g., GET, POST, PATCH, PUT, DELETE).
Control access to who can use this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
tools = toolkit.get_tools()
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
)
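A minimal sketch wiring the pieces this constructor expects; the local spec path is hypothetical and the `OpenAI` import path is an assumption:

```python
import yaml

from langchain_integrations.agent_toolkits.openapi.base import create_openapi_agent
from langchain_integrations.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain_integrations.tools.json.tool import JsonSpec
from langchain_integrations.utilities.requests import TextRequestsWrapper
from langchain_openai.llm import OpenAI

with open("openapi.yaml") as f:  # hypothetical local spec file
    raw_spec = yaml.safe_load(f)

llm = OpenAI(temperature=0)
toolkit = OpenAPIToolkit.from_llm(llm, JsonSpec(dict_=raw_spec), TextRequestsWrapper())
agent_executor = create_openapi_agent(llm=llm, toolkit=toolkit, verbose=True)
agent_executor.run("What is the base URL of this API?")
```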

View File

@@ -0,0 +1,357 @@
"""Agent that interacts with OpenAPI APIs via a hierarchical planning approach."""
import json
import re
from functools import partial
from typing import Any, Callable, Dict, List, Optional
import yaml
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.openapi.planner_prompt import (
API_CONTROLLER_PROMPT,
API_CONTROLLER_TOOL_DESCRIPTION,
API_CONTROLLER_TOOL_NAME,
API_ORCHESTRATOR_PROMPT,
API_PLANNER_PROMPT,
API_PLANNER_TOOL_DESCRIPTION,
API_PLANNER_TOOL_NAME,
PARSING_DELETE_PROMPT,
PARSING_GET_PROMPT,
PARSING_PATCH_PROMPT,
PARSING_POST_PROMPT,
PARSING_PUT_PROMPT,
REQUESTS_DELETE_TOOL_DESCRIPTION,
REQUESTS_GET_TOOL_DESCRIPTION,
REQUESTS_PATCH_TOOL_DESCRIPTION,
REQUESTS_POST_TOOL_DESCRIPTION,
REQUESTS_PUT_TOOL_DESCRIPTION,
)
from langchain_integrations.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.tools import Tool
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_openai.llm import OpenAI
from langchain_integrations.memory import ReadOnlySharedMemory
from langchain_core.tools import BaseTool
from langchain_integrations.tools.requests.tool import BaseRequestsTool
from langchain_integrations.utilities.requests import RequestsWrapper
#
# Requests tools with LLM-instructed extraction of truncated responses.
#
# Of course, truncating so bluntly may lose a lot of valuable
# information in the response.
# However, the goal for now is to have only a single inference step.
MAX_RESPONSE_LENGTH = 5000
"""Maximum length of the response to be returned."""
def _get_default_llm_chain(prompt: BasePromptTemplate) -> LLMChain:
return LLMChain(
llm=OpenAI(),
prompt=prompt,
)
def _get_default_llm_chain_factory(
prompt: BasePromptTemplate,
) -> Callable[[], LLMChain]:
"""Returns a default LLMChain factory."""
return partial(_get_default_llm_chain, prompt)
class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):
"""Requests GET tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_get"
"""Tool name."""
description = REQUESTS_GET_TOOL_DESCRIPTION
"""Tool description."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_GET_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
data_params = data.get("params")
response = self.requests_wrapper.get(data["url"], params=data_params)
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool):
"""Requests POST tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_post"
"""Tool name."""
description = REQUESTS_POST_TOOL_DESCRIPTION
"""Tool description."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_POST_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.post(data["url"], data["data"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool):
"""Requests PATCH tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_patch"
"""Tool name."""
description = REQUESTS_PATCH_TOOL_DESCRIPTION
"""Tool description."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_PATCH_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.patch(data["url"], data["data"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPutToolWithParsing(BaseRequestsTool, BaseTool):
"""Requests PUT tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_put"
"""Tool name."""
description = REQUESTS_PUT_TOOL_DESCRIPTION
"""Tool description."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_PUT_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.put(data["url"], data["data"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool):
"""A tool that sends a DELETE request and parses the response."""
name: str = "requests_delete"
"""The name of the tool."""
description = REQUESTS_DELETE_TOOL_DESCRIPTION
"""The description of the tool."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""The maximum length of the response."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_DELETE_PROMPT)
)
"""The LLM chain used to parse the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.delete(data["url"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
#
# Orchestrator, planner, controller.
#
def _create_api_planner_tool(
api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel
) -> Tool:
endpoint_descriptions = [
f"{name} {description}" for name, description, _ in api_spec.endpoints
]
prompt = PromptTemplate(
template=API_PLANNER_PROMPT,
input_variables=["query"],
partial_variables={"endpoints": "- " + "- ".join(endpoint_descriptions)},
)
chain = LLMChain(llm=llm, prompt=prompt)
tool = Tool(
name=API_PLANNER_TOOL_NAME,
description=API_PLANNER_TOOL_DESCRIPTION,
func=chain.run,
)
return tool
def _create_api_controller_agent(
api_url: str,
api_docs: str,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
) -> AgentExecutor:
get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT)
post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
tools: List[BaseTool] = [
RequestsGetToolWithParsing(
requests_wrapper=requests_wrapper, llm_chain=get_llm_chain
),
RequestsPostToolWithParsing(
requests_wrapper=requests_wrapper, llm_chain=post_llm_chain
),
]
prompt = PromptTemplate(
template=API_CONTROLLER_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"api_url": api_url,
"api_docs": api_docs,
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt),
allowed_tools=[tool.name for tool in tools],
)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
def _create_api_controller_tool(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
) -> Tool:
"""Expose controller as a tool.
The tool is invoked with a plan from the planner, and dynamically
creates a controller agent with relevant documentation only to
constrain the context.
"""
base_url = api_spec.servers[0]["url"] # TODO: do better.
def _create_and_run_api_controller_agent(plan_str: str) -> str:
pattern = r"\b(GET|POST|PATCH|DELETE)\s+(/\S+)*"
matches = re.findall(pattern, plan_str)
endpoint_names = [
"{method} {route}".format(method=method, route=route.split("?")[0])
for method, route in matches
]
docs_str = ""
for endpoint_name in endpoint_names:
found_match = False
for name, _, docs in api_spec.endpoints:
regex_name = re.compile(re.sub(r"\{.*?\}", ".*", name))
if regex_name.match(endpoint_name):
found_match = True
docs_str += f"== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n"
if not found_match:
raise ValueError(f"{endpoint_name} endpoint does not exist.")
agent = _create_api_controller_agent(base_url, docs_str, requests_wrapper, llm)
return agent.run(plan_str)
return Tool(
name=API_CONTROLLER_TOOL_NAME,
func=_create_and_run_api_controller_agent,
description=API_CONTROLLER_TOOL_DESCRIPTION,
)
def create_openapi_agent(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
shared_memory: Optional[ReadOnlySharedMemory] = None,
callback_manager: Optional[BaseCallbackManager] = None,
verbose: bool = True,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Instantiate OpenAI API planner and controller for a given spec.
Inject credentials via requests_wrapper.
We use a top-level "orchestrator" agent to invoke the planner and controller,
rather than a top-level planner
that invokes a controller with its plan. This is to keep the planner simple.
"""
tools = [
_create_api_planner_tool(api_spec, llm),
_create_api_controller_tool(api_spec, requests_wrapper, llm),
]
prompt = PromptTemplate(
template=API_ORCHESTRATOR_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt, memory=shared_memory),
allowed_tools=[tool.name for tool in tools],
**kwargs,
)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
)
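A sketch of calling the planner/controller entry point defined above; the module paths, the local spec file, and the bearer token are all placeholders or assumptions:

```python
import yaml

from langchain_integrations.agent_toolkits.openapi.planner import create_openapi_agent
from langchain_integrations.agent_toolkits.openapi.spec import reduce_openapi_spec
from langchain_integrations.utilities.requests import RequestsWrapper
from langchain_openai.llm import OpenAI

with open("openapi.yaml") as f:  # hypothetical local spec file
    reduced = reduce_openapi_spec(yaml.safe_load(f))

# The tools reuse this wrapper for every request, so inject auth headers here.
requests_wrapper = RequestsWrapper(headers={"Authorization": "Bearer <token>"})
agent = create_openapi_agent(reduced, requests_wrapper, OpenAI(temperature=0))
agent.run("What endpoints could I call to create a new resource?")
```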

View File

@@ -0,0 +1,235 @@
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
API_PLANNER_PROMPT = """You are a planner that plans a sequence of API calls to assist with user queries against an API.
You should:
1) evaluate whether the user query can be solved by the API documented below. If no, say why.
2) if yes, generate a plan of API calls and say what they are doing step by step.
3) if the plan includes a DELETE call, you should always ask the User for authorization first, unless the User has specifically asked to delete something.
You should only use API endpoints documented below ("Endpoints you can use:").
You can only use the DELETE tool if the User has specifically asked to delete something. Otherwise, you should first return a request for authorization from the User.
Some user queries can be resolved in a single API call, but some will require several API calls.
The plan will be passed to an API controller that can format it into web requests and return the responses.
----
Here are some examples:
Fake endpoints for examples:
GET /user to get information about the current user
GET /products/search search across products
POST /users/{{id}}/cart to add products to a user's cart
PATCH /users/{{id}}/cart to update a user's cart
PUT /users/{{id}}/coupon to apply idempotent coupon to a user's cart
DELETE /users/{{id}}/cart to delete a user's cart
User query: tell me a joke
Plan: Sorry, this API's domain is shopping, not comedy.
User query: I want to buy a couch
Plan: 1. GET /products with a query param to search for couches
2. GET /user to find the user's id
3. POST /users/{{id}}/cart to add a couch to the user's cart
User query: I want to add a lamp to my cart
Plan: 1. GET /products with a query param to search for lamps
2. GET /user to find the user's id
3. PATCH /users/{{id}}/cart to add a lamp to the user's cart
User query: I want to add a coupon to my cart
Plan: 1. GET /user to find the user's id
2. PUT /users/{{id}}/coupon to apply the coupon
User query: I want to delete my cart
Plan: 1. GET /user to find the user's id
2. DELETE required. Did user specify DELETE or previously authorize? Yes, proceed.
3. DELETE /users/{{id}}/cart to delete the user's cart
User query: I want to start a new cart
Plan: 1. GET /user to find the user's id
2. DELETE required. Did user specify DELETE or previously authorize? No, ask for authorization.
3. Are you sure you want to delete your cart?
----
Here are endpoints you can use. Do not reference any of the endpoints above.
{endpoints}
----
User query: {query}
Plan:"""
API_PLANNER_TOOL_NAME = "api_planner"
API_PLANNER_TOOL_DESCRIPTION = f"Can be used to generate the right API calls to assist with a user query, like {API_PLANNER_TOOL_NAME}(query). Should always be called before trying to call the API controller."
# Execution.
API_CONTROLLER_PROMPT = """You are an agent that gets a sequence of API calls and given their documentation, should execute them and return the final response.
If you cannot complete them and run into issues, you should explain the issue. If you're unable to resolve an API call, you can retry the API call. When interacting with API objects, you should extract ids for inputs to other API calls but ids and names for outputs returned to the User.
Here is documentation on the API:
Base url: {api_url}
Endpoints:
{api_docs}
Here are tools to execute requests against the API: {tool_descriptions}
Starting below, you should follow this format:
Plan: the plan of API calls to execute
Thought: you should always think about what to do
Action: the action to take, should be one of the tools [{tool_names}]
Action Input: the input to the action
Observation: the output of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I am finished executing the plan (or, I cannot finish executing the plan without knowing some other information.)
Final Answer: the final output from executing the plan or missing information I'd need to re-plan correctly.
Begin!
Plan: {input}
Thought:
{agent_scratchpad}
"""
API_CONTROLLER_TOOL_NAME = "api_controller"
API_CONTROLLER_TOOL_DESCRIPTION = f"Can be used to execute a plan of API calls, like {API_CONTROLLER_TOOL_NAME}(plan)."
# Orchestrate planning + execution.
# The goal is to have an agent at the top-level (e.g. so it can recover from errors and re-plan) while
# keeping planning (and specifically the planning prompt) simple.
API_ORCHESTRATOR_PROMPT = """You are an agent that assists with user queries against API, things like querying information or creating resources.
Some user queries can be resolved in a single API call, particularly if you can find appropriate params from the OpenAPI spec; though some require several API calls.
You should always plan your API calls first, and then execute the plan second.
If the plan includes a DELETE call, be sure to ask the User for authorization first unless the User has specifically asked to delete something.
You should never return information without executing the api_controller tool.
Here are the tools to plan and execute API requests: {tool_descriptions}
Starting below, you should follow this format:
User query: the query a User wants help with related to the API
Thought: you should always think about what to do
Action: the action to take, should be one of the tools [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I am finished executing a plan and have the information the user asked for or the data the user asked to create
Final Answer: the final output from executing the plan
Example:
User query: can you add some trendy stuff to my shopping cart.
Thought: I should plan API calls first.
Action: api_planner
Action Input: I need to find the right API calls to add trendy items to the user's shopping cart
Observation: 1) GET /items with params 'trending' is 'True' to get trending item ids
2) GET /user to get user
3) POST /cart to post the trending items to the user's cart
Thought: I'm ready to execute the API calls.
Action: api_controller
Action Input: 1) GET /items params 'trending' is 'True' to get trending item ids
2) GET /user to get user
3) POST /cart to post the trending items to the user's cart
...
Begin!
User query: {input}
Thought: I should generate a plan to help with this query and then copy that plan exactly to the controller.
{agent_scratchpad}"""
REQUESTS_GET_TOOL_DESCRIPTION = """Use this to GET content from a website.
Input to the tool should be a json string with 3 keys: "url", "params" and "output_instructions".
The value of "url" should be a string.
The value of "params" should be a dict of the needed and available parameters from the OpenAPI spec related to the endpoint.
If parameters are not needed, or not available, leave it empty.
The value of "output_instructions" should be instructions on what information to extract from the response,
for example the id(s) for a resource(s) that the GET request fetches.
"""
PARSING_GET_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
REQUESTS_POST_TOOL_DESCRIPTION = """Use this when you want to POST to a website.
Input to the tool should be a json string with 3 keys: "url", "data", and "output_instructions".
The value of "url" should be a string.
The value of "data" should be a dictionary of key-value pairs you want to POST to the url.
The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the POST request creates.
Always use double quotes for strings in the json string."""
PARSING_POST_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
REQUESTS_PATCH_TOOL_DESCRIPTION = """Use this when you want to PATCH content on a website.
Input to the tool should be a json string with 3 keys: "url", "data", and "output_instructions".
The value of "url" should be a string.
The value of "data" should be a dictionary of key-value pairs of the body params available in the OpenAPI spec you want to PATCH the content with at the url.
The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the PATCH request creates.
Always use double quotes for strings in the json string."""
PARSING_PATCH_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
REQUESTS_PUT_TOOL_DESCRIPTION = """Use this when you want to PUT to a website.
Input to the tool should be a json string with 3 keys: "url", "data", and "output_instructions".
The value of "url" should be a string.
The value of "data" should be a dictionary of key-value pairs you want to PUT to the url.
The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the PUT request creates.
Always use double quotes for strings in the json string."""
PARSING_PUT_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
REQUESTS_DELETE_TOOL_DESCRIPTION = """ONLY USE THIS TOOL WHEN THE USER HAS SPECIFICALLY REQUESTED TO DELETE CONTENT FROM A WEBSITE.
Input to the tool should be a json string with 2 keys: "url", and "output_instructions".
The value of "url" should be a string.
The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the DELETE request creates.
Always use double quotes for strings in the json string.
ONLY USE THIS TOOL IF THE USER HAS SPECIFICALLY REQUESTED TO DELETE SOMETHING."""
PARSING_DELETE_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)

View File

@@ -0,0 +1,29 @@
# flake8: noqa
OPENAPI_PREFIX = """You are an agent designed to answer questions by making web requests to an API given the openapi spec.
If the question does not seem related to the API, return I don't know. Do not make up an answer.
Only use information provided by the tools to construct your response.
First, find the base URL needed to make the request.
Second, find the relevant paths needed to answer the question. Take note that, sometimes, you might need to make more than one request to more than one path to answer the question.
Third, find the required parameters needed to make the request. For GET requests, these are usually URL parameters and for POST requests, these are request body parameters.
Fourth, make the requests needed to answer the question. Ensure that you are sending the correct parameters to the request by checking which parameters are required. For parameters with a fixed set of values, please use the spec to look at which values are allowed.
Use the exact parameter names as listed in the spec, do not make up any names or abbreviate the names of parameters.
If you get a not found error, ensure that you are using a path that actually exists in the spec.
"""
OPENAPI_SUFFIX = """Begin!
Question: {input}
Thought: I should explore the spec to find the base url for the API.
{agent_scratchpad}"""
DESCRIPTION = """Can be used to answer questions about the openapi spec for the API. Always use this tool before trying to make a request.
Example inputs to this tool:
'What are the required query parameters for a GET request to the /bar endpoint?'
'What are the required parameters in the request body for a POST request to the /foo endpoint?'
Always give this tool a specific question."""

View File

@@ -0,0 +1,75 @@
"""Quick and dirty representation for OpenAPI specs."""
from dataclasses import dataclass
from typing import List, Tuple
from langchain_core.utils.json_schema import dereference_refs
@dataclass(frozen=True)
class ReducedOpenAPISpec:
"""A reduced OpenAPI spec.
This is a quick and dirty representation for OpenAPI specs.
Attributes:
servers: The servers in the spec.
description: The description of the spec.
endpoints: The endpoints in the spec.
"""
servers: List[dict]
description: str
endpoints: List[Tuple[str, str, dict]]
def reduce_openapi_spec(spec: dict, dereference: bool = True) -> ReducedOpenAPISpec:
"""Simplify/distill/minify a spec somehow.
I want a smaller target for retrieval and (more importantly)
I want smaller results from retrieval.
I was hoping https://openapi.tools/ would have some useful bits
to this end, but it doesn't seem to.
"""
# 1. Consider only get, post, patch, put, delete endpoints.
endpoints = [
(f"{operation_name.upper()} {route}", docs.get("description"), docs)
for route, operation in spec["paths"].items()
for operation_name, docs in operation.items()
if operation_name in ["get", "post", "patch", "put", "delete"]
]
# 2. Replace any refs so that complete docs are retrieved.
# Note: probably want to do this post-retrieval, it blows up the size of the spec.
if dereference:
endpoints = [
(name, description, dereference_refs(docs, full_schema=spec))
for name, description, docs in endpoints
]
# 3. Strip docs down to required request args + happy path response.
def reduce_endpoint_docs(docs: dict) -> dict:
out = {}
if docs.get("description"):
out["description"] = docs.get("description")
if docs.get("parameters"):
out["parameters"] = [
parameter
for parameter in docs.get("parameters", [])
if parameter.get("required")
]
if "200" in docs["responses"]:
out["responses"] = docs["responses"]["200"]
if docs.get("requestBody"):
out["requestBody"] = docs.get("requestBody")
return out
endpoints = [
(name, description, reduce_endpoint_docs(docs))
for name, description, docs in endpoints
]
return ReducedOpenAPISpec(
servers=spec["servers"],
description=spec["info"].get("description", ""),
endpoints=endpoints,
)
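To make the three reduction steps concrete, a tiny self-contained illustration (import path assumed; `dereference=False` so no `$ref` resolution is needed):

```python
from langchain_integrations.agent_toolkits.openapi.spec import reduce_openapi_spec  # path assumed

spec = {
    "servers": [{"url": "https://api.example.com"}],
    "info": {"description": "Demo API"},
    "paths": {
        "/pets": {
            "get": {
                "description": "List pets",
                "parameters": [{"name": "limit", "required": True}],
                "responses": {"200": {"description": "A list of pets"}},
            }
        }
    },
}

reduced = reduce_openapi_spec(spec, dereference=False)
print(reduced.endpoints)
# One ("GET /pets", "List pets", docs) tuple, where docs keeps only the
# description, the required parameters, and the 200 response.
```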

View File

@@ -0,0 +1,91 @@
"""Requests toolkit."""
from __future__ import annotations
from typing import Any, List
from langchain_core.language_models import BaseLanguageModel
from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.agent_toolkits.json.base import create_json_agent
from langchain_integrations.agent_toolkits.json.toolkit import JsonToolkit
from langchain_integrations.agent_toolkits.openapi.prompt import DESCRIPTION
from langchain_integrations.agents.tools import Tool
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.json.tool import JsonSpec
from langchain_integrations.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain_integrations.utilities.requests import TextRequestsWrapper
class RequestsToolkit(BaseToolkit):
"""Toolkit for making REST requests.
*Security Note*: This toolkit contains tools to make GET, POST, PATCH, PUT,
and DELETE requests to an API.
Exercise care in who is allowed to use this toolkit. If exposing
to end users, consider that users will be able to make arbitrary
requests on behalf of the server hosting the code. For example,
users could ask the server to make a request to a private API
that is only accessible from the server.
Control access to who can use this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
requests_wrapper: TextRequestsWrapper
def get_tools(self) -> List[BaseTool]:
"""Return a list of tools."""
return [
RequestsGetTool(requests_wrapper=self.requests_wrapper),
RequestsPostTool(requests_wrapper=self.requests_wrapper),
RequestsPatchTool(requests_wrapper=self.requests_wrapper),
RequestsPutTool(requests_wrapper=self.requests_wrapper),
RequestsDeleteTool(requests_wrapper=self.requests_wrapper),
]
class OpenAPIToolkit(BaseToolkit):
"""Toolkit for interacting with an OpenAPI API.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, or updating data,
or by reading underlying data.
For example, this toolkit can be used to delete data exposed via
an OpenAPI compliant API.
"""
json_agent: AgentExecutor
requests_wrapper: TextRequestsWrapper
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
json_agent_tool = Tool(
name="json_explorer",
func=self.json_agent.run,
description=DESCRIPTION,
)
request_toolkit = RequestsToolkit(requests_wrapper=self.requests_wrapper)
return [*request_toolkit.get_tools(), json_agent_tool]
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
json_spec: JsonSpec,
requests_wrapper: TextRequestsWrapper,
**kwargs: Any,
) -> OpenAPIToolkit:
"""Create json agent from llm, then initialize."""
json_agent = create_json_agent(llm, JsonToolkit(spec=json_spec), **kwargs)
return cls(json_agent=json_agent, requests_wrapper=requests_wrapper)
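A quick sketch of the lower-level `RequestsToolkit` defined above, on its own; the import path and header value are assumptions, and the printed names are what the underlying request tools typically report:

```python
from langchain_integrations.agent_toolkits.openapi.toolkit import RequestsToolkit  # path assumed
from langchain_integrations.utilities.requests import TextRequestsWrapper

wrapper = TextRequestsWrapper(headers={"Accept": "application/json"})
toolkit = RequestsToolkit(requests_wrapper=wrapper)
print([tool.name for tool in toolkit.get_tools()])
# e.g. ['requests_get', 'requests_post', 'requests_patch',
#       'requests_put', 'requests_delete']
```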

View File

@@ -0,0 +1,22 @@
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
here = as_import_path(Path(__file__).parent)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise AttributeError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
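This module-level `__getattr__` (PEP 562) turns any attribute access into a migration error; a sketch of what a caller sees (the module path and attribute name here are hypothetical):

```python
# Hypothetical caller: importing the moved module and touching any attribute.
from langchain_integrations.agent_toolkits.pandas import base  # path hypothetical

try:
    base.create_pandas_dataframe_agent  # any attribute triggers __getattr__
except AttributeError as err:
    print(err)  # points at the langchain_experimental import to use instead
```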

View File

@@ -0,0 +1,4 @@
"""Playwright browser toolkit."""
from langchain_integrations.agent_toolkits.playwright.toolkit import PlayWrightBrowserToolkit
__all__ = ["PlayWrightBrowserToolkit"]

View File

@@ -0,0 +1,108 @@
"""Playwright web browser toolkit."""
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Type, cast
from langchain_core.pydantic_v1 import Extra, root_validator
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_core.tools import BaseTool
from langchain_integrations.tools.playwright.base import (
BaseBrowserTool,
lazy_import_playwright_browsers,
)
from langchain_integrations.tools.playwright.click import ClickTool
from langchain_integrations.tools.playwright.current_page import CurrentWebPageTool
from langchain_integrations.tools.playwright.extract_hyperlinks import ExtractHyperlinksTool
from langchain_integrations.tools.playwright.extract_text import ExtractTextTool
from langchain_integrations.tools.playwright.get_elements import GetElementsTool
from langchain_integrations.tools.playwright.navigate import NavigateTool
from langchain_integrations.tools.playwright.navigate_back import NavigateBackTool
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
else:
try:
# We do this so pydantic can resolve the types when instantiating
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
except ImportError:
pass
class PlayWrightBrowserToolkit(BaseToolkit):
"""Toolkit for PlayWright browser tools.
**Security Note**: This toolkit provides code to control a web-browser.
Be careful if exposing this toolkit to end users. The tools in the toolkit
are capable of navigating to arbitrary webpages, clicking on arbitrary
elements, and extracting arbitrary text and hyperlinks from webpages.
Specifically, by default this toolkit allows navigating to:
- Any URL (including any internal network URLs)
- Any local files
If exposing to end-users, consider limiting network access to the
server that hosts the agent; in addition, it is advised
to create a custom NavigationTool with an args_schema that limits the URLs
that can be navigated to (e.g., only allow navigating to URLs that
start with a particular prefix).
Remember to scope permissions to the minimal permissions necessary for
the application. If the default tool selection is not appropriate for
the application, consider creating a custom toolkit with the appropriate
tools.
See https://python.langchain.com/docs/security for more information.
"""
sync_browser: Optional["SyncBrowser"] = None
async_browser: Optional["AsyncBrowser"] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator
def validate_imports_and_browser_provided(cls, values: dict) -> dict:
"""Check that the arguments are valid."""
lazy_import_playwright_browsers()
if values.get("async_browser") is None and values.get("sync_browser") is None:
raise ValueError("Either async_browser or sync_browser must be specified.")
return values
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tool_classes: List[Type[BaseBrowserTool]] = [
ClickTool,
NavigateTool,
NavigateBackTool,
ExtractTextTool,
ExtractHyperlinksTool,
GetElementsTool,
CurrentWebPageTool,
]
tools = [
tool_cls.from_browser(
sync_browser=self.sync_browser, async_browser=self.async_browser
)
for tool_cls in tool_classes
]
return cast(List[BaseTool], tools)
@classmethod
def from_browser(
cls,
sync_browser: Optional[SyncBrowser] = None,
async_browser: Optional[AsyncBrowser] = None,
) -> PlayWrightBrowserToolkit:
"""Instantiate the toolkit."""
# This is to raise a better error than the forward ref ones Pydantic would have
lazy_import_playwright_browsers()
return cls(sync_browser=sync_browser, async_browser=async_browser)
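A synchronous-browser sketch (requires `pip install playwright` plus `playwright install`; the module path is an assumption):

```python
from playwright.sync_api import sync_playwright

from langchain_integrations.agent_toolkits.playwright.toolkit import (  # path assumed
    PlayWrightBrowserToolkit,
)

p = sync_playwright().start()
browser = p.chromium.launch(headless=True)

toolkit = PlayWrightBrowserToolkit.from_browser(sync_browser=browser)
print([tool.name for tool in toolkit.get_tools()])

browser.close()
p.stop()
```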

View File

@@ -0,0 +1 @@
"""Power BI agent."""

View File

@@ -0,0 +1,63 @@
"""Power BI agent."""
from typing import Any, Dict, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_integrations.agents import AgentExecutor
from langchain_integrations.agent_toolkits.powerbi.prompt import (
POWERBI_PREFIX,
POWERBI_SUFFIX,
)
from langchain_integrations.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_integrations.utilities.powerbi import PowerBIDataset
def create_pbi_agent(
llm: BaseLanguageModel,
toolkit: Optional[PowerBIToolkit] = None,
powerbi: Optional[PowerBIDataset] = None,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = POWERBI_PREFIX,
suffix: str = POWERBI_SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
examples: Optional[str] = None,
input_variables: Optional[List[str]] = None,
top_k: int = 10,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct a Power BI agent from an LLM and tools."""
if toolkit is None:
if powerbi is None:
raise ValueError("Must provide either a toolkit or powerbi dataset")
toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
tools = toolkit.get_tools()
tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names
agent = ZeroShotAgent(
llm_chain=LLMChain(
llm=llm,
prompt=ZeroShotAgent.create_prompt(
tools,
prefix=prefix.format(top_k=top_k).format(tables=tables),
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
),
callback_manager=callback_manager, # type: ignore
verbose=verbose,
),
allowed_tools=[tool.name for tool in tools],
**kwargs,
)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
)
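A hedged end-to-end sketch; the `PowerBIDataset` constructor arguments, the import paths, and the Azure credential flow are assumptions, and the dataset id and table names are placeholders:

```python
from azure.identity import DefaultAzureCredential

from langchain_integrations.agent_toolkits.powerbi.base import create_pbi_agent  # path assumed
from langchain_integrations.utilities.powerbi import PowerBIDataset
from langchain_openai.llm import OpenAI

dataset = PowerBIDataset(
    dataset_id="<dataset-id>",           # placeholder
    table_names=["Sales", "Customers"],  # placeholder tables
    credential=DefaultAzureCredential(),
)
agent = create_pbi_agent(llm=OpenAI(temperature=0), powerbi=dataset, verbose=True)
agent.run("How many rows are in the Sales table?")
```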

View File

@@ -0,0 +1,64 @@
"""Power BI agent."""
from typing import Any, Dict, List, Optional
from langchain_integrations.agents import AgentExecutor
from langchain_integrations.agents.agent import AgentOutputParser
from langchain_integrations.agent_toolkits.powerbi.prompt import (
POWERBI_CHAT_PREFIX,
POWERBI_CHAT_SUFFIX,
)
from langchain_integrations.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain_integrations.agents.conversational_chat.base import ConversationalChatAgent
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chat_models.base import BaseChatModel
from langchain_integrations.memory import ConversationBufferMemory
from langchain_integrations.memory.chat_memory import BaseChatMemory
from langchain_integrations.utilities.powerbi import PowerBIDataset
def create_pbi_chat_agent(
llm: BaseChatModel,
toolkit: Optional[PowerBIToolkit] = None,
powerbi: Optional[PowerBIDataset] = None,
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
prefix: str = POWERBI_CHAT_PREFIX,
suffix: str = POWERBI_CHAT_SUFFIX,
examples: Optional[str] = None,
input_variables: Optional[List[str]] = None,
memory: Optional[BaseChatMemory] = None,
top_k: int = 10,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct a Power BI agent from a Chat LLM and tools.
If you supply only a toolkit and no Power BI dataset, the same LLM is used for both.
"""
if toolkit is None:
if powerbi is None:
raise ValueError("Must provide either a toolkit or powerbi dataset")
toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
tools = toolkit.get_tools()
tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names
agent = ConversationalChatAgent.from_llm_and_tools(
llm=llm,
tools=tools,
system_message=prefix.format(top_k=top_k).format(tables=tables),
human_message=suffix,
input_variables=input_variables,
callback_manager=callback_manager,
output_parser=output_parser,
verbose=verbose,
**kwargs,
)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
memory=memory
or ConversationBufferMemory(memory_key="chat_history", return_messages=True),
verbose=verbose,
**(agent_executor_kwargs or {}),
)

View File

@@ -0,0 +1,38 @@
# flake8: noqa
"""Prompts for PowerBI agent."""
POWERBI_PREFIX = """You are an agent designed to help users interact with a PowerBI Dataset.
The agent has access to a tool that can write a query based on the question and then run it against Power BI, Microsoft's business intelligence tool. Questions from the users should be interpreted as related to the available dataset and not as general questions about the world. If the question does not seem related to the dataset, return "This does not appear to be part of this dataset." as the answer.
Given an input question, ask to run the question against the dataset, then look at the results and return the answer. The answer should be a complete sentence that answers the question; if multiple rows are asked for, find a way to write that in an easily readable format for a human, and make sure to represent numbers in readable ways, like 1M instead of 1000000. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
"""
POWERBI_SUFFIX = """Begin!
Question: {input}
Thought: I can first ask which tables I have, then how each table is defined and then ask the query tool the question I need, and finally create a nice sentence that answers the question.
{agent_scratchpad}"""
POWERBI_CHAT_PREFIX = """Assistant is a large language model built to help users interact with a PowerBI Dataset.
Assistant should try to create a correct and complete answer to the question from the user. If the user asks a question not related to the dataset, it should return "This does not appear to be part of this dataset." as the answer. The user might make a mistake with the spelling of certain values; if you think that is the case, ask the user to confirm the spelling of the value and then run the query again. Unless the user specifies a specific number of examples they wish to obtain, and the results are too large, limit your query to at most {top_k} results, but make it clear when answering which field was used for the filtering. The user has access to these tables: {{tables}}.
The answer should be a complete sentence that answers the question; if multiple rows are asked for, find a way to write that in an easily readable format for a human, and make sure to represent numbers in readable ways, like 1M instead of 1000000.
"""
POWERBI_CHAT_SUFFIX = """TOOLS
------
Assistant can ask the user to use tools to look up information that may be helpful in answering the user's original question. The tools the human can use are:
{{tools}}
{format_instructions}
USER'S INPUT
--------------------
Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):
{{{{input}}}}
"""

View File

@@ -0,0 +1,102 @@
"""Toolkit for interacting with a Power BI dataset."""
from typing import List, Optional, Union
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.pydantic_v1 import Field
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_integrations.chat_models.base import BaseChatModel
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.powerbi.prompt import (
QUESTION_TO_QUERY_BASE,
SINGLE_QUESTION_TO_QUERY,
USER_INPUT,
)
from langchain_integrations.tools.powerbi.tool import (
InfoPowerBITool,
ListPowerBITool,
QueryPowerBITool,
)
from langchain_integrations.utilities.powerbi import PowerBIDataset
class PowerBIToolkit(BaseToolkit):
"""Toolkit for interacting with Power BI dataset.
*Security Note*: This toolkit interacts with an external service.
Control access to who can use this toolkit.
Make sure that the capabilities given by this toolkit to the calling
code are appropriately scoped to the application.
See https://python.langchain.com/docs/security for more information.
"""
powerbi: PowerBIDataset = Field(exclude=True)
llm: Union[BaseLanguageModel, BaseChatModel] = Field(exclude=True)
examples: Optional[str] = None
max_iterations: int = 5
callback_manager: Optional[BaseCallbackManager] = None
output_token_limit: Optional[int] = None
tiktoken_model_name: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
QueryPowerBITool(
llm_chain=self._get_chain(),
powerbi=self.powerbi,
examples=self.examples,
max_iterations=self.max_iterations,
output_token_limit=self.output_token_limit,
tiktoken_model_name=self.tiktoken_model_name,
),
InfoPowerBITool(powerbi=self.powerbi),
ListPowerBITool(powerbi=self.powerbi),
]
def _get_chain(self) -> LLMChain:
"""Construct the chain based on the callback manager and model type."""
if isinstance(self.llm, BaseLanguageModel):
return LLMChain(
llm=self.llm,
callback_manager=self.callback_manager
if self.callback_manager
else None,
prompt=PromptTemplate(
template=SINGLE_QUESTION_TO_QUERY,
input_variables=["tool_input", "tables", "schemas", "examples"],
),
)
system_prompt = SystemMessagePromptTemplate(
prompt=PromptTemplate(
template=QUESTION_TO_QUERY_BASE,
input_variables=["tables", "schemas", "examples"],
)
)
human_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template=USER_INPUT,
input_variables=["tool_input"],
)
)
return LLMChain(
llm=self.llm,
callback_manager=self.callback_manager if self.callback_manager else None,
prompt=ChatPromptTemplate.from_messages([system_prompt, human_prompt]),
)
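
Below is a minimal usage sketch for the toolkit above. The module paths follow this diff's `langchain_integrations` layout, and the `PowerBIDataset` constructor arguments and Azure credential are illustrative assumptions, not something this diff shows:

```python
# Sketch: building PowerBIToolkit over a dataset (kwargs are illustrative).
from azure.identity import DefaultAzureCredential
from langchain_integrations.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain_integrations.utilities.powerbi import PowerBIDataset
from langchain_openai.llm import OpenAI

dataset = PowerBIDataset(
    dataset_id="<dataset-guid>",         # illustrative placeholder
    table_names=["Sales", "Customers"],  # illustrative table names
    credential=DefaultAzureCredential(),
)
toolkit = PowerBIToolkit(powerbi=dataset, llm=OpenAI(temperature=0))
# Yields QueryPowerBITool, InfoPowerBITool, and ListPowerBITool, in that order.
tools = toolkit.get_tools()
```

Since `OpenAI` is a plain `BaseLanguageModel`, `_get_chain` above takes the single-prompt branch rather than the system/human chat-prompt branch.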

View File

@@ -0,0 +1,22 @@
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
here = as_import_path(Path(__file__).parent)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise AttributeError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)

View File

@@ -0,0 +1,22 @@
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
here = as_import_path(Path(__file__).parent)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise AttributeError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)

View File

@@ -0,0 +1 @@
"""Spark SQL agent."""

View File

@@ -0,0 +1,60 @@
"""Spark SQL agent."""
from typing import Any, Dict, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.spark_sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain_integrations.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_core.callbacks.base import BaseCallbackManager, Callbacks
from langchain_integrations.chains.llm import LLMChain
def create_spark_sql_agent(
llm: BaseLanguageModel,
toolkit: SparkSQLToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
callbacks: Callbacks = None,
prefix: str = SQL_PREFIX,
suffix: str = SQL_SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
top_k: int = 10,
max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None,
early_stopping_method: str = "force",
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct a Spark SQL agent from an LLM and tools."""
tools = toolkit.get_tools()
prefix = prefix.format(top_k=top_k)
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
callbacks=callbacks,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
callbacks=callbacks,
verbose=verbose,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
)
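
A hedged end-to-end sketch for the factory above, assuming a live `SparkSession`; the `base` module path and the `SparkSQL` keyword arguments are assumptions for illustration:

```python
# Sketch: constructing a Spark SQL agent (SparkSQL kwargs are assumed).
from pyspark.sql import SparkSession
from langchain_integrations.agent_toolkits.spark_sql.base import create_spark_sql_agent
from langchain_integrations.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain_integrations.utilities.spark_sql import SparkSQL
from langchain_openai.llm import OpenAI

spark = SparkSession.builder.getOrCreate()
db = SparkSQL(spark_session=spark, schema="default")  # assumed kwargs
llm = OpenAI(temperature=0)
agent_executor = create_spark_sql_agent(
    llm=llm,
    toolkit=SparkSQLToolkit(db=db, llm=llm),
    verbose=True,
)
agent_executor.run("How many tables are in the default schema?")
```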

View File

@@ -0,0 +1,21 @@
# flake8: noqa
SQL_PREFIX = """You are an agent designed to interact with Spark SQL.
Given an input question, create a syntactically correct Spark SQL query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
"""
SQL_SUFFIX = """Begin!
Question: {input}
Thought: I should look at the tables in the database to see what I can query.
{agent_scratchpad}"""

View File

@@ -0,0 +1,36 @@
"""Toolkit for interacting with Spark SQL."""
from typing import List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.pydantic_v1 import Field
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.spark_sql.tool import (
InfoSparkSQLTool,
ListSparkSQLTool,
QueryCheckerTool,
QuerySparkSQLTool,
)
from langchain_integrations.utilities.spark_sql import SparkSQL
class SparkSQLToolkit(BaseToolkit):
"""Toolkit for interacting with Spark SQL."""
db: SparkSQL = Field(exclude=True)
llm: BaseLanguageModel = Field(exclude=True)
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
QuerySparkSQLTool(db=self.db),
InfoSparkSQLTool(db=self.db),
ListSparkSQLTool(db=self.db),
QueryCheckerTool(db=self.db, llm=self.llm),
]

View File

@@ -0,0 +1 @@
"""SQL agent."""

View File

@@ -0,0 +1,96 @@
"""SQL agent."""
from typing import Any, Dict, List, Optional, Sequence
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import AIMessage, SystemMessage
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_integrations.agents.agent import AgentExecutor, BaseSingleActionAgent
from langchain_integrations.agent_toolkits.sql.prompt import (
SQL_FUNCTIONS_SUFFIX,
SQL_PREFIX,
SQL_SUFFIX,
)
from langchain_integrations.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_integrations.agents.agent_types import AgentType
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_integrations.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_integrations.tools import BaseTool
def create_sql_agent(
llm: BaseLanguageModel,
toolkit: SQLDatabaseToolkit,
agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = SQL_PREFIX,
suffix: Optional[str] = None,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
top_k: int = 10,
max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None,
early_stopping_method: str = "force",
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
extra_tools: Sequence[BaseTool] = (),
**kwargs: Any,
) -> AgentExecutor:
"""Construct an SQL agent from an LLM and tools."""
tools = toolkit.get_tools() + list(extra_tools)
prefix = prefix.format(dialect=toolkit.dialect, top_k=top_k)
agent: BaseSingleActionAgent
if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix or SQL_SUFFIX,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
elif agent_type == AgentType.OPENAI_FUNCTIONS:
messages = [
SystemMessage(content=prefix),
HumanMessagePromptTemplate.from_template("{input}"),
AIMessage(content=suffix or SQL_FUNCTIONS_SUFFIX),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
input_variables = ["input", "agent_scratchpad"]
_prompt = ChatPromptTemplate(input_variables=input_variables, messages=messages)
agent = OpenAIFunctionsAgent(
llm=llm,
prompt=_prompt,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
else:
raise ValueError(f"Agent type {agent_type} not supported at the moment.")
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
)
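
A usage sketch for `create_sql_agent`, assuming the `base` module path implied by the imports above and a local SQLite file (the classic Chinook sample database is used purely as an illustration):

```python
# Sketch: a zero-shot SQL agent over SQLite.
from langchain_integrations.agent_toolkits.sql.base import create_sql_agent
from langchain_integrations.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_integrations.utilities.sql_database import SQLDatabase
from langchain_openai.llm import OpenAI

db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # illustrative database
llm = OpenAI(temperature=0)
agent_executor = create_sql_agent(
    llm=llm,
    toolkit=SQLDatabaseToolkit(db=db, llm=llm),
    verbose=True,
)
agent_executor.run("Which country's customers spent the most?")
```

Note that the `{dialect}` and `{top_k}` placeholders in SQL_PREFIX (see the prompt file below) are filled in by `prefix.format(dialect=toolkit.dialect, top_k=top_k)` before the prompt is built.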

View File

@@ -0,0 +1,23 @@
# flake8: noqa
SQL_PREFIX = """You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
"""
SQL_SUFFIX = """Begin!
Question: {input}
Thought: I should look at the tables in the database to see what I can query. Then I should query the schema of the most relevant tables.
{agent_scratchpad}"""
SQL_FUNCTIONS_SUFFIX = """I should look at the tables in the database to see what I can query. Then I should query the schema of the most relevant tables."""

View File

@@ -0,0 +1,71 @@
"""Toolkit for interacting with an SQL database."""
from typing import List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.pydantic_v1 import Field
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.sql_database.tool import (
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QuerySQLCheckerTool,
QuerySQLDataBaseTool,
)
from langchain_integrations.utilities.sql_database import SQLDatabase
class SQLDatabaseToolkit(BaseToolkit):
"""Toolkit for interacting with SQL databases."""
db: SQLDatabase = Field(exclude=True)
llm: BaseLanguageModel = Field(exclude=True)
@property
def dialect(self) -> str:
"""Return string representation of SQL dialect to use."""
return self.db.dialect
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
list_sql_database_tool = ListSQLDatabaseTool(db=self.db)
info_sql_database_tool_description = (
"Input to this tool is a comma-separated list of tables, output is the "
"schema and sample rows for those tables. "
"Be sure that the tables actually exist by calling "
f"{list_sql_database_tool.name} first! "
"Example Input: table1, table2, table3"
)
info_sql_database_tool = InfoSQLDatabaseTool(
db=self.db, description=info_sql_database_tool_description
)
query_sql_database_tool_description = (
"Input to this tool is a detailed and correct SQL query, output is a "
"result from the database. If the query is not correct, an error message "
"will be returned. If an error is returned, rewrite the query, check the "
"query, and try again. If you encounter an issue with Unknown column "
f"'xxxx' in 'field list', use {info_sql_database_tool.name} "
"to query the correct table fields."
)
query_sql_database_tool = QuerySQLDataBaseTool(
db=self.db, description=query_sql_database_tool_description
)
query_sql_checker_tool_description = (
"Use this tool to double check if your query is correct before executing "
"it. Always use this tool before executing a query with "
f"{query_sql_database_tool.name}!"
)
query_sql_checker_tool = QuerySQLCheckerTool(
db=self.db, llm=self.llm, description=query_sql_checker_tool_description
)
return [
query_sql_database_tool,
info_sql_database_tool,
list_sql_database_tool,
query_sql_checker_tool,
]
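
Each tool's description references the others by name, which is how the agent learns to list tables before fetching schemas and to check queries before running them. A small sketch of inspecting those names; the printed defaults are assumptions, since this diff does not show the tool classes themselves:

```python
# Sketch: enumerating the toolkit's tools and their cross-referencing names.
from langchain_integrations.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_integrations.utilities.sql_database import SQLDatabase
from langchain_openai.llm import OpenAI

db = SQLDatabase.from_uri("sqlite:///Chinook.db")
toolkit = SQLDatabaseToolkit(db=db, llm=OpenAI(temperature=0))
for tool in toolkit.get_tools():
    print(tool.name)
# Assumed default names: sql_db_query, sql_db_schema,
# sql_db_list_tables, sql_db_query_checker
```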

View File

@@ -0,0 +1 @@
"""Agent toolkit for interacting with vector stores."""

View File

@@ -0,0 +1,96 @@
"""VectorStore agent."""
from typing import Any, Dict, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.vectorstore.prompt import PREFIX, ROUTER_PREFIX
from langchain_integrations.agent_toolkits.vectorstore.toolkit import (
VectorStoreRouterToolkit,
VectorStoreToolkit,
)
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
def create_vectorstore_agent(
llm: BaseLanguageModel,
toolkit: VectorStoreToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = PREFIX,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct a VectorStore agent from an LLM and tools.
Args:
llm (BaseLanguageModel): LLM that will be used by the agent
toolkit (VectorStoreToolkit): Set of tools for the agent
callback_manager (Optional[BaseCallbackManager], optional): Object to handle callbacks. Defaults to None.
prefix (str, optional): The prefix prompt for the agent. If not provided, uses the default PREFIX.
verbose (bool, optional): Whether to show the content of the scratchpad. Defaults to False.
agent_executor_kwargs (Optional[Dict[str, Any]], optional): Any other parameters to pass to the AgentExecutor. Defaults to None.
**kwargs: Additional named parameters to pass to the ZeroShotAgent.
Returns:
AgentExecutor: A callable AgentExecutor object; call it directly or use its run method with the query to get the response.
""" # noqa: E501
tools = toolkit.get_tools()
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
)
def create_vectorstore_router_agent(
llm: BaseLanguageModel,
toolkit: VectorStoreRouterToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = ROUTER_PREFIX,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct a VectorStore router agent from an LLM and tools.
Args:
llm (BaseLanguageModel): LLM that will be used by the agent
toolkit (VectorStoreRouterToolkit): Set of tools for the agent, with routing capability across multiple vector stores.
callback_manager (Optional[BaseCallbackManager], optional): Object to handle callbacks. Defaults to None.
prefix (str, optional): The prefix prompt for the router agent. If not provided, uses the default ROUTER_PREFIX.
verbose (bool, optional): Whether to show the content of the scratchpad. Defaults to False.
agent_executor_kwargs (Optional[Dict[str, Any]], optional): Any other parameters to pass to the AgentExecutor. Defaults to None.
**kwargs: Additional named parameters to pass to the ZeroShotAgent.
Returns:
AgentExecutor: A callable AgentExecutor object; call it directly or use its run method with the query to get the response.
""" # noqa: E501
tools = toolkit.get_tools()
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
)
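
A hedged sketch for `create_vectorstore_agent`; only `VectorStoreInfo`, `VectorStoreToolkit`, and the factory come from this diff, while the `base` module path and the FAISS/embeddings import paths are assumptions:

```python
# Sketch: QA agent over a single vector store (store setup is illustrative).
from langchain_integrations.agent_toolkits.vectorstore.base import (
    create_vectorstore_agent,
)
from langchain_integrations.agent_toolkits.vectorstore.toolkit import (
    VectorStoreInfo,
    VectorStoreToolkit,
)
from langchain_integrations.vectorstores.faiss import FAISS  # assumed path
from langchain_openai.embeddings import OpenAIEmbeddings  # assumed path
from langchain_openai.llm import OpenAI

store = FAISS.from_texts(["LangChain ships agent toolkits."], OpenAIEmbeddings())
info = VectorStoreInfo(
    vectorstore=store, name="docs", description="project documentation"
)
agent_executor = create_vectorstore_agent(
    llm=OpenAI(temperature=0), toolkit=VectorStoreToolkit(vectorstore_info=info)
)
agent_executor.run("What does LangChain ship?")
```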

View File

@@ -0,0 +1,13 @@
# flake8: noqa
PREFIX = """You are an agent designed to answer questions about sets of documents.
You have access to tools for interacting with the documents, and the inputs to the tools are questions.
Sometimes, you will be asked to provide sources for your answers, in which case you should use the appropriate tool to do so.
If the question does not seem relevant to any of the tools provided, just return "I don't know" as the answer.
"""
ROUTER_PREFIX = """You are an agent designed to answer questions.
You have access to tools for interacting with different sources, and the inputs to the tools are questions.
Your main task is to decide which of the tools is relevant for answering the question at hand.
For complex questions, you can break the question down into sub-questions and use tools to answer the sub-questions.
"""

View File

@@ -0,0 +1,89 @@
"""Toolkit for interacting with a vector store."""
from typing import List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.vectorstores import VectorStore
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_openai.llm import OpenAI
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
class VectorStoreInfo(BaseModel):
"""Information about a VectorStore."""
vectorstore: VectorStore = Field(exclude=True)
name: str
description: str
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
class VectorStoreToolkit(BaseToolkit):
"""Toolkit for interacting with a Vector Store."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
description = VectorStoreQATool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
class VectorStoreRouterToolkit(BaseToolkit):
"""Toolkit for routing between Vector Stores."""
vectorstores: List[VectorStoreInfo] = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = []
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name, vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,
description=description,
vectorstore=vectorstore_info.vectorstore,
llm=self.llm,
)
tools.append(qa_tool)
return tools
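
The router toolkit emits one `VectorStoreQATool` per store, so the agent's routing decision is simply tool selection by name and description. A sketch, with the same assumed import paths as the previous example:

```python
# Sketch: routing between two stores via create_vectorstore_router_agent.
from langchain_integrations.agent_toolkits.vectorstore.base import (
    create_vectorstore_router_agent,
)
from langchain_integrations.agent_toolkits.vectorstore.toolkit import (
    VectorStoreInfo,
    VectorStoreRouterToolkit,
)
from langchain_integrations.vectorstores.faiss import FAISS  # assumed path
from langchain_openai.embeddings import OpenAIEmbeddings  # assumed path
from langchain_openai.llm import OpenAI

embeddings = OpenAIEmbeddings()
sales = FAISS.from_texts(["Q3 revenue grew 12%."], embeddings)
hr = FAISS.from_texts(["PTO accrues monthly."], embeddings)
llm = OpenAI(temperature=0)
toolkit = VectorStoreRouterToolkit(
    vectorstores=[
        VectorStoreInfo(vectorstore=sales, name="sales", description="sales reports"),
        VectorStoreInfo(vectorstore=hr, name="hr", description="HR policies"),
    ],
    llm=llm,
)
agent_executor = create_vectorstore_router_agent(llm=llm, toolkit=toolkit)
agent_executor.run("How does PTO accrue?")
```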

View File

@@ -0,0 +1,22 @@
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
here = as_import_path(Path(__file__).parent)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise AttributeError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)

View File

@@ -0,0 +1 @@
"""Zapier Toolkit."""

View File

@@ -0,0 +1,60 @@
"""[DEPRECATED] Zapier Toolkit."""
from typing import List
from langchain_core._api import warn_deprecated
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.zapier.tool import ZapierNLARunAction
from langchain_integrations.utilities.zapier import ZapierNLAWrapper
class ZapierToolkit(BaseToolkit):
"""Zapier Toolkit."""
tools: List[BaseTool] = []
@classmethod
def from_zapier_nla_wrapper(
cls, zapier_nla_wrapper: ZapierNLAWrapper
) -> "ZapierToolkit":
"""Create a toolkit from a ZapierNLAWrapper."""
actions = zapier_nla_wrapper.list()
tools = [
ZapierNLARunAction(
action_id=action["id"],
zapier_description=action["description"],
params_schema=action["params"],
api_wrapper=zapier_nla_wrapper,
)
for action in actions
]
return cls(tools=tools)
@classmethod
async def async_from_zapier_nla_wrapper(
cls, zapier_nla_wrapper: ZapierNLAWrapper
) -> "ZapierToolkit":
"""Create a toolkit from a ZapierNLAWrapper."""
actions = await zapier_nla_wrapper.alist()
tools = [
ZapierNLARunAction(
action_id=action["id"],
zapier_description=action["description"],
params_schema=action["params"],
api_wrapper=zapier_nla_wrapper,
)
for action in actions
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
warn_deprecated(
since="0.0.319",
message=(
"This tool will be deprecated on 2023-11-17. See "
"https://nla.zapier.com/sunset/ for details"
),
)
return self.tools
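
Since the deprecation warning fires inside `get_tools()`, existing code keeps working while being pointed at the sunset notice. A sketch of the deprecated path, assuming `ZapierNLAWrapper` reads its API key from the `ZAPIER_NLA_API_KEY` environment variable:

```python
# Sketch: deprecated Zapier NLA toolkit (API key assumed to be in the env).
from langchain_integrations.agent_toolkits.zapier.toolkit import ZapierToolkit
from langchain_integrations.utilities.zapier import ZapierNLAWrapper

toolkit = ZapierToolkit.from_zapier_nla_wrapper(ZapierNLAWrapper())
for tool in toolkit.get_tools():  # emits the deprecation warning
    print(tool.name)
```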

View File

@@ -0,0 +1,19 @@
"""**Chat Loaders** load chat messages from common communications platforms.
Load chat messages from various
communications platforms such as Facebook Messenger, Telegram, and
WhatsApp. The loaded chat messages can be used for fine-tuning models.
**Class hierarchy:**
.. code-block::
BaseChatLoader --> <name>ChatLoader # Examples: WhatsAppChatLoader, IMessageChatLoader
**Main helpers:**
.. code-block::
ChatSession
""" # noqa: E501

Some files were not shown because too many files have changed in this diff.