mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-11 11:40:19 +00:00

Compare commits: eugene/tra ... eugene/roo (1 commit, f2a1c2726c)

.github/actions/people/app/main.py (vendored, 38 lines changed)
@@ -350,7 +350,11 @@ def get_graphql_pr_edges(*, settings: Settings, after: Union[str, None] = None):
        print("Querying PRs...")
    else:
        print(f"Querying PRs with cursor {after}...")
    data = get_graphql_response(settings=settings, query=prs_query, after=after)
    data = get_graphql_response(
        settings=settings,
        query=prs_query,
        after=after
    )
    graphql_response = PRsResponse.model_validate(data)
    return graphql_response.data.repository.pullRequests.edges

@@ -480,16 +484,10 @@ def get_contributors(settings: Settings):
        lines_changed = pr.additions + pr.deletions
        score = _logistic(files_changed, 20) + _logistic(lines_changed, 100)
        contributor_scores[pr.author.login] += score
        three_months_ago = datetime.now(timezone.utc) - timedelta(days=3 * 30)
        three_months_ago = (datetime.now(timezone.utc) - timedelta(days=3*30))
        if pr.createdAt > three_months_ago:
            recent_contributor_scores[pr.author.login] += score
    return (
        contributors,
        contributor_scores,
        recent_contributor_scores,
        reviewers,
        authors,
    )
    return contributors, contributor_scores, recent_contributor_scores, reviewers, authors


def get_top_users(
@@ -526,13 +524,9 @@ if __name__ == "__main__":
    # question_commentors, question_last_month_commentors, question_authors = get_experts(
    #     settings=settings
    # )
    (
        contributors,
        contributor_scores,
        recent_contributor_scores,
        reviewers,
        pr_authors,
    ) = get_contributors(settings=settings)
    contributors, contributor_scores, recent_contributor_scores, reviewers, pr_authors = get_contributors(
        settings=settings
    )
    # authors = {**question_authors, **pr_authors}
    authors = {**pr_authors}
    maintainers_logins = {
@@ -565,7 +559,7 @@ if __name__ == "__main__":
        maintainers.append(
            {
                "login": login,
                "count": contributors[login], # + question_commentors[login],
                "count": contributors[login], #+ question_commentors[login],
                "avatarUrl": user.avatarUrl,
                "twitterUsername": user.twitterUsername,
                "url": user.url,
@@ -621,7 +615,9 @@ if __name__ == "__main__":
    new_people_content = yaml.dump(
        people, sort_keys=False, width=200, allow_unicode=True
    )
    if people_old_content == new_people_content:
    if (
        people_old_content == new_people_content
    ):
        logging.info("The LangChain People data hasn't changed, finishing.")
        sys.exit(0)
    people_path.write_text(new_people_content, encoding="utf-8")
@@ -634,7 +630,9 @@ if __name__ == "__main__":
    logging.info(f"Creating a new branch {branch_name}")
    subprocess.run(["git", "checkout", "-B", branch_name], check=True)
    logging.info("Adding updated file")
    subprocess.run(["git", "add", str(people_path)], check=True)
    subprocess.run(
        ["git", "add", str(people_path)], check=True
    )
    logging.info("Committing updated file")
    message = "👥 Update LangChain people data"
    result = subprocess.run(["git", "commit", "-m", message], check=True)
@@ -643,4 +641,4 @@ if __name__ == "__main__":
    logging.info("Creating PR")
    pr = repo.create_pull(title=message, body=message, base="master", head=branch_name)
    logging.info(f"Created PR: {pr.number}")
    logging.info("Finished")
    logging.info("Finished")

.github/scripts/check_diff.py (vendored, 27 lines changed)
@@ -1,12 +1,11 @@
import glob
import json
import os
import re
import sys
import tomllib
from collections import defaultdict
import os
from typing import Dict, List, Set

import tomllib
from collections import defaultdict
import glob

LANGCHAIN_DIRS = [
    "libs/core",
@@ -16,13 +15,8 @@ LANGCHAIN_DIRS = [
    "libs/experimental",
]


def all_package_dirs() -> Set[str]:
    return {
        "/".join(path.split("/")[:-1]).lstrip("./")
        for path in glob.glob("./libs/**/pyproject.toml", recursive=True)
        if "libs/cli" not in path and "libs/standard-tests" not in path
    }
    return {"/".join(path.split("/")[:-1]) for path in glob.glob("./libs/**/pyproject.toml", recursive=True)}


def dependents_graph() -> dict:
@@ -32,9 +26,9 @@ def dependents_graph() -> dict:
        if "template" in path:
            continue
        with open(path, "rb") as f:
            pyproject = tomllib.load(f)["tool"]["poetry"]
            pyproject = tomllib.load(f)['tool']['poetry']
        pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
        for dep in pyproject["dependencies"]:
        for dep in pyproject['dependencies']:
            if "langchain" in dep:
                dependents[dep].add(pkg_dir)
    return dependents
@@ -128,12 +122,9 @@ if __name__ == "__main__":

    outputs = {
        "dirs-to-lint": add_dependents(
            dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
            dependents,
        ),
        "dirs-to-test": add_dependents(
            dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
            dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
        ),
        "dirs-to-test": add_dependents(dirs_to_run["test"] | dirs_to_run["extended-test"], dependents),
        "dirs-to-extended-test": list(dirs_to_run["extended-test"]),
        "docs-edited": "true" if docs_edited else "",
    }

.github/scripts/get_min_versions.py (vendored, 4 lines changed)
@@ -74,4 +74,6 @@ if __name__ == "__main__":
    # Call the function to get the minimum versions
    min_versions = get_min_version_from_toml(toml_file)

    print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
    print(
        " ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
    )

.github/workflows/check_new_docs.yml (vendored, 5 lines changed)
@@ -26,11 +26,6 @@ jobs:
          python-version: '3.10'
      - id: files
        uses: Ana06/get-changed-files@v2.2.0
        with:
          filter: |
            *.ipynb
            *.md
            *.mdx
      - name: Check new docs
        run: |
          python docs/scripts/check_templates.py ${{ steps.files.outputs.added }}

.github/workflows/scheduled_test.yml (vendored, 10 lines changed)
@@ -27,6 +27,7 @@ jobs:
          - "libs/partners/groq"
          - "libs/partners/mistralai"
          - "libs/partners/together"
          - "libs/partners/cohere"
          - "libs/partners/google-vertexai"
          - "libs/partners/google-genai"
          - "libs/partners/aws"
@@ -39,6 +40,10 @@ jobs:
        with:
          repository: langchain-ai/langchain-google
          path: langchain-google
      - uses: actions/checkout@v4
        with:
          repository: langchain-ai/langchain-cohere
          path: langchain-cohere
      - uses: actions/checkout@v4
        with:
          repository: langchain-ai/langchain-aws
@@ -48,9 +53,11 @@ jobs:
        run: |
          rm -rf \
            langchain/libs/partners/google-genai \
            langchain/libs/partners/google-vertexai
            langchain/libs/partners/google-vertexai \
            langchain/libs/partners/cohere
          mv langchain-google/libs/genai langchain/libs/partners/google-genai
          mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
          mv langchain-cohere/libs/cohere langchain/libs/partners/cohere
          mv langchain-aws/libs/aws langchain/libs/partners/aws

      - name: Set up Python ${{ matrix.python-version }}
@@ -109,6 +116,7 @@ jobs:
          rm -rf \
            langchain/libs/partners/google-genai \
            langchain/libs/partners/google-vertexai \
            langchain/libs/partners/cohere \
            langchain/libs/partners/aws

      - name: Ensure the tests did not create any additional files

@@ -776,54 +776,14 @@ a few ways to get structured output from models in LangChain.

#### `.with_structured_output()`

For convenience, some LangChain chat models support a [`.with_structured_output()`](/docs/how_to/structured_output/#the-with_structured_output-method)
method. This method only requires a schema as input, and returns a dict or Pydantic object.
For convenience, some LangChain chat models support a `.with_structured_output()` method.
This method only requires a schema as input, and returns a dict or Pydantic object.
Generally, this method is only present on models that support one of the more advanced methods described below,
and will use one of them under the hood. It takes care of importing a suitable output parser and
formatting the schema in the right format for the model.

Here's an example:

```python
from typing import Optional

from langchain_core.pydantic_v1 import BaseModel, Field


class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
    rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")


structured_llm = llm.with_structured_output(Joke)

structured_llm.invoke("Tell me a joke about cats")
```

```
Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)

```

We recommend this method as a starting point when working with structured output:

- It uses other model-specific features under the hood, without the need to import an output parser.
- For the models that use tool calling, no special prompting is needed.
- If multiple underlying techniques are supported, you can supply a `method` parameter to
  [toggle which one is used](/docs/how_to/structured_output/#advanced-specifying-the-method-for-structuring-outputs) (see the sketch after this section).

You may want or need to use other techniques if:

- The chat model you are using does not support tool calling.
- You are working with very complex schemas and the model is having trouble generating outputs that conform.

For more information, check out this [how-to guide](/docs/how_to/structured_output/#the-with_structured_output-method).

You can also check out [this table](/docs/integrations/chat/#advanced-features) for a list of models that support
`with_structured_output()`.

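As a hedged sketch of the `method` toggle mentioned above (assuming a model such as `ChatOpenAI` that supports both tool calling and JSON mode, with `llm` and `Joke` as defined in the example above):

```python
# Ask the model to structure output via JSON mode instead of tool calling.
# `method="json_mode"` is supported by e.g. ChatOpenAI; other providers may
# expose different options.
structured_llm = llm.with_structured_output(Joke, method="json_mode")

structured_llm.invoke(
    "Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys"
)
```
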
#### Raw prompting

The most intuitive way to get a model to structure output is to ask nicely.
@@ -846,8 +806,9 @@ for smooth parsing can be surprisingly difficult and model-specific.
Some may be better at interpreting [JSON schema](https://json-schema.org/), others may be best with TypeScript definitions,
and still others may prefer XML.

While features offered by model providers may increase reliability, prompting techniques remain important for tuning your
results no matter which method you choose.
While we'll next go over some ways that you can take advantage of features offered by
model providers to increase reliability, prompting techniques remain important for tuning your
results no matter what method you choose.

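The example for this section falls outside the hunks shown here; as a hedged illustration of raw prompting (it assumes `langchain-openai` is installed and an OpenAI API key is set — any chat model could be substituted):

```python
# Raw prompting: ask for JSON in the prompt and parse the text ourselves.
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Answer the user query with a JSON object that has `setup` and "
            "`punchline` keys. Return only the JSON.",
        ),
        ("human", "{query}"),
    ]
)
llm = ChatOpenAI(model="gpt-4o-mini")

# JsonOutputParser pulls the first JSON object out of the raw completion.
chain = prompt | llm | JsonOutputParser()
chain.invoke({"query": "Tell me a joke about cats"})
```
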
#### JSON mode
<span data-heading-keywords="json mode"></span>
@@ -857,11 +818,10 @@ Some models, such as [Mistral](/docs/integrations/chat/mistralai/), [OpenAI](/do
support a feature called **JSON mode**, usually enabled via config.

When enabled, JSON mode will constrain the model's output to always be some sort of valid JSON.
Often they require some custom prompting, but it's usually much less burdensome than completely raw prompting and
more along the lines of, `"you must always return JSON"`. The [output is also generally easier to parse](/docs/how_to/output_parser_json/).
Often they require some custom prompting, but it's usually much less burdensome and along the lines of,
`"you must always return JSON"`, and the [output is easier to parse](/docs/how_to/output_parser_json/).

It's also generally simpler to use directly and more commonly available than tool calling, and can give
more flexibility around prompting and shaping results than tool calling.
It's also generally simpler and more commonly available than tool calling.

Here's an example:

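The example itself is not included in this hunk, so here is a hedged sketch of JSON mode with OpenAI (the exact flag is provider-specific; this assumes `langchain-openai` is installed and an API key is set):

```python
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI

# OpenAI's JSON mode is enabled via the `response_format` parameter; note
# that the prompt must still mention JSON or the API rejects the request.
llm = ChatOpenAI(model="gpt-4o-mini").bind(response_format={"type": "json_object"})

chain = llm | JsonOutputParser()
chain.invoke(
    "Return a JSON object with `setup` and `punchline` keys for a joke about cats."
)
```
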
@@ -23,12 +23,12 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install unstructured"
"%pip install \"unstructured[html]\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "7d167ca3-c7c7-4ef0-b509-080629f0f482",
"metadata": {},
"outputs": [
@@ -36,14 +36,14 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[Document(page_content='My First Heading\\n\\nMy first paragraph.', metadata={'source': '../../docs/integrations/document_loaders/example_data/fake-content.html'})]\n"
"[Document(page_content='My First Heading\\n\\nMy first paragraph.', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html'})]\n"
]
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredHTMLLoader\n",
"\n",
"file_path = \"../../docs/integrations/document_loaders/example_data/fake-content.html\"\n",
"file_path = \"../../../docs/integrations/document_loaders/example_data/fake-content.html\"\n",
"\n",
"loader = UnstructuredHTMLLoader(file_path)\n",
"data = loader.load()\n",
@@ -73,7 +73,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 2,
"id": "0a2050a8-6df6-4696-9889-ba367d6f9caa",
"metadata": {},
"outputs": [
@@ -81,7 +81,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[Document(page_content='\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n', metadata={'source': '../../docs/integrations/document_loaders/example_data/fake-content.html', 'title': 'Test Title'})]\n"
"[Document(page_content='\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html', 'title': 'Test Title'})]\n"
]
}
],
@@ -111,7 +111,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.4"
}
},
"nbformat": 4,

@@ -21,12 +21,12 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 19,
"id": "c8b147fb-6877-4f7a-b2ee-ee971c7bc662",
"metadata": {},
"outputs": [],
"source": [
"%pip install \"unstructured[md]\""
"# !pip install \"unstructured[md]\""
]
},
{
@@ -39,7 +39,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "80c50cc4-7ce9-4418-81b9-29c52c7b3627",
"metadata": {},
"outputs": [
@@ -62,7 +62,7 @@
"from langchain_community.document_loaders import UnstructuredMarkdownLoader\n",
"from langchain_core.documents import Document\n",
"\n",
"markdown_path = \"../../../README.md\"\n",
"markdown_path = \"../../../../README.md\"\n",
"loader = UnstructuredMarkdownLoader(markdown_path)\n",
"\n",
"data = loader.load()\n",
@@ -84,7 +84,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "a986bbce-7fd3-41d1-bc47-49f9f57c7cd1",
"metadata": {},
"outputs": [
@@ -92,11 +92,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Number of documents: 66\n",
"Number of documents: 65\n",
"\n",
"page_content='🦜️🔗 LangChain' metadata={'source': '../../../README.md', 'category_depth': 0, 'last_modified': '2024-06-28T15:20:01', 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': '../../..', 'filename': 'README.md', 'category': 'Title'}\n",
"page_content='🦜️🔗 LangChain' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'Title'}\n",
"\n",
"page_content='⚡ Build context-aware reasoning applications ⚡' metadata={'source': '../../../README.md', 'last_modified': '2024-06-28T15:20:01', 'languages': ['eng'], 'parent_id': '200b8a7d0dd03f66e4f13456566d2b3a', 'filetype': 'text/markdown', 'file_directory': '../../..', 'filename': 'README.md', 'category': 'NarrativeText'}\n",
"page_content='⚡ Build context-aware reasoning applications ⚡' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'parent_id': 'c3223b6f7100be08a78f1e8c0c28fde1', 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'NarrativeText'}\n",
"\n"
]
}
@@ -121,7 +121,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 3,
"id": "75abc139-3ded-4e8e-9f21-d0c8ec40fdfc",
"metadata": {},
"outputs": [
@@ -129,21 +129,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'ListItem', 'NarrativeText', 'Title'}\n"
"{'Title', 'NarrativeText', 'ListItem'}\n"
]
}
],
"source": [
"print(set(document.metadata[\"category\"] for document in data))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "223b4c11",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -162,7 +154,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.4"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long
@@ -60,7 +60,7 @@
" * document addition by id (`add_documents` method with `ids` argument)\n",
" * delete by id (`delete` method with `ids` argument)\n",
"\n",
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
" \n",
"## Caution\n",
"\n",
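As a hedged sketch of the indexing API this notebook documents (assuming `langchain`, `langchain-chroma`, and `langchain-openai` are installed; `Chroma` is one of the compatible vectorstores listed above):

```python
from langchain.indexes import SQLRecordManager, index
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

vectorstore = Chroma(collection_name="my_docs", embedding_function=OpenAIEmbeddings())

# The record manager tracks content hashes and write times per namespace.
record_manager = SQLRecordManager(
    "chroma/my_docs", db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()

docs = [Document(page_content="hello", metadata={"source": "a.txt"})]

# `incremental` cleanup deletes stale versions of changed source documents.
index(docs, record_manager, vectorstore, cleanup="incremental", source_id_key="source")
```
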
@@ -351,15 +351,7 @@
"id": "68df3a09",
"metadata": {},
"source": [
"## Memory"
]
},
{
"cell_type": "markdown",
"id": "96e7ffc8",
"metadata": {},
"source": [
"### In LangChain\n",
"## Memory\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could add chat [Memory](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.memory) so it can engage in a multi-turn conversation."
]
@@ -447,7 +439,7 @@
"id": "c2a5a32f",
"metadata": {},
"source": [
"### In LangGraph\n",
"#### In LangGraph\n",
"\n",
"Memory is just [persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/), aka [checkpointing](https://langchain-ai.github.io/langgraph/reference/checkpoints/).\n",
"\n",
@@ -518,8 +510,6 @@
"source": [
"## Iterating through steps\n",
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could iterate over the steps using the [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) (or async `astream`) methods or the [iter](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter) method. LangGraph supports stepwise iteration using [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) "
]
},
@@ -578,7 +568,7 @@
"id": "46ccbcbf",
"metadata": {},
"source": [
"### In LangGraph\n",
"#### In LangGraph\n",
"\n",
"In LangGraph, things are handled natively using [stream](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.graph.CompiledGraph.stream) or the asynchronous `astream` method."
]
@@ -629,8 +619,6 @@
"source": [
"## `return_intermediate_steps`\n",
"\n",
"### In LangChain\n",
"\n",
"Setting this parameter on AgentExecutor allows users to access intermediate_steps, which pairs agent actions (e.g., tool invocations) with their outcomes.\n"
]
},
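A hedged sketch of the `AgentExecutor` side (assuming `agent`, `tools`, and `query` are defined as elsewhere in this guide):

```python
from langchain.agents import AgentExecutor

agent_executor = AgentExecutor(agent=agent, tools=tools, return_intermediate_steps=True)
response = agent_executor.invoke({"input": query})

# Each intermediate step pairs an AgentAction with its observation.
for action, observation in response["intermediate_steps"]:
    print(action.tool, observation)
```
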
@@ -659,8 +647,6 @@
"id": "594f7567-302f-4fa8-85bb-025ac8322162",
"metadata": {},
"source": [
"### In LangGraph\n",
"\n",
"By default the [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) in LangGraph appends all messages to the central state. Therefore, it is easy to see any intermediate steps by just looking at the full state."
]
},
@@ -701,9 +687,11 @@
"source": [
"## `max_iterations`\n",
"\n",
"### In LangChain\n",
"`AgentExecutor` implements a `max_iterations` parameter, whereas this is controlled via `recursion_limit` in LangGraph.\n",
"\n",
"`AgentExecutor` implements a `max_iterations` parameter, allowing users to abort a run that exceeds a specified number of iterations."
"Note that in AgentExecutor, an \"iteration\" includes a full turn of tool invocation and execution. In LangGraph, each step contributes to the recursion limit, so we will need to multiply by two (and add one) to get equivalent results.\n",
"\n",
"If the recursion limit is reached, LangGraph raises a specific exception type, that we can catch and manage similarly to AgentExecutor."
]
},
{
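A hedged sketch of the conversion described above (assuming `app` is a LangGraph react agent and `query` is defined as elsewhere in this guide):

```python
from langgraph.errors import GraphRecursionError

max_iterations = 3
# Each AgentExecutor "iteration" is two LangGraph steps (a model call plus a
# tool execution), plus one step for the final response.
recursion_limit = 2 * max_iterations + 1

try:
    app.invoke(
        {"messages": [("human", query)]},
        {"recursion_limit": recursion_limit},
    )
except GraphRecursionError:
    print("Agent stopped due to max iterations.")
```
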
@@ -781,20 +769,6 @@
"agent_executor.invoke({\"input\": query})"
]
},
{
"cell_type": "markdown",
"id": "dd3a933f",
"metadata": {},
"source": [
"### In LangGraph\n",
"\n",
"In LangGraph this is controlled via `recursion_limit` configuration parameter.\n",
"\n",
"Note that in `AgentExecutor`, an \"iteration\" includes a full turn of tool invocation and execution. In LangGraph, each step contributes to the recursion limit, so we will need to multiply by two (and add one) to get equivalent results.\n",
"\n",
"If the recursion limit is reached, LangGraph raises a specific exception type, that we can catch and manage similarly to AgentExecutor."
]
},
{
"cell_type": "code",
"execution_count": 16,
@@ -840,8 +814,6 @@
"source": [
"## `max_execution_time`\n",
"\n",
"### In LangChain\n",
"\n",
"`AgentExecutor` implements a `max_execution_time` parameter, allowing users to abort a run that exceeds a total time limit."
]
},
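A hedged sketch, under the same assumptions as the earlier sketches (`agent`, `tools`, and `query` defined in the guide):

```python
from langchain.agents import AgentExecutor

agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    max_execution_time=2,  # total run budget, in seconds
)
agent_executor.invoke({"input": query})
```
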
@@ -908,8 +880,6 @@
"id": "d02eb025",
"metadata": {},
"source": [
"### In LangGraph\n",
"\n",
"With LangGraph's react agent, you can control timeouts on two levels. \n",
"\n",
"You can set a `step_timeout` to bound each **step**:"
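A hedged sketch of the per-step bound (assuming `model` and `tools` from earlier in the guide):

```python
from langgraph.prebuilt import create_react_agent

app = create_react_agent(model, tools=tools)
app.step_timeout = 2  # seconds allowed for each step
```
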
@@ -998,8 +968,6 @@
"source": [
"## `early_stopping_method`\n",
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could configure an [early_stopping_method](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.early_stopping_method) to either return a string saying \"Agent stopped due to iteration limit or time limit.\" (`\"force\"`) or prompt the LLM a final time to respond (`\"generate\"`)."
]
},
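A hedged sketch, same assumptions as above:

```python
from langchain.agents import AgentExecutor

agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    early_stopping_method="force",  # or "generate" for one final LLM call
    max_iterations=1,
)
agent_executor.invoke({"input": query})
```
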
@@ -1060,7 +1028,7 @@
"id": "706e05c4",
"metadata": {},
"source": [
"### In LangGraph\n",
"#### In LangGraph\n",
"\n",
"In LangGraph, you can explicitly handle the response behavior outside the agent, since the full state can be accessed."
]
@@ -1109,8 +1077,6 @@
"source": [
"## `trim_intermediate_steps`\n",
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), you could trim the intermediate steps of long-running agents using [trim_intermediate_steps](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.trim_intermediate_steps), which is either an integer (indicating the agent should keep the last N steps) or a custom function.\n",
"\n",
"For instance, we could trim the value so the agent only sees the most recent intermediate step."
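A hedged sketch, same assumptions as above:

```python
from langchain.agents import AgentExecutor

agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    trim_intermediate_steps=1,  # the agent only sees the most recent step
)
agent_executor.invoke({"input": query})
```
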
@@ -1214,7 +1180,7 @@
"id": "3d450c5a",
"metadata": {},
"source": [
"### In LangGraph\n",
"#### In LangGraph\n",
"\n",
"We can use the [`messages_modifier`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) just as before when passing in [prompt templates](#prompt-templates)."
]

@@ -58,7 +58,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "6d55008f",
"metadata": {},
"outputs": [],
@@ -81,17 +81,17 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 38,
"id": "070bf702",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!', rating=8)"
"Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)"
]
},
"execution_count": 3,
"execution_count": 38,
"metadata": {},
"output_type": "execute_result"
}
@@ -514,49 +514,12 @@
")"
]
},
{
"cell_type": "markdown",
"id": "91e95aa2",
"metadata": {},
"source": [
"### (Advanced) Raw outputs\n",
"\n",
"LLMs aren't perfect at generating structured output, especially as schemas become complex. You can avoid raising exceptions and handle the raw output yourself by passing `include_raw=True`. This changes the output format to contain the raw message output, the `parsed` value (if successful), and any resulting errors:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "10ed2842",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_ASK4EmZeZ69Fi3p554Mb4rWy', 'function': {'arguments': '{\"setup\":\"Why was the cat sitting on the computer?\",\"punchline\":\"Because it wanted to keep an eye on the mouse!\"}', 'name': 'Joke'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 36, 'prompt_tokens': 107, 'total_tokens': 143}, 'model_name': 'gpt-4-0125-preview', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-6491d35b-9164-4656-b75c-d7882cfb76cb-0', tool_calls=[{'name': 'Joke', 'args': {'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an eye on the mouse!'}, 'id': 'call_ASK4EmZeZ69Fi3p554Mb4rWy'}], usage_metadata={'input_tokens': 107, 'output_tokens': 36, 'total_tokens': 143}),\n",
" 'parsed': Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!', rating=None),\n",
" 'parsing_error': None}"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"structured_llm = llm.with_structured_output(Joke, include_raw=True)\n",
"\n",
"structured_llm.invoke(\n",
" \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "5e92a98a",
"metadata": {},
"source": [
"## Prompting and parsing model outputs directly\n",
"## Prompting and parsing model directly\n",
"\n",
"Not all models support `.with_structured_output()`, since not all models have tool calling or JSON mode support. For such models you'll need to directly prompt the model to use a specific format, and use an output parser to extract the structured response from the raw model output.\n",
"\n",
@@ -824,9 +787,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "poetry-venv-2",
"language": "python",
"name": "python3"
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {
@@ -838,7 +801,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.1"
}
},
"nbformat": 4,

@@ -2,125 +2,86 @@
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"id": "fbc66410",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_label: AWS Bedrock\n",
"sidebar_label: Bedrock\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"id": "bf733a38-db84-4363-89e2-de6735c37230",
"metadata": {},
"source": [
"# ChatBedrock\n",
"\n",
"This doc will help you get started with AWS Bedrock [chat models](/docs/concepts/#chat-models). Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. Using Amazon Bedrock, you can easily experiment with and evaluate top FMs for your use case, privately customize them with your data using techniques such as fine-tuning and Retrieval Augmented Generation (RAG), and build agents that execute tasks using your enterprise systems and data sources. Since Amazon Bedrock is serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy generative AI capabilities into your applications using the AWS services you are already familiar with.\n",
"\n",
"For more information on which models are accessible via Bedrock, head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/models-features.html).\n",
"\n",
"For detailed documentation of all ChatBedrock features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/bedrock) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatBedrock](https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html) | [langchain-aws](https://api.python.langchain.com/en/latest/aws_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | \n",
"\n",
"## Setup\n",
"\n",
"To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `langchain-aws` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/setting-up.html) to sign up to AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by following [these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html)."
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
">[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of \n",
"> high-performing foundation models (FMs) from leading AI companies like `AI21 Labs`, `Anthropic`, `Cohere`, \n",
"> `Meta`, `Stability AI`, and `Amazon` via a single API, along with a broad set of capabilities you need to \n",
"> build generative AI applications with security, privacy, and responsible AI. Using `Amazon Bedrock`, \n",
"> you can easily experiment with and evaluate top FMs for your use case, privately customize them with \n",
"> your data using techniques such as fine-tuning and `Retrieval Augmented Generation` (`RAG`), and build \n",
"> agents that execute tasks using your enterprise systems and data sources. Since `Amazon Bedrock` is \n",
"> serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy \n",
"> generative AI capabilities into your applications using the AWS services you are already familiar with."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"execution_count": 2,
"id": "d51edc81",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Bedrock integration lives in the `langchain-aws` package:"
"%pip install --upgrade --quiet langchain-aws"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-aws"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"execution_count": 1,
"id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_aws import ChatBedrock\n",
"\n",
"llm = ChatBedrock(\n",
"from langchain_core.messages import HumanMessage"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"chat = ChatBedrock(\n",
" model_id=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
" model_kwargs=dict(temperature=0),\n",
" # other params...\n",
" model_kwargs={\"temperature\": 0.1},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "62e0dbc3",
"execution_count": 12,
"id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
"metadata": {
"tags": []
},
@@ -128,30 +89,38 @@
{
"data": {
"text/plain": [
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", additional_kwargs={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-fdb07dc3-ff72-430d-b22b-e7824b15c766-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", additional_kwargs={'usage': {'prompt_tokens': 20, 'completion_tokens': 21, 'total_tokens': 41}}, response_metadata={'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0', 'usage': {'prompt_tokens': 20, 'completion_tokens': 21, 'total_tokens': 41}}, id='run-994f0362-0e50-4524-afad-3c4f5bb11328-0')"
]
},
"execution_count": 5,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
" HumanMessage(\n",
" content=\"Translate this sentence from English to French. I love programming.\"\n",
" )\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
"chat.invoke(messages)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "a4a4f4d4",
"metadata": {},
"source": [
"### Streaming\n",
"\n",
"To stream responses, you can use the runnable `.stream()` method."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"execution_count": 14,
"id": "d9e52838",
"metadata": {},
"outputs": [
{
@@ -160,124 +129,84 @@
"text": [
"Voici la traduction en français :\n",
"\n",
"J'aime la programmation.\n"
"J'aime la programmation."
]
}
],
"source": [
"print(ai_msg.content)"
"for chunk in chat.stream(messages):\n",
" print(chunk.content, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"id": "c36575b3",
"metadata": {},
"source": [
"## Chaining\n",
"### LLM Caching with OpenSearch Semantic Cache\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
"Use OpenSearch as a semantic cache to cache prompts and responses and evaluate hits based on semantic similarity.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"execution_count": null,
"id": "375d4e56",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe Programmieren.', additional_kwargs={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-5ad005ce-9f31-4670-baa0-9373d418698a-0', usage_metadata={'input_tokens': 23, 'output_tokens': 11, 'total_tokens': 34})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain.globals import set_llm_cache\n",
"from langchain_aws import BedrockEmbeddings, ChatBedrock\n",
"from langchain_community.cache import OpenSearchSemanticCache\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
"bedrock_embeddings = BedrockEmbeddings(\n",
" model_id=\"amazon.titan-embed-text-v1\", region_name=\"us-east-1\"\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
"chat = ChatBedrock(\n",
" model_id=\"anthropic.claude-3-haiku-20240307-v1:0\", model_kwargs={\"temperature\": 0.5}\n",
")\n",
"\n",
"# Enable LLM cache. Make sure OpenSearch is set up and running. Update URL accordingly.\n",
"set_llm_cache(\n",
" OpenSearchSemanticCache(\n",
" opensearch_url=\"http://localhost:9200\", embedding=bedrock_embeddings\n",
" )\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"cell_type": "code",
"execution_count": null,
"id": "bb5d25bb",
"metadata": {},
"outputs": [],
"source": [
"## ***Beta***: Bedrock Converse API\n",
"%%time\n",
"# The first time, it is not yet in cache, so it should take longer\n",
"messages = [HumanMessage(content=\"tell me about Amazon Bedrock\")]\n",
"response_text = chat.invoke(messages)\n",
"\n",
"AWS has recently released the Bedrock Converse API which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html). To improve reliability the ChatBedrock integration will switch to using the Bedrock Converse API as soon as it has feature parity with the existing Bedrock API. Until then a separate [ChatBedrockConverse](https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html#langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse) integration has been released in beta for users who do not need to use custom models.\n",
"\n",
"You can use it like so:"
"print(response_text)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "ae728e59-94d4-40cf-9d24-25ad8723fc59",
"execution_count": null,
"id": "6cfb3086",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/bagatur/langchain/libs/core/langchain_core/_api/beta_decorator.py:87: LangChainBetaWarning: The class `ChatBedrockConverse` is in beta. It is actively being worked on, so the API may change.\n",
" warn_beta(\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", response_metadata={'ResponseMetadata': {'RequestId': '122fb1c8-c3c5-4b06-941e-c95d210bfbc7', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Mon, 01 Jul 2024 21:48:25 GMT', 'content-type': 'application/json', 'content-length': '243', 'connection': 'keep-alive', 'x-amzn-requestid': '122fb1c8-c3c5-4b06-941e-c95d210bfbc7'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': 830}}, id='run-0e3df22f-fcd8-4fbb-a4fb-565227e7e430-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"from langchain_aws import ChatBedrockConverse\n",
"%%time\n",
"# The second time, while not a direct hit, the question is semantically similar to the original question,\n",
"# so it uses the cached result!\n",
"\n",
"llm = ChatBedrockConverse(\n",
" model=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" # other params...\n",
")\n",
"messages = [HumanMessage(content=\"what is amazon bedrock\")]\n",
"response_text = chat.invoke(messages)\n",
"\n",
"llm.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatBedrock features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html\n",
"\n",
"For detailed documentation of all ChatBedrockConverse features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html"
"print(response_text)"
]
}
],
@@ -297,7 +226,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.4"
}
},
"nbformat": 4,

@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"id": "53fbf15f",
"metadata": {},
"source": [
"---\n",
@@ -12,129 +12,103 @@
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"id": "bf733a38-db84-4363-89e2-de6735c37230",
"metadata": {},
"source": [
"# ChatCohere\n",
"# Cohere\n",
"\n",
"This doc will help you get started with Cohere [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatCohere features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_cohere.chat_models.ChatCohere.html).\n",
"\n",
"For an overview of all Cohere models head to the [Cohere docs](https://docs.cohere.com/docs/models).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/cohere) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatCohere](https://api.python.langchain.com/en/latest/chat_models/langchain_cohere.chat_models.ChatCohere.html) | [langchain-cohere](https://api.python.langchain.com/en/latest/cohere_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | \n",
"This notebook covers how to get started with [Cohere chat models](https://cohere.com/chat).\n",
"\n",
"Head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.cohere.ChatCohere.html) for detailed documentation of all attributes and methods."
]
},
{
"cell_type": "markdown",
"id": "3607d67e-e56c-4102-bbba-df2edc0e109e",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"To access Cohere models you'll need to create a Cohere account, get an API key, and install the `langchain-cohere` integration package.\n",
"The integration lives in the `langchain-cohere` package. We can install these with:\n",
"\n",
"### Credentials\n",
"```bash\n",
"pip install -U langchain-cohere\n",
"```\n",
"\n",
"Head to https://dashboard.cohere.com/welcome/login to sign up to Cohere and generate an API key. Once you've done this set the COHERE_API_KEY environment variable:"
"We'll also need to get a [Cohere API key](https://cohere.com/) and set the `COHERE_API_KEY` environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"execution_count": 11,
"id": "2108b517-1e8d-473d-92fa-4f930e8072a7",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"COHERE_API_KEY\"] = getpass.getpass(\"Enter your Cohere API key: \")"
"os.environ[\"COHERE_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"id": "cf690fbb",
"metadata": {},
"source": [
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"execution_count": 12,
"id": "7f11de02",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"id": "4c26754b-b3c9-4d93-8f36-43049bd943bf",
"metadata": {},
"source": [
"### Installation\n",
"## Usage\n",
"\n",
"The LangChain Cohere integration lives in the `langchain-cohere` package:"
"ChatCohere supports all [ChatModel](/docs/how_to#chat-models) functionality:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-cohere"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"execution_count": 13,
"id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_cohere import ChatCohere\n",
"\n",
"llm = ChatCohere(\n",
" model=\"command-r-plus\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
"from langchain_core.messages import HumanMessage"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "62e0dbc3",
"execution_count": 14,
"id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"chat = ChatCohere(model=\"command\")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
"metadata": {
"tags": []
},
@@ -142,110 +116,134 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore programmer.\", additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'd84f80f3-4611-46e6-aed0-9d8665a20a11', 'token_count': {'input_tokens': 89, 'output_tokens': 5}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'd84f80f3-4611-46e6-aed0-9d8665a20a11', 'token_count': {'input_tokens': 89, 'output_tokens': 5}}, id='run-514ab516-ed7e-48ac-b132-2598fb80ebef-0')"
|
||||
"AIMessage(content='4 && 5 \\n6 || 7 \\n\\nWould you like to play a game of odds and evens?', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '2076b614-52b3-4082-a259-cc92cd3d9fea', 'token_count': {'prompt_tokens': 68, 'response_tokens': 23, 'total_tokens': 91, 'billed_tokens': 77}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '2076b614-52b3-4082-a259-cc92cd3d9fea', 'token_count': {'prompt_tokens': 68, 'response_tokens': 23, 'total_tokens': 91, 'billed_tokens': 77}}, id='run-3475e0c8-c89b-4937-9300-e07d652455e1-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
"messages = [HumanMessage(content=\"1\"), HumanMessage(content=\"2 3\")]\n",
|
||||
"chat.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"execution_count": 16,
|
||||
"id": "c5fac0e9-05a4-4fc1-a3b3-e5bbb24b971b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='4 && 5', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'f0708a92-f874-46ee-9b93-334d616ad92e', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'f0708a92-f874-46ee-9b93-334d616ad92e', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, id='run-1635e63e-2994-4e7f-986e-152ddfc95777-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await chat.ainvoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "025be980-e50d-4a68-93dc-c9c7b500ce34",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore programmer.\n"
|
||||
"4 && 5"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
"for chunk in chat.stream(messages):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"execution_count": 18,
|
||||
"id": "064288e4-f184-4496-9427-bcf148fa055e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmierung.', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '053bebde-4e1d-4d06-8ee6-3446e7afa25e', 'token_count': {'input_tokens': 84, 'output_tokens': 6}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '053bebde-4e1d-4d06-8ee6-3446e7afa25e', 'token_count': {'input_tokens': 84, 'output_tokens': 6}}, id='run-53700708-b7fb-417b-af36-1a6fcde38e7d-0')"
|
||||
"[AIMessage(content='4 && 5', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6770ca86-f6c3-4ba3-a285-c4772160612f', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6770ca86-f6c3-4ba3-a285-c4772160612f', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, id='run-8d6fade2-1b39-4e31-ab23-4be622dd0027-0')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
"chat.batch([messages])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"id": "f1c56460",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatCohere features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_cohere.chat_models.ChatCohere.html"
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language-lcel)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "0851b103",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
|
||||
"chain = prompt | chat"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "ae950c0f-1691-47f1-b609-273033cae707",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='What color socks do bears wear?\\n\\nThey don’t wear socks, they have bear feet. \\n\\nHope you laughed! If not, maybe this will help: laughter is the best medicine, and a good sense of humor is infectious!', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6edccf44-9bc8-4139-b30e-13b368f3563c', 'token_count': {'prompt_tokens': 68, 'response_tokens': 51, 'total_tokens': 119, 'billed_tokens': 108}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6edccf44-9bc8-4139-b30e-13b368f3563c', 'token_count': {'prompt_tokens': 68, 'response_tokens': 51, 'total_tokens': 119, 'billed_tokens': 108}}, id='run-ef7f9789-0d4d-43bf-a4f7-f2a0e27a5320-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"topic\": \"bears\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -257,7 +255,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.11.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"id": "529aeba9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
@@ -11,236 +11,190 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"id": "642fd21c-600a-47a1-be96-6e1438b421a9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatFireworks\n",
|
||||
"\n",
|
||||
"This doc help you get started with Fireworks AI [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatFireworks features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_fireworks.chat_models.ChatFireworks.html).\n",
|
||||
">[Fireworks](https://app.fireworks.ai/) accelerates product development on generative AI by creating an innovative AI experiment and production platform. \n",
|
||||
"\n",
|
||||
"Fireworks AI is an AI inference platform to run and customize models. For a list of all models served by Fireworks see the [Fireworks docs](https://fireworks.ai/models).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/fireworks) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatFireworks](https://api.python.langchain.com/en/latest/chat_models/langchain_fireworks.chat_models.ChatFireworks.html) | [langchain-fireworks](https://api.python.langchain.com/en/latest/fireworks_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Fireworks models you'll need to create a Fireworks account, get an API key, and install the `langchain-fireworks` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to (ttps://fireworks.ai/login to sign up to Fireworks and generate an API key. Once you've done this set the FIREWORKS_API_KEY environment variable:"
|
||||
"This example goes over how to use LangChain to interact with `ChatFireworks` models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "4a7c795e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"%pip install langchain-fireworks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"execution_count": 1,
|
||||
"id": "d00d850917865298",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_fireworks import ChatFireworks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f28ebf8b-f14f-46c7-9962-8b8dc42e31be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"\n",
|
||||
"1. Make sure the `langchain-fireworks` package is installed in your environment.\n",
|
||||
"2. Sign in to [Fireworks AI](http://fireworks.ai) for the an API Key to access our models, and make sure it is set as the `FIREWORKS_API_KEY` environment variable.\n",
|
||||
"3. Set up your model using a model id. If the model is not set, the default model is fireworks-llama-v2-7b-chat. See the full, most up-to-date model list on [app.fireworks.ai](https://app.fireworks.ai)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d096fb14-8acc-4047-9cd0-c842430c3a1d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Enter your Fireworks API key: \")"
|
||||
"if \"FIREWORKS_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Fireworks API Key:\")\n",
|
||||
"\n",
|
||||
"# Initialize a Fireworks chat model\n",
|
||||
"chat = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"id": "d8f13144-37cf-47a5-b5a0-e3cdf76d9a72",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"# Calling the Model Directly\n",
|
||||
"\n",
|
||||
"The LangChain Fireworks integration lives in the `langchain-fireworks` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-fireworks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n",
|
||||
"\n",
|
||||
"- TODO: Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_fireworks import ChatFireworks\n",
|
||||
"\n",
|
||||
"llm = ChatFireworks(\n",
|
||||
" model=\"accounts/fireworks/models/llama-v3-70b-instruct\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" timeout=None,\n",
|
||||
" max_retries=2,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore la programmation.\", response_metadata={'token_usage': {'prompt_tokens': 35, 'total_tokens': 44, 'completion_tokens': 9}, 'model_name': 'accounts/fireworks/models/llama-v3-70b-instruct', 'system_fingerprint': '', 'finish_reason': 'stop', 'logprobs': None}, id='run-df28e69a-ff30-457e-a743-06eb14d01cb0-0', usage_metadata={'input_tokens': 35, 'output_tokens': 9, 'total_tokens': 44})"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
"You can call the model directly with a system and human message to get answers."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"id": "72340871-ae2f-415f-b399-0777d32dc379",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Hello! I'm an AI language model, a helpful assistant designed to chat and assist you with any questions or information you might need. I'm here to make your experience as smooth and enjoyable as possible. How can I assist you today?\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# ChatFireworks Wrapper\n",
|
||||
"system_message = SystemMessage(content=\"You are to chat with the user.\")\n",
|
||||
"human_message = HumanMessage(content=\"Who are you?\")\n",
|
||||
"\n",
|
||||
"chat.invoke([system_message, human_message])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "68c6b1fa-2ff7-4a63-8d88-3cec302180b8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I'm an AI and do not have the ability to experience the weather firsthand. However,\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Setting additional parameters: temperature, max_tokens, top_p\n",
|
||||
"chat = ChatFireworks(\n",
|
||||
" model=\"accounts/fireworks/models/mixtral-8x7b-instruct\",\n",
|
||||
" temperature=1,\n",
|
||||
" max_tokens=20,\n",
|
||||
")\n",
|
||||
"system_message = SystemMessage(content=\"You are to chat with the user.\")\n",
|
||||
"human_message = HumanMessage(content=\"How's the weather today?\")\n",
|
||||
"chat.invoke([system_message, human_message])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8c44cb36",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tool Calling\n",
|
||||
"\n",
|
||||
"Fireworks offers the `FireFunction-v2` tool calling model. You can use it for structured output and function calling use cases:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "ee2db682",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore la programmation.\n"
|
||||
"{'function': {'arguments': '{\"name\": \"Erick\", \"age\": 27}',\n",
|
||||
" 'name': 'ExtractFields'},\n",
|
||||
" 'id': 'call_J0WYP2TLenaFw3UeVU0UnWqx',\n",
|
||||
" 'index': 0,\n",
|
||||
" 'type': 'function'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"from pprint import pprint\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ExtractFields(BaseModel):\n",
|
||||
" name: str\n",
|
||||
" age: int\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chat = ChatFireworks(\n",
|
||||
" model=\"accounts/fireworks/models/firefunction-v2\",\n",
|
||||
").bind_tools([ExtractFields])\n",
|
||||
"\n",
|
||||
"result = chat.invoke(\"I am a 27 year old named Erick\")\n",
|
||||
"\n",
|
||||
"pprint(result.additional_kwargs[\"tool_calls\"][0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"execution_count": null,
|
||||
"id": "2321a4e6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe das Programmieren.', response_metadata={'token_usage': {'prompt_tokens': 30, 'total_tokens': 37, 'completion_tokens': 7}, 'model_name': 'accounts/fireworks/models/llama-v3-70b-instruct', 'system_fingerprint': '', 'finish_reason': 'stop', 'logprobs': None}, id='run-ff3f91ad-ed81-4acf-9f59-7490dc8d8f48-0', usage_metadata={'input_tokens': 30, 'output_tokens': 7, 'total_tokens': 37})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatFireworks features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_fireworks.chat_models.ChatFireworks.html"
|
||||
]
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -259,7 +213,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -454,7 +454,7 @@
|
||||
"\n",
|
||||
"Please note that `ChatWatsonx.bind_tools` is on beta state, so right now we only support `mistralai/mixtral-8x7b-instruct-v01` model.\n",
|
||||
"\n",
|
||||
"You should also redefine `max_new_tokens` parameter to get the entire model response. By default `max_new_tokens` is set to 20."
|
||||
"You should also redefine `max_new_tokens` parameter to get the entire model response. By default `max_new_tokens` is set ot 20."
|
||||
]
|
||||
},
|
||||
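Since this hunk shows only the prose around `max_new_tokens`, here is a minimal sketch of what raising it at instantiation time could look like; it assumes the `langchain-ibm` package, and the `url` and `project_id` values are placeholders, not taken from the notebook:

```python
# Minimal sketch, not the notebook's own cell: assumes langchain-ibm is
# installed and watsonx.ai credentials are configured in the environment.
from langchain_ibm import ChatWatsonx

chat = ChatWatsonx(
    model_id="mistralai/mixtral-8x7b-instruct-v01",  # the one model bind_tools supports while in beta
    url="https://us-south.ml.cloud.ibm.com",  # placeholder region endpoint
    project_id="YOUR_PROJECT_ID",  # placeholder
    params={"max_new_tokens": 200},  # raise from the default of 20 so responses aren't truncated
)
```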
{
|
||||
@@ -577,7 +577,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.1.undefined"
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
"source": [
|
||||
"# OllamaFunctions\n",
|
||||
"\n",
|
||||
"This notebook shows how to use an experimental wrapper around Ollama that gives it [tool calling capabilities](https://python.langchain.com/v0.2/docs/concepts/#functiontool-calling).\n",
|
||||
"This notebook shows how to use an experimental wrapper around Ollama that gives it the same API as OpenAI Functions.\n",
|
||||
"\n",
|
||||
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use llama3 and phi3 models.\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
|
||||
@@ -25,75 +25,81 @@
|
||||
"This is an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. Use with caution.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"|:-----------------------------------------------------------------------------------------------------------------------------------:|:-------:|:-----:|:------------:|:----------:|:-----------------:|:--------------:|\n",
|
||||
"| [OllamaFunctions](https://api.python.langchain.com/en/latest/llms/langchain_experimental.llms.ollama_function.OllamaFunctions.html) | [langchain-experimental](https://api.python.langchain.com/en/latest/openai_api_reference.html) | ✅ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access `OllamaFunctions` you will need to install `langchain-experimental` integration package.\n",
|
||||
"Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance as well as download and serve [supported models](https://ollama.com/library).\n",
|
||||
"Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"Credentials support is not present at this time.\n",
|
||||
"\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The `OllamaFunctions` class lives in the `langchain-experimental` package:\n"
|
||||
"You can initialize OllamaFunctions in a similar way to how you'd initialize a standard ChatOllama instance:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-experimental"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"`OllamaFunctions` takes the same init parameters as `ChatOllama`. \n",
|
||||
"\n",
|
||||
"In order to use tool calling, you must also specify `format=\"json\"`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-06-23T15:20:21.818089Z",
|
||||
"start_time": "2024-06-23T15:20:21.815759Z"
|
||||
}
|
||||
"end_time": "2024-04-28T00:53:25.276543Z",
|
||||
"start_time": "2024-04-28T00:53:24.881202Z"
|
||||
},
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.llms.ollama_functions import OllamaFunctions\n",
|
||||
"\n",
|
||||
"llm = OllamaFunctions(model=\"phi3\")"
|
||||
"model = OllamaFunctions(model=\"llama3\", format=\"json\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
"You can then bind functions defined with JSON Schema parameters and a `function_call` parameter to force the model to call the given function:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-26T04:59:17.270931Z",
|
||||
"start_time": "2024-04-26T04:59:17.263347Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = model.bind_tools(\n",
|
||||
" tools=[\n",
|
||||
" {\n",
|
||||
" \"name\": \"get_current_weather\",\n",
|
||||
" \"description\": \"Get the current weather in a given location\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"location\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"The city and state, \" \"e.g. San Francisco, CA\",\n",
|
||||
" },\n",
|
||||
" \"unit\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"enum\": [\"celsius\", \"fahrenheit\"],\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"location\"],\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
" function_call={\"name\": \"get_current_weather\"},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Calling a function with this model then results in JSON output matching the provided schema:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -101,15 +107,15 @@
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-06-23T15:20:46.794689Z",
|
||||
"start_time": "2024-06-23T15:20:44.982632Z"
|
||||
"end_time": "2024-04-26T04:59:26.092428Z",
|
||||
"start_time": "2024-04-26T04:59:17.272627Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore programmer.\", id='run-94815fcf-ae11-438a-ba3f-00819328b5cd-0')"
|
||||
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_weather', 'arguments': '{\"location\": \"Boston, MA\"}'}}, id='run-1791f9fe-95ad-4ca4-bdf7-9f73eab31e6f-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
@@ -118,55 +124,79 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"J'adore programmer.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg.content"
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"model.invoke(\"what is the weather in Boston?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"## Structured Output\n",
|
||||
"\n",
|
||||
"We can [chain](https://python.langchain.com/v0.2/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
"One useful thing you can do with function calling using `with_structured_output()` function is extracting properties from a given input in a structured format:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-26T04:59:26.098828Z",
|
||||
"start_time": "2024-04-26T04:59:26.094021Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Schema for structured response\n",
|
||||
"class Person(BaseModel):\n",
|
||||
" name: str = Field(description=\"The person's name\", required=True)\n",
|
||||
" height: float = Field(description=\"The person's height\", required=True)\n",
|
||||
" hair_color: str = Field(description=\"The person's hair color\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Prompt template\n",
|
||||
"prompt = PromptTemplate.from_template(\n",
|
||||
" \"\"\"Alex is 5 feet tall. \n",
|
||||
"Claudia is 1 feet taller than Alex and jumps higher than him. \n",
|
||||
"Claudia is a brunette and Alex is blonde.\n",
|
||||
"\n",
|
||||
"Human: {question}\n",
|
||||
"AI: \"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Chain\n",
|
||||
"llm = OllamaFunctions(model=\"phi3\", format=\"json\", temperature=0)\n",
|
||||
"structured_llm = llm.with_structured_output(Person)\n",
|
||||
"chain = prompt | structured_llm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Extracting data about Alex"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-26T04:59:30.164955Z",
|
||||
"start_time": "2024-04-26T04:59:26.099790Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Programmieren ist sehr verrückt! Es freut mich, dass Sie auf Programmierung so positiv eingestellt sind.', id='run-ee99be5e-4d48-4ab6-b602-35415f0bdbde-0')"
|
||||
"Person(name='Alex', height=5.0, hair_color='blonde')"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
@@ -175,123 +205,41 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
"alex = chain.invoke(\"Describe Alex\")\n",
|
||||
"alex"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool Calling\n",
|
||||
"\n",
|
||||
"### OllamaFunctions.bind_tools()\n",
|
||||
"\n",
|
||||
"With `OllamaFunctions.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to a tool definition schemas, which looks like:"
|
||||
"### Extracting data about Claudia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GetWeather(BaseModel):\n",
|
||||
" \"\"\"Get the current weather in a given location\"\"\"\n",
|
||||
"\n",
|
||||
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools([GetWeather])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-26T04:59:31.509846Z",
|
||||
"start_time": "2024-04-26T04:59:30.165662Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', id='run-b9769435-ec6a-4cb8-8545-5a5035fc19bd-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco, CA'}, 'id': 'call_064c4e1cb27e4adb9e4e7ed60362ecc9'}])"
|
||||
"Person(name='Claudia', height=6.0, hair_color='brunette')"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"what is the weather like in San Francisco\",\n",
|
||||
")\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### AIMessage.tool_calls\n",
|
||||
"\n",
|
||||
"Notice that the AIMessage has a `tool_calls` attribute. This contains in a standardized `ToolCall` format that is model-provider agnostic."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'GetWeather',\n",
|
||||
" 'args': {'location': 'San Francisco, CA'},\n",
|
||||
" 'id': 'call_064c4e1cb27e4adb9e4e7ed60362ecc9'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "For more on binding tools and tool call outputs, head to the [tool calling](docs/how_to/function_calling) docs."
|
||||
},
|
||||
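As a brief illustration of consuming those `tool_calls` (a sketch, not part of the notebook; `get_weather` is a hypothetical stand-in for a real lookup):

```python
from langchain_core.messages import ToolMessage


def get_weather(location: str) -> str:
    # Hypothetical helper; a real implementation would call a weather API.
    return f"Sunny and 22°C in {location}"


# Execute each requested tool call and wrap the result in a ToolMessage,
# matching ids so the model can associate results with its requests.
tool_messages = [
    ToolMessage(content=get_weather(**tc["args"]), tool_call_id=tc["id"])
    for tc in ai_msg.tool_calls
]
```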
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ToolCallingLLM features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/langchain_experimental.llms.ollama_functions.OllamaFunctions.html\n"
|
||||
"claudia = chain.invoke(\"Describe Claudia\")\n",
|
||||
"claudia"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -311,7 +259,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -45,7 +45,7 @@
|
||||
"The code provided assumes that your PPLX_API_KEY is set in your environment variables. If you would like to manually specify your API key and also choose a different model, you can use the following code:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"chat = ChatPerplexity(temperature=0, pplx_api_key=\"YOUR_API_KEY\", model=\"llama-3-sonar-small-32k-online\")\n",
|
||||
"chat = ChatPerplexity(temperature=0, pplx_api_key=\"YOUR_API_KEY\", model=\"pplx-70b-online\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You can check a list of available models [here](https://docs.perplexity.ai/docs/model-cards). For reproducibility, we can set the API key dynamically by taking it as an input in this notebook."
|
||||
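The input cell itself falls outside this hunk; a minimal sketch of taking the key as input, assuming the same `getpass`/`os` pattern used in the other notebooks here:

```python
import getpass
import os

# Prompt for the Perplexity API key only if it isn't already set.
if "PPLX_API_KEY" not in os.environ:
    os.environ["PPLX_API_KEY"] = getpass.getpass("Enter your Perplexity API key: ")
```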
@@ -78,7 +78,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatPerplexity(temperature=0, model=\"llama-3-sonar-small-32k-online\")"
|
||||
"chat = ChatPerplexity(temperature=0, model=\"pplx-70b-online\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -146,7 +146,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatPerplexity(temperature=0, model=\"llama-3-sonar-small-32k-online\")\n",
|
||||
"chat = ChatPerplexity(temperature=0, model=\"pplx-70b-online\")\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"human\", \"Tell me a joke about {topic}\")])\n",
|
||||
"chain = prompt | chat\n",
|
||||
"response = chain.invoke({\"topic\": \"cats\"})\n",
|
||||
@@ -195,7 +195,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatPerplexity(temperature=0.7, model=\"llama-3-sonar-small-32k-online\")\n",
|
||||
"chat = ChatPerplexity(temperature=0.7, model=\"pplx-70b-online\")\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"human\", \"Give me a list of famous tourist attractions in Pakistan\")]\n",
|
||||
")\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -7,9 +7,7 @@
|
||||
"source": [
|
||||
"# Email\n",
|
||||
"\n",
|
||||
"This notebook shows how to load email (`.eml`) or `Microsoft Outlook` (`.msg`) files.\n",
|
||||
"\n",
|
||||
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
|
||||
"This notebook shows how to load email (`.eml`) or `Microsoft Outlook` (`.msg`) files."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -29,13 +27,49 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet unstructured"
|
||||
"%pip install --upgrade --quiet unstructured"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "40cd9806",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredEmailLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2d20b852",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "2d20b852",
|
||||
"id": "579fa702",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "90c1d899",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
@@ -43,21 +77,15 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='This is a test email to use for unit tests.\\n\\nImportant points:\\n\\nRoses are red\\n\\nViolets are blue', metadata={'source': './example_data/fake-email.eml'})]"
|
||||
"[Document(page_content='This is a test email to use for unit tests.\\n\\nImportant points:\\n\\nRoses are red\\n\\nViolets are blue', metadata={'source': 'example_data/fake-email.eml'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredEmailLoader\n",
|
||||
"\n",
|
||||
"loader = UnstructuredEmailLoader(\"./example_data/fake-email.eml\")\n",
|
||||
"\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"data"
|
||||
]
|
||||
},
|
||||
@@ -73,26 +101,42 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"id": "b9592eaf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\", mode=\"elements\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0b16d03f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "d7bdc5e5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'file_directory': 'example_data', 'filename': 'fake-email.eml', 'last_modified': '2022-12-16T17:04:16-05:00', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'languages': ['eng'], 'filetype': 'message/rfc822', 'category': 'NarrativeText'})"
|
||||
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'filename': 'fake-email.eml', 'file_directory': 'example_data', 'date': '2022-12-16T17:04:16-05:00', 'filetype': 'message/rfc822', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'category': 'NarrativeText'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\", mode=\"elements\")\n",
|
||||
"\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"data[0]"
|
||||
]
|
||||
},
|
||||
@@ -108,30 +152,46 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 8,
|
||||
"id": "6539f166",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'file_directory': 'example_data', 'filename': 'fake-email.eml', 'last_modified': '2022-12-16T17:04:16-05:00', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'languages': ['eng'], 'filetype': 'message/rfc822', 'category': 'NarrativeText'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredEmailLoader(\n",
|
||||
" \"example_data/fake-email.eml\",\n",
|
||||
" mode=\"elements\",\n",
|
||||
" process_attachments=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "aebead38",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "ddeb60f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'filename': 'fake-email.eml', 'file_directory': 'example_data', 'date': '2022-12-16T17:04:16-05:00', 'filetype': 'message/rfc822', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'category': 'NarrativeText'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"data[0]"
|
||||
]
|
||||
},
|
||||
@@ -150,33 +210,57 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet extract_msg"
|
||||
"%pip install --upgrade --quiet extract_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 8,
|
||||
"id": "1e7a8444",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import OutlookMessageLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "77a055e6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = OutlookMessageLoader(\"example_data/fake-email.msg\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "789882de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "46aa0632",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='This is a test email to experiment with the MS Outlook MSG Extractor\\r\\n\\r\\n\\r\\n-- \\r\\n\\r\\n\\r\\nKind regards\\r\\n\\r\\n\\r\\n\\r\\n\\r\\nBrian Zhou\\r\\n\\r\\n', metadata={'source': 'example_data/fake-email.msg', 'subject': 'Test for TIF files', 'sender': 'Brian Zhou <brizhou@gmail.com>', 'date': datetime.datetime(2013, 11, 18, 0, 26, 24, tzinfo=zoneinfo.ZoneInfo(key='America/Los_Angeles'))})"
|
||||
"Document(page_content='This is a test email to experiment with the MS Outlook MSG Extractor\\r\\n\\r\\n\\r\\n-- \\r\\n\\r\\n\\r\\nKind regards\\r\\n\\r\\n\\r\\n\\r\\n\\r\\nBrian Zhou\\r\\n\\r\\n', metadata={'subject': 'Test for TIF files', 'sender': 'Brian Zhou <brizhou@gmail.com>', 'date': 'Mon, 18 Nov 2013 16:26:24 +0800'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import OutlookMessageLoader\n",
|
||||
"\n",
|
||||
"loader = OutlookMessageLoader(\"example_data/fake-email.msg\")\n",
|
||||
"\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"data[0]"
|
||||
]
|
||||
},
|
||||
@@ -205,7 +289,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
"version": "3.8.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
File diff suppressed because one or more lines are too long
Binary file not shown.
Binary file not shown.
|
Before: image not shown | Size: 408 KiB
@@ -1,723 +0,0 @@
|
||||
Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.
|
||||
|
||||
Last year COVID-19 kept us apart. This year we are finally together again.
|
||||
|
||||
Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.
|
||||
|
||||
With a duty to one another to the American people to the Constitution.
|
||||
|
||||
And with an unwavering resolve that freedom will always triumph over tyranny.
|
||||
|
||||
Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated.
|
||||
|
||||
He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined.
|
||||
|
||||
He met the Ukrainian people.
|
||||
|
||||
From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.
|
||||
|
||||
Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.
|
||||
|
||||
In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight.
|
||||
|
||||
Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world.
|
||||
|
||||
Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people.
|
||||
|
||||
Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos.
|
||||
|
||||
They keep moving.
|
||||
|
||||
And the costs and the threats to America and the world keep rising.
|
||||
|
||||
That’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2.
|
||||
|
||||
The United States is a member along with 29 other nations.
|
||||
|
||||
It matters. American diplomacy matters. American resolve matters.
|
||||
|
||||
Putin’s latest attack on Ukraine was premeditated and unprovoked.
|
||||
|
||||
He rejected repeated efforts at diplomacy.
|
||||
|
||||
He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did.
|
||||
|
||||
We prepared extensively and carefully.
|
||||
|
||||
We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin.
I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression.

We countered Russia’s lies with truth.

And now that he has acted the free world is holding him accountable.

Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland.

We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever.

Together with our allies – we are right now enforcing powerful economic sanctions.

We are cutting off Russia’s largest banks from the international financial system.

Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless.

We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come.

Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime: no more.

The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs.

We are joining with our European allies to find and seize your yachts, your luxury apartments, your private jets. We are coming for your ill-begotten gains.

And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze – on their economy. The Ruble has lost 30% of its value.

The Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame.

Together with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance.

We are giving more than $1 Billion in direct assistance to Ukraine.

And we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering.

Let me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine.

Our forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west.

For that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia.

As I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power.

And we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days, weeks, months, will be hard on them.

Putin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run.

And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.

To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world.

And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers.

Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world.

America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies.

These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming.

But I want you to know that we are going to be okay.

When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger.

While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly.

We see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine.

In the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security.

This is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people.

To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you.

Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people.

He will never extinguish their love of freedom. He will never weaken the resolve of the free world.

We meet tonight in an America that has lived through two of the hardest years this nation has ever faced.

The pandemic has been punishing.

And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more.

I understand.

I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it.

That’s why one of the first things I did as President was fight to pass the American Rescue Plan.

Because people were hurting. We needed to act, and we did.

Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis.

It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans.

Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance.

And as my Dad used to say, it gave people a little breathing room.

And unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind.

And it worked. It created jobs. Lots of jobs.

In fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year than ever before in the history of America.
Our economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long.

For the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else.

But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century.

Vice President Harris and I ran for office with a new economic vision for America.

Invest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up and the middle out, not from the top down.

Because we know that when the middle class grows, the poor have a ladder up and the wealthy do very well.

America used to have the best roads, bridges, and airports on Earth.

Now our infrastructure is ranked 13th in the world.

We won’t be able to compete for the jobs of the 21st Century if we don’t fix that.

That’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history.

This was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen.

We’re done talking about infrastructure weeks.

We’re going to have an infrastructure decade.

It is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China.

As I’ve told Xi Jinping, it is never a good bet to bet against the American people.

We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America.

And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice.

We’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities.

4,000 projects have already been announced.

And tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair.

When we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs.

The federal government spends about $600 Billion a year to keep the country safe and secure.

There’s been a law on the books for almost a century to make sure taxpayers’ dollars support American jobs and businesses.

Every Administration says they’ll do it, but we are actually doing it.

We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America.

But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors.

That’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing.

Let me give you one example of why it’s so important to pass it.

If you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land.

It won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built.

This is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”.

Up to eight state-of-the-art factories in one place. 10,000 new good-paying jobs.

Some of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives.

Smartphones. The Internet. Technology we have yet to invent.

But that’s just the beginning.

Intel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from $20 billion to $100 billion.

That would be one of the biggest investments in manufacturing in American history.

And all they’re waiting for is for you to pass this bill.

So let’s not wait any longer. Send it to my desk. I’ll sign it.

And we will really take off.

And Intel is not alone.

There’s something happening in America.

Just look around and you’ll see an amazing story.

The rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing.

Companies are choosing to build new factories here, when just a few years ago, they would have built them overseas.

That’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country.

GM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan.

All told, we created 369,000 new manufacturing jobs in America just last year.

Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight.

As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.”

It’s time.

But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills.

Inflation is robbing them of the gains they might otherwise feel.

I get it. That’s why my top priority is getting prices under control.

Look, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories.

The pandemic also disrupted global supply chains.

When factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up.

Look at cars.

Last year, there weren’t enough semiconductors to make all the cars that people wanted to buy.

And guess what, prices of automobiles went up.

So—we have a choice.

One way to fight inflation is to drive down wages and make Americans poorer.

I have a better plan to fight inflation.

Lower your costs, not your wages.

Make more cars and semiconductors in America.

More infrastructure and innovation in America.

More goods moving faster and cheaper in America.

More jobs where you can earn a good living in America.

And instead of relying on foreign supply chains, let’s make it in America.

Economists call it “increasing the productive capacity of our economy.”

I call it building a better America.

My plan to fight inflation will lower your costs and lower the deficit.

17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan:
First – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis.

He and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make.

But drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom.

Imagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it.

What it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be.

Joshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy.

For Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it.

Drug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does.

Look, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let’s close the coverage gap and make those savings permanent.

Second – cut energy costs for families an average of $500 a year by combatting climate change.

Let’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more; lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again.

Third – cut the cost of child care. Many families pay up to $14,000 a year for child care per child.

Middle-class and working families shouldn’t have to pay more than 7% of their income for care of young children.

My plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work.

My plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old.

All of these will lower costs.

And under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody.

The one thing all Americans agree on is that the tax system is not fair. We have to fix it.

I’m not looking to punish anyone. But let’s make sure corporations and the wealthiest Americans start paying their fair share.

Just last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax.

That’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations.

We got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas.

That’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter.

So that’s my plan. It will grow the economy and lower costs for families.

So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation.

My plan will not only lower costs to give families a fair shot, it will lower the deficit.

The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted.

But in my administration, the watchdogs have been welcomed back.

We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans.

And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud.

By the end of this year, the deficit will be down to less than half what it was before I took office.

The only president ever to cut the deficit by more than one trillion dollars in a single year.

Lowering your costs also means demanding more competition.

I’m a capitalist, but capitalism without competition isn’t capitalism.

It’s exploitation—and it drives up prices.

When corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under.

We see it happening with ocean carriers moving goods in and out of America.

During the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits.

Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers.

And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up.

That ends on my watch.

Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect.

We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees.

Let’s pass the Paycheck Fairness Act and paid leave.

Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty.

Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges.

And let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped.

When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America.

For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation.

And I know you’re tired, frustrated, and exhausted.

But I also know this.

Because of the progress we’ve made, because of your resilience and the tools we have, tonight I can say we are moving forward safely, back to more normal routines.

We’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July.

Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines.

Under these new guidelines, most Americans in most of the country can now be mask free.

And based on the projections, more of the country will reach that point across the next couple of weeks.

Thanks to the progress we have made this past year, COVID-19 need no longer control our lives.

I know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19.

We will continue to combat the virus as we do other diseases. And because this is a virus that mutates and spreads, we will stay on guard.

Here are four common sense steps as we move forward safely.

First, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection.

We will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children.

The scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do.

We’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%.

We’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month.

And we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost.

If you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks.

We’re leaving no one behind or ignoring anyone’s needs as we move forward.

And on testing, we have made hundreds of millions of tests available for you to order for free.

Even if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week.
Second – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants.

If necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years.

And, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed.

I cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does.

Third – we can end the shutdown of schools and businesses. We have the tools we need.

It’s time for Americans to get back to work and fill our great downtowns again. People working from home can feel safe to begin to return to the office.

We’re doing that here in the federal government. The vast majority of federal workers will once again work in person.

Our schools are open. Let’s keep it that way. Our kids need to be in school.

And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely.

We achieved this because we provided free vaccines, treatments, tests, and masks.

Of course, continuing this costs money.

I will soon send Congress a request.

The vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly.

Fourth, we will continue vaccinating the world.

We’ve sent 475 Million vaccine doses to 112 countries, more than any other nation.

And we won’t stop.

We have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life.

Let’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease.

Let’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans.

We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together.

I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera.

They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun.

Officer Mora was 27 years old.

Officer Rivera was 22.

Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers.

I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.

I’ve worked on these issues a long time.

I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.

So let’s not abandon our streets. Or choose between safety and equal justice.

Let’s come together to protect our communities, restore trust, and hold law enforcement accountable.

That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers.

That’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope.

We should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities.

I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe.

And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced.

And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon?

Ban assault weapons and high-capacity magazines.

Repeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued.

These laws don’t infringe on the Second Amendment. They save lives.

The most fundamental right in America is the right to vote – and to have it counted. And it’s under assault.

In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections.

We cannot let this happen.

Tonight, I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections.

Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.

One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.

And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.

A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.

And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.

We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling.

We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers.

We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster.

We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.

We can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours.

Provide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers.

Revise our laws so businesses have the workers they need and families don’t wait decades to reunite.

It’s not only the right thing to do—it’s the economically smart thing to do.

That’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce.

Let’s get it done once and for all.

Advancing liberty and justice also requires protecting the rights of women.

The constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before.

If we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America.

And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong.

As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential.

While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice.

And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things.

So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together.
First, beat the opioid epidemic.

There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery.

Get rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers.

If you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery.

Second, let’s take on mental health. Especially among our children, whose lives and education have been turned upside down.

The American Rescue Plan gave schools money to hire teachers and help students make up for lost learning.

I urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor.

Children were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media.

As Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit.

It’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children.

And let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care.

Third, support our veterans.

Veterans are the best of us.

I’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home.

My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free.

Our troops in Iraq and Afghanistan faced many dangers.

One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more.

When they came home, many of the world’s fittest and best trained warriors were never the same.

Headaches. Numbness. Dizziness.

A cancer that would put them in a flag-draped coffin.

I know.

One of those soldiers was my son Major Beau Biden.

We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops.

But I’m committed to finding out everything we can.

Committed to military families like Danielle Robinson from Ohio.

The widow of Sergeant First Class Heath Robinson.

He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq.

Stationed near Baghdad, just yards from burn pits the size of football fields.

Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter.

But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body.

Danielle says Heath was a fighter to the very end.

He didn’t know how to stop fighting, and neither did she.

Through her pain she found purpose to demand we do better.

Tonight, Danielle—we are.

The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits.

And tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers.

I’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve.

And fourth, let’s end cancer as we know it.

This is personal to me and Jill, to Kamala, and to so many of you.

Cancer is the #2 cause of death in America–second only to heart disease.

Last month, I announced our plan to supercharge the Cancer Moonshot that President Obama asked me to lead six years ago.

Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases.

More support for patients and families.

To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health.

It’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more.

ARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more.

A unity agenda for the nation.

We can do this.
My fellow Americans—tonight, we have gathered in a sacred space—the citadel of our democracy.

In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things.

We have fought for freedom, expanded liberty, defeated totalitarianism and terror.

And built the strongest, freest, and most prosperous nation the world has ever known.

Now is the hour.

Our moment of responsibility.

Our test of resolve and conscience, of history itself.

It is in this moment that our character is formed. Our purpose is found. Our future is forged.

Well I know this nation.

We will meet the test.

To protect freedom and liberty, to expand fairness and opportunity.

We will save democracy.

As hard as these times have been, I am more optimistic about America today than I have been my whole life.

Because I see the future that is within our grasp.

Because I know there is simply nothing beyond our capacity.

We are the only nation on Earth that has always turned every crisis we have faced into an opportunity.

The only nation that can be defined by a single word: possibilities.

So on this night, in our 245th year as a nation, I have come to report on the State of the Union.

And my report is this: the State of the Union is strong—because you, the American people, are strong.

We are stronger today than we were a year ago.

And we will be stronger a year from now than we are today.

Now is our moment to meet and overcome the challenges of our time.

And we will, as one people.

One America.

The United States of America.

May God bless you all. May God protect our troops.
@@ -7,9 +7,7 @@
"source": [
"# Images\n",
"\n",
"This covers how to load images into a document format that we can use downstream with other LangChain modules.\n",
"\n",
"It uses [Unstructured](https://unstructured.io/) to handle a wide variety of image formats, such as `.jpg` and `.png`. Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"This covers how to load images such as `JPG` or `PNG` into a document format that we can use downstream."
]
},
{
@@ -29,35 +27,63 @@
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet \"unstructured[all-docs]\""
"%pip install --upgrade --quiet pdfminer"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "0cc0cd42",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_community.document_loaders.image import UnstructuredImageLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "0cc0cd42",
"id": "082d557c",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"loader = UnstructuredImageLoader(\"layout-parser-paper-fast.jpg\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "df11c953",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "4284d44c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='2021\\n\\n2103.15348v2 [cs.CV] 21 Jun\\n\\narXiv\\n\\nLayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\\n\\nZejiang Shen! (&4), Ruochen Zhang?, Melissa Dell?, Benjamin Charles Germain Lee*, Jacob Carlson?, and Weining Li?\\n\\n1\\n\\nAllen Institute for AI shannons@allenai.org ? Brown University ruochen_zhang@brown. edu 3 Harvard University {melissadell, jacob_carlson}@fas.harvard.edu 4 University of Washington begl@cs.washington.edu 5 University of Waterloo w4221i@uwaterloo.ca\\n\\nAbstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https: //layout-parser.github. io.\\n\\nKeywords: Document Image Analysis - Deep Learning - Layout Analysis - Character Recognition - Open Source library - Toolkit.\\n\\n1 Introduction\\n\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11,', metadata={'source': './example_data/layout-parser-paper-screenshot.png'})"
"Document(page_content=\"LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\n\\n\\n‘Zxjiang Shen' (F3}, Ruochen Zhang”, Melissa Dell*, Benjamin Charles Germain\\nLeet, Jacob Carlson, and Weining LiF\\n\\n\\nsugehen\\n\\nshangthrows, et\\n\\n“Abstract. Recent advanocs in document image analysis (DIA) have been\\n‘pimarliy driven bythe application of neural networks dell roar\\n{uteomer could be aly deployed in production and extended fo farther\\n[nvetigtion. However, various factory ke lcely organize codebanee\\nsnd sophisticated modal cnigurations compat the ey ree of\\n‘erin! innovation by wide sence, Though there have been sng\\n‘Hors to improve reuablty and simplify deep lees (DL) mode\\n‘aon, sone of them ae optimized for challenge inthe demain of DIA,\\nThis roprscte a major gap in the extng fol, sw DIA i eal to\\nscademic research acon wie range of dpi in the social ssencee\\n[rary for streamlining the sage of DL in DIA research and appicn\\n‘tons The core LayoutFaraer brary comes with a sch of simple and\\nIntative interfaee or applying and eutomiing DI. odel fr Inyo de\\npltfom for sharing both protrined modes an fal document dist\\n{ation pipeline We demonutate that LayootPareer shea fr both\\nlightweight and lrgeseledgtieation pipelines in eal-word uae ces\\nThe leary pblely smal at Btspe://layost-pareergsthab So\\n\\n\\n\\n‘Keywords: Document Image Analysis» Deep Learning Layout Analysis\\n‘Character Renguition - Open Serres dary « Tol\\n\\n\\nIntroduction\\n\\n\\n‘Deep Learning(DL)-based approaches are the state-of-the-art for a wide range of\\ndoctiment image analysis (DIA) tea including document image clasiffeation [I]\\n\", lookup_str='', metadata={'source': 'layout-parser-paper-fast.jpg'}, lookup_index=0)"
]
},
"execution_count": 2,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders.image import UnstructuredImageLoader\n",
"\n",
"loader = UnstructuredImageLoader(\"./example_data/layout-parser-paper-screenshot.png\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
},
@@ -68,33 +94,47 @@
"source": [
"### Retain Elements\n",
"\n",
"Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can keep that separation by specifying `mode=\"elements\"`."
"Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `mode=\"elements\"`."
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "0fab833b",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredImageLoader(\"layout-parser-paper-fast.jpg\", mode=\"elements\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "c3e8ff1b",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "43c23d2d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='2021', metadata={'source': './example_data/layout-parser-paper-screenshot.png', 'coordinates': {'points': ((47.0, 492.0), (47.0, 591.0), (83.0, 591.0), (83.0, 492.0)), 'system': 'PixelSpace', 'layout_width': 1624, 'layout_height': 1920}, 'last_modified': '2024-07-01T10:38:29', 'filetype': 'PNG', 'languages': ['eng'], 'page_number': 1, 'file_directory': './example_data', 'filename': 'layout-parser-paper-screenshot.png', 'category': 'UncategorizedText'})"
"Document(page_content='LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\n', lookup_str='', metadata={'source': 'layout-parser-paper-fast.jpg', 'filename': 'layout-parser-paper-fast.jpg', 'page_number': 1, 'category': 'Title'}, lookup_index=0)"
]
},
"execution_count": 3,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredImageLoader(\n",
" \"./example_data/layout-parser-paper-screenshot.png\", mode=\"elements\"\n",
")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
}
@@ -115,7 +155,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.6"
}
},
"nbformat": 4,
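Taken together, the updated notebook boils down to the following usage pattern. This is a minimal, self-contained sketch, assuming `unstructured[all-docs]` is installed and the example image from the notebook exists at the path shown:

from langchain_community.document_loaders.image import UnstructuredImageLoader

# Default mode: all text extracted from the image is combined into a single Document.
loader = UnstructuredImageLoader("./example_data/layout-parser-paper-screenshot.png")
data = loader.load()
print(data[0].page_content[:200])

# "elements" mode: one Document per detected element, with layout metadata
# such as coordinates, page_number, and category attached to each Document.
element_loader = UnstructuredImageLoader(
    "./example_data/layout-parser-paper-screenshot.png", mode="elements"
)
print(element_loader.load()[0].metadata["category"])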
@@ -7,9 +7,7 @@
"source": [
"# Microsoft Excel\n",
"\n",
"The `UnstructuredExcelLoader` is used to load `Microsoft Excel` files. The loader works with both `.xlsx` and `.xls` files. The page content will be the raw text of the Excel file. If you use the loader in `\"elements\"` mode, an HTML representation of the Excel file will be available in the document metadata under the `text_as_html` key.\n",
"\n",
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"The `UnstructuredExcelLoader` is used to load `Microsoft Excel` files. The loader works with both `.xlsx` and `.xls` files. The page content will be the raw text of the Excel file. If you use the loader in `\"elements\"` mode, an HTML representation of the Excel file will be available in the document metadata under the `text_as_html` key."
]
},
{
@@ -24,7 +22,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 6,
"id": "a654e4d9",
"metadata": {},
"outputs": [
@@ -38,13 +36,13 @@
{
"data": {
"text/plain": [
"[Document(page_content='Stanley Cups', metadata={'source': './example_data/stanley-cups.xlsx', 'file_directory': './example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups', 'page_number': 1, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Title'}),\n",
" Document(page_content='\\n\\n\\nTeam\\nLocation\\nStanley Cups\\n\\n\\nBlues\\nSTL\\n1\\n\\n\\nFlyers\\nPHI\\n2\\n\\n\\nMaple Leafs\\nTOR\\n13\\n\\n\\n', metadata={'source': './example_data/stanley-cups.xlsx', 'file_directory': './example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups', 'page_number': 1, 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n  <tbody>\\n    <tr>\\n      <td>Team</td>\\n      <td>Location</td>\\n      <td>Stanley Cups</td>\\n    </tr>\\n    <tr>\\n      <td>Blues</td>\\n      <td>STL</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <td>Flyers</td>\\n      <td>PHI</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <td>Maple Leafs</td>\\n      <td>TOR</td>\\n      <td>13</td>\\n    </tr>\\n  </tbody>\\n</table>', 'languages': ['eng'], 'parent_id': '17e9a90f9616f2abed8cf32b5bd3810d', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Table'}),\n",
" Document(page_content='Stanley Cups Since 67', metadata={'source': './example_data/stanley-cups.xlsx', 'file_directory': './example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups Since 67', 'page_number': 2, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Title'}),\n",
" Document(page_content='\\n\\n\\nTeam\\nLocation\\nStanley Cups\\n\\n\\nBlues\\nSTL\\n1\\n\\n\\nFlyers\\nPHI\\n2\\n\\n\\nMaple Leafs\\nTOR\\n0\\n\\n\\n', metadata={'source': './example_data/stanley-cups.xlsx', 'file_directory': './example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups Since 67', 'page_number': 2, 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n  <tbody>\\n    <tr>\\n      <td>Team</td>\\n      <td>Location</td>\\n      <td>Stanley Cups</td>\\n    </tr>\\n    <tr>\\n      <td>Blues</td>\\n      <td>STL</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <td>Flyers</td>\\n      <td>PHI</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <td>Maple Leafs</td>\\n      <td>TOR</td>\\n      <td>0</td>\\n    </tr>\\n  </tbody>\\n</table>', 'languages': ['eng'], 'parent_id': 'ee34bd8c186b57e3530d5443ffa58122', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Table'})]"
"[Document(page_content='Stanley Cups', metadata={'source': 'example_data/stanley-cups.xlsx', 'file_directory': 'example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups', 'page_number': 1, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Title'}),\n",
" Document(page_content='\\n\\n\\nTeam\\nLocation\\nStanley Cups\\n\\n\\nBlues\\nSTL\\n1\\n\\n\\nFlyers\\nPHI\\n2\\n\\n\\nMaple Leafs\\nTOR\\n13\\n\\n\\n', metadata={'source': 'example_data/stanley-cups.xlsx', 'file_directory': 'example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups', 'page_number': 1, 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n  <tbody>\\n    <tr>\\n      <td>Team</td>\\n      <td>Location</td>\\n      <td>Stanley Cups</td>\\n    </tr>\\n    <tr>\\n      <td>Blues</td>\\n      <td>STL</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <td>Flyers</td>\\n      <td>PHI</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <td>Maple Leafs</td>\\n      <td>TOR</td>\\n      <td>13</td>\\n    </tr>\\n  </tbody>\\n</table>', 'languages': ['eng'], 'parent_id': '17e9a90f9616f2abed8cf32b5bd3810d', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Table'}),\n",
" Document(page_content='Stanley Cups Since 67', metadata={'source': 'example_data/stanley-cups.xlsx', 'file_directory': 'example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups Since 67', 'page_number': 2, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Title'}),\n",
" Document(page_content='\\n\\n\\nTeam\\nLocation\\nStanley Cups\\n\\n\\nBlues\\nSTL\\n1\\n\\n\\nFlyers\\nPHI\\n2\\n\\n\\nMaple Leafs\\nTOR\\n0\\n\\n\\n', metadata={'source': 'example_data/stanley-cups.xlsx', 'file_directory': 'example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups Since 67', 'page_number': 2, 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n  <tbody>\\n    <tr>\\n      <td>Team</td>\\n      <td>Location</td>\\n      <td>Stanley Cups</td>\\n    </tr>\\n    <tr>\\n      <td>Blues</td>\\n      <td>STL</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <td>Flyers</td>\\n      <td>PHI</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <td>Maple Leafs</td>\\n      <td>TOR</td>\\n      <td>0</td>\\n    </tr>\\n  </tbody>\\n</table>', 'languages': ['eng'], 'parent_id': 'ee34bd8c186b57e3530d5443ffa58122', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Table'})]"
]
},
"execution_count": 2,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -52,7 +50,7 @@
"source": [
"from langchain_community.document_loaders import UnstructuredExcelLoader\n",
"\n",
"loader = UnstructuredExcelLoader(\"./example_data/stanley-cups.xlsx\", mode=\"elements\")\n",
"loader = UnstructuredExcelLoader(\"example_data/stanley-cups.xlsx\", mode=\"elements\")\n",
"docs = loader.load()\n",
"\n",
"print(len(docs))\n",
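Since the `text_as_html` metadata key is the main reason to use `"elements"` mode here, a short sketch of pulling the HTML tables out of the loaded documents may help. It assumes the same example workbook used in the notebook is present locally:

from langchain_community.document_loaders import UnstructuredExcelLoader

loader = UnstructuredExcelLoader("example_data/stanley-cups.xlsx", mode="elements")
docs = loader.load()

# In "elements" mode, each sheet yields a Title element and a Table element;
# Table elements carry an HTML rendering under the text_as_html metadata key.
for doc in docs:
    if doc.metadata.get("category") == "Table":
        print(doc.metadata["text_as_html"])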
@@ -9,9 +9,7 @@
|
||||
"\n",
|
||||
">[Microsoft PowerPoint](https://en.wikipedia.org/wiki/Microsoft_PowerPoint) is a presentation program by Microsoft.\n",
|
||||
"\n",
|
||||
"This covers how to load `Microsoft PowerPoint` documents into a document format that we can use downstream.\n",
|
||||
"\n",
|
||||
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
|
||||
"This covers how to load `Microsoft PowerPoint` documents into a document format that we can use downstream."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -29,30 +27,60 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"id": "721c48aa",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredPowerPointLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "9d3d0e35",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredPowerPointLoader(\"example_data/fake-power-point.pptx\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "06073f91",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "c9adc5cb",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Adding a Bullet Slide\\n\\nFind the bullet slide layout\\n\\nUse _TextFrame.text for first bullet\\n\\nUse _TextFrame.add_paragraph() for subsequent bullets\\n\\nHere is a lot of text!\\n\\nHere is some text in a text box!', metadata={'source': './example_data/fake-power-point.pptx'})]"
|
||||
"[Document(page_content='Adding a Bullet Slide\\n\\nFind the bullet slide layout\\n\\nUse _TextFrame.text for first bullet\\n\\nUse _TextFrame.add_paragraph() for subsequent bullets\\n\\nHere is a lot of text!\\n\\nHere is some text in a text box!', metadata={'source': 'example_data/fake-power-point.pptx'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredPowerPointLoader\n",
|
||||
"\n",
|
||||
"loader = UnstructuredPowerPointLoader(\"./example_data/fake-power-point.pptx\")\n",
|
||||
"\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"data"
|
||||
]
|
||||
},
|
||||
@@ -68,14 +96,36 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 2,
"id": "064f9162",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredPowerPointLoader(\n",
" \"example_data/fake-power-point.pptx\", mode=\"elements\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "abefbbdb",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a547c534",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='Adding a Bullet Slide', metadata={'source': './example_data/fake-power-point.pptx', 'category_depth': 0, 'file_directory': './example_data', 'filename': 'fake-power-point.pptx', 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', 'category': 'Title'})"
"Document(page_content='Adding a Bullet Slide', lookup_str='', metadata={'source': 'example_data/fake-power-point.pptx'}, lookup_index=0)"
]
},
"execution_count": 4,
@@ -84,12 +134,6 @@
}
],
"source": [
"loader = UnstructuredPowerPointLoader(\n",
" \"./example_data/fake-power-point.pptx\", mode=\"elements\"\n",
")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
},
@@ -165,7 +209,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.6"
}
},
"nbformat": 4,

@@ -24,7 +24,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "7b80ea891",
"metadata": {},
"outputs": [],
@@ -34,45 +34,38 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 3,
"id": "7b80ea89",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': './example_data/fake.docx'})]"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"from langchain_community.document_loaders import Docx2txtLoader\n",
"\n",
"loader = Docx2txtLoader(\"./example_data/fake.docx\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data"
]
},
{
"cell_type": "markdown",
"id": "8d40727d",
"metadata": {},
"source": [
"## Using Unstructured\n",
"\n",
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"from langchain_community.document_loaders import Docx2txtLoader"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "721c48aa",
"execution_count": 5,
"id": "99a12031",
"metadata": {},
"outputs": [],
"source": [
"loader = Docx2txtLoader(\"example_data/fake.docx\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "b92f68b0",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d83dd755",
"metadata": {},
"outputs": [
{
@@ -81,18 +74,71 @@
"[Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.docx'})]"
]
},
"execution_count": 3,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data"
]
},
{
"cell_type": "markdown",
"id": "8d40727d",
"metadata": {},
"source": [
"## Using Unstructured"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "721c48aa",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredWordDocumentLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "9d3d0e35",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredWordDocumentLoader(\"example_data/fake.docx\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "06073f91",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c9adc5cb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx'}, lookup_index=0)]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredWordDocumentLoader\n",
"\n",
"loader = UnstructuredWordDocumentLoader(\"example_data/fake.docx\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data"
]
},
@@ -111,23 +157,39 @@
"execution_count": 5,
"id": "064f9162",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredWordDocumentLoader(\"example_data/fake.docx\", mode=\"elements\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "abefbbdb",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "a547c534",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': './example_data/fake.docx', 'category_depth': 0, 'file_directory': './example_data', 'filename': 'fake.docx', 'last_modified': '2023-12-19T13:42:18', 'languages': ['por', 'cat'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'Title'})"
"Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx', 'filename': 'fake.docx', 'category': 'Title'}, lookup_index=0)"
]
},
"execution_count": 5,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredWordDocumentLoader(\"./example_data/fake.docx\", mode=\"elements\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
},
@@ -201,7 +263,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.6"
}
},
"nbformat": 4,

@@ -19,21 +19,29 @@
"execution_count": 1,
"id": "e6616e3a",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredODTLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a654e4d9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.odt', 'category_depth': 0, 'file_directory': 'example_data', 'filename': 'fake.odt', 'last_modified': '2023-12-19T13:42:18', 'languages': ['por', 'cat'], 'filetype': 'application/vnd.oasis.opendocument.text', 'category': 'Title'})"
"Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.odt', 'filename': 'example_data/fake.odt', 'category': 'Title'})"
]
},
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredODTLoader\n",
"\n",
"loader = UnstructuredODTLoader(\"example_data/fake.odt\", mode=\"elements\")\n",
"docs = loader.load()\n",
"docs[0]"
@@ -64,7 +72,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.6"
}
},
"nbformat": 4,

@@ -22,23 +22,35 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredOrgModeLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredOrgModeLoader(file_path=\"example_data/README.org\", mode=\"elements\")\n",
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='Example Docs' metadata={'source': './example_data/README.org', 'category_depth': 0, 'last_modified': '2023-12-19T13:42:18', 'languages': ['eng'], 'filetype': 'text/org', 'file_directory': './example_data', 'filename': 'README.org', 'category': 'Title'}\n"
"page_content='Example Docs' metadata={'source': 'example_data/README.org', 'filename': 'README.org', 'file_directory': 'example_data', 'filetype': 'text/org', 'page_number': 1, 'category': 'Title'}\n"
]
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredOrgModeLoader\n",
"\n",
"loader = UnstructuredOrgModeLoader(\n",
" file_path=\"./example_data/README.org\", mode=\"elements\"\n",
")\n",
"docs = loader.load()\n",
"\n",
"print(docs[0])"
]
},
@@ -66,7 +78,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.8.13"
}
},
"nbformat": 4,

@@ -22,21 +22,35 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredRSTLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredRSTLoader(file_path=\"example_data/README.rst\", mode=\"elements\")\n",
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='Example Docs' metadata={'source': './example_data/README.rst', 'category_depth': 0, 'last_modified': '2023-12-19T13:42:18', 'languages': ['eng'], 'filetype': 'text/x-rst', 'file_directory': './example_data', 'filename': 'README.rst', 'category': 'Title'}\n"
"page_content='Example Docs' metadata={'source': 'example_data/README.rst', 'filename': 'README.rst', 'file_directory': 'example_data', 'filetype': 'text/x-rst', 'page_number': 1, 'category': 'Title'}\n"
]
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredRSTLoader\n",
"\n",
"loader = UnstructuredRSTLoader(file_path=\"./example_data/README.rst\", mode=\"elements\")\n",
"docs = loader.load()\n",
"\n",
"print(docs[0])"
]
},
@@ -64,7 +78,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.11.3"
}
},
"nbformat": 4,

@@ -22,6 +22,27 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders.tsv import UnstructuredTSVLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredTSVLoader(\n",
" file_path=\"example_data/mlb_teams_2012.csv\", mode=\"elements\"\n",
")\n",
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
@@ -30,9 +51,6 @@
"<table border=\"1\" class=\"dataframe\">\n",
" <tbody>\n",
" <tr>\n",
" <td>Team, \"Payroll (millions)\", \"Wins\"</td>\n",
" </tr>\n",
" <tr>\n",
" <td>Nationals, 81.34, 98</td>\n",
" </tr>\n",
" <tr>\n",
@@ -128,13 +146,6 @@
}
],
"source": [
"from langchain_community.document_loaders.tsv import UnstructuredTSVLoader\n",
"\n",
"loader = UnstructuredTSVLoader(\n",
" file_path=\"./example_data/mlb_teams_2012.csv\", mode=\"elements\"\n",
")\n",
"docs = loader.load()\n",
"\n",
"print(docs[0].metadata[\"text_as_html\"])"
]
},
@@ -162,7 +173,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.8.13"
}
},
"nbformat": 4,

@@ -7,31 +7,18 @@
"source": [
"# Unstructured File\n",
"\n",
"This notebook covers how to use `Unstructured` package to load files of many types. `Unstructured` currently supports loading of text files, powerpoints, html, pdfs, images, and more.\n",
"\n",
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"This notebook covers how to use `Unstructured` package to load files of many types. `Unstructured` currently supports loading of text files, powerpoints, html, pdfs, images, and more."
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "2886982e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"outputs": [],
"source": [
"# # Install package\n",
"%pip install --upgrade --quiet \"unstructured[all-docs]\""
"%pip install --upgrade --quiet \"unstructured[all-docs]\""
]
},
{
@@ -64,9 +51,39 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"id": "79d3e549",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2593d1dc",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\"./example_data/state_of_the_union.txt\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "fe34e941",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "ee449788",
"metadata": {},
"outputs": [
{
"data": {
@@ -74,18 +91,12 @@
"'Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.\\n\\nLast year COVID-19 kept us apart. This year we are finally together again.\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.\\n\\nWith a duty to one another to the American people to the Constit'"
]
},
"execution_count": 3,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
"\n",
"loader = UnstructuredFileLoader(\"./example_data/state_of_the_union.txt\")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[0].page_content[:400]"
]
},
@@ -99,28 +110,41 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "092d9a0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'1/22/23, 6:30 PM - User 1: Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!\\n\\n1/22/23, 8:24 PM - User 2: Goodmorning! $50 is too low.\\n\\n1/23/23, 2:59 AM - User 1: How much do you want?\\n\\n1/23/23, 3:00 AM - User 2: Online is at least $100\\n\\n1/23/23, 3:01 AM - User 2: Here is $129\\n\\n1/23/23, 3:01 AM - User 2: <Media omitted>\\n\\n1/23/23, 3:01 AM - User 1: Im not int'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"files = [\"./example_data/whatsapp_chat.txt\", \"./example_data/layout-parser-paper.pdf\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f841c4f8",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(files)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "993c240b",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5ce4ff07",
"metadata": {},
"outputs": [],
"source": [
"files = [\"./example_data/whatsapp_chat.txt\", \"./example_data/layout-parser-paper.pdf\"]\n",
"\n",
"loader = UnstructuredFileLoader(files)\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[0].page_content[:400]"
]
},
@@ -136,32 +160,48 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 8,
"id": "ff5b616d",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/state_of_the_union.txt\", mode=\"elements\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "feca3b6c",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "fec5bbac",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
" Document(page_content='Last year COVID-19 kept us apart. This year we are finally together again.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
" Document(page_content='Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
" Document(page_content='With a duty to one another to the American people to the Constitution.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'UncategorizedText'}),\n",
" Document(page_content='And with an unwavering resolve that freedom will always triumph over tyranny.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'})]"
"[Document(page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
" Document(page_content='Last year COVID-19 kept us apart. This year we are finally together again.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
" Document(page_content='Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
" Document(page_content='With a duty to one another to the American people to the Constitution.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
" Document(page_content='And with an unwavering resolve that freedom will always triumph over tyranny.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0)]"
]
},
"execution_count": 5,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/state_of_the_union.txt\", mode=\"elements\"\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[:5]"
]
},
@@ -177,35 +217,59 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 1,
"id": "767238a4",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "9518b425",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"layout-parser-paper-fast.pdf\", strategy=\"fast\", mode=\"elements\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "645f29e9",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "60685353",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
"[Document(page_content='1', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
" Document(page_content='2', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
" Document(page_content='0', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
" Document(page_content='2', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
" Document(page_content='n', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'Title'}, lookup_index=0)]"
]
},
"execution_count": 9,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
"\n",
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\", strategy=\"fast\", mode=\"elements\"\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[5:10]"
"docs[:5]"
]
},
{
@@ -223,33 +287,59 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 1,
"id": "8ca8a648",
"metadata": {},
"outputs": [],
"source": [
"!wget https://raw.githubusercontent.com/Unstructured-IO/unstructured/main/example-docs/layout-parser-paper.pdf -P \"../../\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "686e5eb4",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\", mode=\"elements\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c90f0e94",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "6ec859d8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
"[Document(page_content='LayoutParser : A Unified Toolkit for Deep Learning Based Document Image Analysis', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0),\n",
" Document(page_content='Zejiang Shen 1 ( (ea)\\n ), Ruochen Zhang 2 , Melissa Dell 3 , Benjamin Charles Germain Lee 4 , Jacob Carlson 3 , and Weining Li 5', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0),\n",
" Document(page_content='Allen Institute for AI shannons@allenai.org', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0),\n",
" Document(page_content='Brown University ruochen zhang@brown.edu', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0),\n",
" Document(page_content='Harvard University { melissadell,jacob carlson } @fas.harvard.edu', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0)]"
]
},
"execution_count": 12,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\", mode=\"elements\"\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[5:10]"
"docs[:5]"
]
},
{
@@ -262,38 +352,62 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 2,
"id": "112e5538",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
"from unstructured.cleaners.core import clean_extra_whitespace"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "b9c5ac8d",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\",\n",
" mode=\"elements\",\n",
" post_processors=[clean_extra_whitespace],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c44d5def",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "b6f27929",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
"[Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 2 0 2', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 213.36), (16.34, 253.36), (36.34, 253.36), (36.34, 213.36)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'UncategorizedText'}),\n",
" Document(page_content='n u J', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 258.36), (16.34, 286.14), (36.34, 286.14), (36.34, 258.36)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'Title'})]"
]
},
"execution_count": 14,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
"from unstructured.cleaners.core import clean_extra_whitespace\n",
"\n",
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\",\n",
" mode=\"elements\",\n",
" post_processors=[clean_extra_whitespace],\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[5:10]"
"docs[:5]"
]
},
{
@@ -306,6 +420,39 @@
"If you want to get up and running with less set up, you can simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or `UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API. You can generate a free Unstructured API key [here](https://www.unstructured.io/api-key/). The [Unstructured documentation](https://unstructured-io.github.io/unstructured/) page will have instructions on how to generate an API key once they’re available. Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you’d like to self-host the Unstructured API or run it locally."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "b50c70bc",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredAPIFileLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "12b6d2cf",
"metadata": {},
"outputs": [],
"source": [
"filenames = [\"example_data/fake.docx\", \"example_data/fake-email.eml\"]"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "39a9894d",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames[0],\n",
" api_key=\"FAKE_API_KEY\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
@@ -324,15 +471,6 @@
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredAPIFileLoader\n",
"\n",
"filenames = [\"example_data/fake.docx\", \"example_data/fake-email.eml\"]\n",
"\n",
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames[0],\n",
" api_key=\"FAKE_API_KEY\",\n",
")\n",
"\n",
"docs = loader.load()\n",
"docs[0]"
]
@@ -345,6 +483,19 @@
"You can also batch multiple files through the Unstructured API in a single API using `UnstructuredAPIFileLoader`."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "79a18e7e",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames,\n",
" api_key=\"FAKE_API_KEY\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
@@ -363,11 +514,6 @@
}
],
"source": [
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames,\n",
" api_key=\"FAKE_API_KEY\",\n",
")\n",
"\n",
"docs = loader.load()\n",
"docs[0]"
]
@@ -397,7 +543,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.0"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long
@@ -17,10 +17,29 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredXMLLoader\n",
"\n",
"from langchain_community.document_loaders import UnstructuredXMLLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a654e4d9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='United States\\n\\nWashington, DC\\n\\nJoe Biden\\n\\nBaseball\\n\\nCanada\\n\\nOttawa\\n\\nJustin Trudeau\\n\\nHockey\\n\\nFrance\\n\\nParis\\n\\nEmmanuel Macron\\n\\nSoccer\\n\\nTrinidad & Tobado\\n\\nPort of Spain\\n\\nKeith Rowley\\n\\nTrack & Field', metadata={'source': 'example_data/factbook.xml'})"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredXMLLoader(\n",
" \"./example_data/factbook.xml\",\n",
" \"example_data/factbook.xml\",\n",
")\n",
"docs = loader.load()\n",
"docs[0]"

@@ -2147,32 +2147,6 @@
"llm(\"Tell me one joke\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## SingleStoreDB Semantic Cache\n",
"You can use [SingleStoreDB](https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/) as a semantic cache to cache prompts and responses."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d82f1bdc",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.cache import SingleStoreDBSemanticCache\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"set_llm_cache(\n",
" SingleStoreDBSemanticCache(\n",
" embedding=OpenAIEmbeddings(),\n",
" host=\"root:pass@localhost:3306/db\",\n",
" )\n",
")"
]
},
{
"cell_type": "markdown",
"id": "ae1f5e1c-085e-4998-9f2d-b5867d2c3d5b",
@@ -2204,7 +2178,7 @@
"source": [
"**Cache** classes are implemented by inheriting the [BaseCache](https://api.python.langchain.com/en/latest/caches/langchain_core.caches.BaseCache.html) class.\n",
"\n",
"This table lists all 21 derived classes with links to the API Reference.\n",
"This table lists all 20 derived classes with links to the API Reference.\n",
"\n",
"\n",
"| Namespace 🔻 | Class |\n",
@@ -2221,7 +2195,6 @@
"| langchain_community.cache | [MomentoCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.MomentoCache.html) |\n",
"| langchain_community.cache | [OpenSearchSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.OpenSearchSemanticCache.html) |\n",
"| langchain_community.cache | [RedisSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.RedisSemanticCache.html) |\n",
"| langchain_community.cache | [SingleStoreDBSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SingleStoreDBSemanticCache.html) |\n",
"| langchain_community.cache | [SQLAlchemyCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLAlchemyCache.html) |\n",
"| langchain_community.cache | [SQLAlchemyMd5Cache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLAlchemyMd5Cache.html) |\n",
"| langchain_community.cache | [UpstashRedisCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.UpstashRedisCache.html) |\n",

@@ -223,15 +223,9 @@ See a [usage example](/docs/integrations/document_loaders/microsoft_onenote).
from langchain_community.document_loaders.onenote import OneNoteLoader
```

## AI Agent Memory System
## Vector stores

[AI agent](https://learn.microsoft.com/en-us/azure/cosmos-db/ai-agents) needs robust memory systems that support multi-modality, offer strong operational performance, and enable agent memory sharing as well as separation.

AI agents can rely on Azure Cosmos DB as a unified [memory system](https://learn.microsoft.com/en-us/azure/cosmos-db/ai-agents#memory-can-make-or-break-agents) solution, enjoying speed, scale, and simplicity. This service successfully [enabled OpenAI's ChatGPT service](https://www.youtube.com/watch?v=6IIUtEFKJec&t) to scale dynamically with high reliability and low maintenance. Powered by an atom-record-sequence engine, it is the world's first globally distributed [NoSQL](https://learn.microsoft.com/en-us/azure/cosmos-db/distributed-nosql), [relational](https://learn.microsoft.com/en-us/azure/cosmos-db/distributed-relational), and [vector database](https://learn.microsoft.com/en-us/azure/cosmos-db/vector-database) service that offers a serverless mode.

Below are two available Azure Cosmos DB APIs that can provide vector store functionalities.

### Azure Cosmos DB for MongoDB (vCore)
### Azure Cosmos DB MongoDB vCore

>[Azure Cosmos DB for MongoDB vCore](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/) makes it easy to create a database with full native MongoDB support.
> You can apply your MongoDB experience and continue to use your favorite MongoDB drivers, SDKs, and tools by pointing your application to the API for MongoDB vCore account's connection string.

@@ -7,19 +7,29 @@
"source": [
"# MLflow\n",
"\n",
">[MLflow](https://mlflow.org/) is a versatile, open-source platform for managing workflows and artifacts across the machine learning lifecycle. It has built-in integrations with many popular ML libraries, but can be used with any library, algorithm, or deployment tool. It is designed to be extensible, so you can write plugins to support new workflows, libraries, and tools.\n",
">[MLflow](https://www.mlflow.org/docs/latest/what-is-mlflow) is a versatile, expandable, open-source platform for managing workflows and artifacts across the machine learning lifecycle. It has built-in integrations with many popular ML libraries, but can be used with any library, algorithm, or deployment tool. It is designed to be extensible, so you can write plugins to support new workflows, libraries, and tools.\n",
"\n",
"In the context of LangChain integration, MLflow provides the following capabilities:\n",
"\n",
"- **Experiment Tracking**: MLflow tracks and stores artifacts from your LangChain experiments, including models, code, prompts, metrics, and more.\n",
"- **Dependency Management**: MLflow automatically records model dependencies, ensuring consistency between development and production environments.\n",
"- **Model Evaluation** MLflow offers native capabilities for evaluating LangChain applications.\n",
"- **Tracing**: MLflow allows you to visually trace data flows through your LangChain chain, agent, retriever, or other components.\n",
"\n",
"\n",
"**Note**: The tracing capability is only available in MLflow versions 2.14.0 and later.\n",
"\n",
"This notebook demonstrates how to track your LangChain experiments using MLflow. For more information about this feature and to explore tutorials and examples of using LangChain with MLflow, please refer to the [MLflow documentation for LangChain integration](https://mlflow.org/docs/latest/llms/langchain/index.html)."
"This notebook goes over how to track your LangChain experiments into your `MLflow Server`"
]
},
{
"cell_type": "markdown",
"id": "ea73efae-7182-4a89-a492-c865b1fcf981",
"metadata": {},
"source": [
"## External examples"
]
},
{
"cell_type": "markdown",
"id": "97361a84-4e8f-45ba-b291-814cf73cd8f2",
"metadata": {},
"source": [
"`MLflow` provides [several examples](https://github.com/mlflow/mlflow/tree/master/examples/langchain) for the `LangChain` integration:\n",
"- [simple_chain](https://github.com/mlflow/mlflow/blob/master/examples/langchain/simple_chain.py)\n",
"- [simple_agent](https://github.com/mlflow/mlflow/blob/master/examples/langchain/simple_agent.py)\n",
"- [retriever_chain](https://github.com/mlflow/mlflow/blob/master/examples/langchain/retriever_chain.py)\n",
"- [retrieval_qa_chain](https://github.com/mlflow/mlflow/blob/master/examples/langchain/retrieval_qa_chain.py)\n"
]
},
{
@@ -27,37 +37,7 @@
|
||||
"id": "e0cbd74b-1542-45a4-a72b-b2eedeffd2e0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Install MLflow Python package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7fb27b941602401d91542211134fc71a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install google-search-results num"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "42406548",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install mlflow -qU"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8e626bb4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This example utilizes the OpenAI LLM. Feel free to skip the command below and proceed with a different LLM if desired."
|
||||
"## Example"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -67,535 +47,142 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install langchain-openai -qU"
"%pip install --upgrade --quiet azureml-mlflow\n",
"%pip install --upgrade --quiet pandas\n",
"%pip install --upgrade --quiet textstat\n",
"%pip install --upgrade --quiet spacy\n",
"%pip install --upgrade --quiet langchain-openai\n",
"%pip install --upgrade --quiet google-search-results\n",
"!python -m spacy download en_core_web_sm"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "7e87b21d",
"execution_count": null,
"id": "bf8e1f5c",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Set the MLflow tracking URI if you have an MLflow Tracking Server running\n",
"os.environ[\"MLFLOW_TRACKING_URI\"] = \"\"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"\""
]
},
{
"cell_type": "markdown",
"id": "84616d96",
"metadata": {},
"source": [
"To begin, let's create a dedicated MLflow experiment in order to track our model and artifacts. While you can opt to skip this step and use the default experiment, we strongly recommend organizing your runs and artifacts into separate experiments to avoid clutter and maintain a clean, structured workflow."
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "155d2a6f",
"metadata": {},
"outputs": [],
"source": [
"import mlflow\n",
"\n",
"mlflow.set_experiment(\"LangChain MLflow Integration\")"
]
},
{
"cell_type": "markdown",
"id": "48accc76",
"metadata": {},
"source": [
"## Overview\n",
"\n",
"Integrate MLflow with your LangChain Application using one of the following methods:\n",
"\n",
"1. **Autologging**: Enable seamless tracking with the `mlflow.langchain.autolog()` command, our recommended first option for leveraging the LangChain MLflow integration.\n",
"2. **Manual Logging**: Use MLflow APIs to log LangChain chains and agents, providing fine-grained control over what to track in your experiment.\n",
"3. **Custom Callbacks**: Pass MLflow callbacks manually when invoking chains, allowing for semi-automated customization of your workload, such as tracking specific invocations."
]
},
{
"cell_type": "markdown",
"id": "c3f10055",
"metadata": {},
"source": [
"## Scenario 1: MLflow Autologging"
]
},
{
"cell_type": "markdown",
"id": "71118a27",
"metadata": {},
"source": [
"To get started with autologging, simply call `mlflow.langchain.autolog()`. In this example, we set the `log_models` parameter to `True`, which allows the chain definition and its dependency libraries to be recorded as an MLflow model, providing a comprehensive tracking experience."
]
},
{
"cell_type": "code",
"execution_count": 39,
"id": "5b08145f",
"metadata": {},
"outputs": [],
"source": [
"import mlflow\n",
"\n",
"mlflow.langchain.autolog(\n",
"    # These are optional configurations to control what information should be logged automatically (default: False)\n",
"    # For the full list of the arguments, refer to https://mlflow.org/docs/latest/llms/langchain/autologging.html#id1\n",
"    log_models=True,\n",
"    log_input_examples=True,\n",
"    log_model_signatures=True,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "f0570c18",
"metadata": {},
"source": [
"### Define a Chain"
]
},
{
"cell_type": "code",
"execution_count": 40,
"id": "1b2627ef",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model_name=\"gpt-4o\", temperature=0)\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"parser = StrOutputParser()\n",
"\n",
"chain = prompt | llm | parser"
]
},
{
"cell_type": "markdown",
"id": "a5b38bae",
"metadata": {},
"source": [
"### Invoke the Chain\n",
"\n",
"Note that this step may take a few seconds longer than usual, as MLflow runs several tasks in the background to log models, traces, and artifacts to the tracking server."
]
},
{
"cell_type": "code",
"execution_count": 41,
"id": "a1df4bc8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Ich liebe das Programmieren.'"
]
},
"execution_count": 41,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"test_input = {\n",
"    \"input_language\": \"English\",\n",
"    \"output_language\": \"German\",\n",
"    \"input\": \"I love programming.\",\n",
"}\n",
"\n",
"chain.invoke(test_input)"
]
},
{
"cell_type": "markdown",
"id": "5173cdd4",
"metadata": {},
"source": [
"Take a moment to explore the MLflow Tracking UI, where you can gain a deeper understanding of what information is being logged.\n",
"* **Traces** - Navigate to the \"Traces\" tab in the experiment and click the request ID link of the first row. The displayed trace tree visualizes the call stack of your chain invocation, providing you with a deep insight into how each component is executed within the chain.\n",
"* **MLflow Model** - As we set `log_models=True`, MLflow automatically creates an MLflow Run to track your chain definition. Navigate to the newest Run page and open the \"Artifacts\" tab, which lists file artifacts logged as an MLflow Model, including dependencies, input examples, model signatures, and more.\n"
]
},
{
"cell_type": "markdown",
"id": "36179573",
"metadata": {},
"source": [
"### Invoke the Logged Chain\n",
"\n",
"Next, let's load the model back and verify that we can reproduce the same prediction, ensuring consistency and reliability.\n",
"\n",
"There are two ways to load the model:\n",
"1. `mlflow.langchain.load_model(MODEL_URI)` - This loads the model as the original LangChain object.\n",
"2. `mlflow.pyfunc.load_model(MODEL_URI)` - This loads the model within the `PythonModel` wrapper and encapsulates the prediction logic with the `predict()` API, which contains additional logic such as schema enforcement."
]
},
{
"cell_type": "code",
"execution_count": 42,
"id": "a8e39d72",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Ich liebe Programmieren.'"
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Replace YOUR_RUN_ID with the Run ID displayed on the MLflow UI\n",
"loaded_model = mlflow.langchain.load_model(\"runs:/{YOUR_RUN_ID}/model\")\n",
"loaded_model.invoke(test_input)"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "9619356d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['Ich liebe das Programmieren.']"
]
},
"execution_count": 57,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pyfunc_model = mlflow.pyfunc.load_model(\"runs:/{YOUR_RUN_ID}/model\")\n",
"pyfunc_model.predict(test_input)"
]
},
{
"cell_type": "markdown",
"id": "eb23a78c",
"metadata": {},
"source": [
"### Configure Autologging\n",
"\n",
"The `mlflow.langchain.autolog()` function offers several parameters that allow for fine-grained control over the artifacts logged to MLflow. For a comprehensive list of available configurations, please refer to the latest [MLflow LangChain Autologging Documentation](https://mlflow.org/docs/latest/llms/langchain/autologging.html)."
]
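As a quick illustration, here is a hedged sketch of a few of those parameters. `disable` and `silent` are generic MLflow autologging flags rather than LangChain-specific ones, so confirm both against the linked documentation for your MLflow version.

```python
# A minimal, non-exhaustive sketch of autologging configuration.
import mlflow

mlflow.langchain.autolog(
    log_models=False,          # skip logging the chain as an MLflow Model
    log_input_examples=False,  # skip recording input examples
    silent=True,               # assumption: suppress autologging event logs
)

# Assumption: turn autologging off entirely for subsequent invocations.
# mlflow.langchain.autolog(disable=True)
```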
},
{
"cell_type": "markdown",
"id": "1bf6bb02",
"metadata": {},
"source": [
"## Scenario 2: Manually Logging an Agent from Code"
]
},
{
"cell_type": "markdown",
"id": "6e447a02",
"metadata": {},
"source": [
"\n",
"#### Prerequisites\n",
"\n",
"This example uses `SerpAPI`, a search engine API, as a tool for the agent to retrieve Google Search results. LangChain is natively integrated with `SerpAPI`, allowing you to configure the tool for your agent with just one line of code.\n",
"\n",
"To get started:\n",
"\n",
"* Install the required Python packages via pip: `pip install google-search-results numexpr`.\n",
"* Create an account at [SerpAPI's Official Website](https://serpapi.com/) and retrieve an API key.\n",
"* Set the API key in the environment variable: `os.environ[\"SERPAPI_API_KEY\"] = \"YOUR_API_KEY\"`\n"
]
},
{
"cell_type": "markdown",
"id": "d0c914e3",
"metadata": {},
"source": [
"### Define an Agent\n",
"\n",
"In this example, we will log the agent definition **as code**, rather than directly feeding the Python object and saving it in a serialized format. This approach offers several benefits:\n",
"\n",
"1. **No serialization required**: By saving the model as code, we avoid the need for serialization, which can be problematic when working with components that don't natively support it. This approach also eliminates the risk of incompatibility issues when deserializing the model in a different environment.\n",
"2. **Better transparency**: By inspecting the saved code file, you can gain valuable insights into what the model does. This is in contrast to serialized formats like pickle, where the model's behavior remains opaque until it's loaded back, potentially exposing security risks such as remote code execution.\n"
]
},
{
"cell_type": "markdown",
"id": "9190a609",
"metadata": {},
"source": [
"First, create a separate `.py` file that defines the agent instance.\n",
"\n",
"In the interest of time, you can run the following cell to generate a Python file `agent.py`, which contains the agent definition code. In an actual development scenario, you would define it in another notebook or a hand-crafted Python script."
]
},
{
"cell_type": "code",
"execution_count": 64,
"id": "62b20e17",
"metadata": {},
"outputs": [],
"source": [
"script_content = \"\"\"\n",
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain_openai import ChatOpenAI\n",
"import mlflow\n",
"\n",
"llm = ChatOpenAI(model_name=\"gpt-4o\", temperature=0)\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)\n",
"\n",
"# IMPORTANT: call set_model() to register the instance to be logged.\n",
"mlflow.models.set_model(agent)\n",
"\"\"\"\n",
"\n",
"with open(\"agent.py\", \"w\") as f:\n",
"    f.write(script_content)"
]
},
{
"cell_type": "markdown",
"id": "82a21f06",
"metadata": {},
"source": [
"### Log the Agent\n",
"\n",
"Return to the original notebook and run the following cell to log the agent you've defined in the `agent.py` file.\n"
]
},
{
"cell_type": "code",
"execution_count": 51,
"id": "cd5b8bcc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The agent is successfully logged to MLflow!\n"
]
}
],
"source": [
"question = \"How long would it take to drive to the Moon with F1 racing cars?\"\n",
"\n",
"with mlflow.start_run(run_name=\"search-math-agent\") as run:\n",
"    info = mlflow.langchain.log_model(\n",
"        lc_model=\"agent.py\",  # Specify the relative code path to the agent definition\n",
"        artifact_path=\"model\",\n",
"        input_example=question,\n",
"    )\n",
"\n",
"print(\"The agent is successfully logged to MLflow!\")"
]
},
{
"cell_type": "markdown",
"id": "b4687052",
"metadata": {},
"source": [
"Now, open the MLflow UI and navigate to the \"Artifacts\" tab in the Run detail page. You should see that the `agent.py` file has been successfully logged, along with other model artifacts, such as dependencies, input examples, and more."
]
},
{
"cell_type": "markdown",
"id": "9011db62",
"metadata": {},
"source": [
"### Invoke the Logged Agent\n",
"\n",
"Now load the agent back and invoke it, using either of the two loading methods described above."
]
},
{
"cell_type": "code",
"execution_count": 53,
"id": "b634b69d",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Downloading artifacts: 100%|██████████| 10/10 [00:00<00:00, 331.57it/s]\n"
]
},
{
"data": {
"text/plain": [
"['It would take approximately 1194.5 hours to drive to the Moon with an F1 racing car.']"
]
},
"execution_count": 53,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Let's turn on autologging with the default configuration, so we can see the trace for the agent invocation.\n",
"mlflow.langchain.autolog()\n",
"\n",
"# Load the model back\n",
"agent = mlflow.pyfunc.load_model(info.model_uri)\n",
"\n",
"# Invoke\n",
"agent.predict(question)"
]
},
{
"cell_type": "markdown",
"id": "30bf6133",
"metadata": {},
"source": [
"Navigate to the **\"Traces\"** tab in the experiment and click the request ID link of the first row. The trace visualizes how the agent performs multiple tasks within a single prediction call:\n",
"1. Determine what subtasks are required to answer the question.\n",
"2. Search for the speed of an F1 racing car.\n",
"3. Search for the distance from the Earth to the Moon.\n",
"4. Compute the division using the LLM."
]
},
{
"cell_type": "markdown",
"id": "cbd10f34",
"metadata": {},
"source": [
"## Scenario 3. Using MLflow Callbacks\n",
"\n",
"**MLflow Callbacks** provide a semi-automated way to track your LangChain application in MLflow. There are two primary callbacks available:\n",
"\n",
"1. **`MlflowLangchainTracer`:** Primarily used for generating traces, available in `mlflow >= 2.14.0`.\n",
"2. **`MLflowCallbackHandler`:** Logs metrics and artifacts to the MLflow tracking server."
]
},
{
"cell_type": "markdown",
"id": "d013d309",
"metadata": {},
"source": [
"### MlflowLangchainTracer\n",
"\n",
"When the chain or agent is invoked with the `MlflowLangchainTracer` callback, MLflow automatically generates a trace for the call stack and logs it to the MLflow tracking server. The outcome is exactly the same as with `mlflow.langchain.autolog()`, but this is particularly useful when you only want to trace specific invocations; autologging, on the other hand, applies to all invocations in the same notebook or script."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "46d48044",
"metadata": {},
"outputs": [],
"source": [
"from mlflow.langchain.langchain_tracer import MlflowLangchainTracer\n",
"\n",
"mlflow_tracer = MlflowLangchainTracer()\n",
"\n",
"# This call generates a trace\n",
"chain.invoke(test_input, config={\"callbacks\": [mlflow_tracer]})\n",
"\n",
"# This call does not generate a trace\n",
"chain.invoke(test_input)"
]
},
{
"cell_type": "markdown",
"id": "acb6692c",
"metadata": {},
"source": [
"#### Where to Pass the Callback\n",
"LangChain supports two ways of passing callback instances: (1) request-time callbacks - pass them to the `invoke` method or bind them with `with_config()`; (2) constructor callbacks - set them in the chain constructor. When using `MlflowLangchainTracer` as a callback, you **must use request-time callbacks**. Setting it in the constructor instead will only apply the callback to the top-level object, preventing it from being propagated to child components and resulting in incomplete traces. For more information on this behavior, please refer to the [Callbacks Documentation](https://python.langchain.com/v0.2/docs/concepts/#callbacks).\n",
"\n",
"```python\n",
"# OK\n",
"chain.invoke(test_input, config={\"callbacks\": [mlflow_tracer]})\n",
"chain.with_config(callbacks=[mlflow_tracer])\n",
"# NG\n",
"chain = TheNameOfSomeChain(callbacks=[mlflow_tracer])\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "d6a60ba7",
"metadata": {},
"source": [
"#### Supported Methods\n",
"\n",
"`MlflowLangchainTracer` supports the following invocation methods from the [Runnable Interfaces](https://python.langchain.com/v0.1/docs/expression_language/interface/).\n",
"- Standard interfaces: `invoke`, `stream`, `batch`\n",
"- Async interfaces: `astream`, `ainvoke`, `abatch`, `astream_log`, `astream_events`\n",
"\n",
"Other methods are not guaranteed to be fully compatible."
]
},
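To make the list above concrete, here is a small sketch showing the tracer passed to `stream`, one of the supported standard interfaces. It reuses the `chain`, `test_input`, and `mlflow_tracer` objects defined earlier in this notebook.

```python
# Hedged sketch: the tracer also works with `stream`, again passed as a
# request-time callback via the `config` argument.
for chunk in chain.stream(test_input, config={"callbacks": [mlflow_tracer]}):
    print(chunk, end="")
```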
{
"cell_type": "markdown",
"id": "a72e8854",
"metadata": {},
"source": [
"### MlflowCallbackHandler\n",
"\n",
"`MlflowCallbackHandler` is a callback handler that resides in the LangChain Community code base.\n",
"\n",
"This callback can be passed for chain/agent invocation, but it must be explicitly finished by calling the `flush_tracker()` method.\n",
"\n",
"When a chain is invoked with the callback, it performs the following actions:\n",
"\n",
"1. Creates a new MLflow Run, or retrieves an active one if available, within the active MLflow Experiment.\n",
"2. Logs metrics such as the number of LLM calls, token usage, and other relevant metrics. If the chain/agent includes an LLM call and you have the `spacy` library installed, it logs text complexity metrics such as `flesch_kincaid_grade`.\n",
"3. Logs internal steps as a JSON file (this is a legacy version of traces).\n",
"4. Logs chain inputs and outputs as a Pandas DataFrame.\n",
"5. Calls the `flush_tracker()` method with a chain/agent instance, logging the chain/agent as an MLflow Model.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b579aae1",
"id": "fd49fd45",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.callbacks import MlflowCallbackHandler\n",
"\n",
"mlflow_callback = MlflowCallbackHandler()\n",
"\n",
"chain.invoke(\"What is LangChain callback?\", config={\"callbacks\": [mlflow_callback]})\n",
"\n",
"mlflow_callback.flush_tracker()"
"from langchain_openai import OpenAI"
]
},
{
"cell_type": "markdown",
"id": "84924e35",
"cell_type": "code",
"execution_count": null,
"id": "578cac8c",
"metadata": {},
"outputs": [],
"source": [
"## References\n",
"To learn more about the feature and to explore tutorials and examples of using LangChain with MLflow, please refer to the [MLflow documentation for LangChain integration](https://mlflow.org/docs/latest/llms/langchain/index.html).\n",
"\"\"\"Main function.\n",
"\n",
"`MLflow` also provides several [tutorials](https://mlflow.org/docs/latest/llms/langchain/index.html#getting-started-with-the-mlflow-langchain-flavor-tutorials-and-guides) and [examples](https://github.com/mlflow/mlflow/tree/master/examples/langchain) for the `LangChain` integration:\n",
"- [Quick Start](https://mlflow.org/docs/latest/llms/langchain/notebooks/langchain-quickstart.html)\n",
"- [RAG Tutorial](https://mlflow.org/docs/latest/llms/langchain/notebooks/langchain-retriever.html)\n",
"- [Agent Example](https://github.com/mlflow/mlflow/blob/master/examples/langchain/simple_agent.py)"
"This function is used to try the callback handler.\n",
"Scenarios:\n",
"1. OpenAI LLM\n",
"2. Chain with multiple SubChains on multiple generations\n",
"3. Agent with Tools\n",
"\"\"\"\n",
"mlflow_callback = MlflowCallbackHandler()\n",
"llm = OpenAI(\n",
"    model_name=\"gpt-3.5-turbo\", temperature=0, callbacks=[mlflow_callback], verbose=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9b20acae",
"metadata": {},
"outputs": [],
"source": [
"# SCENARIO 1 - LLM\n",
"llm_result = llm.generate([\"Tell me a joke\"])\n",
"\n",
"mlflow_callback.flush_tracker(llm)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b872046",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain_core.prompts import PromptTemplate"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1b2627ef",
"metadata": {},
"outputs": [],
"source": [
"# SCENARIO 2 - Chain\n",
"template = \"\"\"You are a playwright. Given the title of a play, it is your job to write a synopsis for that title.\n",
"Title: {title}\n",
"Playwright: This is a synopsis for the above play:\"\"\"\n",
"prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=[mlflow_callback])\n",
"\n",
"test_prompts = [\n",
"    {\n",
"        \"title\": \"documentary about good video games that push the boundary of game design\"\n",
"    },\n",
"]\n",
"synopsis_chain.apply(test_prompts)\n",
"mlflow_callback.flush_tracker(synopsis_chain)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e002823a",
"metadata": {
"id": "_jN73xcPVEpI"
},
"outputs": [],
"source": [
"from langchain.agents import AgentType, initialize_agent, load_tools"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "655bd47e",
"metadata": {
"id": "Gpq4rk6VT9cu"
},
"outputs": [],
"source": [
"# SCENARIO 3 - Agent with Tools\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=[mlflow_callback])\n",
"agent = initialize_agent(\n",
"    tools,\n",
"    llm,\n",
"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
"    callbacks=[mlflow_callback],\n",
"    verbose=True,\n",
")\n",
"agent.run(\n",
"    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")\n",
"mlflow_callback.flush_tracker(agent, finish=True)"
]
}
],
@@ -604,9 +191,9 @@
"provenance": []
},
"kernelspec": {
"display_name": "tracing",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "tracing"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -618,7 +205,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -14,18 +14,14 @@ its dependencies running locally.

- Install the Python SDK with `pip install unstructured`.
- You can install document-specific dependencies with extras, e.g. `pip install "unstructured[docx]"`.
- To install the dependencies for all document types, use `pip install "unstructured[all-docs]"`.
- Install the following system dependencies if they are not already available on your system, e.g. with `brew install` on macOS.
- Install the following system dependencies if they are not already available on your system.
  Depending on what document types you're parsing, you may not need all of these.
    - `libmagic-dev` (filetype detection)
    - `poppler-utils` (images and PDFs)
    - `tesseract-ocr` (images and PDFs)
    - `qpdf` (PDFs)
    - `libreoffice` (MS Office docs)
    - `pandoc` (EPUBs)

When running locally, Unstructured also recommends using Docker [by following this guide](https://docs.unstructured.io/open-source/installation/docker-installation)
to ensure all system dependencies are installed correctly.

If you want to get up and running with less setup, you can
simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or
`UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API.
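For orientation, a hedged sketch of that hosted-API route follows; no system dependencies are needed. The file name is a placeholder, and passing the key via `api_key` is an assumption to verify against the loader's current reference docs.

```python
# Minimal sketch: process a document via the hosted Unstructured API.
from langchain_community.document_loaders import UnstructuredAPIFileLoader

loader = UnstructuredAPIFileLoader(
    "example.pdf",                        # hypothetical local file
    api_key="YOUR_UNSTRUCTURED_API_KEY",  # assumption: key passed directly
)
docs = loader.load()
print(docs[0].metadata)
```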
@@ -26,7 +26,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet ain-py langchain-community"
"%pip install --upgrade --quiet ain-py"
]
},
{

@@ -11,15 +11,6 @@
"Vectorstores often have a hard time answering questions that require computing, grouping, and filtering structured data, so the high-level idea is to use a `pandas` dataframe to help with these types of questions. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"attachments": {},
"cell_type": "markdown",

@@ -24,15 +24,6 @@
"%pip install --upgrade --quiet amadeus > /dev/null"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "markdown",
"metadata": {},

@@ -34,8 +34,7 @@
"%pip install --upgrade --quiet azure-ai-formrecognizer > /dev/null\n",
"%pip install --upgrade --quiet azure-cognitiveservices-speech > /dev/null\n",
"%pip install --upgrade --quiet azure-ai-textanalytics > /dev/null\n",
"%pip install --upgrade --quiet azure-ai-vision-imageanalysis > /dev/null\n",
"%pip install -qU langchain-community"
"%pip install --upgrade --quiet azure-ai-vision-imageanalysis > /dev/null"
]
},
{

@@ -39,15 +39,6 @@
"%pip install --upgrade --quiet azure-ai-vision > /dev/null"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "code",
"execution_count": null,

@@ -11,15 +11,6 @@
">It is a cloud-based project management solution for businesses of all sizes featuring communication and collaboration tools to help achieve organizational goals."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -45,16 +45,6 @@
"- You will get the answer from your knowledge as the response. \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bfe4510e",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "markdown",
"id": "1435b193",

@@ -43,15 +43,6 @@
"You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/4af5385a-afe9-46f6-8a53-57fe2d63c5bc/r)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "code",
"execution_count": 1,

@@ -12,16 +12,6 @@
"The high-level idea is that we will create a question-answering chain for each document, and then use that "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0dae50f1",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "code",
"execution_count": 1,

@@ -60,7 +60,7 @@
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet pygithub langchain-community"
"%pip install --upgrade --quiet pygithub"
]
},
{

@@ -59,7 +59,7 @@
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet python-gitlab langchain-community"
"%pip install --upgrade --quiet python-gitlab"
]
},
{

@@ -34,15 +34,6 @@
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "markdown",
"metadata": {},

@@ -14,8 +14,7 @@
"To use this tool, you must first set as environment variables:\n",
"    JIRA_API_TOKEN\n",
"    JIRA_USERNAME\n",
"    JIRA_INSTANCE_URL\n",
"    JIRA_CLOUD"
"    JIRA_INSTANCE_URL"
]
},
{
@@ -36,20 +35,6 @@
"%pip install --upgrade --quiet atlassian-python-api"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e84d425c",
"metadata": {
"vscode": {
"languageId": "shellscript"
}
},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "code",
"execution_count": 2,
@@ -89,8 +74,7 @@
"os.environ[\"JIRA_API_TOKEN\"] = \"abc\"\n",
"os.environ[\"JIRA_USERNAME\"] = \"123\"\n",
"os.environ[\"JIRA_INSTANCE_URL\"] = \"https://jira.atlassian.com\"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"xyz\"\n",
"os.environ[\"JIRA_CLOUD\"] = \"True\""
"os.environ[\"OPENAI_API_KEY\"] = \"xyz\""
]
},
{
@@ -15,16 +15,6 @@
"We will use the JSON agent to answer some questions about the API spec."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc11cb15",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "markdown",
"id": "893f90fd-f8f6-470a-a76d-1f200ba02e2f",

@@ -29,15 +29,6 @@
"%pip install --upgrade --quiet multion langchain -q"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "code",
"execution_count": 37,

@@ -22,16 +22,6 @@
"### Initializing the agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e304b602",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "code",
"execution_count": null,

@@ -24,8 +24,7 @@
"outputs": [],
"source": [
"%pip install --upgrade --quiet O365\n",
"%pip install --upgrade --quiet beautifulsoup4  # This is optional but is useful for parsing HTML messages\n",
"%pip install -qU langchain-community"
"%pip install --upgrade --quiet beautifulsoup4  # This is optional but is useful for parsing HTML messages"
]
},
{

@@ -11,15 +11,6 @@
"**Note:** these tools are not recommended for use outside a sandboxed environment! "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -29,7 +20,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {
"tags": []
},
@@ -57,7 +48,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"metadata": {
"tags": []
},
@@ -97,7 +88,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {
"tags": []
},
@@ -125,7 +116,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"metadata": {},
"outputs": [
{
@@ -146,7 +137,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"metadata": {},
"outputs": [
{

@@ -22,16 +22,6 @@
"- Save your API key into GOLDEN_API_KEY env variable"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f771643c",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "code",
"execution_count": null,

@@ -99,7 +99,7 @@
},
"outputs": [],
"source": [
"from langchain_googledrive.tools.google_drive.tool import GoogleDriveSearchTool\n",
"from langchain_googldrive.tools.google_drive.tool import GoogleDriveSearchTool\n",
"from langchain_googledrive.utilities.google_drive import GoogleDriveAPIWrapper\n",
"\n",
"# By default, search only in the filename.\n",
@@ -343,15 +343,6 @@
"print(docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "b63c73c7e905001c",
"metadata": {},
"source": [
"## Filtered vector search (Preview)\n",
"Azure Cosmos DB for MongoDB supports pre-filtering with $lt, $lte, $eq, $neq, $gte, $gt, $in, $nin, and $regex. To use this feature, enable \"filtering vector search\" in the \"Preview Features\" tab of your Azure Subscription. Learn more about preview features [here](https://learn.microsoft.com/azure/cosmos-db/mongodb/vcore/vector-search#filtered-vector-search-preview)."
]
},
{
"cell_type": "code",
"execution_count": null,
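For illustration, a hedged sketch of a pre-filtered query follows. The `pre_filter` argument name and the `year` metadata field are assumptions; verify both against the current `AzureCosmosDBVectorSearch.similarity_search` signature.

```python
# Hedged sketch: restrict the vector search to documents whose metadata
# field "year" is greater than 2020 before scoring similarity.
# `vectorstore` is assumed to be an existing AzureCosmosDBVectorSearch instance.
results = vectorstore.similarity_search(
    "What did the president say?",
    k=4,
    pre_filter={"year": {"$gt": 2020}},  # assumption: field name and operator syntax
)
print(results[0].page_content)
```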
@@ -325,7 +325,7 @@
"id": "fedf6f13",
"metadata": {},
"source": [
"Next, we can create the PromptTemplate. This will be a combination of the `system_template` as well as a simpler template for where to put the text to be translated"
"Next, we can create the PromptTemplate. This will be a combination of the `system_template` as well as a simpler template for where the put the text"
]
},
{
@@ -538,7 +538,7 @@
"\n",
"### Client\n",
"\n",
"Now let's set up a client for programmatically interacting with our service. We can easily do this with the [langserve.RemoteRunnable](/docs/langserve/#client).\n",
"Now let's set up a client for programmatically interacting with our service. We can easily do this with the `[langserve.RemoteRunnable](/docs/langserve/#client)`.\n",
"Using this, we can interact with the served chain as if it were running client-side."
]
},

@@ -640,7 +640,7 @@
"metadata": {},
"source": [
"## Splitting and summarizing in a single chain\n",
"For convenience, we can wrap both the text splitting of our long document and summarizing in a single [chain](/docs/how_to/sequence):"
"For convenience, we can wrap both the text splitting of our long document and summarizing in a single `AnalyzeDocumentsChain`."
]
},
{
@@ -650,11 +650,12 @@
"metadata": {},
"outputs": [],
"source": [
"def split_text(text: str):\n",
"    return text_splitter.create_documents([text])\n",
"from langchain.chains import AnalyzeDocumentChain\n",
"\n",
"\n",
"summarize_document_chain = split_text | chain"
"summarize_document_chain = AnalyzeDocumentChain(\n",
"    combine_docs_chain=chain, text_splitter=text_splitter\n",
")\n",
"summarize_document_chain.invoke(docs[0].page_content)"
]
},
{
@@ -61,7 +61,7 @@ class __ModuleName__Loader(BaseLoader):
        .. code-block:: python

            TODO: Example output
    """  # noqa: E501
    """

    # TODO: This method must be implemented to load documents.
    # Do not implement load(), a default implementation is already available.

@@ -1,5 +1,4 @@
"""__ModuleName__ vector stores."""

from __future__ import annotations

import asyncio

@@ -38,10 +38,10 @@ optional = true
optional = true

[tool.poetry.group.lint.dependencies]
ruff = "^0.5"
ruff = "^0.1.8"

[tool.poetry.group.typing.dependencies]
mypy = "^1.10"
mypy = "^1.7.1"
langchain-core = { path = "../../core", develop = true }

[tool.poetry.group.dev]

@@ -1,5 +1,4 @@
"""Test Chat__ModuleName__ chat model."""

from __module_name__.chat_models import Chat__ModuleName__


@@ -1,5 +1,4 @@
"""Test __ModuleName__ embeddings."""

from __module_name__.embeddings import __ModuleName__Embeddings


@@ -1,5 +1,4 @@
"""Test __ModuleName__LLM llm."""

from __module_name__.llms import __ModuleName__LLM


@@ -1,5 +1,6 @@
"""Test chat model integration."""


from __module_name__.chat_models import Chat__ModuleName__


@@ -1,5 +1,6 @@
"""Test embedding model integration."""


from __module_name__.embeddings import __ModuleName__Embeddings


@@ -1,5 +1,4 @@
"""Test __ModuleName__ Chat API wrapper."""

from __module_name__ import __ModuleName__LLM


@@ -1,7 +1,6 @@
"""
Develop integration packages for LangChain.
"""

import re
import shutil
import subprocess
@@ -155,8 +154,7 @@ def create_doc(
        str,
        typer.Option(
            help=(
                "The type of component. Currently only 'ChatModel', 'DocumentLoader' "
                "supported."
                "The type of component. Currently only 'ChatModel', 'DocumentLoader' supported."
            ),
        ),
    ] = "ChatModel",

@@ -10,7 +10,6 @@ This codemod deals with the following cases:
4. `from pydantic.settings import BaseSettings as <name>`  # TODO: This is not working.
5. `import pydantic` -> `pydantic.BaseSettings`
"""

from __future__ import annotations

import json

@@ -1,5 +1,4 @@
"""Generate migrations from langchain to langchain-community or core packages."""

import importlib
import inspect
import pkgutil

@@ -1,5 +1,4 @@
"""Generate migrations for partner packages."""

import importlib
from typing import List, Tuple


@@ -1,5 +1,4 @@
"""Migrate LangChain to the most recent version."""

# Adapted from bump-pydantic
# https://github.com/pydantic/bump-pydantic
import difflib

41
libs/cli/poetry.lock
generated
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.

[[package]]
name = "aiohttp"
@@ -1384,29 +1384,28 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]

[[package]]
name = "ruff"
version = "0.5.0"
version = "0.1.15"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
    {file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"},
    {file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"},
    {file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"},
    {file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"},
    {file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"},
    {file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"},
    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"},
    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"},
    {file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"},
    {file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"},
    {file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"},
    {file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"},
    {file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"},
    {file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"},
    {file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"},
    {file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"},
    {file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"},
    {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"},
    {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"},
    {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"},
    {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"},
    {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"},
    {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"},
    {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"},
    {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"},
    {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"},
    {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"},
    {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"},
    {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"},
    {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"},
    {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"},
    {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"},
    {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"},
    {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"},
    {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"},
]

[[package]]
@@ -1837,4 +1836,4 @@ serve = []
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "f549b3468a0b27c75b171c3a4efd8df9c3b3ae737c7e097ffc3fb6fb0fe5f2ef"
content-hash = "4576fb13ecd9e13bc6c85e4cd6f56520708c7c1468f4b81bc6a346b128c9f695"
@@ -29,7 +29,7 @@ pytest = "^7.4.2"
pytest-watch = "^4.2.0"

[tool.poetry.group.lint.dependencies]
ruff = "^0.5"
ruff = "^0.1.5"

[tool.poetry.group.test.dependencies]

@@ -62,9 +62,9 @@ _bump_2.uses = { version = "version" }

_bump_1 = "poetry version patch"
_check_formatting = "poetry run ruff format . --diff"
_lint = "poetry run ruff check ."
_lint = "poetry run ruff ."
_format = "poetry run ruff format ."
_lint_fix = "poetry run ruff check . --fix"
_lint_fix = "poetry run ruff . --fix"

[build-system]
requires = ["poetry-core"]

@@ -1,5 +1,4 @@
"""Script to generate migrations for the migration script."""

import json
import pkgutil


@@ -1,5 +1,4 @@
"""Handle a test case where the import is updated and may involve an alias change."""

from tests.unit_tests.migrate.cli_runner.case import Case
from tests.unit_tests.migrate.cli_runner.file import File


@@ -3,7 +3,6 @@
Migration script only updates imports, not the rest of the code that uses the
import.
"""

from langchain_cli.namespaces.migrate.codemods.replace_imports import (
    RULE_TO_PATHS,
    _load_migrations_from_fixtures,

@@ -48,14 +48,14 @@ lint lint_diff lint_package lint_tests:
	./scripts/check_pydantic.sh .
	./scripts/lint_imports.sh
	./scripts/check_pickle.sh .
	poetry run ruff check .
	poetry run ruff .
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff --select I $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)

format format_diff:
	poetry run ruff format $(PYTHON_FILES)
	poetry run ruff check --select I --fix $(PYTHON_FILES)
	poetry run ruff --select I --fix $(PYTHON_FILES)

spell_check:
	poetry run codespell --toml pyproject.toml

@@ -1,5 +1,4 @@
"""Main entrypoint into package."""

from importlib import metadata

try:
@@ -206,7 +206,8 @@ class ChatCompletion:
        provider: str = "ChatOpenAI",
        stream: Literal[False] = False,
        **kwargs: Any,
    ) -> dict: ...
    ) -> dict:
        ...

    @overload
    @staticmethod
@@ -216,7 +217,8 @@ class ChatCompletion:
        provider: str = "ChatOpenAI",
        stream: Literal[True],
        **kwargs: Any,
    ) -> Iterable: ...
    ) -> Iterable:
        ...

    @staticmethod
    def create(
@@ -247,7 +249,8 @@ class ChatCompletion:
        provider: str = "ChatOpenAI",
        stream: Literal[False] = False,
        **kwargs: Any,
    ) -> dict: ...
    ) -> dict:
        ...

    @overload
    @staticmethod
@@ -257,7 +260,8 @@ class ChatCompletion:
        provider: str = "ChatOpenAI",
        stream: Literal[True],
        **kwargs: Any,
    ) -> AsyncIterator: ...
    ) -> AsyncIterator:
        ...

    @staticmethod
    async def acreate(
@@ -315,7 +319,8 @@ class Completions:
        provider: str = "ChatOpenAI",
        stream: Literal[False] = False,
        **kwargs: Any,
    ) -> ChatCompletions: ...
    ) -> ChatCompletions:
        ...

    @overload
    @staticmethod
@@ -325,7 +330,8 @@ class Completions:
        provider: str = "ChatOpenAI",
        stream: Literal[True],
        **kwargs: Any,
    ) -> Iterable: ...
    ) -> Iterable:
        ...

    @staticmethod
    def create(
@@ -360,7 +366,8 @@ class Completions:
        provider: str = "ChatOpenAI",
        stream: Literal[False] = False,
        **kwargs: Any,
    ) -> ChatCompletions: ...
    ) -> ChatCompletions:
        ...

    @overload
    @staticmethod
@@ -370,7 +377,8 @@ class Completions:
        provider: str = "ChatOpenAI",
        stream: Literal[True],
        **kwargs: Any,
    ) -> AsyncIterator: ...
    ) -> AsyncIterator:
        ...

    @staticmethod
    async def acreate(

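All of these hunks make the same mechanical change: the formatter collapses an ellipsis-only @overload body onto the signature line. A minimal standalone sketch of the pattern (function name and signatures are hypothetical):

from typing import Any, Iterable, Literal, Union, overload

@overload
def create(prompt: str, stream: Literal[False] = False, **kwargs: Any) -> dict: ...
@overload
def create(prompt: str, stream: Literal[True], **kwargs: Any) -> Iterable: ...
def create(prompt: str, stream: bool = False, **kwargs: Any) -> Union[dict, Iterable]:
    # Only this implementation runs; the overloads above exist for type checkers.
    return iter([prompt]) if stream else {"text": prompt}
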
@@ -626,25 +626,6 @@ def load_huggingface_tool(
    )


def raise_dangerous_tools_exception(name: str) -> None:
    raise ValueError(
        f"{name} is a dangerous tool. You cannot use it without opting in "
        "by setting allow_dangerous_tools to True. "
        "Most tools have some inherent risk to them merely because they are "
        'allowed to interact with the "real world". '
        "Please refer to the LangChain security guidelines "
        "at https://python.langchain.com/docs/security. "
        "Some tools have been designated as dangerous because they pose "
        "risk that is not intuitively obvious. For example, a tool that "
        "allows an agent to make requests to the web can also be used "
        "to make requests to a server that is only accessible from the "
        "server hosting the code. "
        "Again, all tools carry some risk, and it's your responsibility to "
        "understand which tools you're using and the risks associated with "
        "them."
    )


def load_tools(
    tool_names: List[str],
    llm: Optional[BaseLanguageModel] = None,
@@ -703,7 +684,22 @@ def load_tools(
    )
    for name in tool_names:
        if name in DANGEROUS_TOOLS and not allow_dangerous_tools:
            raise_dangerous_tools_exception(name)
            raise ValueError(
                f"{name} is a dangerous tool. You cannot use it without opting in "
                "by setting allow_dangerous_tools to True. "
                "Most tools have some inherent risk to them merely because they are "
                'allowed to interact with the "real world". '
                "Please refer to the LangChain security guidelines "
                "at https://python.langchain.com/docs/security. "
                "Some tools have been designated as dangerous because they pose "
                "risk that is not intuitively obvious. For example, a tool that "
                "allows an agent to make requests to the web can also be used "
                "to make requests to a server that is only accessible from the "
                "server hosting the code. "
                "Again, all tools carry some risk, and it's your responsibility to "
                "understand which tools you're using and the risks associated with "
                "them."
            )

        if name in {"requests"}:
            warnings.warn(
@@ -712,10 +708,8 @@ def load_tools(
            )
        if name == "requests_all":
            # expand requests into various methods
            if not allow_dangerous_tools:
                raise_dangerous_tools_exception(name)
            requests_method_tools = [
                _tool for _tool in DANGEROUS_TOOLS if _tool.startswith("requests_")
                _tool for _tool in _BASE_TOOLS if _tool.startswith("requests_")
            ]
            tool_names.extend(requests_method_tools)
        elif name in _BASE_TOOLS:

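As the exception text above spells out, these tools refuse to load unless the caller explicitly opts in. A minimal usage sketch (the import path reflects recent langchain-community layouts and may differ by version):

from langchain_community.agent_toolkits.load_tools import load_tools

# Without allow_dangerous_tools=True, requesting a dangerous tool such as
# "requests_all" raises the ValueError shown in the diff above.
tools = load_tools(["requests_all"], allow_dangerous_tools=True)
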
@@ -209,7 +209,7 @@ class OpenAIAssistantV2Runnable(OpenAIAssistantRunnable):
    as_agent: bool = False
    """Use as a LangChain agent, compatible with the AgentExecutor."""

    @root_validator(pre=False, skip_on_failure=True)
    @root_validator()
    def validate_async_client(cls, values: dict) -> dict:
        if values["async_client"] is None:
            import openai

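The one-line change above adds pre=False, skip_on_failure=True to the validator. Under Pydantic v1 semantics, a post-validator (pre=False) should declare skip_on_failure=True so it is skipped when field validation has already failed, rather than running on incomplete values. A minimal sketch of the same pattern (class and field are hypothetical; the import assumes the Pydantic v1 compatibility namespace):

from typing import Any

from pydantic.v1 import BaseModel, root_validator

class ClientHolder(BaseModel):
    client: Any = None

    @root_validator(pre=False, skip_on_failure=True)
    def default_client(cls, values: dict) -> dict:
        # Runs after field validation; skipped entirely if a field failed.
        if values["client"] is None:
            values["client"] = object()
        return values
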
@@ -58,7 +58,6 @@ from langchain_community.vectorstores.azure_cosmos_db import (
    CosmosDBSimilarityType,
    CosmosDBVectorSearchType,
)
from langchain_community.vectorstores.utils import DistanceStrategy

try:
    from sqlalchemy.orm import declarative_base
@@ -85,7 +84,6 @@ from langchain_community.vectorstores import (
    OpenSearchVectorSearch as OpenSearchVectorStore,
)
from langchain_community.vectorstores.redis import Redis as RedisVectorstore
from langchain_community.vectorstores.singlestoredb import SingleStoreDB

logger = logging.getLogger(__file__)

@@ -2191,14 +2189,14 @@ class AzureCosmosDBSemanticCache(BaseCache):
                index_name=index_name,
            )
        else:
            self._cache_dict[index_name] = (
                AzureCosmosDBVectorSearch.from_connection_string(
                    connection_string=self.cosmosdb_connection_string,
                    namespace=namespace,
                    embedding=self.embedding,
                    index_name=index_name,
                    application_name=self.application_name,
                )
            self._cache_dict[
                index_name
            ] = AzureCosmosDBVectorSearch.from_connection_string(
                connection_string=self.cosmosdb_connection_string,
                namespace=namespace,
                embedding=self.embedding,
                index_name=index_name,
                application_name=self.application_name,
            )

        # create index for the vectorstore
@@ -2375,221 +2373,3 @@ class OpenSearchSemanticCache(BaseCache):
        if index_name in self._cache_dict:
            self._cache_dict[index_name].delete_index(index_name=index_name)
            del self._cache_dict[index_name]


class SingleStoreDBSemanticCache(BaseCache):
    """Cache that uses SingleStore DB as a backend."""

    def __init__(
        self,
        embedding: Embeddings,
        *,
        cache_table_prefix: str = "cache_",
        search_threshold: float = 0.2,
        **kwargs: Any,
    ):
        """Initialize with necessary components.

        Args:
            embedding (Embeddings): A text embedding model.
            cache_table_prefix (str, optional): Prefix for the cache table name.
                Defaults to "cache_".
            search_threshold (float, optional): The minimum similarity score for
                a search result to be considered a match. Defaults to 0.2.

        Following arguments pertain to the SingleStoreDB vector store:

            distance_strategy (DistanceStrategy, optional):
                Determines the strategy employed for calculating
                the distance between vectors in the embedding space.
                Defaults to DOT_PRODUCT.
                Available options are:
                - DOT_PRODUCT: Computes the scalar product of two vectors.
                    This is the default behavior
                - EUCLIDEAN_DISTANCE: Computes the Euclidean distance between
                    two vectors. This metric considers the geometric distance in
                    the vector space, and might be more suitable for embeddings
                    that rely on spatial relationships. This metric is not
                    compatible with the WEIGHTED_SUM search strategy.

            content_field (str, optional): Specifies the field to store the content.
                Defaults to "content".
            metadata_field (str, optional): Specifies the field to store metadata.
                Defaults to "metadata".
            vector_field (str, optional): Specifies the field to store the vector.
                Defaults to "vector".
            id_field (str, optional): Specifies the field to store the id.
                Defaults to "id".

            use_vector_index (bool, optional): Toggles the use of a vector index.
                Works only with SingleStoreDB 8.5 or later. Defaults to False.
                If set to True, vector_size parameter is required to be set to
                a proper value.

            vector_index_name (str, optional): Specifies the name of the vector index.
                Defaults to empty. Will be ignored if use_vector_index is set to False.

            vector_index_options (dict, optional): Specifies the options for
                the vector index. Defaults to {}.
                Will be ignored if use_vector_index is set to False. The options are:
                index_type (str, optional): Specifies the type of the index.
                    Defaults to IVF_PQFS.
                For more options, please refer to the SingleStoreDB documentation:
                https://docs.singlestore.com/cloud/reference/sql-reference/vector-functions/vector-indexing/

            vector_size (int, optional): Specifies the size of the vector.
                Defaults to 1536. Required if use_vector_index is set to True.
                Should be set to the same value as the size of the vectors
                stored in the vector_field.

        Following arguments pertain to the connection pool:

            pool_size (int, optional): Determines the number of active connections in
                the pool. Defaults to 5.
            max_overflow (int, optional): Determines the maximum number of connections
                allowed beyond the pool_size. Defaults to 10.
            timeout (float, optional): Specifies the maximum wait time in seconds for
                establishing a connection. Defaults to 30.

        Following arguments pertain to the database connection:

            host (str, optional): Specifies the hostname, IP address, or URL for the
                database connection. The default scheme is "mysql".
            user (str, optional): Database username.
            password (str, optional): Database password.
            port (int, optional): Database port. Defaults to 3306 for non-HTTP
                connections, 80 for HTTP connections, and 443 for HTTPS connections.
            database (str, optional): Database name.

        Additional optional arguments provide further customization over the
        database connection:

            pure_python (bool, optional): Toggles the connector mode. If True,
                operates in pure Python mode.
            local_infile (bool, optional): Allows local file uploads.
            charset (str, optional): Specifies the character set for string values.
            ssl_key (str, optional): Specifies the path of the file containing the SSL
                key.
            ssl_cert (str, optional): Specifies the path of the file containing the SSL
                certificate.
            ssl_ca (str, optional): Specifies the path of the file containing the SSL
                certificate authority.
            ssl_cipher (str, optional): Sets the SSL cipher list.
            ssl_disabled (bool, optional): Disables SSL usage.
            ssl_verify_cert (bool, optional): Verifies the server's certificate.
                Automatically enabled if ``ssl_ca`` is specified.
            ssl_verify_identity (bool, optional): Verifies the server's identity.
            conv (dict[int, Callable], optional): A dictionary of data conversion
                functions.
            credential_type (str, optional): Specifies the type of authentication to
                use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO.
            autocommit (bool, optional): Enables autocommits.
            results_type (str, optional): Determines the structure of the query results:
                tuples, namedtuples, dicts.
            results_format (str, optional): Deprecated. This option has been renamed to
                results_type.

        Examples:
            Basic Usage:

            .. code-block:: python

                import langchain
                from langchain.cache import SingleStoreDBSemanticCache
                from langchain.embeddings import OpenAIEmbeddings

                langchain.llm_cache = SingleStoreDBSemanticCache(
                    embedding=OpenAIEmbeddings(),
                    host="https://user:password@127.0.0.1:3306/database"
                )

            Advanced Usage:

            .. code-block:: python

                import langchain
                from langchain.cache import SingleStoreDBSemanticCache
                from langchain.embeddings import OpenAIEmbeddings

                langchain.llm_cache = SingleStoreDBSemanticCache(
                    embedding=OpenAIEmbeddings(),
                    use_vector_index=True,
                    host="127.0.0.1",
                    port=3306,
                    user="user",
                    password="password",
                    database="db",
                    table_name="my_custom_table",
                    pool_size=10,
                    timeout=60,
                )
        """

        self._cache_dict: Dict[str, SingleStoreDB] = {}
        self.embedding = embedding
        self.cache_table_prefix = cache_table_prefix
        self.search_threshold = search_threshold

        # Pass the rest of the kwargs to the connection.
        self.connection_kwargs = kwargs

    def _index_name(self, llm_string: str) -> str:
        hashed_index = _hash(llm_string)
        return f"{self.cache_table_prefix}{hashed_index}"

    def _get_llm_cache(self, llm_string: str) -> SingleStoreDB:
        index_name = self._index_name(llm_string)

        # return vectorstore client for the specific llm string
        if index_name not in self._cache_dict:
            self._cache_dict[index_name] = SingleStoreDB(
                embedding=self.embedding,
                table_name=index_name,
                **self.connection_kwargs,
            )
        return self._cache_dict[index_name]

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        llm_cache = self._get_llm_cache(llm_string)
        generations: List = []
        # Read from a Hash
        results = llm_cache.similarity_search_with_score(
            query=prompt,
            k=1,
        )
        if results:
            for document_score in results:
                if (
                    document_score[1] > self.search_threshold
                    and llm_cache.distance_strategy == DistanceStrategy.DOT_PRODUCT
                ) or (
                    document_score[1] < self.search_threshold
                    and llm_cache.distance_strategy
                    == DistanceStrategy.EUCLIDEAN_DISTANCE
                ):
                    generations.extend(loads(document_score[0].metadata["return_val"]))
        return generations if generations else None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "SingleStoreDBSemanticCache only supports caching of "
                    f"normal LLM generations, got {type(gen)}"
                )
        llm_cache = self._get_llm_cache(llm_string)
        metadata = {
            "llm_string": llm_string,
            "prompt": prompt,
            "return_val": dumps([g for g in return_val]),
        }
        llm_cache.add_texts(texts=[prompt], metadatas=[metadata])

    def clear(self, **kwargs: Any) -> None:
        """Clear semantic cache for a given llm_string."""
        index_name = self._index_name(kwargs["llm_string"])
        if index_name in self._cache_dict:
            self._cache_dict[index_name].drop()
            del self._cache_dict[index_name]

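One behavior worth noting in lookup above: the threshold comparison flips direction with the distance strategy, because a larger dot product means more similar while a larger Euclidean distance means less similar. Restated as a standalone rule (a sketch, not part of the diff):

def is_cache_hit(score: float, threshold: float, strategy: str) -> bool:
    # DOT_PRODUCT: higher score = more similar, so a hit must exceed the threshold.
    if strategy == "DOT_PRODUCT":
        return score > threshold
    # EUCLIDEAN_DISTANCE: lower score = closer, so a hit must stay below it.
    if strategy == "EUCLIDEAN_DISTANCE":
        return score < threshold
    raise ValueError(f"Unknown distance strategy: {strategy}")
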
@@ -6,7 +6,6 @@

BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
"""

import importlib
from typing import TYPE_CHECKING, Any

@@ -82,9 +82,9 @@ class ArizeCallbackHandler(BaseCallbackHandler):
                "completion_tokens", 0
            )
        else:
            self.prompt_tokens = self.total_tokens = self.completion_tokens = (
                0  # assign default value
            )
            self.prompt_tokens = (
                self.total_tokens
            ) = self.completion_tokens = 0  # assign default value

        for generations in response.generations:
            for generation in generations:

@@ -1,5 +1,4 @@
"""ArthurAI's Callback Handler."""

from __future__ import annotations

import os

@@ -1,5 +1,4 @@
"""Callback handler for promptlayer."""

from __future__ import annotations

import datetime

@@ -1,5 +1,4 @@
"""A Tracer Implementation that records activity to Weights & Biases."""

from __future__ import annotations

import json
@@ -235,9 +234,9 @@ def build_tree(runs: List[Dict[str, Any]]) -> Dict[str, Any]:

    for child_id, parent_id in child_to_parent.items():
        parent_dict = id_to_data[parent_id]
        parent_dict[next(iter(parent_dict))][next(iter(id_to_data[child_id]))] = (
            id_to_data[child_id][next(iter(id_to_data[child_id]))]
        )
        parent_dict[next(iter(parent_dict))][
            next(iter(id_to_data[child_id]))
        ] = id_to_data[child_id][next(iter(id_to_data[child_id]))]

    root_dict = next(
        data for id_val, data in id_to_data.items() if id_val not in child_to_parent

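The build_tree hunk above is purely a re-wrap of a dense nested-dict assignment. The underlying step attaches each child's payload under its parent, where every node is a singleton dict keyed by its id. A self-contained sketch with made-up data:

# Each entry maps node_id -> {node_id: payload}; children nest under parents.
id_to_data = {"a": {"a": {}}, "b": {"b": {"run": 1}}}
child_to_parent = {"b": "a"}

for child_id, parent_id in child_to_parent.items():
    parent_dict = id_to_data[parent_id]
    # The single key of parent_dict is the parent id; nest the child payload there.
    parent_dict[next(iter(parent_dict))][child_id] = id_to_data[child_id][child_id]

assert id_to_data["a"] == {"a": {"b": {"run": 1}}}
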
@@ -1,5 +1,4 @@
"""Methods for creating chains that use Ernie function-calling APIs."""

import inspect
from typing import (
    Any,
@@ -192,9 +191,9 @@ def get_ernie_output_parser(
        }
    else:
        pydantic_schema = functions[0]
        output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
            PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
        )
        output_parser: Union[
            BaseOutputParser, BaseGenerationOutputParser
        ] = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
    else:
        output_parser = JsonOutputFunctionsParser(args_only=len(functions) <= 1)
    return output_parser

@@ -1,5 +1,4 @@
"""Question answering over a graph."""

from __future__ import annotations

import re

@@ -1,5 +1,4 @@
"""Question answering over a graph."""

from __future__ import annotations

from typing import Any, Dict, List, Optional

Some files were not shown because too many files have changed in this diff