Mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-10 15:33:11 +00:00)

Commit: Wfh/ref links (#8454)

This commit updates documentation import paths across several pages and notebooks: experimental modules move to the separate `langchain_experimental` package, and a few classes are now imported from their current locations (e.g. `Rockset` instead of `RocksetDB`).

@@ -216,7 +216,7 @@
 },
 "outputs": [],
 "source": [
-"from langchain.experimental.llms import JsonFormer\n",
+"from langchain_experimental.llms import JsonFormer\n",
 "\n",
 "json_former = JsonFormer(json_schema=decoder_schema, pipeline=hf_model)"
 ]

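With this change, `JsonFormer` comes from the separately installed `langchain-experimental` package (`pip install langchain-experimental`). A minimal sketch of the new import in use; the model name and schema below are placeholders, not the notebook's actual values:

```python
# Sketch, not the notebook's exact code: hf_model and decoder_schema stand in
# for objects the notebook builds in earlier cells.
from langchain_experimental.llms import JsonFormer
from transformers import pipeline

hf_model = pipeline("text-generation", model="gpt2", max_new_tokens=200)
decoder_schema = {
    "title": "Decoding Schema",
    "type": "object",
    "properties": {"action": {"type": "string"}},
}

# JsonFormer constrains generation so the output conforms to the JSON schema.
json_former = JsonFormer(json_schema=decoder_schema, pipeline=hf_model)
```
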
@@ -162,7 +162,7 @@
 }
 ],
 "source": [
-"from langchain.experimental.llms import RELLM\n",
+"from langchain_experimental.llms import RELLM\n",
 "\n",
 "model = RELLM(pipeline=hf_model, regex=pattern, max_new_tokens=200)\n",
 "\n",

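Same move for `RELLM`. A sketch under the same assumptions: the model is a placeholder, the constraint pattern is illustrative, and note that RELLM takes a pattern compiled with the third-party `regex` library, not the stdlib `re`:

```python
import regex  # third-party `regex` library, required by RELLM
from langchain_experimental.llms import RELLM
from transformers import pipeline

hf_model = pipeline("text-generation", model="gpt2")  # placeholder model
pattern = regex.compile(r'\{"action": "\w+"\}')       # hypothetical pattern

# Generation is constrained so the output matches the regex.
model = RELLM(pipeline=hf_model, regex=pattern, max_new_tokens=200)
```
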
@@ -13,7 +13,7 @@ This page provides instructions on how to use the DataForSEO search APIs within
 The DataForSEO utility wraps the API. To import this utility, use:
 
 ```python
-from langchain.utilities import DataForSeoAPIWrapper
+from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
 ```
 
 For a detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/dataforseo.ipynb).

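A sketch of the corrected import in use. The credential environment variable names and the `run()` call follow the wrapper's usual conventions, assumed here rather than taken from this page:

```python
import os

from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper

# Assumed env var names for DataForSEO credentials
os.environ["DATAFORSEO_LOGIN"] = "<your-login>"
os.environ["DATAFORSEO_PASSWORD"] = "<your-password>"

wrapper = DataForSeoAPIWrapper()
print(wrapper.run("weather in Los Angeles"))  # text summary of search results
```
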
@@ -177,8 +177,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.chains import TransformChain, SQLDatabaseChain, SimpleSequentialChain\n",
-"from langchain.sql_database import SQLDatabase"
+"from langchain.chains import TransformChain, SimpleSequentialChain\n",
+"from langchain.sql_database import SQLDatabase\n",
+"from langchain_experimental.sql import SQLDatabaseChain"
 ]
 },
 {

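Here only `SQLDatabaseChain` moved to `langchain_experimental`; `TransformChain`, `SimpleSequentialChain`, and `SQLDatabase` stay in core `langchain`. A minimal sketch wiring the relocated chain to a database (the SQLite URI and the LLM choice are placeholders):

```python
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///example.db")  # placeholder database
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)
db_chain.run("How many users signed up this month?")  # natural-language query
```
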
@@ -15,7 +15,7 @@ pip install rockset
 See a [usage example](/docs/integrations/vectorstores/rockset).
 
 ```python
-from langchain.vectorstores import RocksetDB
+from langchain.vectorstores import Rockset
 ```
 
 ## Document Loader

@@ -16,5 +16,5 @@ pip install spacy
 See a [usage example](/docs/modules/data_connection/document_transformers/text_splitters/split_by_token.html#spacy).
 
 ```python
-from langchain.llms import SpacyTextSplitter
+from langchain.text_splitter import SpacyTextSplitter
 ```

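The old path was simply wrong: `SpacyTextSplitter` is a text splitter, not an LLM. A sketch of the corrected import in use (chunk size and input are arbitrary; spaCy's `en_core_web_sm` model must be downloaded first):

```python
# Requires: pip install spacy && python -m spacy download en_core_web_sm
from langchain.text_splitter import SpacyTextSplitter

splitter = SpacyTextSplitter(chunk_size=1000)
chunks = splitter.split_text("Some long document text. It is split on sentence boundaries.")
```
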
@@ -77,7 +77,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.embeddings.openai import LocalAIEmbeddings"
+"from langchain.embeddings import LocalAIEmbeddings"
 ]
 },
 {

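`LocalAIEmbeddings` is exposed at the package level rather than under `embeddings.openai`. A sketch, assuming a LocalAI server is running locally (endpoint and model name are placeholders):

```python
from langchain.embeddings import LocalAIEmbeddings

embeddings = LocalAIEmbeddings(
    openai_api_base="http://localhost:8080",  # assumed LocalAI endpoint
    model="text-embedding-ada-002",           # placeholder model name
)
vector = embeddings.embed_query("Hello, world!")
```
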
@@ -15,7 +15,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.utilities import DataForSeoAPIWrapper"
+"from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper"
 ]
 },
 {

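The same import fix, applied in the tools notebook. Usage is as in the docs-page sketch above; for structured output the wrapper also exposes a `results()`-style method (an assumption based on LangChain's common wrapper convention, not stated on this page):

```python
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper

wrapper = DataForSeoAPIWrapper()
json_results = wrapper.results("best pizza in new york")  # assumed: raw JSON payload
```
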
@@ -124,7 +124,7 @@
 "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
 "from langchain.document_loaders import TextLoader\n",
-"from langchain.vectorstores.rocksetdb import RocksetDB\n",
+"from langchain.vectorstores import Rockset\n",
 "\n",
 "loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
 "documents = loader.load()\n",

@@ -150,7 +150,7 @@
 "# Make sure the environment variable OPENAI_API_KEY is set up\n",
 "embeddings = OpenAIEmbeddings()\n",
 "\n",
-"docsearch = RocksetDB(\n",
+"docsearch = Rockset(\n",
 "    client=rockset_client,\n",
 "    embeddings=embeddings,\n",
 "    collection_name=COLLECTION_NAME,\n",

@@ -185,7 +185,7 @@
 "source": [
 "query = \"What did the president say about Ketanji Brown Jackson\"\n",
 "output = docsearch.similarity_search_with_relevance_scores(\n",
-"    query, 4, RocksetDB.DistanceFunction.COSINE_SIM\n",
+"    query, 4, Rockset.DistanceFunction.COSINE_SIM\n",
 ")\n",
 "print(\"output length:\", len(output))\n",
 "for d, dist in output:\n",

@@ -221,7 +221,7 @@
 "output = docsearch.similarity_search_with_relevance_scores(\n",
 "    query,\n",
 "    4,\n",
-"    RocksetDB.DistanceFunction.COSINE_SIM,\n",
+"    Rockset.DistanceFunction.COSINE_SIM,\n",
 "    where_str=\"{} NOT LIKE '%citizens%'\".format(TEXT_KEY),\n",
 ")\n",
 "print(\"output length:\", len(output))\n",

@@ -237,15 +237,16 @@
 ]
 },
 {
+"attachments": {},
 "cell_type": "markdown",
 "id": "0765b822",
 "metadata": {},
 "source": [
 "### 3. [Optional] Drop all inserted documents\n",
 "\n",
-"In order to delete texts from the Rockset collection, you need to know the unique ID associated with each document inside Rockset. These ids can either be supplied directly by the user while inserting the texts (in the `RocksetDB.add_texts()` function), or Rockset will generate a unique ID for each document. Either way, `Rockset.add_texts()` returns the ids for the inserted documents.\n",
+"In order to delete texts from the Rockset collection, you need to know the unique ID associated with each document inside Rockset. These ids can either be supplied directly by the user while inserting the texts (in the `Rockset.add_texts()` function), or Rockset will generate a unique ID for each document. Either way, `Rockset.add_texts()` returns the ids for the inserted documents.\n",
 "\n",
-"To delete these docs, simply use the `RocksetDB.delete_texts()` function."
+"To delete these docs, simply use the `Rockset.delete_texts()` function."
 ]
 },
 {

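Taken together, the Rockset hunks are a pure rename, `RocksetDB` to `Rockset`, with the import flattened to the package level. A condensed sketch of the renamed class end to end; the client setup, collection name, and keys mirror the notebook's variables, but the concrete values here are placeholders:

```python
import rockset
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Rockset

# Placeholder client configuration (region and key are illustrative)
rockset_client = rockset.RocksetClient(host=rockset.Regions.usw2a1, api_key="<api-key>")

docsearch = Rockset(
    client=rockset_client,
    embeddings=OpenAIEmbeddings(),     # needs OPENAI_API_KEY in the environment
    collection_name="langchain_demo",  # placeholder collection
    text_key="text",
    embedding_key="embedding",
)

# add_texts returns the per-document ids (supplied or Rockset-generated),
# which is exactly what delete_texts needs afterwards.
ids = docsearch.add_texts(["hello rockset"])
output = docsearch.similarity_search_with_relevance_scores(
    "hello", 1, Rockset.DistanceFunction.COSINE_SIM
)
docsearch.delete_texts(ids)
```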