template READMEs in docs (#13152)
Commit dd7959f4ac (parent 86b93b5810)
.gitignore (vendored): 1 addition

@@ -178,3 +178,4 @@ docs/docs/build
 docs/docs/node_modules
 docs/docs/yarn.lock
 _dist
+docs/docs/templates
@@ -86,7 +86,8 @@
    "model = AutoModelForCausalLM.from_pretrained(model_id)\n",
    "pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, max_new_tokens=10)\n",
    "hf = HuggingFacePipeline(pipeline=pipe)"
-  ]
+  ],
+  "id": "7f426a4f"
  },
  {
   "cell_type": "markdown",
@@ -96,7 +97,8 @@
    "\n",
    "With the model loaded into memory, you can compose it with a prompt to\n",
    "form a chain."
-  ]
+  ],
+  "id": "60e7ba8d"
  },
  {
   "cell_type": "code",
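For reference, the cells touched above load a Hugging Face pipeline and compose it with a prompt into a chain. A minimal self-contained sketch of that flow; the model_id value and the prompt wording are assumptions for illustration, not taken from this diff:

# Minimal sketch of the notebook's pipeline-to-chain flow.
# model_id and the prompt template are hypothetical placeholders.
from langchain.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "gpt2"  # assumption: any causal LM id works here
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10)
hf = HuggingFacePipeline(pipeline=pipe)

# Compose the loaded model with a prompt to form a chain (LCEL pipe syntax).
prompt = PromptTemplate.from_template("Question: {question}\n\nAnswer:")
chain = prompt | hf
print(chain.invoke({"question": "What is electroencephalography?"}))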
@@ -152,7 +154,8 @@
    "question = \"What is electroencephalography?\"\n",
    "\n",
    "print(gpu_chain.invoke({\"question\": question}))"
-  ]
+  ],
+  "id": "703c91c8"
  },
  {
   "cell_type": "markdown",
@@ -161,7 +164,8 @@
    "### Batch GPU Inference\n",
    "\n",
    "If running on a device with GPU, you can also run inference on the GPU in batch mode."
-  ]
+  ],
+  "id": "59276016"
  },
  {
   "cell_type": "code",
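The batch-mode cell referenced here can drive the same chain through the runnable batch API. A sketch, assuming the gpu_chain built earlier in the notebook; the question list is illustrative:

# Assumes gpu_chain exists as in the cells above; questions are illustrative.
questions = [
    "What is electroencephalography?",
    "What is a convolutional neural network?",
]
# .batch() maps the chain over all inputs at once, so the underlying
# pipeline can batch GPU inference instead of running one prompt at a time.
answers = gpu_chain.batch([{"question": q} for q in questions])
for answer in answers:
    print(answer)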
@@ -175,6 +175,11 @@ const config = {
          label: "More",
          position: "left",
          items: [
+           {
+             type: "docSidebar",
+             sidebarId: "templates",
+             label: "Templates",
+           },
            {
              to: "/docs/community",
              label: "Community",
docs/scripts/copy_templates.py: new file, 36 lines

@@ -0,0 +1,36 @@
+import glob
+import os
+from pathlib import Path
+import re
+import shutil
+
+
+TEMPLATES_DIR = Path(os.path.abspath(__file__)).parents[2] / "templates"
+DOCS_TEMPLATES_DIR = Path(os.path.abspath(__file__)).parents[1] / "docs" / "templates"
+
+
+readmes = list(glob.glob(str(TEMPLATES_DIR) + "/*/README.md"))
+destinations = [readme[len(str(TEMPLATES_DIR)) + 1:-10] + ".md" for readme in readmes]
+for source, destination in zip(readmes, destinations):
+    full_destination = DOCS_TEMPLATES_DIR / destination
+    shutil.copyfile(source, full_destination)
+    with open(full_destination, "r") as f:
+        content = f.read()
+    # remove images
+    content = re.sub("\!\[.*?\]\((.*?)\)", "", content)
+    with open(full_destination, "w") as f:
+        f.write(content)
+
+sidebar_hidden = """---
+sidebar_class_name: hidden
+---
+
+"""
+TEMPLATES_INDEX_DESTINATION = DOCS_TEMPLATES_DIR / "index.md"
+with open(TEMPLATES_INDEX_DESTINATION, "r") as f:
+    content = f.read()
+# replace relative links
+content = re.sub("\]\(\.\.\/", "](/docs/templates/", content)
+with open(TEMPLATES_INDEX_DESTINATION, "w") as f:
+    f.write(sidebar_hidden + content)
+
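Two details of this script are easy to misread: the slice strips the templates directory prefix plus the 10-character "/README.md" suffix, and the first re.sub drops Markdown image tags wholesale. A quick sketch under hypothetical paths:

import re

# Hypothetical inputs, for illustration only.
templates_dir = "/repo/templates"
readme = "/repo/templates/rag-chroma/README.md"

# Drop "<templates_dir>/" (len + 1 for the slash) and the 10-character
# "/README.md" suffix, then append ".md".
destination = readme[len(templates_dir) + 1:-10] + ".md"
assert destination == "rag-chroma.md"

# The image regex removes Markdown image tags entirely.
content = "Intro text ![architecture diagram](img/diagram.png) end"
assert re.sub(r"\!\[.*?\]\((.*?)\)", "", content) == "Intro text  end"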
@@ -123,4 +123,14 @@ module.exports = {
   guides: [
     {type: "autogenerated", dirName: "guides" }
   ],
+  templates: [
+    {
+      type: "category",
+      label: "Templates",
+      items: [
+        { type: "autogenerated", dirName: "templates" },
+      ],
+      link: { type: 'doc', id: "templates/index" }
+    },
+  ],
 };
@@ -47,8 +47,11 @@ source .venv/bin/activate
 python3.11 -m pip install --upgrade pip
 python3.11 -m pip install -r vercel_requirements.txt
 python3.11 scripts/model_feat_table.py
-nbdoc_build --srcdir docs
+mkdir docs/templates
+cp ../templates/docs/INDEX.md docs/templates/index.md
+python3.11 scripts/copy_templates.py
 cp ../cookbook/README.md src/pages/cookbook.mdx
 cp ../.github/CONTRIBUTING.md docs/contributing.md
 wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md
+nbdoc_build --srcdir docs
 python3.11 scripts/generate_api_reference_links.py