docs: Multi-version document build (#1741)

Fangyin Cheng 2024-07-23 11:10:28 +08:00 committed by GitHub
parent 84988b89fe
commit 4149252321
24 changed files with 3093 additions and 1963 deletions


@@ -2,4 +2,10 @@ models/
plugins/
pilot/data
pilot/message
logs/
logs/
venv/
web/node_modules/
docs/node_modules/
build/
docs/build/
docs/Dockerfile-deploy

.github/workflows/doc-image-publish.yml (new file, 39 lines)

@@ -0,0 +1,39 @@
name: Build and push docs image
on:
  push:
    branches:
      - main
    paths:
      - 'docs/**'
  release:
    types: [published]
  workflow_dispatch:
permissions:
  contents: read
jobs:
  build-image:
    runs-on: ubuntu-latest
    # run unless event type is pull_request
    if: github.event_name != 'pull_request'
    steps:
      - uses: actions/checkout@v3
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          context: .
          file: ./docs/Dockerfile-deploy
          platforms: linux/amd64
          push: true
          tags: eosphorosai/dbgpt-docs:${{ github.ref_name }},eosphorosai/dbgpt-docs:latest
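
Because the workflow also declares `workflow_dispatch`, a build can be triggered by hand. A minimal sketch using the GitHub CLI (assuming `gh` is installed and authenticated for this repository):

```bash
# Manually trigger the docs image build and push
gh workflow run doc-image-publish.yml --ref main
```

Since the pushed image is tagged with `github.ref_name`, a manual run from `main` publishes `eosphorosai/dbgpt-docs:main` and `:latest`.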

docs/Dockerfile-deploy (new file, 95 lines)

@@ -0,0 +1,95 @@
FROM node:lts-alpine AS build
RUN apk add --no-cache git
ARG NPM_REGISTRY=https://registry.npmjs.org
ENV NPM_REGISTRY=$NPM_REGISTRY
# Set the GitHub CI environment variable
ARG CI=true
ENV CI=$CI
WORKDIR /app
# Copy package.json and package-lock.json to a separate build directory
COPY docs/package*.json /app-build/docs/
# Install dependencies in the separate build directory
RUN cd /app-build/docs && \
    npm config set registry $NPM_REGISTRY && \
    npm ci
# Copy the rest of the application to /app and /app-build
COPY . /app-build
COPY . /app
# Make sure we have the full repository history (needed to list tags)
RUN if [ "$CI" = "true" ]; then \
        git fetch --prune --unshallow; \
    fi
ARG NUM_VERSION=2
ENV NUM_VERSION=$NUM_VERSION
# Commit any local changes to the repository, just for local testing
# (sometimes we just want to test changes to this Dockerfile)
RUN git config --global user.email "dbgpt@example.com" && \
    git config --global user.name "DB-GPT" && \
    git add . && git commit --no-verify -m "Commit message"
# Build the documentation versions directly in the Dockerfile
RUN git config --global --add safe.directory /app && \
    # Record the current position
    CURRENT_POSITION=$(git rev-parse --abbrev-ref HEAD) && \
    # Get the latest tags
    TAGS=$(git tag --sort=-creatordate | head -n $NUM_VERSION | tac) && \
    # If there are no tags, fall back to the latest commits
    if [ -z "$TAGS" ]; then \
        TAGS=$(git log --format="%h" -n $NUM_VERSION | tac); \
    fi && \
    for TAG in $TAGS; do \
        echo "Creating version $TAG"; \
        cd /app/docs && git checkout $TAG; \
        echo "Checked out to tag: $TAG"; \
        # Copy the necessary files to the build directory for each tag
        rm -rf /app-build/docs/docs /app-build/docs/sidebars.js /app-build/docs/static /app-build/docs/src && \
        cp -r /app/docs/docs /app-build/docs/ && \
        cp /app/docs/sidebars.js /app-build/docs/ && \
        cp -r /app/docs/static /app-build/docs/ && \
        cp -r /app/docs/src /app-build/docs/; \
        # Create a new version
        cd /app-build/docs && npm run docusaurus docs:version $TAG || exit 1; \
    done && \
    # Return to the original position and build the dev version
    cd /app/docs && git checkout $CURRENT_POSITION && \
    rm -rf /app-build/docs/docs /app-build/docs/sidebars.js /app-build/docs/static /app-build/docs/src && \
    cp -r /app/docs/docs /app-build/docs/ && \
    cp /app/docs/sidebars.js /app-build/docs/ && \
    cp -r /app/docs/static /app-build/docs/ && \
    cp -r /app/docs/src /app-build/docs/; \
    cd /app-build/docs && npm run build && \
    echo $TAGS | tr ' ' '\n' | tac > /app-build/docs/build/versions.txt && \
    echo "latest" >> /app-build/docs/build/versions.txt && \
    echo "Built versions:" && \
    cat /app-build/docs/build/versions.txt

# For production
FROM nginx:alpine
# Copy the nginx configuration file
# COPY nginx.conf /etc/nginx/nginx.conf
# Copy the build output to replace the default nginx contents.
COPY --from=build /app-build/docs/build /usr/share/nginx/html
COPY --from=build /app-build/docs/versioned_docs/ /usr/share/nginx/html/versioned_docs/
COPY --from=build /app-build/docs/versioned_sidebars/ /usr/share/nginx/html/versioned_sidebars/
RUN echo '#!/bin/sh' > /usr/share/nginx/html/versions.sh && \
    echo 'echo "Available versions:"' >> /usr/share/nginx/html/versions.sh && \
    echo 'cat /usr/share/nginx/html/versions.txt' >> /usr/share/nginx/html/versions.sh && \
    chmod +x /usr/share/nginx/html/versions.sh
EXPOSE 80
# Start Nginx server
CMD ["nginx", "-g", "daemon off;"]
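
To inspect what the version loop produced without building the final nginx image, one can stop at the `build` stage. A sketch (the `dbgpt-docs-build` tag is an arbitrary local name):

```bash
# Build only the node "build" stage and print the versions it generated
docker build -f docs/Dockerfile-deploy --target build -t dbgpt-docs-build .
docker run --rm dbgpt-docs-build cat /app-build/docs/build/versions.txt
```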


@@ -17,3 +17,33 @@ yarn start
The default service starts on port `3000`; visit `localhost:3000`.
## Deploy Multi-Version Documentation
We can deploy multiple versions of the documentation with Docker.
### Build Docker Image
First, build the Docker image from the `DB-GPT` project root directory.
```bash
# The default registry is NPM_REGISTRY=https://registry.npmjs.org
# To use the npmmirror.com mirror instead:
NPM_REGISTRY=https://registry.npmmirror.com
docker build -f docs/Dockerfile-deploy \
-t eosphorosai/dbgpt-docs \
--build-arg NPM_REGISTRY=$NPM_REGISTRY \
--build-arg CI=false \
--build-arg NUM_VERSION=2 .
```
### Run Docker Container
Run the Docker container with the following command:
```bash
docker run -it --rm -p 8089:8089 \
--name my-dbgpt-docs \
-v $(pwd)/docs/nginx/nginx-docs.conf:/etc/nginx/nginx.conf \
eosphorosai/dbgpt-docs
```
Open a browser and visit `localhost:8089` to see the documentation.
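
From another terminal, the `versions.sh` helper baked into the image can confirm which versions were shipped:

```bash
# List the documentation versions inside the running container
docker exec my-dbgpt-docs sh /usr/share/nginx/html/versions.sh
```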


@@ -178,4 +178,4 @@ In the above code, we use the `ToolAssistantAgent` to select and call the approp
In the above code, we use the `tool` decorator to define the tool function. It will wrap the function into a
`FunctionTool` object, and `FunctionTool` is a subclass of `BaseTool`, the base class of all tools.
Actually, **tool** is a special **resource** in the `DB-GPT` agent. You will see more details in the [Resource](./resource.md) section.
Actually, **tool** is a special **resource** in the `DB-GPT` agent. You will see more details in the [Resource](../modules/resource/resource.md) section.


@@ -308,7 +308,7 @@ data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "\n\n<references title=\"References\" references=\"[{&quot;name&quot;: &quot;AWEL_URL&quot;, &quot;chunks&quot;: [{&quot;id&quot;: 2526, &quot;content&quot;: &quot;Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model applicationdevelopment. It provides great functionality and flexibility. Through the AWEL API, you can focus on the development of business logic for LLMs applicationswithout paying attention to cumbersome model and environment details.AWEL adopts a layered API design. AWEL's layered API design architecture is shown in the figure below.AWEL Design&quot;, &quot;meta_info&quot;: &quot;{'source': 'https://docs.dbgpt.site/docs/latest/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}&quot;, &quot;recall_score&quot;: 0.6579902643967029}, {&quot;id&quot;: 2531, &quot;content&quot;: &quot;ExamplesThe preliminary version of AWEL has alse been released, and we have provided some built-in usage examples.OperatorsExample of API-RAGYou can find source code from examples/awel/simple_rag_example.py&quot;, &quot;meta_info&quot;: &quot;{'source': 'https://docs.dbgpt.site/docs/latest/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}&quot;, &quot;recall_score&quot;: 0.5997033286385491}, {&quot;id&quot;: 2538, &quot;content&quot;: &quot;Stand-alone environmentRay environmentPreviousWhy use AWEL?NextReleased V0.5.0 | Develop native data applications through workflows and agentsAWEL DesignExamplesOperatorsExample of API-RAGAgentFream ExampleDSL ExampleCurrently supported operatorsExecutable environmentCommunityDiscordDockerhubGithubGithubHuggingFaceMoreHacker NewsTwitterCopyright © 2024 DB-GPT&quot;, &quot;meta_info&quot;: &quot;{'source': 'https://docs.dbgpt.site/docs/latest/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}&quot;, &quot;recall_score&quot;: 0.5980204530753225}]}]\" />"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "\n\n<references title=\"References\" references=\"[{&quot;name&quot;: &quot;AWEL_URL&quot;, &quot;chunks&quot;: [{&quot;id&quot;: 2526, &quot;content&quot;: &quot;Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model applicationdevelopment. It provides great functionality and flexibility. Through the AWEL API, you can focus on the development of business logic for LLMs applicationswithout paying attention to cumbersome model and environment details.AWEL adopts a layered API design. AWEL's layered API design architecture is shown in the figure below.AWEL Design&quot;, &quot;meta_info&quot;: &quot;{'source': 'https://docs.dbgpt.site/docs/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}&quot;, &quot;recall_score&quot;: 0.6579902643967029}, {&quot;id&quot;: 2531, &quot;content&quot;: &quot;ExamplesThe preliminary version of AWEL has alse been released, and we have provided some built-in usage examples.OperatorsExample of API-RAGYou can find source code from examples/awel/simple_rag_example.py&quot;, &quot;meta_info&quot;: &quot;{'source': 'https://docs.dbgpt.site/docs/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}&quot;, &quot;recall_score&quot;: 0.5997033286385491}, {&quot;id&quot;: 2538, &quot;content&quot;: &quot;Stand-alone environmentRay environmentPreviousWhy use AWEL?NextReleased V0.5.0 | Develop native data applications through workflows and agentsAWEL DesignExamplesOperatorsExample of API-RAGAgentFream ExampleDSL ExampleCurrently supported operatorsExecutable environmentCommunityDiscordDockerhubGithubGithubHuggingFaceMoreHacker NewsTwitterCopyright © 2024 DB-GPT&quot;, &quot;meta_info&quot;: &quot;{'source': 'https://docs.dbgpt.site/docs/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}&quot;, &quot;recall_score&quot;: 0.5980204530753225}]}]\" />"}}]}
data: [DONE]
```


@@ -7,7 +7,7 @@ Model API mainly means that DB-GPT adapts to various models and is uniformly pac
## Model API
In the DB-GPT project, we defined a service-oriented multi-model management framework (SMMF). Through the capabilities of SMMF, we can deploy multiple models, and these models are exposed as services. To allow clients to switch between them seamlessly, we uniformly support the OpenAI SDK standard.
- Detailed usage tutorial: [OpenAI SDK calls local multi-model](/docs/installation/advanced_usage/OpenAI_SDK_call.md)
- Detailed usage tutorial: [OpenAI SDK calls local multi-model](../../installation/advanced_usage/OpenAI_SDK_call.md)
**Example:** The following is an example of calling through the OpenAI SDK.


@@ -6,7 +6,7 @@ The following is a systematic introduction to the use of related command line to
## Preparation
Before using the dbgpt command, you first need to complete the installation of the project. For a detailed installation tutorial, please refer to: [Source code installation](/docs/installation/sourcecode.md)
Before using the dbgpt command, you first need to complete the installation of the project. For a detailed installation tutorial, please refer to: [Source code installation](../../installation/sourcecode.md)
## Usage
@@ -143,7 +143,7 @@ Options:
## Model command
Model-related commands are mainly used when deploying multiple models. For model cluster deployment, you can view the [cluster deployment mode](/docs/installation/cluster.md).
Model-related commands are mainly used when deploying multiple models. For model cluster deployment, you can view the [cluster deployment mode](../../installation/model_service/cluster.md).
```python
~ dbgpt model --help
@@ -417,8 +417,10 @@ Options:
</p>
#### worker command
`dbgpt start worker` is mainly used to start a model worker. For detailed usage, see [cluster deployment](/docs/installation/model_service/cluster.md)
`dbgpt start worker` is mainly used to start a model worker. For detailed usage, see [cluster deployment](../../installation/model_service/cluster.md)
## Debugging
The dbgpt project provides a wealth of debug commands. For detailed usage, see [debugging](/docs/application/advanced_tutorial/debugging.md)
The dbgpt project provides a wealth of debug commands. For detailed usage, see [debugging](./debugging.md)


@@ -1,9 +1,9 @@
# SMMF
The DB-GPT project provides service-oriented multi-model management capabilities. Developers who are interested in related capabilities can read the [SMMF](/docs/modules/smmf) module part. Here we focus on how to use multi-LLMs.
The DB-GPT project provides service-oriented multi-model management capabilities. Developers who are interested in related capabilities can read the [SMMF](../../modules/smmf.md) module part. Here we focus on how to use multi-LLMs.
Here we mainly introduce the usage through the web interface. For developers interested in the command line, you can refer to the [cluster deployment](/docs/installation/model_service/cluster) mode. Open the DB-GPT-Web frontend service and click on `Model Management` to enter the multi-model management interface.
Here we mainly introduce the usage through the web interface. For developers interested in the command line, you can refer to the [cluster deployment](../../installation/model_service/cluster.md) mode. Open the DB-GPT-Web frontend service and click on `Model Management` to enter the multi-model management interface.
## List Models
@@ -61,4 +61,4 @@ Observability tools (viewing and analyzing observability logs)
We won't go into detail about the usage of the command-line tool here. You can use the `dbgpt --help` command to obtain specific usage documentation. Additionally, you can check the documentation for individual subcommands. For example, you can use `dbgpt start --help` to view the documentation for starting a service. For more information, please refer to the document provided below.
- [Debugging](/docs/application/advanced_tutorial/debugging)
- [Debugging](../advanced_tutorial/debugging.md)


@@ -25,7 +25,7 @@ python docker/examples/dashboard/test_case_mysql_data.py
### Add data source
The steps to add a data source are the same as [Chat Data](/docs/operation_manual/started_tutorial/chat_data.mdx). Select the corresponding database type in the data source management tab, then create it. Fill in the necessary information to complete the creation.
The steps to add a data source are the same as [Chat Data](./chat_data.md). Select the corresponding database type in the data source management tab, then create it. Fill in the necessary information to complete the creation.
<p align="left">
<img src={'/img/chat_dashboard/add_data.png'} width="720px" />


@@ -98,7 +98,7 @@ with DAG("load_knowledge_dag") as knowledge_dag:
)
knowledge_task >> assembler_task
chunks = asyncio.run(assembler_task.call("https://docs.dbgpt.site/docs/latest/awel/"))
chunks = asyncio.run(assembler_task.call("https://docs.dbgpt.site/docs/awel/"))
print(f"Chunk length: {len(chunks)}")
```
@@ -288,7 +288,7 @@ with DAG("load_knowledge_dag") as knowledge_dag:
)
knowledge_task >> assembler_task
chunks = asyncio.run(assembler_task.call("https://docs.dbgpt.site/docs/latest/awel/"))
chunks = asyncio.run(assembler_task.call("https://docs.dbgpt.site/docs/awel/"))
print(f"Chunk length: {len(chunks)}\n")
prompt = """Based on the known information below, provide users with professional and concise answers to their questions.


@@ -37,7 +37,7 @@ pip install "dbgpt[rag]>=0.5.3rc0" -U
### Prepare Embedding Model
First, you need to prepare the embedding model. You can provide an embedding model
according to [Prepare Embedding Model](docs/latest/awel/cookbook/first_rag_with_awel#prepare-embedding-model).
according to [Prepare Embedding Model](./first_rag_with_awel.md#prepare-embedding-model).
Here we use OpenAI's embedding model.
@@ -135,7 +135,7 @@ print("Retrieved schema:\n", chunks)
### Prepare LLM
We use an LLM to generate SQL queries. Here we use OpenAI's LLM; you can replace it
with other models according to [Prepare LLM](/docs/latest/awel/cookbook/first_rag_with_awel#prepare-llm).
with other models according to [Prepare LLM](./first_rag_with_awel.md#prepare-llm).
```python
from dbgpt.model.proxy import OpenAILLMClient


@@ -3,7 +3,7 @@ The call of multi-model services is compatible with the OpenAI interface, and th
:::info note
⚠️ Before using this project, you must first deploy the model service, which can be done through the [cluster deployment tutorial](/docs/latest/installation/model_service/cluster/).
⚠️ Before using this project, you must first deploy the model service, which can be done through the [cluster deployment tutorial](../model_service/cluster.md).
:::


@@ -4,7 +4,7 @@
## Architecture
Here is the architecture of the high-availability cluster; more details can be found in
the [cluster deployment](/docs/latest/installation/model_service/cluster) mode and [SMMF](/docs/latest/modules/smmf) module.
the [cluster deployment](./cluster.md) mode and [SMMF](../../modules/smmf.md) module.
<p align="center">
<img src={'/img/module/smmf.png'} width="600px" />


@@ -366,7 +366,7 @@ pip install -e ".[llama_cpp]"
```
#### Modify configuration file
Modify the `.env` file to use llama.cpp, and then you can start the service by running the [command](/docs/quickstart.mdx)
Modify the `.env` file to use llama.cpp, and then you can start the service by running the [command](../quickstart.md)
#### More descriptions


@@ -37,8 +37,8 @@ The purpose is to build infrastructure in the field of large models, through the
## Getting Started
- [Quickstart](/docs/quickstart)
- [Installation](/docs/installation)
- [Quickstart](./quickstart.md)
- [Installation](./installation)
## Terminology
@@ -59,32 +59,32 @@ The purpose is to build infrastructure in the field of large models, through the
## Use Cases
- [Use Cases](/docs/use_cases)
- [Use Cases](./use_cases.md)
## Modules
#### [SMMF](/docs/modules/smmf)
#### [SMMF](./modules/smmf.md)
Service-oriented Multi-model Management Framework
#### [Retrieval](/docs/modules/rag)
#### [Retrieval](./modules/rag.md)
Multi-Knowledge Enhanced Retrieval-Augmented Generation Framework
#### [Agents](/docs/modules/agent)
#### [Agents](./modules/agent.md)
Data Driven Multi-Agents
#### [Fine-tuning](/docs/modules/fine_tuning)
#### [Fine-tuning](./modules/fine_tuning.md)
Fine-tuning module for Text2SQL/Text2DSL
## More
- [Connections](/docs/modules/connections)
- [Connections](./modules/connections.md)
Connect various data sources
- [Observability](/docs/operation/advanced_tutorial/debugging)
- [Observability](./application/advanced_tutorial/observability.md)
Observing & monitoring
- [Evaluation](/docs/modules/eval)
- [Evaluation](./modules/eval.md)
Evaluate framework performance and accuracy
## Community


@@ -6,7 +6,7 @@ DB-GPT supports the installation and use of a variety of open source and closed
:::info note
- Detailed installation and deployment tutorials can be found in [Installation](/docs/installation).
- Detailed installation and deployment tutorials can be found in [Installation](./installation).
- This page only introduces deployment based on ChatGPT proxy and local glm model.
:::
@@ -26,7 +26,7 @@ git clone https://github.com/eosphoros-ai/DB-GPT.git
### Miniconda environment installation
- The default database uses SQLite, so there is no need to install a database in the default startup mode. If you need to use other databases, you can read the [advanced tutorials](/docs/application_manual/advanced_tutorial/rag) below. We recommend installing the Python virtual environment through the conda virtual environment. For the installation of Miniconda environment, please refer to the [Miniconda installation tutorial](https://docs.conda.io/projects/miniconda/en/latest/).
- The default database uses SQLite, so there is no need to install a database in the default startup mode. If you need to use other databases, you can read the [advanced tutorials](./application/advanced_tutorial/rag.md) below. We recommend installing the Python virtual environment through the conda virtual environment. For the installation of Miniconda environment, please refer to the [Miniconda installation tutorial](https://docs.conda.io/projects/miniconda/en/latest/).
:::tip
Create a Python virtual environment


@@ -8,6 +8,33 @@ const path = require("path");
const {themes} = require('prism-react-renderer');
const lightCodeTheme = themes.github;
const darkCodeTheme = themes.dracula;
const isDev = process.env.NODE_ENV === "development";
const isBuildFast = !!process.env.BUILD_FAST;
const isVersioningDisabled = !!process.env.DISABLE_VERSIONING;
const versions = require("./versions.json");
console.log("versions", versions);
function isPrerelease(version) {
return (
version.includes('-') ||
version.includes('alpha') ||
version.includes('beta') ||
version.includes('rc')
);
}
function getLastStableVersion() {
const lastStableVersion = versions.find((version) => !isPrerelease(version));
if (!lastStableVersion) {
throw new Error('unexpected, no stable Docusaurus version?');
}
return lastStableVersion;
}
function getNextVersionName() {
return 'dev';
}
/** @type {import('@docusaurus/types').Config} */
const config = {
@@ -26,8 +26,8 @@ const config = {
organizationName: 'eosphoros-ai', // Usually your GitHub org/user name.
projectName: 'DB-GPT', // Usually your repo name.
onBrokenLinks: 'warn',
onBrokenMarkdownLinks: 'warn',
onBrokenLinks: isDev ? 'throw' : 'warn',
onBrokenMarkdownLinks: isDev ? 'throw' : 'warn',
// Even if you don't use internalization, you can use this field to set useful
// metadata like html lang. For example, if your site is Chinese, you may want
@@ -48,7 +48,7 @@ const config = {
mermaid: true,
},
themes: ['@docusaurus/theme-mermaid'],
themes: [
'@docusaurus/theme-mermaid',
'@easyops-cn/docusaurus-search-local',
],
plugins: [
() => ({
@@ -100,6 +130,22 @@ const config = {
({
docs: {
sidebarPath: require.resolve('./sidebars.js'),
includeCurrentVersion: true,
// lastVersion: "current",
lastVersion: isDev || isBuildFast || isVersioningDisabled ? "current" : getLastStableVersion(),
onlyIncludeVersions: (() => {
if (isBuildFast) {
return ['current'];
} else if (!isVersioningDisabled && (isDev)) {
return ['current', ...versions.slice(0, 2)];
}
return undefined;
})(),
versions: {
current: {
label: `${getNextVersionName()}`,
},
},
remarkPlugins: [
[require("@docusaurus/remark-plugin-npm2yarn"), { sync: true }],
],
@@ -110,11 +156,11 @@ const config = {
}){
const sidebarItems = await defaultSidebarItemsGenerator(args);
sidebarItems.forEach((subItem) => {
// This allows breaking long sidebar labels into multiple lines
// This allows breaking long sidebar labels into multiple lines
// by inserting a zero-width space after each slash.
if (
"label" in subItem &&
subItem.label &&
"label" in subItem &&
subItem.label &&
subItem.label.includes("/")
){
subItem.label = subItem.label.replace(/\//g, "/\u200B");
@@ -125,11 +171,11 @@ const config = {
// Please change this to your repo.
// Remove this to remove the "edit this page" links.
},
pages: {
remarkPlugins: [require("@docusaurus/remark-plugin-npm2yarn")],
},
theme: {
customCss: require.resolve('./src/css/custom.css'),
},
@@ -143,6 +189,7 @@ const config = {
defaultClassicDocs: '/docs/get_started',
// Replace with your project's social card
navbar: {
hideOnScroll: true,
logo: {
alt: 'DB-GPT Logo',
src: 'img/dbgpt_logo.svg',
@@ -177,6 +224,13 @@ const config = {
position: 'left',
label: "Community",
className: 'header-community-link',
},
{
type: "docsVersionDropdown",
position: "right",
dropdownItemsAfter: [{to: '/versions', label: 'All versions'}],
dropdownActiveClassDisabled: true,
},
{
href: 'https://github.com/eosphoros-ai/DB-GPT',

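The `BUILD_FAST` and `DISABLE_VERSIONING` flags read at the top of this config are meant to speed up local iteration. Per the `onlyIncludeVersions` logic above, a fast local build might look like this (a sketch):

```bash
# Build only the "current" (dev) docs, skipping all versioned builds
cd docs && BUILD_FAST=true npm run build
```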

@@ -0,0 +1,58 @@
worker_processes 1;
events {
    worker_connections 1024;
}
http {
    include mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    error_log /var/log/nginx/error.log;
    sendfile on;
    keepalive_timeout 65;
    server {
        listen 80;
        server_name docs.dbgpt.site;
        location / {
            root /usr/share/nginx/html;
            index index.html index.htm;
            try_files $uri $uri/ /index.html;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root /usr/share/nginx/html;
        }
        # Redirect all HTTP requests to HTTPS
        return 301 https://$host$request_uri;
    }
    server {
        listen 443 ssl http2;
        server_name docs.dbgpt.site;
        ssl_certificate /etc/nginx/ssl/nginx.crt;
        ssl_certificate_key /etc/nginx/ssl/nginx.key;
        location / {
            root /usr/share/nginx/html;
            index index.html index.htm;
            try_files $uri $uri/ /index.html;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root /usr/share/nginx/html;
        }
    }
}


@@ -0,0 +1,37 @@
worker_processes 1;
events {
    worker_connections 1024;
}
http {
    include mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    error_log /var/log/nginx/error.log;
    sendfile on;
    keepalive_timeout 65;
    server {
        listen 8089;
        server_name _;
        location / {
            root /usr/share/nginx/html;
            index index.html index.htm;
            try_files $uri $uri/ /index.html;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root /usr/share/nginx/html;
        }
    }
}
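
A quick smoke test of the local container (a sketch, assuming the `docker run` command from the README above):

```bash
# The local config listens on 8089 and falls back to index.html
curl -I http://localhost:8089/
```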

docs/package-lock.json (generated, 2331 lines changed)

File diff suppressed because it is too large.


@@ -14,10 +14,11 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "3.1.1",
"@docusaurus/preset-classic": "3.1.1",
"@docusaurus/remark-plugin-npm2yarn": "^3.1.1",
"@docusaurus/theme-mermaid": "^3.1.1",
"@docusaurus/core": "3.4.0",
"@docusaurus/preset-classic": "3.4.0",
"@docusaurus/remark-plugin-npm2yarn": "^3.4.0",
"@docusaurus/theme-mermaid": "^3.4.0",
"@easyops-cn/docusaurus-search-local": "^0.38.1",
"@mdx-js/react": "^3.0.0",
"clsx": "^1.2.1",
"flickity": "^2.2.1",
@@ -25,11 +26,12 @@
"process": "^0.11.10",
"react": "^18.0.0",
"react-dom": "^18.0.0",
"react-flickity-component": "^4.0.6"
"react-flickity-component": "^4.0.6",
"winston": "^3.13.1"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "3.1.1",
"@docusaurus/types": "3.1.1"
"@docusaurus/module-type-aliases": "3.4.0",
"@docusaurus/types": "3.4.0"
},
"browserslist": {
"production": [

docs/versions.json (new file, 2 lines)

@@ -0,0 +1,2 @@
[
]
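
The file starts out empty; each `npm run docusaurus docs:version <tag>` call in `docs/Dockerfile-deploy` prepends an entry. A sketch with a hypothetical tag:

```bash
cd docs
npm run docusaurus docs:version v0.5.9   # hypothetical tag name
cat versions.json                        # now: ["v0.5.9"]
```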

File diff suppressed because it is too large.