fix: client mypy error
docs/docs/api/app.md (new file, 188 lines)
@@ -0,0 +1,188 @@
# App

Get started with the App API

# Chat App

```python
POST /api/v2/chat/completions
```

### Examples

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

### Stream Chat App

<Tabs
  defaultValue="python"
  groupId="chat"
  values={[
    {label: 'Curl', value: 'curl'},
    {label: 'Python', value: 'python'},
  ]
}>

<TabItem value="curl">

```shell
DBGPT_API_KEY="dbgpt"
APP_ID="{YOUR_APP_ID}"

curl -X POST "http://localhost:5000/api/v2/chat/completions" \
-H "Authorization: Bearer $DBGPT_API_KEY" \
-H "accept: application/json" \
-H "Content-Type: application/json" \
-d "{\"messages\":\"Hello\",\"model\":\"chatgpt_proxyllm\", \"chat_mode\": \"chat_app\", \"chat_param\": \"$APP_ID\"}"
```

</TabItem>

<TabItem value="python">

```python
from dbgpt.client.client import Client

DBGPT_API_KEY = "dbgpt"
APP_ID = "{YOUR_APP_ID}"

client = Client(api_key=DBGPT_API_KEY)
response = client.chat_stream(messages="Introduce AWEL", model="chatgpt_proxyllm", chat_mode="chat_app", chat_param=APP_ID)
```

</TabItem>
</Tabs>

### Chat Completion Stream Response

```commandline
data: {"id": "109bfc28-fe87-452c-8e1f-d4fe43283b7d", "created": 1710919480, "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "```agent-plans\n[{\"name\": \"Introduce Awel\", \"num\": 2, \"status\": \"complete\", \"agent\": \"Human\", \"markdown\": \"```agent-messages\\n[{\\\"sender\\\": \\\"Summarizer\\\", \\\"receiver\\\": \\\"Human\\\", \\\"model\\\": \\\"chatgpt_proxyllm\\\", \\\"markdown\\\": \\\"Agentic Workflow Expression Language (AWEL) is a specialized language designed for developing large model applications with intelligent agent workflows. It offers flexibility and functionality, allowing developers to focus on business logic for LLMs applications without getting bogged down in model and environment details. AWEL uses a layered API design architecture, making it easier to work with. You can find examples and source code to get started with AWEL, and it supports various operators and environments. AWEL is a powerful tool for building native data applications through workflows and agents.\"}]\n```"}}]}

data: [DONE]
```

### Get App

```python
GET /api/v2/serve/apps/{app_id}
```

#### Query Parameters
________
<b>app_id</b> <font color="gray"> string </font> <font color="red"> Required </font>

app id
________

#### Response body
Return <a href="#the-app-object">App Object</a>
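
This endpoint has no inline example above, so here is a minimal sketch that calls it directly with the `requests` library; the server address and `APP_ID` are placeholders to replace with your own values.

```python
import requests

DBGPT_API_KEY = "dbgpt"   # your API key
APP_ID = "{YOUR_APP_ID}"  # placeholder: the id of the app to fetch

# GET /api/v2/serve/apps/{app_id} with Bearer authentication
response = requests.get(
    f"http://localhost:5000/api/v2/serve/apps/{APP_ID}",
    headers={"Authorization": f"Bearer {DBGPT_API_KEY}"},
)
print(response.json())  # the returned App object
```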

### List App

```python
GET /api/v2/serve/apps
```

#### Response body
Return <a href="#the-app-object">App Object</a> List

### The App Model
________
<b>id</b> <font color="gray"> string </font>

app id
________
<b>app_code</b> <font color="gray"> string </font>

app code
________
<b>app_name</b> <font color="gray"> string </font>

app name
________
<b>app_describe</b> <font color="gray"> string </font>

app description
________
<b>team_mode</b> <font color="gray"> string </font>

team mode
________
<b>language</b> <font color="gray"> string </font>

language
________
<b>team_context</b> <font color="gray"> string </font>

team context
________
<b>user_code</b> <font color="gray"> string </font>

user code
________
<b>sys_code</b> <font color="gray"> string </font>

sys code
________
<b>is_collected</b> <font color="gray"> string </font>

is collected
________
<b>icon</b> <font color="gray"> string </font>

icon
________
<b>created_at</b> <font color="gray"> string </font>

created at
________
<b>updated_at</b> <font color="gray"> string </font>

updated at
________
<b>details</b> <font color="gray"> string </font>

app details, `List[AppDetailModel]`
________

### The App Detail Model
________
<b>app_code</b> <font color="gray"> string </font>

app code
________
<b>app_name</b> <font color="gray"> string </font>

app name
________
<b>agent_name</b> <font color="gray"> string </font>

agent name
________
<b>node_id</b> <font color="gray"> string </font>

node id
________
<b>resources</b> <font color="gray"> string </font>

resources
________
<b>prompt_template</b> <font color="gray"> string </font>

prompt template
________
<b>llm_strategy</b> <font color="gray"> string </font>

llm strategy
________
<b>llm_strategy_value</b> <font color="gray"> string </font>

llm strategy value
________
<b>created_at</b> <font color="gray"> string </font>

created at
________
<b>updated_at</b> <font color="gray"> string </font>

updated at
________
docs/docs/api/chat.md (new file, 280 lines)
@@ -0,0 +1,280 @@

# Chat

Given a list of messages comprising a conversation, the model will return a response.

# Create Chat Completion

```python
POST /api/v2/chat/completions
```

### Examples

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

### Stream Chat Completion

<Tabs
  defaultValue="python"
  groupId="chat"
  values={[
    {label: 'Curl', value: 'curl'},
    {label: 'Python', value: 'python'},
  ]
}>

<TabItem value="curl">

```shell
DBGPT_API_KEY="dbgpt"

curl -X POST "http://localhost:5000/api/v2/chat/completions" \
-H "Authorization: Bearer $DBGPT_API_KEY" \
-H "accept: application/json" \
-H "Content-Type: application/json" \
-d "{\"messages\":\"Hello\",\"model\":\"chatgpt_proxyllm\", \"stream\": true}"
```

</TabItem>

<TabItem value="python">

```python
from dbgpt.client.client import Client

DBGPT_API_KEY = "dbgpt"
client = Client(api_key=DBGPT_API_KEY)
response = client.chat_stream(messages="Hello", model="chatgpt_proxyllm")
```

</TabItem>
</Tabs>

### Chat Completion Stream Response

```commandline
data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "Hello"}}]}
data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "!"}}]}
data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " How"}}]}
data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " can"}}]}
data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " I"}}]}
data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " assist"}}]}
data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " you"}}]}
data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " today"}}]}
data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "?"}}]}
data: [DONE]
```
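
The stream is returned as server-sent events and ends with `data: [DONE]`. As a minimal sketch of consuming it without the Python client, the snippet below posts to the documented endpoint with `requests` and prints each delta as it arrives (assumption: every event is a single JSON line prefixed with `data: `, as in the output above).

```python
import json
import requests

DBGPT_API_KEY = "dbgpt"

# POST /api/v2/chat/completions with stream=true and read the SSE events line by line
with requests.post(
    "http://localhost:5000/api/v2/chat/completions",
    headers={"Authorization": f"Bearer {DBGPT_API_KEY}"},
    json={"messages": "Hello", "model": "chatgpt_proxyllm", "stream": True},
    stream=True,
) as response:
    for line in response.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue  # skip blank keep-alive lines
        payload = line[len("data: "):]
        if payload == "[DONE]":
            break  # end of stream
        chunk = json.loads(payload)
        print(chunk["choices"][0]["delta"]["content"], end="")
```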

### Chat Completion
<Tabs
  defaultValue="python"
  groupId="chat"
  values={[
    {label: 'Curl', value: 'curl'},
    {label: 'Python', value: 'python'},
  ]
}>

<TabItem value="curl">

```shell
DBGPT_API_KEY="dbgpt"

curl -X POST "http://localhost:5000/api/v2/chat/completions" \
-H "Authorization: Bearer $DBGPT_API_KEY" \
-H "accept: application/json" \
-H "Content-Type: application/json" \
-d "{\"messages\":\"Hello\",\"model\":\"chatgpt_proxyllm\", \"stream\": false}"
```

</TabItem>

<TabItem value="python">

```python
from dbgpt.client.client import Client

DBGPT_API_KEY = "dbgpt"
client = Client(api_key=DBGPT_API_KEY)
response = client.chat(messages="Hello", model="chatgpt_proxyllm")
```

</TabItem>
</Tabs>

### Chat Completion Response
```json
{
  "id": "a8321543-52e9-47a5-a0b6-3d997463f6a3",
  "object": "chat.completion",
  "created": 1710826792,
  "model": "chatgpt_proxyllm",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "Hello! How can I assist you today?"
      },
      "finish_reason": null
    }
  ],
  "usage": {
    "prompt_tokens": 0,
    "total_tokens": 0,
    "completion_tokens": 0
  }
}
```

### Request body
________
<b>messages</b> <font color="gray"> string </font> <font color="red"> Required </font>

A list of messages comprising the conversation so far.
________
<b>model</b> <font color="gray"> string </font> <font color="red"> Required </font>

ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
________
<b>chat_mode</b> <font color="gray"> string </font> <font color="red"> Optional </font>

The DB-GPT chat mode, which can be one of the following: `chat_normal`, `chat_app`, `chat_knowledge`, `chat_flow`; default is `chat_normal`.
________
<b>chat_param</b> <font color="gray"> string </font> <font color="red"> Optional </font>

The chat param value for the chosen chat mode: `{app_id}`, `{space_id}`, or `{flow_id}`; default is `None`.
________
<b>max_new_tokens</b> <font color="gray"> integer </font> <font color="red"> Optional </font>

The maximum number of tokens that can be generated in the chat completion.

The total length of input tokens and generated tokens is limited by the model's context length.
________
<b>stream</b> <font color="gray"> boolean </font> <font color="red"> Optional </font>

If set, partial message deltas will be sent.
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a `data: [DONE]` message.
________
<b>temperature</b> <font color="gray"> number </font> <font color="red"> Optional </font>

What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
________
<b>conv_uid</b> <font color="gray"> string </font> <font color="red"> Optional </font>

The conversation id of the model inference, default is `None`
________
<b>span_id</b> <font color="gray"> string </font> <font color="red"> Optional </font>

The span id of the model inference, default is `None`
________
<b>sys_code</b> <font color="gray"> string </font> <font color="red"> Optional </font>

The system code, default is `None`
________
<b>user_name</b> <font color="gray"> string </font> <font color="red"> Optional </font>

The web server user name, default is `None`
________
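
To illustrate how these fields combine, here is a minimal sketch of a non-streaming request that sets a few of the optional parameters over plain HTTP; the values are placeholders.

```python
import requests

DBGPT_API_KEY = "dbgpt"

# POST /api/v2/chat/completions combining several of the documented fields
body = {
    "messages": "Hello",
    "model": "chatgpt_proxyllm",
    "chat_mode": "chat_normal",  # the default mode
    "temperature": 0.2,          # more focused, deterministic output
    "max_new_tokens": 256,       # cap on generated tokens
    "stream": False,
}
response = requests.post(
    "http://localhost:5000/api/v2/chat/completions",
    headers={"Authorization": f"Bearer {DBGPT_API_KEY}"},
    json=body,
)
print(response.json()["choices"][0]["message"]["content"])
```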

### Chat Stream Response Body
________
<b>id</b> <font color="gray"> string </font>

conv_uid of the conversation.
________
<b>model</b> <font color="gray"> string </font>

The model used for the chat completion.
________
<b>created</b> <font color="gray"> string </font>

The Unix timestamp (in seconds) of when the chat completion was created.
________
<b>choices</b> <font color="gray"> array </font>

A list of chat completion choices. Can be more than one if n is greater than 1.

- <b>index</b> <font color="gray"> integer </font>

  The index of the choice in the list of choices.
- <b>delta</b> <font color="gray"> object </font>

  The chat completion delta.
  - <b>role</b> <font color="gray"> string </font>

    The role of the speaker. Can be `user` or `assistant`.
  - <b>content</b> <font color="gray"> string </font>

    The content of the message.
- <b>finish_reason</b> <font color="gray"> string </font>

  The reason the chat completion finished. Can be `max_tokens` or `stop`.
________

### Chat Response Body
________
<b>id</b> <font color="gray"> string </font>

conv_uid of the conversation.
________
<b>model</b> <font color="gray"> string </font>

The model used for the chat completion.
________
<b>created</b> <font color="gray"> string </font>

The Unix timestamp (in seconds) of when the chat completion was created.
________
<b>object</b> <font color="gray"> string </font>

The object type of the chat completion.
________
<b>choices</b> <font color="gray"> array </font>

A list of chat completion choices. Can be more than one if n is greater than 1.

- <b>index</b> <font color="gray"> integer </font>

  The index of the choice in the list of choices.
- <b>message</b> <font color="gray"> object </font>

  The chat completion message.
  - <b>role</b> <font color="gray"> string </font>

    The role of the speaker. Can be `user` or `assistant`.
  - <b>content</b> <font color="gray"> string </font>

    The content of the message.
- <b>finish_reason</b> <font color="gray"> string </font>

  The reason the chat completion finished. Can be `max_tokens` or `stop`.
________
<b>usage</b> <font color="gray"> object </font>

The usage statistics for the chat completion.

- <b>prompt_tokens</b> <font color="gray"> integer </font>

  The number of tokens in the prompt.
- <b>total_tokens</b> <font color="gray"> integer </font>

  The total number of tokens in the chat completion.
- <b>completion_tokens</b> <font color="gray"> integer </font>

  The number of tokens in the generated completion.
docs/docs/api/flow.md (new file, 306 lines)
@@ -0,0 +1,306 @@

# Flow

Get started with the Flow API

# Chat Flow

```python
POST /api/v2/chat/completions
```

### Examples

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

### Stream Chat Flow

<Tabs
  defaultValue="python"
  groupId="chat"
  values={[
    {label: 'Curl', value: 'curl'},
    {label: 'Python', value: 'python'},
  ]
}>

<TabItem value="curl">

```shell
DBGPT_API_KEY="dbgpt"
FLOW_ID="{YOUR_FLOW_ID}"

curl -X POST "http://localhost:5000/api/v2/chat/completions" \
-H "Authorization: Bearer $DBGPT_API_KEY" \
-H "accept: application/json" \
-H "Content-Type: application/json" \
-d "{\"messages\":\"Hello\",\"model\":\"chatgpt_proxyllm\", \"chat_mode\": \"chat_flow\", \"chat_param\": \"$FLOW_ID\"}"
```

</TabItem>

<TabItem value="python">

```python
from dbgpt.client.client import Client

DBGPT_API_KEY = "dbgpt"
FLOW_ID = "{YOUR_FLOW_ID}"

client = Client(api_key=DBGPT_API_KEY)
response = client.chat_stream(messages="Hello", model="chatgpt_proxyllm", chat_mode="chat_flow", chat_param=FLOW_ID)
```

</TabItem>
</Tabs>

#### Chat Completion Stream Response
```commandline
data: {"id": "579f8862-fc4b-481e-af02-a127e6d036c8", "created": 1710918094, "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "\n\n"}}]}
```

### Create Flow

```python
POST /api/v2/serve/awel/flows
```
#### Request body
Request <a href="#the-flow-object">Flow Object</a>

#### Response body
Return <a href="#the-flow-object">Flow Object</a>
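
There is no inline example for this endpoint above, so the following is a hedged sketch that posts a Flow Object over plain HTTP. The field names come from the Flow Object described at the end of this page; `flow_data` is assumed to be a flow definition exported from the DB-GPT web UI, and its exact schema is not covered here.

```python
import json
import requests

DBGPT_API_KEY = "dbgpt"

# flow_data is assumed to be a flow graph previously exported from the web UI
with open("my_flow.json") as f:
    flow_data = json.load(f)

flow = {
    "name": "my_test_flow",                       # placeholder values
    "label": "My test flow",
    "description": "A flow created through the API",
    "flow_data": flow_data,
}
# POST /api/v2/serve/awel/flows with Bearer authentication
response = requests.post(
    "http://localhost:5000/api/v2/serve/awel/flows",
    headers={"Authorization": f"Bearer {DBGPT_API_KEY}"},
    json=flow,
)
print(response.json())  # the created Flow Object
```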

### Update Flow

```python
PUT /api/v2/serve/awel/flows
```

#### Request body
Request <a href="#the-flow-object">Flow Object</a>

#### Response body
Return <a href="#the-flow-object">Flow Object</a>
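
Updating works the same way as the creation sketch above, with a `PUT` request that carries the `uid` of an existing flow; it is an assumption here that the server matches the record by `uid` and that omitted fields keep their previous values.

```python
import requests

DBGPT_API_KEY = "dbgpt"

# PUT /api/v2/serve/awel/flows with the uid of an existing flow;
# the other fields follow the Flow Object documented below.
updated_flow = {
    "uid": "{YOUR_FLOW_ID}",                 # placeholder
    "name": "my_test_flow",
    "label": "My test flow (renamed)",
    "description": "Updated through the API",
}
response = requests.put(
    "http://localhost:5000/api/v2/serve/awel/flows",
    headers={"Authorization": f"Bearer {DBGPT_API_KEY}"},
    json=updated_flow,
)
print(response.json())  # the updated Flow Object
```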

### Delete Flow

```python
DELETE /api/v2/serve/awel/flows/{flow_id}
```

<Tabs
  defaultValue="curl_update_knowledge"
  groupId="chat1"
  values={[
    {label: 'Curl', value: 'curl_update_knowledge'},
    {label: 'Python', value: 'python_update_knowledge'},
  ]
}>

<TabItem value="curl_update_knowledge">

```shell
DBGPT_API_KEY="dbgpt"
FLOW_ID="{YOUR_FLOW_ID}"

curl -X DELETE "http://localhost:5000/api/v2/serve/awel/flows/$FLOW_ID" \
-H "Authorization: Bearer $DBGPT_API_KEY"
```
</TabItem>

<TabItem value="python_update_knowledge">

```python
from dbgpt.client.client import Client
from dbgpt.client.flow import delete_flow

DBGPT_API_KEY = "dbgpt"
flow_id = "{your_flow_id}"

client = Client(api_key=DBGPT_API_KEY)
res = await delete_flow(client=client, flow_id=flow_id)
```

</TabItem>
</Tabs>

#### Delete Parameters
________
<b>uid</b> <font color="gray"> string </font> <font color="red"> Required </font>

flow id
________

#### Response body
Return <a href="#the-flow-object">Flow Object</a>

### Get Flow

```python
GET /api/v2/serve/awel/flows/{flow_id}
```
<Tabs
  defaultValue="curl_get_knowledge"
  groupId="chat1"
  values={[
    {label: 'Curl', value: 'curl_get_knowledge'},
    {label: 'Python', value: 'python_get_knowledge'},
  ]
}>

<TabItem value="curl_get_knowledge">

```shell
DBGPT_API_KEY="dbgpt"
FLOW_ID="{YOUR_FLOW_ID}"

curl --location --request GET "http://localhost:5000/api/v2/serve/awel/flows/$FLOW_ID" \
--header "Authorization: Bearer $DBGPT_API_KEY"
```
</TabItem>

<TabItem value="python_get_knowledge">

```python
from dbgpt.client.client import Client
from dbgpt.client.flow import get_flow

DBGPT_API_KEY = "dbgpt"
flow_id = "{your_flow_id}"

client = Client(api_key=DBGPT_API_KEY)
res = await get_flow(client=client, flow_id=flow_id)
```

</TabItem>
</Tabs>

#### Query Parameters
________
<b>uid</b> <font color="gray"> string </font> <font color="red"> Required </font>

flow id
________

#### Response body
Return <a href="#the-flow-object">Flow Object</a>

### List Flow

```python
GET /api/v2/serve/awel/flows
```

<Tabs
  defaultValue="curl_list_knowledge"
  groupId="chat1"
  values={[
    {label: 'Curl', value: 'curl_list_knowledge'},
    {label: 'Python', value: 'python_list_knowledge'},
  ]
}>

<TabItem value="curl_list_knowledge">

```shell
DBGPT_API_KEY="dbgpt"

curl -X GET "http://localhost:5000/api/v2/serve/awel/flows" \
-H "Authorization: Bearer $DBGPT_API_KEY"
```
</TabItem>

<TabItem value="python_list_knowledge">

```python
from dbgpt.client.client import Client
from dbgpt.client.flow import list_flow

DBGPT_API_KEY = "dbgpt"

client = Client(api_key=DBGPT_API_KEY)
res = await list_flow(client=client)
```

</TabItem>
</Tabs>

#### Response body
Return <a href="#the-flow-object">Flow Object</a>

### The Flow Object

________
<b>uid</b> <font color="gray">string</font>

The unique id for the flow.
________
<b>name</b> <font color="gray">string</font>

The name of the flow.
________
<b>description</b> <font color="gray">string</font>

The description of the flow.
________
<b>label</b> <font color="gray">string</font>

The label of the flow.
________
<b>flow_category</b> <font color="gray">string</font>

The category of the flow. Default is FlowCategory.COMMON.
________
<b>flow_data</b> <font color="gray">object</font>

The flow data.
________
<b>state</b> <font color="gray">string</font>

The state of the flow. Default is INITIALIZING.
________
<b>error_message</b> <font color="gray">string</font>

The error message of the flow.
________
<b>source</b> <font color="gray">string</font>

The source of the flow. Default is DBGPT-WEB.
________
<b>source_url</b> <font color="gray">string</font>

The source url of the flow.
________
<b>version</b> <font color="gray">string</font>

The version of the flow. Default is 0.1.0.
________
<b>editable</b> <font color="gray">boolean</font>

Whether the flow is editable. Default is True.
________
<b>user_name</b> <font color="gray">string</font>

The user name of the flow.
________
<b>sys_code</b> <font color="gray">string</font>

The system code of the flow.
________
<b>dag_id</b> <font color="gray">string</font>

The dag id of the flow.
________
<b>gmt_created</b> <font color="gray">string</font>

The created time of the flow.
________
<b>gmt_modified</b> <font color="gray">string</font>

The modified time of the flow.
________
docs/docs/api/introduction.md (new file, 37 lines)
@@ -0,0 +1,37 @@

# Introduction

This is the introduction to the DB-GPT API documentation. You can interact with the API through HTTP requests from any language, or via our official Python client bindings.

# Authentication

The DB-GPT API uses API keys for authentication. Visit your API Keys page to retrieve the API key you'll use in your requests.

Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service.

All API requests should include your API key in an Authorization HTTP header as follows:

```http
Authorization: Bearer DBGPT_API_KEY
```

Example with the DB-GPT API curl command:

```bash
curl "http://localhost:5000/api/v2/chat/completions" \
-H "Authorization: Bearer $DBGPT_API_KEY"
```
Example with the DB-GPT Client Python package:

```python
from dbgpt.client.client import Client

DBGPT_API_KEY = "dbgpt"
client = Client(api_key=DBGPT_API_KEY)
```
Set the API key in the `.env` file as follows:
:::info note
API_KEYS - The list of API keys that are allowed to access the API. Multiple keys can be provided, separated by commas.
:::
```shell
API_KEYS=dbgpt
```
docs/docs/api/knowledge.md (new file, 657 lines)
@@ -0,0 +1,657 @@

# Knowledge

Get started with the Knowledge API

# Chat Knowledge Space

```python
POST /api/v2/chat/completions
```

### Examples

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

### Chat Knowledge

<Tabs
  defaultValue="python"
  groupId="chat"
  values={[
    {label: 'Curl', value: 'curl'},
    {label: 'Python', value: 'python'},
  ]
}>

<TabItem value="curl">

```shell
DBGPT_API_KEY="dbgpt"
SPACE_NAME="{YOUR_SPACE_NAME}"

curl --location --request POST 'http://127.0.0.1:5000/api/v2/chat/completions' \
--header "Authorization: Bearer $DBGPT_API_KEY" \
--header "Content-Type: application/json" \
--data-raw "{
    \"model\": \"chatgpt_proxyllm\",
    \"messages\": \"introduce awel\",
    \"chat_mode\": \"chat_knowledge\",
    \"chat_param\": \"$SPACE_NAME\"
}"
```
</TabItem>

<TabItem value="python">

```python
from dbgpt.client.client import Client

DBGPT_API_KEY = "dbgpt"
SPACE_NAME = "{YOUR_SPACE_NAME}"

client = Client(api_key=DBGPT_API_KEY)
response = client.chat_stream(messages="Hello", model="chatgpt_proxyllm", chat_mode="chat_knowledge", chat_param=SPACE_NAME)
```
</TabItem>
</Tabs>

#### Chat Completion Response
```json
{
"id": "acb050ab-eb2c-4754-97e4-6f3b94b7dac2",
"object": "chat.completion",
"created": 1710917272,
"model": "chatgpt_proxyllm",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Agentic Workflow Expression Language (AWEL) is a specialized language designed for developing large model applications with intelligent agent workflows. It offers flexibility and functionality, allowing developers to focus on business logic for LLMs applications without getting bogged down in model and environment details. AWEL uses a layered API design architecture, making it easier to work with. You can find examples and source code to get started with AWEL, and it supports various operators and environments. AWEL is a powerful tool for building native data applications through workflows and agents."
},
"finish_reason": null
}
],
"usage": {
"prompt_tokens": 0,
"total_tokens": 0,
"completion_tokens": 0
}
}
```

#### Chat Completion Stream Response
```commandline
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "AW"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "EL"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " which"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " stands"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " for"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Ag"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "entic"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Workflow"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Expression"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Language"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " is"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " a"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " powerful"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " tool"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " designed"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " for"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " developing"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " large"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " model"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " applications"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " It"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " simpl"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "ifies"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " the"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " process"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " by"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " allowing"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " developers"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " to"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " focus"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " on"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " business"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " logic"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " without"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " getting"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " bog"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "ged"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " down"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " in"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " complex"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " model"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " environment"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " details"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " AW"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "EL"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " offers"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " great"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " functionality"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " flexibility"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " through"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " its"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " layered"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " API"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " design"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " architecture"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " It"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " provides"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " a"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " set"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " of"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " intelligent"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " agent"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " workflow"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " expression"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " language"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " that"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " enhances"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " efficiency"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " in"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " application"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " development"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " If"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " you"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " want"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " to"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " learn"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " more"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " about"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " AW"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "EL"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " you"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " can"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " check"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " out"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " the"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " built"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "-in"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " examples"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " resources"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " available"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " on"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " platforms"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " like"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Github"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Docker"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "hub"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " more"}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "\n\n<references title=\"References\" references=\"[{"name": "AWEL_URL", "chunks": [{"id": 2526, "content": "Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model applicationdevelopment. It provides great functionality and flexibility. Through the AWEL API, you can focus on the development of business logic for LLMs applicationswithout paying attention to cumbersome model and environment details.AWEL adopts a layered API design. AWEL's layered API design architecture is shown in the figure below.AWEL Design", "meta_info": "{'source': 'https://docs.dbgpt.site/docs/latest/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}", "recall_score": 0.6579902643967029}, {"id": 2531, "content": "ExamplesThe preliminary version of AWEL has alse been released, and we have provided some built-in usage examples.OperatorsExample of API-RAGYou can find source code from examples/awel/simple_rag_example.py", "meta_info": "{'source': 'https://docs.dbgpt.site/docs/latest/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}", "recall_score": 0.5997033286385491}, {"id": 2538, "content": "Stand-alone environmentRay environmentPreviousWhy use AWEL?NextReleased V0.5.0 | Develop native data applications through workflows and agentsAWEL DesignExamplesOperatorsExample of API-RAGAgentFream ExampleDSL ExampleCurrently supported operatorsExecutable environmentCommunityDiscordDockerhubGithubGithubHuggingFaceMoreHacker NewsTwitterCopyright © 2024 DB-GPT", "meta_info": "{'source': 'https://docs.dbgpt.site/docs/latest/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}", "recall_score": 0.5980204530753225}]}]\" />"}}]}
data: [DONE]
```

### Create Knowledge Space

```python
POST /api/v2/serve/knowledge/spaces
```

<Tabs
  defaultValue="curl_knowledge"
  groupId="chat1"
  values={[
    {label: 'Curl', value: 'curl_knowledge'},
    {label: 'Python', value: 'python_knowledge'},
  ]
}>

<TabItem value="curl_knowledge">

```shell
DBGPT_API_KEY="dbgpt"

curl --location --request POST 'http://localhost:5000/api/v2/serve/knowledge/spaces' \
--header "Authorization: Bearer $DBGPT_API_KEY" \
--header "Content-Type: application/json" \
--data-raw '{"desc": "for client space desc", "name": "test_space_2", "owner": "dbgpt", "vector_type": "Chroma"
}'
```
</TabItem>

<TabItem value="python_knowledge">

```python
from dbgpt.client.client import Client
from dbgpt.client.knowledge import create_space
from dbgpt.client.schemas import SpaceModel

DBGPT_API_KEY = "dbgpt"

client = Client(api_key=DBGPT_API_KEY)
res = await create_space(client, SpaceModel(
    name="test_space",
    vector_type="Chroma",
    desc="for client space",
    owner="dbgpt"))
```

</TabItem>
</Tabs>

#### Request body

________
<b>name</b> <font color="gray"> string </font> <font color="red"> Required </font>

knowledge space name
________
<b>vector_type</b> <font color="gray"> string </font> <font color="red"> Required </font>

vector db type, `Chroma`, `Milvus`, default is `Chroma`
________
<b>desc</b> <font color="gray"> string </font> <font color="red"> Optional </font>

description of the knowledge space
________
<b>owner</b> <font color="gray"> string </font> <font color="red"> Optional </font>

The owner of the knowledge space
________
<b>context</b> <font color="gray"> integer </font> <font color="red"> Optional </font>

The context of the knowledge space argument
________

#### Response body
Return <a href="#the-space-object">Space Object</a>

### Update Knowledge Space

```python
PUT /api/v2/serve/knowledge/spaces
```

<Tabs
  defaultValue="curl_update_knowledge"
  groupId="chat1"
  values={[
    {label: 'Curl', value: 'curl_update_knowledge'},
    {label: 'Python', value: 'python_update_knowledge'},
  ]
}>

<TabItem value="curl_update_knowledge">

```shell
DBGPT_API_KEY="dbgpt"

curl --location --request PUT 'http://localhost:5000/api/v2/serve/knowledge/spaces' \
--header "Authorization: Bearer $DBGPT_API_KEY" \
--header "Content-Type: application/json" \
--data-raw '{"desc": "for client space desc v2", "id": "49", "name": "test_space_2", "owner": "dbgpt", "vector_type": "Chroma"
}'
```
</TabItem>

<TabItem value="python_update_knowledge">

```python
from dbgpt.client.client import Client
from dbgpt.client.knowledge import update_space
from dbgpt.client.schemas import SpaceModel

DBGPT_API_KEY = "dbgpt"

client = Client(api_key=DBGPT_API_KEY)
res = await update_space(client, SpaceModel(
    name="test_space",
    vector_type="Chroma",
    desc="for client space update",
    owner="dbgpt"))
```

</TabItem>
</Tabs>

#### Request body

________
<b>id</b> <font color="gray"> string </font> <font color="red"> Required </font>

knowledge space id
________
<b>name</b> <font color="gray"> string </font> <font color="red"> Required </font>

knowledge space name
________
<b>vector_type</b> <font color="gray"> string </font> <font color="red"> Optional </font>

vector db type, `Chroma`, `Milvus`, default is `Chroma`
________
<b>desc</b> <font color="gray"> string </font> <font color="red"> Optional </font>

description of the knowledge space
________
<b>owner</b> <font color="gray"> string </font> <font color="red"> Optional </font>

The owner of the knowledge space
________
<b>context</b> <font color="gray"> integer </font> <font color="red"> Optional </font>

The context of the knowledge space argument
________

#### Response body
Return <a href="#the-space-object">Space Object</a>

### Delete Knowledge Space

```python
DELETE /api/v2/serve/knowledge/spaces/{space_id}
```

<Tabs
  defaultValue="curl_update_knowledge"
  groupId="chat1"
  values={[
    {label: 'Curl', value: 'curl_update_knowledge'},
    {label: 'Python', value: 'python_update_knowledge'},
  ]
}>

<TabItem value="curl_update_knowledge">

```shell
DBGPT_API_KEY="dbgpt"
SPACE_ID="{YOUR_SPACE_ID}"

curl -X DELETE "http://localhost:5000/api/v2/serve/knowledge/spaces/$SPACE_ID" \
-H "Authorization: Bearer $DBGPT_API_KEY" \
-H "accept: application/json" \
-H "Content-Type: application/json"
```
</TabItem>

<TabItem value="python_update_knowledge">

```python
from dbgpt.client.client import Client
from dbgpt.client.knowledge import delete_space

DBGPT_API_KEY = "dbgpt"
space_id = "{your_space_id}"

client = Client(api_key=DBGPT_API_KEY)
res = await delete_space(client=client, space_id=space_id)
```

</TabItem>
</Tabs>

#### Delete Parameters
________
<b>id</b> <font color="gray"> string </font> <font color="red"> Required </font>

knowledge space id
________

#### Response body
Return <a href="#the-space-object">Space Object</a>

### Get Knowledge Space

```python
GET /api/v2/serve/knowledge/spaces/{space_id}
```

<Tabs
  defaultValue="curl_get_knowledge"
  groupId="chat1"
  values={[
    {label: 'Curl', value: 'curl_get_knowledge'},
    {label: 'Python', value: 'python_get_knowledge'},
  ]
}>

<TabItem value="curl_get_knowledge">

```shell
DBGPT_API_KEY="dbgpt"
SPACE_ID="{YOUR_SPACE_ID}"

curl --location --request GET "http://localhost:5000/api/v2/serve/knowledge/spaces/$SPACE_ID" \
--header "Authorization: Bearer $DBGPT_API_KEY"
```
</TabItem>

<TabItem value="python_get_knowledge">

```python
from dbgpt.client.client import Client
from dbgpt.client.knowledge import get_space

DBGPT_API_KEY = "dbgpt"
space_id = "{your_space_id}"

client = Client(api_key=DBGPT_API_KEY)
res = await get_space(client=client, space_id=space_id)
```

</TabItem>
</Tabs>

#### Query Parameters
________
<b>id</b> <font color="gray"> string </font> <font color="red"> Required </font>

knowledge space id
________

#### Response body
Return <a href="#the-space-object">Space Object</a>

### List Knowledge Space

```python
GET /api/v2/serve/knowledge/spaces
```

<Tabs
  defaultValue="curl_list_knowledge"
  groupId="chat1"
  values={[
    {label: 'Curl', value: 'curl_list_knowledge'},
    {label: 'Python', value: 'python_list_knowledge'},
  ]
}>

<TabItem value="curl_list_knowledge">

```shell
DBGPT_API_KEY="dbgpt"

curl --location --request GET 'http://localhost:5000/api/v2/serve/knowledge/spaces' \
--header "Authorization: Bearer $DBGPT_API_KEY"
```
</TabItem>

<TabItem value="python_list_knowledge">

```python
from dbgpt.client.client import Client
from dbgpt.client.knowledge import list_space

DBGPT_API_KEY = "dbgpt"

client = Client(api_key=DBGPT_API_KEY)
res = await list_space(client=client)
```

</TabItem>
</Tabs>

#### Response body
Return <a href="#the-space-object">Space Object</a> List

### The Space Object

________
<b>id</b> <font color="gray"> string </font>

space id
________
<b>name</b> <font color="gray"> string </font>

knowledge space name
________
<b>vector_type</b> <font color="gray"> string </font>

vector db type, `Chroma`, `Milvus`, default is `Chroma`
________
<b>desc</b> <font color="gray"> string </font> <font color="red"> Optional </font>

description of the knowledge space
________
<b>owner</b> <font color="gray"> string </font> <font color="red"> Optional </font>

The owner of the knowledge space
________
<b>context</b> <font color="gray"> integer </font> <font color="red"> Optional </font>

The context of the knowledge space argument
________
@@ -356,6 +356,39 @@ const sidebars = {
    },
  },

  {
    type: "category",
    label: "API Reference",
    collapsed: false,
    collapsible: false,
    items: [
      {
        type: 'doc',
        id: 'api/introduction'
      },
      {
        type: 'doc',
        id: 'api/chat'
      },
      {
        type: 'doc',
        id: 'api/app'
      },
      {
        type: 'doc',
        id: 'api/flow'
      },
      {
        type: 'doc',
        id: 'api/knowledge'
      },
    ],
    link: {
      type: 'generated-index',
      slug: "api",
    },
  },

  {
    type: "category",
    label: "Modules",