mirror of https://github.com/csunny/DB-GPT.git (synced 2025-07-31 07:34:07 +00:00)
refactor: adapt rag storage and add integration documents. (#2361)
This commit is contained in: parent 94b51284e0, commit 22598ca79f
configs/dbgpt-graphrag.toml (new file, 57 lines)
@@ -0,0 +1,57 @@
[system]
# Load language from environment variable (It is set by the hook)
language = "${env:DBGPT_LANG:-zh}"
log_level = "INFO"
api_keys = []
encrypt_key = "your_secret_key"

# Server Configurations
[service.web]
host = "127.0.0.1"
port = 5670

[service.web.database]
type = "sqlite"
path = "pilot/meta_data/dbgpt.db"

[rag]
chunk_size=1000
chunk_overlap=0
similarity_top_k=5
similarity_score_threshold=0.0
max_chunks_once_load=10
max_threads=1
rerank_top_k=3

[rag.storage]
[rag.storage.vector]
type = "Chroma"
persist_path = "pilot/data"

[rag.storage.graph]
type = "TuGraph"
host="127.0.0.1"
port=7687
username="admin"
password="73@TuGraph"
#enable_summary="True"
#enable_similarity_search="True"

# Model Configurations
[models]
[[models.llms]]
name = "${env:LLM_MODEL_NAME:-gpt-4o}"
provider = "${env:LLM_MODEL_PROVIDER:-proxy/openai}"
api_base = "${env:OPENAI_API_BASE:-https://api.openai.com/v1}"
api_key = "${env:OPENAI_API_KEY}"

[[models.embeddings]]
name = "${env:EMBEDDING_MODEL_NAME:-text-embedding-3-small}"
provider = "${env:EMBEDDING_MODEL_PROVIDER:-proxy/openai}"
api_url = "${env:EMBEDDING_MODEL_API_URL:-https://api.openai.com/v1/embeddings}"
api_key = "${env:OPENAI_API_KEY}"
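The `${env:NAME:-default}` placeholders above are resolved from the process environment when the configuration is loaded. A minimal sketch of how that interpolation behaves (illustrative only, not DB-GPT's actual loader):

```python
import os
import re

# Hypothetical resolver for "${env:NAME:-default}" placeholders, matching the
# syntax used in the config above; DB-GPT's own loader may differ in details.
_PATTERN = re.compile(r"\$\{env:([A-Za-z_][A-Za-z0-9_]*)(?::-([^}]*))?\}")

def resolve(value: str) -> str:
    # Substitute each placeholder with the environment variable's value,
    # falling back to the default after ":-" (empty string if none) when unset.
    return _PATTERN.sub(lambda m: os.environ.get(m.group(1), m.group(2) or ""), value)

print(resolve("${env:DBGPT_LANG:-zh}"))          # "zh" unless DBGPT_LANG is set
print(resolve("${env:LLM_MODEL_NAME:-gpt-4o}"))  # "gpt-4o" unless LLM_MODEL_NAME is set
```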
@@ -34,7 +34,7 @@ import TabItem from '@theme/TabItem';
 -H "Authorization: Bearer $DBGPT_API_KEY" \
 -H "accept: application/json" \
 -H "Content-Type: application/json" \
- -d "{\"messages\":\"Hello\",\"model\":\"chatgpt_proxyllm\", \"chat_mode\": \"chat_app\", \"chat_param\": \"$APP_ID\"}"
+ -d "{\"messages\":\"Hello\",\"model\":\"gpt-4o\", \"chat_mode\": \"chat_app\", \"chat_param\": \"$APP_ID\"}"

 ```
 </TabItem>
@@ -51,7 +51,7 @@ client = Client(api_key=DBGPT_API_KEY)

 async for data in client.chat_stream(
     messages="Introduce AWEL",
-    model="chatgpt_proxyllm",
+    model="gpt-4o",
     chat_mode="chat_app",
     chat_param=APP_ID
 ):
@@ -63,7 +63,7 @@ async for data in client.chat_stream(

 ### Chat Completion Stream Response
 ```commandline
-data: {"id": "109bfc28-fe87-452c-8e1f-d4fe43283b7d", "created": 1710919480, "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "```agent-plans\n[{\"name\": \"Introduce Awel\", \"num\": 2, \"status\": \"complete\", \"agent\": \"Human\", \"markdown\": \"```agent-messages\\n[{\\\"sender\\\": \\\"Summarizer\\\", \\\"receiver\\\": \\\"Human\\\", \\\"model\\\": \\\"chatgpt_proxyllm\\\", \\\"markdown\\\": \\\"Agentic Workflow Expression Language (AWEL) is a specialized language designed for developing large model applications with intelligent agent workflows. It offers flexibility and functionality, allowing developers to focus on business logic for LLMs applications without getting bogged down in model and environment details. AWEL uses a layered API design architecture, making it easier to work with. You can find examples and source code to get started with AWEL, and it supports various operators and environments. AWEL is a powerful tool for building native data applications through workflows and agents.\"}]\n```"}}]}
+data: {"id": "109bfc28-fe87-452c-8e1f-d4fe43283b7d", "created": 1710919480, "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "```agent-plans\n[{\"name\": \"Introduce Awel\", \"num\": 2, \"status\": \"complete\", \"agent\": \"Human\", \"markdown\": \"```agent-messages\\n[{\\\"sender\\\": \\\"Summarizer\\\", \\\"receiver\\\": \\\"Human\\\", \\\"model\\\": \\\"gpt-4o\\\", \\\"markdown\\\": \\\"Agentic Workflow Expression Language (AWEL) is a specialized language designed for developing large model applications with intelligent agent workflows. It offers flexibility and functionality, allowing developers to focus on business logic for LLMs applications without getting bogged down in model and environment details. AWEL uses a layered API design architecture, making it easier to work with. You can find examples and source code to get started with AWEL, and it supports various operators and environments. AWEL is a powerful tool for building native data applications through workflows and agents.\"}]\n```"}}]}

 data: [DONE]
 ```
@@ -34,7 +34,7 @@ import TabItem from '@theme/TabItem';
 -H "Authorization: Bearer $DBGPT_API_KEY" \
 -H "accept: application/json" \
 -H "Content-Type: application/json" \
- -d "{\"messages\":\"Hello\",\"model\":\"chatgpt_proxyllm\", \"stream\": true}"
+ -d "{\"messages\":\"Hello\",\"model\":\"gpt-4o\", \"stream\": true}"

 ```
 </TabItem>
@@ -48,7 +48,7 @@ DBGPT_API_KEY = "dbgpt"
 client = Client(api_key=DBGPT_API_KEY)

 async for data in client.chat_stream(
-    model="chatgpt_proxyllm",
+    model="gpt-4o",
     messages="hello",
 ):
     print(data)
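For reference, the fragments above assemble into a complete script along these lines (a sketch, assuming the `dbgpt_client` package is installed; all names are taken from the snippets in this diff):

```python
import asyncio

from dbgpt_client import Client

async def main():
    # API key and model name as used in the examples above.
    client = Client(api_key="dbgpt")
    async for data in client.chat_stream(
        model="gpt-4o",
        messages="hello",
    ):
        print(data)

asyncio.run(main())
```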
@@ -58,23 +58,23 @@ async for data in client.chat_stream(

 ### Chat Completion Stream Response
 ```commandline
-data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "Hello"}}]}
+data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "Hello"}}]}

-data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "!"}}]}
+data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "!"}}]}

-data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " How"}}]}
+data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " How"}}]}

-data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " can"}}]}
+data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " can"}}]}

-data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " I"}}]}
+data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " I"}}]}

-data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " assist"}}]}
+data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " assist"}}]}

-data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " you"}}]}
+data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " you"}}]}

-data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " today"}}]}
+data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " today"}}]}

-data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "?"}}]}
+data: {"id": "chatcmpl-ba6fb52e-e5b2-11ee-b031-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "?"}}]}

 data: [DONE]
 ```
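Each `data:` line in the stream carries one JSON chunk, and the stream ends with `data: [DONE]`. A minimal sketch of reassembling the reply text from such lines (the `dbgpt_client` library already does this inside `chat_stream`; this is illustrative only):

```python
import json

def collect_stream_text(lines):
    # Assemble the assistant's reply from server-sent-event lines like the
    # ones shown above ("data: {...}" chunks terminated by "data: [DONE]").
    parts = []
    for line in lines:
        if not line.startswith("data:"):
            continue
        payload = line[len("data:"):].strip()
        if payload == "[DONE]":
            break
        chunk = json.loads(payload)
        parts.append(chunk["choices"][0]["delta"].get("content", ""))
    return "".join(parts)

# For the stream above this would return "Hello! How can I assist you today?"
```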
@@ -98,7 +98,7 @@ data: [DONE]
 -H "Authorization: Bearer $DBGPT_API_KEY" \
 -H "accept: application/json" \
 -H "Content-Type: application/json" \
- -d "{\"messages\":\"Hello\",\"model\":\"chatgpt_proxyllm\", \"stream\": false}"
+ -d "{\"messages\":\"Hello\",\"model\":\"gpt-4o\", \"stream\": false}"
 ```
 </TabItem>
@@ -109,7 +109,7 @@ from dbgpt_client import Client

 DBGPT_API_KEY = "dbgpt"
 client = Client(api_key=DBGPT_API_KEY)
-response = await client.chat(model="chatgpt_proxyllm" ,messages="hello")
+response = await client.chat(model="gpt-4o" ,messages="hello")
 ```
 </TabItem>
 </Tabs>
@@ -120,7 +120,7 @@ response = await client.chat(model="chatgpt_proxyllm" ,messages="hello")
 "id": "a8321543-52e9-47a5-a0b6-3d997463f6a3",
 "object": "chat.completion",
 "created": 1710826792,
-"model": "chatgpt_proxyllm",
+"model": "gpt-4o",
 "choices": [
   {
     "index": 0,
@@ -34,7 +34,7 @@ curl -X POST "http://localhost:5670/api/v2/chat/completions" \
 -H "Authorization: Bearer $DBGPT_API_KEY" \
 -H "accept: application/json" \
 -H "Content-Type: application/json" \
- -d "{\"messages\":\"show space datas limit 5\",\"model\":\"chatgpt_proxyllm\", \"chat_mode\": \"chat_data\", \"chat_param\": \"$DB_NAME\"}"
+ -d "{\"messages\":\"show space datas limit 5\",\"model\":\"gpt-4o\", \"chat_mode\": \"chat_data\", \"chat_param\": \"$DB_NAME\"}"

 ```
 </TabItem>
@@ -50,7 +50,7 @@ DB_NAME="{your_db_name}"
 client = Client(api_key=DBGPT_API_KEY)
 res = client.chat(
     messages="show space datas limit 5",
-    model="chatgpt_proxyllm",
+    model="gpt-4o",
     chat_mode="chat_data",
     chat_param=DB_NAME
 )
@@ -64,7 +64,7 @@ res = client.chat(
 "id": "2bb80fdd-e47e-4083-8bc9-7ca66ee0931b",
 "object": "chat.completion",
 "created": 1711509733,
-"model": "chatgpt_proxyllm",
+"model": "gpt-4o",
 "choices": [
   {
     "index": 0,
@@ -34,7 +34,7 @@ curl -X POST "http://localhost:5670/api/v2/chat/completions" \
 -H "Authorization: Bearer $DBGPT_API_KEY" \
 -H "accept: application/json" \
 -H "Content-Type: application/json" \
- -d "{\"messages\":\"Hello\",\"model\":\"chatgpt_proxyllm\", \"chat_mode\": \"chat_knowledge\", \"chat_param\": \"$SPACE_NAME\"}"
+ -d "{\"messages\":\"Hello\",\"model\":\"gpt-4o\", \"chat_mode\": \"chat_knowledge\", \"chat_param\": \"$SPACE_NAME\"}"
 ```
 </TabItem>
@@ -50,7 +50,7 @@ client = Client(api_key=DBGPT_API_KEY)

 async for data in client.chat_stream(
     messages="Introduce AWEL",
-    model="chatgpt_proxyllm",
+    model="gpt-4o",
     chat_mode="chat_knowledge",
     chat_param=SPACE_NAME
 ):
@@ -65,7 +65,7 @@ async for data in client.chat_stream(
 "id": "acb050ab-eb2c-4754-97e4-6f3b94b7dac2",
 "object": "chat.completion",
 "created": 1710917272,
-"model": "chatgpt_proxyllm",
+"model": "gpt-4o",
 "choices": [
   {
     "index": 0,
@@ -86,229 +86,229 @@ async for data in client.chat_stream(

 #### Chat Completion Stream Response
 ```commandline
-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "AW"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "AW"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "EL"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "EL"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " which"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " which"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " stands"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " stands"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " for"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " for"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Ag"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Ag"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "entic"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "entic"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Workflow"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Workflow"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Expression"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Expression"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Language"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Language"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " is"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " is"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " a"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " a"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " powerful"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " powerful"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " tool"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " tool"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " designed"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " designed"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " for"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " for"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " developing"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " developing"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " large"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " large"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " model"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " model"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " applications"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " applications"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " It"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " It"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " simpl"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " simpl"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "ifies"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "ifies"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " the"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " the"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " process"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " process"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " by"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " by"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " allowing"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " allowing"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " developers"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " developers"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " to"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " to"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " focus"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " focus"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " on"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " on"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " business"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " business"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " logic"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " logic"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " without"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " without"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " getting"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " getting"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " bog"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " bog"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "ged"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "ged"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " down"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " down"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " in"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " in"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " complex"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " complex"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " model"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " model"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " environment"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " environment"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " details"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " details"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " AW"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " AW"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "EL"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "EL"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " offers"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " offers"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " great"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " great"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " functionality"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " functionality"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " flexibility"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " flexibility"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " through"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " through"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " its"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " its"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " layered"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " layered"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " API"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " API"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " design"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " design"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " architecture"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " architecture"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " It"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " It"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " provides"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " provides"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " a"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " a"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " set"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " set"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " of"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " of"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " intelligent"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " intelligent"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " agent"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " agent"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " workflow"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " workflow"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " expression"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " expression"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " language"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " language"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " that"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " that"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " enhances"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " enhances"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " efficiency"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " efficiency"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " in"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " in"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " application"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " application"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " development"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " development"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " If"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " If"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " you"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " you"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " want"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " want"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " to"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " to"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " learn"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " learn"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " more"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " more"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " about"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " about"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " AW"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " AW"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "EL"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "EL"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " you"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " you"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " can"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " can"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " check"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " check"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " out"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " out"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " the"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " the"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " built"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " built"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "-in"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "-in"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " examples"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " examples"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " resources"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " resources"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " available"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " available"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " on"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " on"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " platforms"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " platforms"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " like"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " like"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Github"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Github"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Docker"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " Docker"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "hub"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "hub"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": ","}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " and"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " more"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": " more"}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "."}}]}

-data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "chatgpt_proxyllm", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "\n\n<references title=\"References\" references=\"[{"name": "AWEL_URL", "chunks": [{"id": 2526, "content": "Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model applicationdevelopment. It provides great functionality and flexibility. Through the AWEL API, you can focus on the development of business logic for LLMs applicationswithout paying attention to cumbersome model and environment details.AWEL adopts a layered API design. AWEL's layered API design architecture is shown in the figure below.AWEL Design", "meta_info": "{'source': 'https://docs.dbgpt.site/docs/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}", "recall_score": 0.6579902643967029}, {"id": 2531, "content": "ExamplesThe preliminary version of AWEL has alse been released, and we have provided some built-in usage examples.OperatorsExample of API-RAGYou can find source code from examples/awel/simple_rag_example.py", "meta_info": "{'source': 'https://docs.dbgpt.site/docs/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}", "recall_score": 0.5997033286385491}, {"id": 2538, "content": "Stand-alone environmentRay environmentPreviousWhy use AWEL?NextReleased V0.5.0 | Develop native data applications through workflows and agentsAWEL DesignExamplesOperatorsExample of API-RAGAgentFream ExampleDSL ExampleCurrently supported operatorsExecutable environmentCommunityDiscordDockerhubGithubGithubHuggingFaceMoreHacker NewsTwitterCopyright © 2024 DB-GPT", "meta_info": "{'source': 'https://docs.dbgpt.site/docs/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}", "recall_score": 0.5980204530753225}]}]\" />"}}]}
+data: {"id": "chatcmpl-86f60a0c-e686-11ee-9322-acde48001122", "model": "gpt-4o", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "\n\n<references title=\"References\" references=\"[{"name": "AWEL_URL", "chunks": [{"id": 2526, "content": "Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model applicationdevelopment. It provides great functionality and flexibility. Through the AWEL API, you can focus on the development of business logic for LLMs applicationswithout paying attention to cumbersome model and environment details.AWEL adopts a layered API design. AWEL's layered API design architecture is shown in the figure below.AWEL Design", "meta_info": "{'source': 'https://docs.dbgpt.site/docs/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}", "recall_score": 0.6579902643967029}, {"id": 2531, "content": "ExamplesThe preliminary version of AWEL has alse been released, and we have provided some built-in usage examples.OperatorsExample of API-RAGYou can find source code from examples/awel/simple_rag_example.py", "meta_info": "{'source': 'https://docs.dbgpt.site/docs/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}", "recall_score": 0.5997033286385491}, {"id": 2538, "content": "Stand-alone environmentRay environmentPreviousWhy use AWEL?NextReleased V0.5.0 | Develop native data applications through workflows and agentsAWEL DesignExamplesOperatorsExample of API-RAGAgentFream ExampleDSL ExampleCurrently supported operatorsExecutable environmentCommunityDiscordDockerhubGithubGithubHuggingFaceMoreHacker NewsTwitterCopyright © 2024 DB-GPT", "meta_info": "{'source': 'https://docs.dbgpt.site/docs/awel/', 'title': 'AWEL(Agentic Workflow Expression Language) | DB-GPT', 'description': 'Agentic Workflow Expression Language(AWEL) is a set of intelligent agent workflow expression language specially designed for large model application', 'language': 'en-US'}", "recall_score": 0.5980204530753225}]}]\" />"}}]}

 data: [DONE]
 ```
@@ -10,7 +10,12 @@ You can refer to the python example file `DB-GPT/examples/rag/graph_rag_example.
 First, you need to install the `dbgpt` library.

 ```bash
-pip install "dbgpt[graph_rag]>=0.6.1"
+uv sync --all-packages --frozen \
+  --extra "proxy_openai" \
+  --extra "rag" \
+  --extra "storage_chromadb" \
+  --extra "dbgpts" \
+  --extra "graph_rag"
 ```

 ### Prepare Graph Database
@@ -52,11 +57,7 @@ import TabItem from '@theme/TabItem';
 ]}>
 <TabItem value="openai">

-First, you should install the `openai` library.
-
-```bash
-pip install openai
-```
 Then set your API key in the environment `OPENAI_API_KEY`.

 ```python
@ -70,19 +71,9 @@ llm_client = OpenAILLMClient()
|
||||
|
||||
You should have a YI account and get the API key from the YI official website.
|
||||
|
||||
First, you should install the `openai` library.
|
||||
|
||||
```bash
|
||||
pip install openai
|
||||
```
|
||||
|
||||
Then set your API key in the environment variable `YI_API_KEY`.
|
||||
|
||||
```python
|
||||
from dbgpt.model.proxy import YiLLMClient
|
||||
|
||||
llm_client = YiLLMClient()
|
||||
```
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="model_service">
108
docs/docs/installation/graph_rag_install.md
Normal file
@ -0,0 +1,108 @@

# Graph RAG Installation

In this example, we will show how to use the Graph RAG framework in DB-GPT. Using a graph database to implement RAG can, to some extent, alleviate the uncertainty and interpretability issues brought about by vector database retrieval.

You can refer to the Python example file `DB-GPT/examples/rag/graph_rag_example.py` in the source code. This example demonstrates how to load knowledge from a document and store it in a graph store. Subsequently, it recalls knowledge relevant to your question by searching for triplets in the graph store.

### Install Dependencies

First, you need to install the `dbgpt` library with the `graph_rag` extra.

```bash
uv sync --all-packages --frozen \
--extra "proxy_openai" \
--extra "rag" \
--extra "storage_chromadb" \
--extra "dbgpts" \
--extra "graph_rag"
```
### Prepare Graph Database

To store the knowledge in a graph, we need a graph database; [TuGraph](https://github.com/TuGraph-family/tugraph-db) is the first graph database supported by DB-GPT.

Visit the TuGraph GitHub repository to view the [Quick Start](https://tugraph-db.readthedocs.io/zh-cn/latest/3.quick-start/1.preparation.html#id5) document, then follow the instructions to pull the TuGraph database docker image (latest / version >= 4.5.1) and launch it.

```bash
docker pull tugraph/tugraph-runtime-centos7:4.5.1
docker run -d -p 7070:7070 -p 7687:7687 -p 9090:9090 --name tugraph_demo tugraph/tugraph-runtime-centos7:4.5.1 lgraph_server -d run --enable_plugin true
```

The default port for the bolt protocol is `7687`.

> **Download Tips:**
>
> There is also a corresponding version of the TuGraph Docker image package on OSS. You can also directly download and import it.
>
> ```
> wget 'https://tugraph-web.oss-cn-beijing.aliyuncs.com/tugraph/tugraph-4.5.1/tugraph-runtime-centos7-4.5.1.tar' -O tugraph-runtime-centos7-4.5.1.tar
> docker load -i tugraph-runtime-centos7-4.5.1.tar
> ```
### TuGraph Configuration

Set the variables below in the `configs/dbgpt-graphrag.toml` file so that DB-GPT knows how to connect to TuGraph.

```toml
[rag.storage.graph]
type = "TuGraph"
host="127.0.0.1"
port=7687
username="admin"
password="73@TuGraph"
enable_summary="True"
enable_similarity_search="True"
```
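
TuGraph serves the bolt protocol on port `7687`, so you can sanity-check the connection before starting DB-GPT. The sketch below uses the `neo4j` Python driver as a generic bolt client with the credentials from the config above; both the driver choice and the `dbms.graph.listGraphs()` procedure name are assumptions that may differ across TuGraph versions.

```python
# Minimal bolt connectivity check for the TuGraph instance configured above.
# Assumes `pip install neo4j`; any bolt-compatible client should work.
from neo4j import GraphDatabase

driver = GraphDatabase.driver("bolt://127.0.0.1:7687", auth=("admin", "73@TuGraph"))
with driver.session() as session:
    # List the graphs on the server; adjust the procedure name if your
    # TuGraph version exposes a different catalog call.
    print(session.run("CALL dbms.graph.listGraphs()").data())
driver.close()
```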

Then run the following command to start the webserver:

```bash
uv run python packages/dbgpt-app/src/dbgpt_app/dbgpt_server.py --config configs/dbgpt-graphrag.toml
```

Optionally, you can also use the following command to start the webserver:

```bash
uv run python packages/dbgpt-app/src/dbgpt_app/dbgpt_server.py --config configs/dbgpt-proxy-openai.toml
```
### Load into Knowledge Graph

When using a graph database as the underlying knowledge storage platform, it is necessary to build a knowledge graph to facilitate the archiving and retrieval of documents. DB-GPT leverages the capabilities of large language models to implement an integrated knowledge graph, while still maintaining the flexibility to freely connect to other knowledge graph systems and graph database systems.

We created a knowledge graph with graph community summaries based on `CommunitySummaryKnowledgeGraph`.
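
For reference, the snippet below sketches how such a graph can be built programmatically. The imports match the module layout in this commit; the config field names and constructor shape are assumptions based on `examples/rag/graph_rag_example.py` and may differ in your version.

```python
# Sketch only: build a community-summary knowledge graph for a knowledge space.
# Field names below are assumptions; see examples/rag/graph_rag_example.py.
from dbgpt.model.proxy import OpenAILLMClient
from dbgpt_ext.storage.knowledge_graph.community_summary import (
    CommunitySummaryKnowledgeGraph,
    CommunitySummaryKnowledgeGraphConfig,
)

config = CommunitySummaryKnowledgeGraphConfig(
    name="graph_rag_test",          # knowledge space / graph name
    model_name="gpt-4o",            # LLM used for extraction and summaries
    llm_client=OpenAILLMClient(),   # assumed field; reads OPENAI_API_KEY
)
graph_store = CommunitySummaryKnowledgeGraph(config=config)
```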

### Chat Knowledge via GraphRAG

> Note: The current test data is in Chinese.

Here we demonstrate how to chat with your knowledge via Graph RAG on the web page.

First, create a knowledge base using the `Knowledge Graph` type.
<p align="left">
<img src={'/img/chat_knowledge/graph_rag/create_knowledge_graph.png'} width="1000px"/>
</p>

Then, upload the documents ([graphrag-test.md](https://github.com/eosphoros-ai/DB-GPT/blob/main/examples/test_files/graphrag-test.md)) and process them automatically (split by markdown header by default).

<p align="left">
<img src={'/img/chat_knowledge/graph_rag/upload_file.png'} width="1000px"/>
</p>

After indexing, the graph data may look like this.

<p align="left">
<img src={'/img/chat_knowledge/graph_rag/graph_data.png'} width="1000px"/>
</p>

Start chatting on the knowledge graph.

<p align="left">
<img src={'/img/chat_knowledge/graph_rag/graph_rag_chat.png'} width="1000px"/>
</p>
40
docs/docs/installation/integrations.md
Normal file
@ -0,0 +1,40 @@

# DB-GPT Integrations

DB-GPT integrates with many data sources and RAG storage providers. The tables below list the available integration packages.
# Datasource Providers

| Provider    | Supported | Install Packages              |
|-------------|-----------|-------------------------------|
| MySQL       | ✅        | --extra datasource_mysql      |
| OceanBase   | ✅        |                               |
| ClickHouse  | ✅        | --extra datasource_clickhouse |
| Hive        | ✅        | --extra datasource_hive       |
| MSSQL       | ✅        | --extra datasource_mssql      |
| PostgreSQL  | ✅        | --extra datasource_postgres   |
| ApacheDoris | ✅        |                               |
| StarRocks   | ✅        | --extra datasource_starroks   |
| Spark       | ✅        | --extra datasource_spark      |
| Oracle      | ❌        |                               |

# RAG Storage Providers

| Provider      | Supported | Install Packages              |
|---------------|-----------|-------------------------------|
| Chroma        | ✅        | --extra storage_chromadb      |
| Milvus        | ✅        | --extra storage_milvus        |
| Elasticsearch | ✅        | --extra storage_elasticsearch |
| OceanBase     | ✅        | --extra storage_obvector      |

# Graph RAG Storage Providers

| Provider | Supported | Install Packages  |
|----------|-----------|-------------------|
| TuGraph  | ✅        | --extra graph_rag |
| Neo4j    | ❌        |                   |
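
The extras above compose; for example, a single sync that enables the MySQL datasource together with the Milvus RAG storage could look like the sketch below (combine whichever extras your deployment needs):

```bash
uv sync --all-packages --frozen \
--extra "datasource_mysql" \
--extra "storage_milvus"
```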
46
docs/docs/installation/milvus_rag_install.md
Normal file
@ -0,0 +1,46 @@

# Milvus RAG Installation

In this example, we will show how to use Milvus as the RAG storage in DB-GPT. Milvus is a high-performance vector database; using it as the RAG storage provides scalable similarity search over embedded documents.
### Install Dependencies

First, you need to install the `dbgpt` library with the Milvus storage extra.

```bash
uv sync --all-packages --frozen \
--extra "proxy_openai" \
--extra "rag" \
--extra "storage_milvus" \
--extra "dbgpts"
```
### Prepare Milvus

Prepare the Milvus database service; see the [Milvus Installation](https://milvus.io/docs/install_standalone-docker-compose.md) guide.
### Milvus Configuration

Set the RAG storage variables below in the `configs/dbgpt-proxy-openai.toml` file so that DB-GPT knows how to connect to Milvus.

```toml
[rag.storage]
[rag.storage.vector]
type = "Milvus"
uri = "127.0.0.1"
port = "19530"
#username="dbgpt"
#password=19530
```
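
Before starting the webserver, you can optionally verify that Milvus is reachable. Below is a minimal sketch using `pymilvus` (the package installed by the `storage_milvus` extra), with the host and port from the config above:

```python
# Minimal connectivity check for a standalone Milvus on the default port.
from pymilvus import connections, utility

connections.connect(alias="default", host="127.0.0.1", port="19530")
print(utility.get_server_version())  # prints the server version when reachable
```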

Then run the following command to start the webserver:

```bash
uv run python packages/dbgpt-app/src/dbgpt_app/dbgpt_server.py --config configs/dbgpt-proxy-openai.toml
```
@ -230,6 +230,28 @@ const sidebars = {
        id: 'installation/sourcecode',
      },
      {
        // type: 'doc',
        // id: 'installation/integrations',
        type: "category",
        label: "Integrations",
        collapsed: false,
        collapsible: false,
        items: [
          {
            type: "doc",
            id: "installation/integrations"
          },
          {
            type: "doc",
            id: "installation/graph_rag_install"
          },
          {
            type: "doc",
            id: "installation/milvus_rag_install"
          }
        ]
      },
      {
        type: 'doc',
        id: 'installation/docker',
      },
@ -57,7 +57,7 @@ async def main():
    # initialize client
    DBGPT_API_KEY = "dbgpt"
    client = Client(api_key=DBGPT_API_KEY)
    data = await client.chat(model="chatgpt_proxyllm", messages="hello")
    data = await client.chat(model="Qwen2.5-72B-Instruct", messages="hello")
    # async for data in client.chat_stream(
    #     model="chatgpt_proxyllm",
    #     messages="hello",
@ -7,12 +7,16 @@ from dbgpt.model.parameter import (
    ModelServiceConfig,
)
from dbgpt.storage.cache.manager import ModelCacheParameters
from dbgpt.storage.vector_store.base import VectorStoreConfig
from dbgpt.util.configure import HookConfig
from dbgpt.util.i18n_utils import _
from dbgpt.util.parameter_utils import BaseParameters
from dbgpt.util.tracer import TracerParameters
from dbgpt.util.utils import LoggingParameters
from dbgpt_ext.datasource.rdbms.conn_sqlite import SQLiteConnectorParameters
from dbgpt_ext.storage.knowledge_graph.knowledge_graph import (
    BuiltinKnowledgeGraphConfig,
)
from dbgpt_serve.core import BaseServeConfig
@ -68,14 +72,20 @@ class StorageGraphConfig(BaseParameters):

@dataclass
class StorageConfig(BaseParameters):
    vector: StorageVectorConfig = field(
        default_factory=StorageVectorConfig,
    vector: VectorStoreConfig = field(
        default_factory=VectorStoreConfig,
        metadata={
            "help": _("default vector type"),
        },
    )
    graph: StorageGraphConfig = field(
        default_factory=StorageGraphConfig,
    graph: BuiltinKnowledgeGraphConfig = field(
        default_factory=BuiltinKnowledgeGraphConfig,
        metadata={
            "help": _("default graph type"),
        },
    )
    full_text: BuiltinKnowledgeGraphConfig = field(
        default_factory=BuiltinKnowledgeGraphConfig,
        metadata={
            "help": _("default graph type"),
        },
@ -77,6 +77,11 @@ class KnowledgeService:
        ).create()
        return DefaultLLMClient(worker_manager, True)

    @property
    def rag_config(self):
        rag_config = CFG.SYSTEM_APP.config.configs.get("app_config").rag
        return rag_config

    def create_knowledge_space(self, request: KnowledgeSpaceRequest):
        """create knowledge space
        Args:
@ -86,7 +91,7 @@ class KnowledgeService:
            name=request.name,
        )
        if request.vector_type == "VectorStore":
            request.vector_type = CFG.VECTOR_STORE_TYPE
            request.vector_type = self.rag_config.storage.vector.get("type")
        if request.vector_type == "KnowledgeGraph":
            knowledge_space_name_pattern = r"^[a-zA-Z0-9\u4e00-\u9fa5]+$"
            if not re.match(knowledge_space_name_pattern, request.name):
@ -9,7 +9,7 @@ from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from starlette.responses import JSONResponse, StreamingResponse

from dbgpt._private.pydantic import model_to_dict, model_to_json
from dbgpt.component import logger
from dbgpt.component import SystemApp, logger
from dbgpt.core.awel import CommonLLMHttpRequestBody
from dbgpt.core.schema.api import (
    ChatCompletionResponse,
@ -72,6 +72,7 @@ async def check_api_key(
@router.post("/v2/chat/completions", dependencies=[Depends(check_api_key)])
async def chat_completions(
    request: ChatCompletionRequestBody = Body(),
    service=Depends(get_service),
):
    """Chat V2 completions
    Args:
@ -133,7 +134,7 @@ async def chat_completions(
        span_type=SpanType.CHAT,
        metadata=model_to_dict(request),
    ):
        chat: BaseChat = await get_chat_instance(request)
        chat: BaseChat = await get_chat_instance(request, service.system_app)

        if not request.stream:
            return await no_stream_wrapper(request, chat)
@ -158,11 +159,14 @@ async def chat_completions(
    )


async def get_chat_instance(dialogue: ChatCompletionRequestBody = Body()) -> BaseChat:
async def get_chat_instance(
    dialogue: ChatCompletionRequestBody = Body(), system_app: SystemApp = None
) -> BaseChat:
    """
    Get chat instance
    Args:
        dialogue (OpenAPIChatCompletionRequest): The chat request.
        system_app (SystemApp): system app.
    """
    logger.info(f"get_chat_instance:{dialogue}")
    if not dialogue.chat_mode:
@ -191,6 +195,7 @@ async def get_chat_instance(dialogue: ChatCompletionRequestBody = Body()) -> Bas
        get_executor(),
        CHAT_FACTORY.get_implementation,
        dialogue.chat_mode,
        system_app,
        **{"chat_param": chat_param},
    )
    return chat
@ -42,11 +42,20 @@ class ChatKnowledge(BaseChat):
        self.knowledge_space = chat_param["select_param"]
        chat_param["chat_mode"] = ChatScene.ChatKnowledge
        super().__init__(chat_param=chat_param, system_app=system_app)
        from dbgpt_serve.rag.models.models import (
            KnowledgeSpaceDao,
        )

        space_dao = KnowledgeSpaceDao()
        space = space_dao.get_one({"name": self.knowledge_space})
        if not space:
            space = space_dao.get_one({"id": self.knowledge_space})
        if not space:
            raise Exception(f"have not found knowledge space:{self.knowledge_space}")
        self.rag_config = self.app_config.rag
        self.space_context = self.get_space_context(self.knowledge_space)
        self.space_context = self.get_space_context(space.name)
        self.top_k = (
            self.get_knowledge_search_top_size(self.knowledge_space)
            self.get_knowledge_search_top_size(space.name)
            if self.space_context is None
            else int(self.space_context["embedding"]["topk"])
        )
@ -55,17 +64,6 @@ class ChatKnowledge(BaseChat):
            if self.space_context is None
            else float(self.space_context["embedding"]["recall_score"])
        )
        from dbgpt_serve.rag.models.models import (
            KnowledgeSpaceDao,
            KnowledgeSpaceEntity,
        )

        spaces = KnowledgeSpaceDao().get_knowledge_space(
            KnowledgeSpaceEntity(name=self.knowledge_space)
        )
        if len(spaces) != 1:
            raise Exception(f"invalid space name:{self.knowledge_space}")
        space = spaces[0]

        query_rewrite = None
        if self.rag_config.query_rewrite:
@ -230,9 +228,9 @@ class ChatKnowledge(BaseChat):
        request = KnowledgeSpaceRequest(name=space_name)
        spaces = service.get_knowledge_space(request)
        if len(spaces) == 1:
            from dbgpt_ext.storage import vector_store
            from dbgpt_ext.storage import __knowledge_graph__ as graph_storages

            if spaces[0].vector_type in vector_store.__knowledge_graph__:
            if spaces[0].vector_type in graph_storages:
                return self.rag_config.graph_search_top_k

        return self.rag_config.similarity_top_k
@ -19,6 +19,11 @@ class IndexStoreConfig(BaseModel):

    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")

    type: Optional[str] = Field(
        default=None,
        description="storage type",
    )

    name: str = Field(
        default="dbgpt_collection",
        description="The name of index store, if not set, will use the default name.",
@ -107,6 +107,10 @@ class VectorStoreConfig(IndexStoreConfig):
        default=0.3,
        description="Recall score of vector search",
    )
    type: Optional[str] = Field(
        default=None,
        description="vector storage type",
    )


class VectorStoreBase(IndexStoreBase, ABC):
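
With `type` now a first-class field on the config objects, a store config can carry its backend name directly. A hypothetical instantiation (the field names come from the hunks above):

```python
# Hypothetical usage of the fields added above; VectorStoreConfig is a
# pydantic model with extra="allow", so plain keyword construction works.
from dbgpt.storage.vector_store.base import VectorStoreConfig

config = VectorStoreConfig(name="dbgpt_collection", type="Chroma")
assert config.type == "Chroma"
```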

@ -70,6 +70,8 @@ datasource_duckdb = [
storage_milvus = ["pymilvus"]
storage_weaviate = ["weaviate-client"]
storage_chromadb = ["chromadb>=0.4.22"]
storage_elasticsearch = ["elasticsearch"]
storage_obvector = ["pyobvector"]

[tool.uv]
managed = true
@ -1 +1,131 @@
"""Module of storage."""
"""Module of RAG storage."""

from typing import Tuple, Type


def _import_pgvector() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.pgvector_store import (
        PGVectorConfig,
        PGVectorStore,
    )

    return PGVectorStore, PGVectorConfig


def _import_milvus() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.milvus_store import (
        MilvusStore,
        MilvusVectorConfig,
    )

    return MilvusStore, MilvusVectorConfig


def _import_chroma() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.chroma_store import (
        ChromaStore,
        ChromaVectorConfig,
    )

    return ChromaStore, ChromaVectorConfig


def _import_weaviate() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.weaviate_store import (
        WeaviateStore,
        WeaviateVectorConfig,
    )

    return WeaviateStore, WeaviateVectorConfig


def _import_oceanbase() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.oceanbase_store import (
        OceanBaseConfig,
        OceanBaseStore,
    )

    return OceanBaseStore, OceanBaseConfig


def _import_elastic() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.elastic_store import (
        ElasticsearchVectorConfig,
        ElasticStore,
    )

    return ElasticStore, ElasticsearchVectorConfig


def _import_builtin_knowledge_graph() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.knowledge_graph.knowledge_graph import (
        BuiltinKnowledgeGraph,
        BuiltinKnowledgeGraphConfig,
    )

    return BuiltinKnowledgeGraph, BuiltinKnowledgeGraphConfig


def _import_community_summary_knowledge_graph() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.knowledge_graph.community_summary import (
        CommunitySummaryKnowledgeGraph,
        CommunitySummaryKnowledgeGraphConfig,
    )

    return CommunitySummaryKnowledgeGraph, CommunitySummaryKnowledgeGraphConfig


def _import_openspg() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.knowledge_graph.open_spg import OpenSPG, OpenSPGConfig

    return OpenSPG, OpenSPGConfig


def _import_full_text() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.full_text.elasticsearch import (
        ElasticDocumentConfig,
        ElasticDocumentStore,
    )

    return ElasticDocumentStore, ElasticDocumentConfig


def _select_rag_storage(name: str) -> Tuple[Type, Type]:
    if name == "Chroma":
        return _import_chroma()
    elif name == "Milvus":
        return _import_milvus()
    elif name == "Weaviate":
        return _import_weaviate()
    elif name == "PGVector":
        return _import_pgvector()
    elif name == "OceanBase":
        return _import_oceanbase()
    elif name == "ElasticSearch":
        return _import_elastic()
    elif name == "KnowledgeGraph":
        return _import_builtin_knowledge_graph()
    elif name == "CommunitySummaryKnowledgeGraph":
        return _import_community_summary_knowledge_graph()
    elif name == "OpenSPG":
        return _import_openspg()
    elif name == "FullText":
        return _import_full_text()
    else:
        raise AttributeError(f"Could not find: {name}")


__vector_store__ = [
    "Chroma",
    "Milvus",
    "Weaviate",
    "OceanBase",
    "PGVector",
    "ElasticSearch",
]

__knowledge_graph__ = ["KnowledgeGraph", "CommunitySummaryKnowledgeGraph", "OpenSPG"]

__document_store__ = ["FullText"]

__all__ = __vector_store__ + __knowledge_graph__ + __document_store__
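
Since the new module resolves implementations through `_select_rag_storage` rather than a module-level `__getattr__`, callers look a backend up explicitly. A small usage sketch (the config values are placeholders):

```python
# Resolve the Chroma store and its config class by registry name.
from dbgpt_ext.storage import _select_rag_storage

store_cls, config_cls = _select_rag_storage("Chroma")
config = config_cls(name="dbgpt_collection", persist_path="pilot/data")  # placeholders
store = store_cls(config)
```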

@ -1,7 +1,7 @@
"""Graph store factory."""

import logging
from typing import Tuple, Type
from typing import Optional, Tuple, Type

from dbgpt.storage.graph_store.base import GraphStoreBase, GraphStoreConfig
from dbgpt_ext.storage import graph_store
@ -13,7 +13,11 @@ class GraphStoreFactory:
    """Factory for graph store."""

    @staticmethod
    def create(graph_store_type: str, graph_store_configure=None) -> GraphStoreBase:
    def create(
        graph_store_type: str,
        graph_store_configure=None,
        graph_config: Optional[dict] = None,
    ) -> GraphStoreBase:
        """Create a GraphStore instance.

        Args:
@ -23,7 +27,10 @@ class GraphStoreFactory:
        store_cls, cfg_cls = GraphStoreFactory.__find_type(graph_store_type)

        try:
            config = cfg_cls()
            if graph_config:
                config = cfg_cls(**graph_config)
            else:
                config = cfg_cls()
            if graph_store_configure:
                graph_store_configure(config)
            return store_cls(config)
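
The new `graph_config` parameter lets callers pass a plain dict, which is what `BuiltinKnowledgeGraph` now does with `config.dict()`. A hypothetical call (the factory's import path is an assumption; adjust to your tree):

```python
# Hypothetical: build a TuGraph-backed store straight from a dict.
from dbgpt_ext.storage.graph_store.factory import GraphStoreFactory  # assumed path

store = GraphStoreFactory.create(
    "TuGraph",
    graph_config={
        "name": "graph_rag_test",
        "host": "127.0.0.1",
        "port": 7687,
        "username": "admin",
        "password": "73@TuGraph",
    },
)
```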

@ -83,23 +83,18 @@ class TuGraphStore(GraphStoreBase):
    def __init__(self, config: TuGraphStoreConfig) -> None:
        """Initialize the TuGraphStore with connection details."""
        self._config = config
        self._host = os.getenv("TUGRAPH_HOST", config.host)
        self._port = int(os.getenv("TUGRAPH_PORT", config.port))
        self._username = os.getenv("TUGRAPH_USERNAME", config.username)
        self._password = os.getenv("TUGRAPH_PASSWORD", config.password)
        self.enable_summary = (
        self._host = config.host or os.getenv("TUGRAPH_HOST")
        self._port = int(config.port or os.getenv("TUGRAPH_PORT"))
        self._username = config.username or os.getenv("TUGRAPH_USERNAME")
        self._password = config.password or os.getenv("TUGRAPH_PASSWORD")
        self.enable_summary = config.enable_summary or (
            os.getenv("GRAPH_COMMUNITY_SUMMARY_ENABLED", "").lower() == "true"
            if "GRAPH_COMMUNITY_SUMMARY_ENABLED" in os.environ
            else config.enable_summary
        )
        self.enable_similarity_search = (
            os.environ["SIMILARITY_SEARCH_ENABLED"].lower() == "true"
            if "SIMILARITY_SEARCH_ENABLED" in os.environ
            else config.enable_similarity_search
        self.enable_similarity_search = config.enable_similarity_search or (
            os.getenv("SIMILARITY_SEARCH_ENABLED", "").lower() == "true"
        )
        self._plugin_names = (
        self._plugin_names = config.plugin_names or (
            os.getenv("TUGRAPH_PLUGIN_NAMES", "leiden").split(",")
            or config.plugin_names
        )

        self._graph_name = config.name
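
Note the inversion of precedence here: an explicit config value now wins over the environment variable, which previously shadowed it. The pattern in isolation, as a self-contained sketch:

```python
import os

def resolve(config_value, env_var: str, default=None):
    # Explicit config wins; otherwise fall back to the environment, then default.
    return config_value or os.getenv(env_var, default)

os.environ["TUGRAPH_HOST"] = "10.0.0.5"
print(resolve("127.0.0.1", "TUGRAPH_HOST"))  # 127.0.0.1 (config wins)
print(resolve(None, "TUGRAPH_HOST"))         # 10.0.0.5 (env fallback)
```

One side effect worth noting: falsy config values (`False`, `0`, an empty string) also fall through to the environment, since the pattern relies on `or`.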

@ -107,9 +107,7 @@ class BuiltinKnowledgeGraphConfig(KnowledgeGraphConfig):

    model_name: str = Field(default=None, description="The name of llm model.")

    graph_store_type: str = Field(
        default="TuGraph", description="The type of graph store."
    )
    type: str = Field(default="TuGraph", description="The type of graph store.")


@register_resource(
@ -151,8 +149,8 @@ class BuiltinKnowledgeGraph(KnowledgeGraphBase):
            cfg.name = config.name
            cfg.embedding_fn = config.embedding_fn

        graph_store_type = os.getenv("GRAPH_STORE_TYPE") or config.graph_store_type
        return GraphStoreFactory.create(graph_store_type, configure)
        graph_store_type = os.getenv("GRAPH_STORE_TYPE") or config.type
        return GraphStoreFactory.create(graph_store_type, configure, config.dict())

    def __init_graph_store_adapter(self):
        return GraphStoreAdapterFactory.create(self._graph_store)
@ -1,131 +0,0 @@
"""Vector Store Module."""

from typing import Tuple, Type


def _import_pgvector() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.pgvector_store import (
        PGVectorConfig,
        PGVectorStore,
    )

    return PGVectorStore, PGVectorConfig


def _import_milvus() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.milvus_store import (
        MilvusStore,
        MilvusVectorConfig,
    )

    return MilvusStore, MilvusVectorConfig


def _import_chroma() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.chroma_store import (
        ChromaStore,
        ChromaVectorConfig,
    )

    return ChromaStore, ChromaVectorConfig


def _import_weaviate() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.weaviate_store import (
        WeaviateStore,
        WeaviateVectorConfig,
    )

    return WeaviateStore, WeaviateVectorConfig


def _import_oceanbase() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.oceanbase_store import (
        OceanBaseConfig,
        OceanBaseStore,
    )

    return OceanBaseStore, OceanBaseConfig


def _import_elastic() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.vector_store.elastic_store import (
        ElasticsearchVectorConfig,
        ElasticStore,
    )

    return ElasticStore, ElasticsearchVectorConfig


def _import_builtin_knowledge_graph() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.knowledge_graph.knowledge_graph import (
        BuiltinKnowledgeGraph,
        BuiltinKnowledgeGraphConfig,
    )

    return BuiltinKnowledgeGraph, BuiltinKnowledgeGraphConfig


def _import_community_summary_knowledge_graph() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.knowledge_graph.community_summary import (
        CommunitySummaryKnowledgeGraph,
        CommunitySummaryKnowledgeGraphConfig,
    )

    return CommunitySummaryKnowledgeGraph, CommunitySummaryKnowledgeGraphConfig


def _import_openspg() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.knowledge_graph.open_spg import OpenSPG, OpenSPGConfig

    return OpenSPG, OpenSPGConfig


def _import_full_text() -> Tuple[Type, Type]:
    from dbgpt_ext.storage.full_text.elasticsearch import (
        ElasticDocumentConfig,
        ElasticDocumentStore,
    )

    return ElasticDocumentStore, ElasticDocumentConfig


def __getattr__(name: str) -> Tuple[Type, Type]:
    if name == "Chroma":
        return _import_chroma()
    elif name == "Milvus":
        return _import_milvus()
    elif name == "Weaviate":
        return _import_weaviate()
    elif name == "PGVector":
        return _import_pgvector()
    elif name == "OceanBase":
        return _import_oceanbase()
    elif name == "ElasticSearch":
        return _import_elastic()
    elif name == "KnowledgeGraph":
        return _import_builtin_knowledge_graph()
    elif name == "CommunitySummaryKnowledgeGraph":
        return _import_community_summary_knowledge_graph()
    elif name == "OpenSPG":
        return _import_openspg()
    elif name == "FullText":
        return _import_full_text()
    else:
        raise AttributeError(f"Could not find: {name}")


__vector_store__ = [
    "Chroma",
    "Milvus",
    "Weaviate",
    "OceanBase",
    "PGVector",
    "ElasticSearch",
]

__knowledge_graph__ = ["KnowledgeGraph", "CommunitySummaryKnowledgeGraph", "OpenSPG"]

__document_store__ = ["FullText"]

__all__ = __vector_store__ + __knowledge_graph__ + __document_store__
@ -4,10 +4,6 @@ import logging
import os
from typing import Any, Dict, Iterable, List, Mapping, Optional, Union

from chromadb import PersistentClient
from chromadb.api.client import SharedSystemClient
from chromadb.config import Settings

from dbgpt._private.pydantic import ConfigDict, Field
from dbgpt.configs.model_config import PILOT_PATH
from dbgpt.core import Chunk
@ -85,7 +81,10 @@ class ChromaStore(VectorStoreBase):
        """
        super().__init__()
        self._vector_store_config = vector_store_config

        try:
            from chromadb import PersistentClient, Settings
        except ImportError:
            raise ImportError("Please install chroma package first.")
        chroma_vector_config = vector_store_config.to_dict(exclude_none=True)
        chroma_path = chroma_vector_config.get(
            "persist_path", os.path.join(PILOT_PATH, "data")
@ -203,8 +202,11 @@ class ChromaStore(VectorStoreBase):

    def delete_vector_name(self, vector_name: str):
        """Delete vector name."""
        try:
            from chromadb.api.client import SharedSystemClient
        except ImportError:
            raise ImportError("Please install chroma package first.")
        logger.info(f"chroma vector_name:{vector_name} begin delete...")
        # self.vector_store_client.delete_collection()
        self._chroma_client.delete_collection(self._collection.name)
        SharedSystemClient.clear_system_cache()
        self._clean_persist_folder()
@ -25,6 +25,11 @@ class BaseService(BaseComponent, Generic[T, REQ, RES], ABC):
        """
        self._system_app = system_app

    @property
    def system_app(self) -> SystemApp:
        """Returns system_app."""
        return self._system_app

    @property
    @abstractmethod
    def dao(self) -> BaseDao[T, REQ, RES]:
@ -159,7 +159,7 @@ class DBSummaryClient:
        vector_store_name = dbname + "_profile"
        table_vector_store_config = VectorStoreConfig(name=vector_store_name)
        table_vector_connector = VectorStoreConnector.from_default(
            self.storage_config.vector.type,
            self.storage_config.vector.get("type"),
            self.embeddings,
            vector_store_config=table_vector_store_config,
            system_app=self.system_app,
@ -167,7 +167,7 @@ class DBSummaryClient:
        field_vector_store_name = dbname + "_profile_field"
        field_vector_store_config = VectorStoreConfig(name=field_vector_store_name)
        field_vector_connector = VectorStoreConnector.from_default(
            self.storage_config.vector.type,
            self.storage_config.vector.get("type"),
            self.embeddings,
            vector_store_config=field_vector_store_config,
            system_app=self.system_app,
@ -8,9 +8,11 @@ from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Type, cast

from dbgpt import SystemApp
from dbgpt.core import Chunk, Embeddings
from dbgpt.storage.base import IndexStoreBase, IndexStoreConfig
from dbgpt.storage.base import IndexStoreConfig
from dbgpt.storage.vector_store.base import VectorStoreConfig
from dbgpt.storage.vector_store.filters import MetadataFilters
from dbgpt_ext.storage import __knowledge_graph__ as supported_kg_store_list
from dbgpt_ext.storage import __vector_store__ as supported_vector_store_list

logger = logging.getLogger(__name__)
@ -69,6 +71,11 @@ class VectorStoreConnector:
        self._embeddings = vector_store_config.embedding_fn

        config_dict = {}
        storage_config = self.app_config.rag.storage
        if vector_store_type in supported_vector_store_list:
            config_dict = storage_config.vector
        elif vector_store_type in supported_kg_store_list:
            config_dict = storage_config.graph
        for key in vector_store_config.to_dict().keys():
            value = getattr(vector_store_config, key)
            if value is not None:
@ -268,11 +275,9 @@ class VectorStoreConnector:
        return bool(connector.get(vector_store_type))

    def _register(self):
        from dbgpt_ext.storage import vector_store
        from dbgpt_ext.storage import __all__ as rag_storages
        from dbgpt_ext.storage import _select_rag_storage

        for cls in vector_store.__all__:
            store_cls, config_cls = getattr(vector_store, cls)
            if issubclass(store_cls, IndexStoreBase) and issubclass(
                config_cls, IndexStoreConfig
            ):
                connector[cls] = (store_cls, config_cls)
        for cls_name in rag_storages:
            store_cls, config_cls = _select_rag_storage(cls_name)
            connector[cls_name] = (store_cls, config_cls)
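
After `_register()` runs, the `connector` mapping is keyed by the registry names exported from `dbgpt_ext.storage`. A hypothetical lookup against it:

```python
# Hypothetical: fetch a registered implementation by name after _register().
store_cls, config_cls = connector["Milvus"]
store = store_cls(config_cls(name="dbgpt_collection"))
```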