Mirror of https://github.com/csunny/DB-GPT.git, synced 2025-07-30 23:28:35 +00:00
fix:update app schemas.
parent a1369c02c4
commit 0cf08f37b0
@@ -1,16 +1,14 @@
 """this module contains the schemas for the dbgpt client."""
+import json
 from datetime import datetime
 from enum import Enum
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 from fastapi import File, UploadFile
 
 from dbgpt._private.pydantic import BaseModel, Field
-from dbgpt.agent.resource.resource_api import AgentResource
 from dbgpt.rag.chunk_manager import ChunkParameters
 
-"""Chat completion request body"""
-
 
 class ChatCompletionRequestBody(BaseModel):
     """ChatCompletion LLM http request body."""
@@ -26,7 +24,8 @@ class ChatCompletionRequestBody(BaseModel):
     temperature: Optional[float] = Field(
         default=None,
         description="What sampling temperature to use, between 0 and 2. Higher values "
-        "like 0.8 will make the output more random, while lower values like 0.2 will "
+        "like 0.8 will make the output more random, "
+        "while lower values like 0.2 will "
         "make it more focused and deterministic.",
     )
     max_new_tokens: Optional[int] = Field(
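The temperature description above is only re-wrapped to satisfy the line-length limit; since adjacent Python string literals concatenate at compile time, the rendered field description is unchanged. A minimal standalone sketch of that equivalence (not part of the diff):

old_text = (
    "What sampling temperature to use, between 0 and 2. Higher values "
    "like 0.8 will make the output more random, while lower values like 0.2 will "
    "make it more focused and deterministic."
)
new_text = (
    "What sampling temperature to use, between 0 and 2. Higher values "
    "like 0.8 will make the output more random, "
    "while lower values like 0.2 will "
    "make it more focused and deterministic."
)
assert old_text == new_text  # the split changes source formatting only, not the text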
@@ -66,9 +65,6 @@ class ChatCompletionRequestBody(BaseModel):
     )
 
 
-"""Chat completion response"""
-
-
 class ChatMode(Enum):
     """Chat mode."""
 
@@ -78,27 +74,6 @@ class ChatMode(Enum):
     CHAT_KNOWLEDGE = "chat_knowledge"
 
 
-"""Agent model"""
-
-
-class AppDetailModel(BaseModel):
-    """App detail model."""
-
-    app_code: Optional[str] = Field(None, description="app code")
-    app_name: Optional[str] = Field(None, description="app name")
-    agent_name: Optional[str] = Field(None, description="agent name")
-    node_id: Optional[str] = Field(None, description="node id")
-    resources: Optional[list[AgentResource]] = Field(None, description="resources")
-    prompt_template: Optional[str] = Field(None, description="prompt template")
-    llm_strategy: Optional[str] = Field(None, description="llm strategy")
-    llm_strategy_value: Optional[str] = Field(None, description="llm strategy value")
-    created_at: datetime = datetime.now()
-    updated_at: datetime = datetime.now()
-
-
-"""Awel team model"""
-
-
 class AwelTeamModel(BaseModel):
     """Awel team model."""
 
@@ -151,6 +126,77 @@ class AwelTeamModel(BaseModel):
     )
 
 
+class AgentResourceType(Enum):
+    """Agent resource type."""
+
+    DB = "database"
+    Knowledge = "knowledge"
+    Internet = "internet"
+    Plugin = "plugin"
+    TextFile = "text_file"
+    ExcelFile = "excel_file"
+    ImageFile = "image_file"
+    AwelFlow = "awel_flow"
+
+
+class AgentResourceModel(BaseModel):
+    """Agent resource model."""
+
+    type: AgentResourceType
+    name: str
+    value: str
+    is_dynamic: bool = (
+        False  # Is the current resource predefined or dynamically passed in?
+    )
+
+    @staticmethod
+    def from_dict(d: Dict[str, Any]):
+        """From dict."""
+        if d is None:
+            return None
+        return AgentResourceModel(
+            type=AgentResourceType(d.get("type")),
+            name=d.get("name"),
+            introduce=d.get("introduce"),
+            value=d.get("value", None),
+            is_dynamic=d.get("is_dynamic", False),
+        )
+
+    @staticmethod
+    def from_json_list_str(d: Optional[str]):
+        """From json list str."""
+        if d is None:
+            return None
+        try:
+            json_array = json.loads(d)
+        except Exception as e:
+            raise ValueError(f"Illegal AgentResource json string!{d},{e}")
+        return [AgentResourceModel.from_dict(item) for item in json_array]
+
+    def to_dict(self) -> Dict[str, Any]:
+        """To dict."""
+        temp = self.dict()
+        for field, value in temp.items():
+            if isinstance(value, Enum):
+                temp[field] = value.value
+        return temp
+
+
+class AppDetailModel(BaseModel):
+    """App detail model."""
+
+    app_code: Optional[str] = Field(None, description="app code")
+    app_name: Optional[str] = Field(None, description="app name")
+    agent_name: Optional[str] = Field(None, description="agent name")
+    node_id: Optional[str] = Field(None, description="node id")
+    resources: Optional[list[AgentResourceModel]] = Field(None, description="resources")
+    prompt_template: Optional[str] = Field(None, description="prompt template")
+    llm_strategy: Optional[str] = Field(None, description="llm strategy")
+    llm_strategy_value: Optional[str] = Field(None, description="llm strategy value")
+    created_at: datetime = datetime.now()
+    updated_at: datetime = datetime.now()
+
+
 class AppModel(BaseModel):
     """App model."""
 
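A minimal usage sketch for the new client-side resource schemas, with made-up example data; the import path dbgpt.client.schemas and default pydantic extra-field handling are assumptions, not confirmed by this diff:

import json

# Assumed import path for the schemas shown in the diff above (hypothetical).
from dbgpt.client.schemas import AgentResourceModel, AgentResourceType

# Build the JSON list string that from_json_list_str expects.
resources_json = json.dumps(
    [{"type": "database", "name": "orders_db", "value": "orders_db", "is_dynamic": False}]
)
resources = AgentResourceModel.from_json_list_str(resources_json)

assert resources[0].type is AgentResourceType.DB
# to_dict() flattens the enum back to its string value for serialization.
assert resources[0].to_dict()["type"] == "database"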