feat: (0.6)New UI (#1855)

Co-authored-by: 夏姜 <wenfengjiang.jwf@digital-engine.com>
Co-authored-by: aries_ckt <916701291@qq.com>
Co-authored-by: wb-lh513319 <wb-lh513319@alibaba-inc.com>
Co-authored-by: csunny <cfqsunny@163.com>
Author: 明天
Date: 2024-08-21 17:37:45 +08:00
Committed by: GitHub
Parent: 3fc82693ba
Commit: b124ecc10b
824 changed files with 93371 additions and 2515 deletions


@@ -505,6 +505,8 @@ class Parameter(TypeMetadata, Serializable):
):
# Resource type can have multiple parameters.
resource_id = view_value
if resource_id not in resources:
return {self.name: None}
resource_metadata = resources[resource_id]
# Check the type.
resource_type = _get_type_cls(resource_metadata.type_cls)
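The added guard makes a dangling resource reference degrade to an empty binding instead of raising while the view value is resolved. A self-contained sketch of that lookup pattern (function and class names below are illustrative, not the real flow classes):

```python
from typing import Dict, Optional


class ResourceMetadata:
    """Hypothetical stand-in for the real resource metadata object."""

    def __init__(self, type_cls: str):
        self.type_cls = type_cls


def resolve_resource(
    param_name: str, resource_id: str, resources: Dict[str, ResourceMetadata]
) -> Dict[str, Optional[ResourceMetadata]]:
    # Mirror the guard above: an unknown resource id binds the parameter to None
    # instead of raising a KeyError while the view value is resolved.
    if resource_id not in resources:
        return {param_name: None}
    return {param_name: resources[resource_id]}


print(resolve_resource("datasource", "missing-id", {}))  # {'datasource': None}
```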


@@ -25,6 +25,8 @@ class _RegisterItem:
_COMPAT_FLOW_MAPPING: Dict[str, _RegisterItem] = {}
_OLD_AGENT_OPERATOR_MODULE = "dbgpt.serve.agent.team.layout.agent_operator"
_NEW_AGENT_OPERATOR_MODULE = "dbgpt.agent.core.plan.awel.agent_operator"
_OLD_AGENT_RESOURCE_MODULE_1 = "dbgpt.serve.agent.team.layout.agent_operator_resource"
_OLD_AGENT_RESOURCE_MODULE_2 = "dbgpt.agent.plan.awel.agent_operator_resource"
@@ -82,6 +84,50 @@ _register(
_register(
_OLD_AGENT_RESOURCE_MODULE_2, _NEW_AGENT_RESOURCE_MODULE, "AWELAgent", "AWELAgent"
)
_register(
_OLD_AGENT_RESOURCE_MODULE_1,
_NEW_AGENT_RESOURCE_MODULE,
"AwelAgentKnowledgeResource",
"AWELAgentKnowledgeResource",
)
_register(
_OLD_AGENT_RESOURCE_MODULE_1,
_NEW_AGENT_RESOURCE_MODULE,
"AgentPrompt",
)
# AGENT Operator
_register(
_OLD_AGENT_RESOURCE_MODULE_1,
_NEW_AGENT_RESOURCE_MODULE,
"AwelAgentOperator",
"AWELAgentOperator",
)
_register(
_OLD_AGENT_RESOURCE_MODULE_1,
_NEW_AGENT_RESOURCE_MODULE,
"AgentBranchOperator",
)
_register(
_OLD_AGENT_RESOURCE_MODULE_1,
_NEW_AGENT_RESOURCE_MODULE,
"AgentBranchJoinOperator",
)
_register(
_OLD_AGENT_OPERATOR_MODULE,
_NEW_AGENT_OPERATOR_MODULE,
"AgentDummyTrigger",
)
_register(
_OLD_AGENT_OPERATOR_MODULE,
_NEW_AGENT_OPERATOR_MODULE,
"AgentDummyTrigger",
)
_register(
"dbgpt.storage.vector_store.connector",
"dbgpt.serve.rag.connector",


@@ -536,6 +536,7 @@ class FlowFactory:
except FlowMetadataException as e:
raise e
except Exception as e:
logger.warning(str(e), exc_info=e)
raise FlowMetadataException(
f"Unable to build resource instance: {resource_key}, resource_cls: "
f"{resource_cls}, error: {e}"


@@ -280,7 +280,6 @@ class BaseOperator(DAGNode, ABC, Generic[OUT], metaclass=BaseOperatorMeta):
if call_data != EMPTY_DATA:
call_data = {"data": call_data}
with root_tracer.start_span("dbgpt.awel.operator.call_stream"):
out_ctx = await self._runner.execute_workflow(
self, call_data, streaming_call=True, exist_dag_ctx=dag_ctx
)
@@ -291,7 +290,6 @@ class BaseOperator(DAGNode, ABC, Generic[OUT], metaclass=BaseOperatorMeta):
out_ctx.current_task_context.task_output.output_stream
)
else:
# No stream output, wrap the output in a stream
async def _gen():
yield task_output.output
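When the task produced a single (non-streaming) value, the operator wraps it in a one-shot async generator so callers can always iterate the result. A self-contained sketch of that wrapping pattern (names are illustrative, not the operator's internals):

```python
import asyncio
from typing import Any, AsyncIterator


def as_stream(output: Any) -> AsyncIterator[Any]:
    # No stream output: wrap the single value in an async generator,
    # mirroring the _gen() helper in the hunk above.
    async def _gen():
        yield output

    return _gen()


async def main():
    async for item in as_stream("final answer"):
        print(item)


asyncio.run(main())
```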


@@ -19,6 +19,15 @@ def is_chat_flow_type(output_obj: Any, is_class: bool = False) -> bool:
return isinstance(output_obj, chat_types)
def is_agent_flow_type(output_obj: Any, is_class: bool = False) -> bool:
"""Check whether the output object is a agent flow type."""
if is_class:
return output_obj in (str, CommonLLMHttpResponseBody, ModelOutput)
else:
chat_types = (str, CommonLLMHttpResponseBody)
return isinstance(output_obj, chat_types)
async def safe_chat_with_dag_task(
task: BaseOperator, request: Any, covert_to_str: bool = False
) -> ModelOutput:
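A short usage sketch for the `is_agent_flow_type` helper added above; the import path is an assumption, and the expected results simply follow the two branches of the function:

```python
# Import path assumed for illustration; adjust to where the helper actually lives.
from dbgpt.core.awel.util.chat_util import is_agent_flow_type

# Class check: pass the type itself with is_class=True.
print(is_agent_flow_type(str, is_class=True))       # True
print(is_agent_flow_type(dict, is_class=True))      # False

# Instance check: plain strings (and CommonLLMHttpResponseBody) qualify.
print(is_agent_flow_type("agent reply"))            # True
print(is_agent_flow_type({"not": "a flow type"}))   # False
```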


@@ -2,16 +2,17 @@
import asyncio
import string
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Callable,
Generic,
Iterator,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
)
@@ -30,6 +31,11 @@ PredictionType = Union[str, Any]
ContextType = Union[str, Sequence[str], Any]
DatasetType = Union["InputSource", Iterator, AsyncIterator]
EVALUATE_FILE_COL_QUESTION = "query"
EVALUATE_FILE_COL_ANSWER = "factual"
EVALUATE_FILE_COL_PREDICTION = "prediction"
EVALUATE_FILE_COL_PREDICTION_COST = "prediction_cost"
class BaseEvaluationResult(BaseModel):
"""Base evaluation result."""
@@ -40,11 +46,14 @@ class BaseEvaluationResult(BaseModel):
"retrieval, etc.)",
)
contexts: Optional[ContextType] = Field(None, description="Context data")
score: Optional[float] = Field(None, description="Score for the prediction")
score: Optional[float] = Field(
None, description="Score for the prediction in now metric"
)
passing: Optional[bool] = Field(
None, description="Binary evaluation result (passing or not)"
True, description="Determine whether the current prediction result is valid"
)
metric_name: Optional[str] = Field(None, description="Name of the metric")
prediction_cost: int = 0
class EvaluationResult(BaseEvaluationResult):
@@ -55,6 +64,7 @@ class EvaluationResult(BaseEvaluationResult):
query: Optional[QueryType] = Field(None, description="Query data")
raw_dataset: Optional[Any] = Field(None, description="Raw dataset")
feedback: Optional[str] = Field(None, description="feedback")
Q = TypeVar("Q")
@@ -65,39 +75,51 @@ C = TypeVar("C")
class EvaluationMetric(ABC, Generic[P, C]):
"""Base class for evaluation metric."""
@property
def name(self) -> str:
def __init__(self, **kwargs): # noqa
pass
@classmethod
def name(cls) -> str:
"""Name of the metric."""
return self.__class__.__name__
return cls.__name__
@classmethod
def describe(cls) -> str:
"""Describe."""
return f"This is an evaluation result index calculation tool, named {cls.name} "
async def compute(
self,
prediction: P,
contexts: Optional[Sequence[C]] = None,
query: Optional[str] = None,
) -> BaseEvaluationResult:
"""Compute the evaluation metric.
Args:
prediction(P): The prediction data.
contexts(Optional[Sequence[C]]): The context data.
query:(Optional[str]) The query text.
Returns:
BaseEvaluationResult: The evaluation result.
"""
return await asyncio.get_running_loop().run_in_executor(
None, self.sync_compute, prediction, contexts
None, self.sync_compute, prediction, contexts, query
)
def sync_compute(
self,
prediction: P,
contexts: Optional[Sequence[C]] = None,
query: Optional[str] = None,
) -> BaseEvaluationResult:
"""Compute the evaluation metric.
Args:
prediction(P): The prediction data.
contexts(Optional[Sequence[C]]): The context data.
contexts(Optional[Sequence[C]]): The factual data.
query:(Optional[str]) The query text.
Returns:
BaseEvaluationResult: The evaluation result.
@@ -108,14 +130,7 @@ class EvaluationMetric(ABC, Generic[P, C]):
class FunctionMetric(EvaluationMetric[P, C], Generic[P, C]):
"""Evaluation metric based on a function."""
def __init__(
self,
name: str,
func: Callable[
[P, Optional[Sequence[C]]],
BaseEvaluationResult,
],
):
def __init__(self, **kwargs):
"""Create a FunctionMetric.
Args:
@@ -123,11 +138,16 @@ class FunctionMetric(EvaluationMetric[P, C], Generic[P, C]):
func(Callable[[P, Optional[Sequence[C]]], BaseEvaluationResult]):
The function to use for evaluation.
"""
self._name = name
self.func = func
if "name" not in kwargs:
raise ValueError("Must need param name")
if "func" not in kwargs:
raise ValueError("Must need param func")
self._name = kwargs.get("name", None)
self.func = kwargs.get("func", None)
@property
def name(self) -> str:
def name(self) -> str: # type: ignore # noqa
"""Name of the metric."""
return self._name
@@ -135,6 +155,7 @@ class FunctionMetric(EvaluationMetric[P, C], Generic[P, C]):
self,
prediction: P,
context: Optional[Sequence[C]] = None,
query: Optional[str] = None,
) -> BaseEvaluationResult:
"""Compute the evaluation metric."""
return self.func(prediction, context)
@@ -146,15 +167,16 @@ class ExactMatchMetric(EvaluationMetric[str, str]):
Only string prediction and context are supported.
"""
def __init__(self, ignore_case: bool = False, ignore_punctuation: bool = False):
def __init__(self, **kwargs):
"""Create an ExactMatchMetric."""
self._ignore_case = ignore_case
self._ignore_punctuation = ignore_punctuation
self._ignore_case = kwargs.get("ignore_case", False)
self._ignore_punctuation = kwargs.get("ignore_punctuation", False)
async def compute(
self,
prediction: str,
contexts: Optional[Sequence[str]] = None,
query: Optional[str] = None,
) -> BaseEvaluationResult:
"""Compute the evaluation metric."""
if self._ignore_case:
@@ -182,14 +204,17 @@ class SimilarityMetric(EvaluationMetric[str, str]):
Calculate the cosine similarity between a prediction and a list of contexts.
"""
def __init__(self, embeddings: Embeddings):
def __init__(self, **kwargs):
"""Create a SimilarityMetric with embeddings."""
self._embeddings = embeddings
self._embeddings = kwargs.get("embeddings", None)
if self._embeddings is None or not isinstance(self._embeddings, Embeddings):
raise ValueError("Need embedding service")
def sync_compute(
self,
prediction: str,
contexts: Optional[Sequence[str]] = None,
query: Optional[str] = None,
) -> BaseEvaluationResult:
"""Compute the evaluation metric."""
if not contexts:
@@ -232,7 +257,7 @@ class Evaluator(ABC):
contexts_key: str = "contexts",
prediction_key: str = "prediction",
parallel_num: int = 1,
**kwargs
**kwargs,
) -> List[List[EvaluationResult]]:
"""Run evaluation with a dataset and metrics.
@@ -251,3 +276,36 @@ class Evaluator(ABC):
result equals the length of the dataset. The first element in the
list is the list of evaluation results for metrics.
"""
class MetricManage:
"""MetricManage."""
def __init__(self):
"""Init metricManage."""
self.metrics = defaultdict()
def register_metric(self, cls: Type[EvaluationMetric]):
"""Register metric."""
self.metrics[cls.name()] = cls
def get_by_name(self, name: str) -> Type[EvaluationMetric]:
"""Get by name."""
if name not in self.metrics:
raise ValueError(f"Metric:{name} not register!")
return self.metrics[name]
def all_metric_infos(self):
"""Get all metric infos."""
result = []
for name, cls in self.metrics.items():
result.append(
{
"name": name,
"describe": cls.describe,
}
)
return result
metric_mange = MetricManage()
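A minimal end-to-end sketch of the reworked metric API: keyword-only constructors, the classmethod `name()`, and the new optional `query` argument. The import path is assumed, and `ContainsAnswerMetric` is a hypothetical metric written for illustration, not part of the commit:

```python
import asyncio
from typing import Optional, Sequence

# Import path assumed; these classes are the ones defined in the hunk above.
from dbgpt.core.interface.evaluation import BaseEvaluationResult, EvaluationMetric


class ContainsAnswerMetric(EvaluationMetric[str, str]):
    """Hypothetical metric: passes when the prediction contains any factual context."""

    def sync_compute(
        self,
        prediction: str,
        contexts: Optional[Sequence[str]] = None,
        query: Optional[str] = None,
    ) -> BaseEvaluationResult:
        hit = bool(contexts) and any(c in prediction for c in contexts)
        return BaseEvaluationResult(
            prediction=prediction,
            contexts=contexts,
            score=1.0 if hit else 0.0,
            passing=hit,
            metric_name=self.name(),
        )


result = asyncio.run(
    ContainsAnswerMetric().compute(
        "Paris is the capital of France.", ["Paris"], query="What is the capital of France?"
    )
)
print(result.score, result.passing)  # 1.0 True
```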


@@ -2,7 +2,7 @@
import json
import uuid
from typing import Any, Dict
from typing import Any, Dict, Optional
from dbgpt._private.pydantic import BaseModel, Field, model_to_dict
@@ -61,6 +61,7 @@ class Chunk(Document):
default="\n",
description="Separator between metadata fields when converting to string.",
)
retriever: Optional[str] = Field(default=None, description="retriever name")
def to_dict(self, **kwargs: Any) -> Dict[str, Any]:
"""Convert Chunk to dict."""


@@ -481,6 +481,7 @@ class OnceConversation:
user_name: Optional[str] = None,
sys_code: Optional[str] = None,
summary: Optional[str] = None,
app_code: Optional[str] = None,
**kwargs,
):
"""Create a new conversation."""
@@ -488,6 +489,7 @@ class OnceConversation:
self.user_name: Optional[str] = user_name
self.sys_code: Optional[str] = sys_code
self.summary: Optional[str] = summary
self.app_code: Optional[str] = app_code
self.messages: List[BaseMessage] = kwargs.get("messages", [])
self.start_date: str = kwargs.get("start_date", "")
@@ -640,6 +642,8 @@ class OnceConversation:
self.chat_order = conversation.chat_order
if not self.model_name and conversation.model_name:
self.model_name = conversation.model_name
if not self.app_code and conversation.app_code:
self.app_code = conversation.app_code
if not self.param_type and conversation.param_type:
self.param_type = conversation.param_type
if not self.param_value and conversation.param_value:
@@ -973,6 +977,7 @@ class StorageConversation(OnceConversation, StorageItem):
sys_code: Optional[str] = None,
message_ids: Optional[List[str]] = None,
summary: Optional[str] = None,
app_code: Optional[str] = None,
save_message_independent: bool = True,
conv_storage: Optional[StorageInterface] = None,
message_storage: Optional[StorageInterface] = None,
@@ -980,7 +985,7 @@ class StorageConversation(OnceConversation, StorageItem):
**kwargs,
):
"""Create a conversation."""
super().__init__(chat_mode, user_name, sys_code, summary, **kwargs)
super().__init__(chat_mode, user_name, sys_code, summary, app_code, **kwargs)
self.conv_uid = conv_uid
self._message_ids = message_ids
# Record the message index last time saved to the storage,
@@ -1036,6 +1041,8 @@ class StorageConversation(OnceConversation, StorageItem):
# Save messages independently
self.message_storage.save_list(messages_to_save)
# Save conversation
if self.summary is not None and len(self.summary) > 4000:
self.summary = self.summary[0:4000]
self.conv_storage.save_or_update(self)
def load_from_storage(
@@ -1122,6 +1129,24 @@ class StorageConversation(OnceConversation, StorageItem):
)
)
def clear(self) -> None:
"""Clear all the messages and conversation."""
# Clear messages first
message_list = self._get_message_items()
message_ids = [message.identifier for message in message_list]
self.message_storage.delete_list(message_ids)
# Clear conversation
self.conv_storage.delete(self.identifier)
# Overwrite the current conversation with empty conversation
self.from_conversation(
StorageConversation(
self.conv_uid,
save_message_independent=self.save_message_independent,
conv_storage=self.conv_storage,
message_storage=self.message_storage,
)
)
def _conversation_to_dict(once: OnceConversation) -> Dict:
start_str: str = ""
@@ -1219,11 +1244,9 @@ def _append_view_messages(messages: List[BaseMessage]) -> List[BaseMessage]:
content=ai_message.content,
index=ai_message.index,
round_index=ai_message.round_index,
additional_kwargs=(
ai_message.additional_kwargs.copy()
if ai_message.additional_kwargs
else {}
),
additional_kwargs=ai_message.additional_kwargs.copy()
if ai_message.additional_kwargs
else {},
)
current_round.append(view_message)
return sum(messages_by_round, [])
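Finally, a hedged sketch of how the new `app_code` parameter and the 4000-character summary cap fit together. The import paths, `InMemoryStorage`, and the `save_to_storage()` method name are assumptions inferred from the hunks above, not verified against the full tree:

```python
from dbgpt.core.interface.message import StorageConversation  # path assumed
from dbgpt.core.interface.storage import InMemoryStorage      # helper storage, assumed

conv = StorageConversation(
    "conv-123",
    chat_mode="chat_normal",
    summary="x" * 5000,              # longer than the 4000-character cap
    app_code="my_app",               # new field threaded through OnceConversation
    conv_storage=InMemoryStorage(),
    message_storage=InMemoryStorage(),
)

conv.save_to_storage()               # summary is truncated to 4000 chars before the upsert
assert conv.app_code == "my_app"
assert len(conv.summary) <= 4000
```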