feat(model): Support moonshot proxy LLM (#1404)

Fangyin Cheng
2024-04-10 23:41:50 +08:00
committed by GitHub
parent 1c6a897137
commit 7d6dfd9ea8
9 changed files with 186 additions and 0 deletions


@@ -857,3 +857,31 @@ class LLMClient(ABC):
        if not model_metadata:
            raise ValueError(f"Model {model} not found")
        return model_metadata

    def __call__(self, *args, **kwargs) -> ModelOutput:
        """Return the model output.

        Call the LLM client to generate the response for the given message.

        Please do not use this method in a production environment; it is
        intended only for debugging.
        """
        from dbgpt.util import get_or_create_event_loop

        messages = kwargs.get("messages")
        model = kwargs.get("model")
        if messages:
            # An OpenAI-style message list was passed via the "messages" keyword.
            del kwargs["messages"]
            model_messages = ModelMessage.from_openai_messages(messages)
        else:
            # Otherwise treat the first positional argument as a plain prompt string.
            model_messages = [ModelMessage.build_human_message(args[0])]
        if not model:
            if hasattr(self, "default_model"):
                model = getattr(self, "default_model")
            else:
                raise ValueError("The default model is not set")
        if "model" in kwargs:
            del kwargs["model"]
        req = ModelRequest.build_request(model, model_messages, **kwargs)
        # Run the async generate() call to completion on a (possibly new) event loop.
        loop = get_or_create_event_loop()
        return loop.run_until_complete(self.generate(req))
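
The new __call__ entry point makes any LLMClient directly invocable for quick debugging, either with a raw prompt string or an OpenAI-style message list. Below is a minimal usage sketch; the MoonshotLLMClient import path, constructor arguments, and "moonshot-v1-8k" model name are assumptions for illustration and are not confirmed by this diff.

    # Debugging sketch only. The import path, constructor arguments, and
    # model name below are hypothetical, not taken from this commit.
    from dbgpt.model.proxy import MoonshotLLMClient  # hypothetical import path

    client = MoonshotLLMClient(api_key="sk-...")  # hypothetical constructor args

    # Path 1: a raw prompt string becomes a single human ModelMessage.
    output = client("Hello, who are you?", model="moonshot-v1-8k")
    print(output)

    # Path 2: an OpenAI-style message list is converted via
    # ModelMessage.from_openai_messages().
    output = client(
        messages=[{"role": "user", "content": "Summarize DB-GPT in one line."}],
        model="moonshot-v1-8k",
    )
    print(output)

If the model keyword is omitted, the call falls back to the client's default_model attribute when one is set, and raises a ValueError otherwise.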