Mirror of https://github.com/csunny/DB-GPT.git
feat(model): Support DeepSeek proxy LLM (#1491)
@@ -294,6 +294,31 @@ class MoonshotProxyLLMModelAdapter(ProxyLLMModelAdapter):
         return moonshot_generate_stream
 
 
+class DeepseekProxyLLMModelAdapter(ProxyLLMModelAdapter):
+    """Deepseek proxy LLM model adapter.
+
+    See Also: `Deepseek Documentation <https://platform.deepseek.com/api-docs/>`_
+    """
+
+    def support_async(self) -> bool:
+        return True
+
+    def do_match(self, lower_model_name_or_path: Optional[str] = None):
+        return lower_model_name_or_path == "deepseek_proxyllm"
+
+    def get_llm_client_class(
+        self, params: ProxyModelParameters
+    ) -> Type[ProxyLLMClient]:
+        from dbgpt.model.proxy.llms.deepseek import DeepseekLLMClient
+
+        return DeepseekLLMClient
+
+    def get_async_generate_stream_function(self, model, model_path: str):
+        from dbgpt.model.proxy.llms.deepseek import deepseek_generate_stream
+
+        return deepseek_generate_stream
+
+
 register_model_adapter(OpenAIProxyLLMModelAdapter)
 register_model_adapter(TongyiProxyLLMModelAdapter)
 register_model_adapter(OllamaLLMModelAdapter)

@@ -305,3 +330,4 @@ register_model_adapter(BardProxyLLMModelAdapter)
 register_model_adapter(BaichuanProxyLLMModelAdapter)
 register_model_adapter(YiProxyLLMModelAdapter)
 register_model_adapter(MoonshotProxyLLMModelAdapter)
+register_model_adapter(DeepseekProxyLLMModelAdapter)
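With the adapter registered, DB-GPT can resolve the model name "deepseek_proxyllm" to this class by calling do_match on each registered adapter. A minimal sketch of that resolution, assuming the adapter lives in dbgpt.model.adapter.proxy_adapter alongside the other proxy adapters (the module path is inferred; this diff does not show the file name):

    # Illustrative only: the model worker performs this lookup internally
    # when it is started with a model named "deepseek_proxyllm".
    from dbgpt.model.adapter.proxy_adapter import DeepseekProxyLLMModelAdapter

    adapter = DeepseekProxyLLMModelAdapter()
    assert adapter.do_match("deepseek_proxyllm")  # matched purely by name
    assert adapter.support_async()  # streaming goes through the async path
    client_cls = adapter.get_llm_client_class(None)  # params is unused here
    print(client_cls.__name__)  # "DeepseekLLMClient"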
@@ -12,6 +12,7 @@ def __lazy_import(name):
         "YiLLMClient": "dbgpt.model.proxy.llms.yi",
         "MoonshotLLMClient": "dbgpt.model.proxy.llms.moonshot",
         "OllamaLLMClient": "dbgpt.model.proxy.llms.ollama",
+        "DeepseekLLMClient": "dbgpt.model.proxy.llms.deepseek",
     }
 
     if name in module_path:

@@ -35,4 +36,5 @@ __all__ = [
     "YiLLMClient",
     "MoonshotLLMClient",
     "OllamaLLMClient",
+    "DeepseekLLMClient",
 ]
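These two hunks extend the proxy package's export surface (judging by the table contents, the file is the __init__ of dbgpt.model.proxy). The payoff is that `from dbgpt.model.proxy import DeepseekLLMClient` resolves lazily: the openai/httpx stack behind deepseek.py is imported on first access, not when the package loads. The body of __lazy_import is not part of this diff; the sketch below shows the usual PEP 562 pattern such tables are paired with, as an assumption rather than the project's exact code:

    import importlib

    _MODULE_PATH = {
        "DeepseekLLMClient": "dbgpt.model.proxy.llms.deepseek",
    }


    def __getattr__(name: str):
        # PEP 562 module __getattr__: runs only when `name` is not already
        # defined in the package, so the submodule is imported on first use.
        if name in _MODULE_PATH:
            module = importlib.import_module(_MODULE_PATH[name])
            return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")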
dbgpt/model/proxy/llms/deepseek.py (new file, 104 lines)
@@ -0,0 +1,104 @@
import os
from typing import TYPE_CHECKING, Any, Dict, Optional, Union, cast

from dbgpt.core import ModelRequest, ModelRequestContext
from dbgpt.model.proxy.llms.proxy_model import ProxyModel

from .chatgpt import OpenAILLMClient

if TYPE_CHECKING:
    from httpx._types import ProxiesTypes
    from openai import AsyncAzureOpenAI, AsyncOpenAI

    ClientType = Union[AsyncAzureOpenAI, AsyncOpenAI]

# 32K model
_DEFAULT_MODEL = "deepseek-chat"


async def deepseek_generate_stream(
    model: ProxyModel, tokenizer, params, device, context_len=2048
):
    client: DeepseekLLMClient = cast(DeepseekLLMClient, model.proxy_llm_client)
    context = ModelRequestContext(stream=True, user_name=params.get("user_name"))
    request = ModelRequest.build_request(
        client.default_model,
        messages=params["messages"],
        temperature=params.get("temperature"),
        context=context,
        max_new_tokens=params.get("max_new_tokens"),
    )
    async for r in client.generate_stream(request):
        yield r


class DeepseekLLMClient(OpenAILLMClient):
    """Deepseek LLM Client.

    Deepseek's API is compatible with OpenAI's API, so we inherit from OpenAILLMClient.

    API Reference: https://platform.deepseek.com/api-docs/
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        api_type: Optional[str] = None,
        api_version: Optional[str] = None,
        model: Optional[str] = _DEFAULT_MODEL,
        proxies: Optional["ProxiesTypes"] = None,
        timeout: Optional[int] = 240,
        model_alias: Optional[str] = "deepseek_proxyllm",
        context_length: Optional[int] = None,
        openai_client: Optional["ClientType"] = None,
        openai_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        api_base = (
            api_base or os.getenv("DEEPSEEK_API_BASE") or "https://api.deepseek.com/v1"
        )
        api_key = api_key or os.getenv("DEEPSEEK_API_KEY")
        model = model or _DEFAULT_MODEL
        if not context_length:
            if "deepseek-chat" in model:
                context_length = 1024 * 32
            elif "deepseek-coder" in model:
                context_length = 1024 * 16
            else:
                # 8k
                context_length = 1024 * 8

        if not api_key:
            raise ValueError(
                "Deepseek API key is required, please set the 'DEEPSEEK_API_KEY' "
                "environment variable or pass it to the client."
            )
        super().__init__(
            api_key=api_key,
            api_base=api_base,
            api_type=api_type,
            api_version=api_version,
            model=model,
            proxies=proxies,
            timeout=timeout,
            model_alias=model_alias,
            context_length=context_length,
            openai_client=openai_client,
            openai_kwargs=openai_kwargs,
            **kwargs,
        )

    def check_sdk_version(self, version: str) -> None:
        if not version >= "1.0":
            raise ValueError(
                "Deepseek API requires openai>=1.0, please upgrade it by "
                "`pip install --upgrade 'openai>=1.0'`"
            )

    @property
    def default_model(self) -> str:
        model = self._model
        if not model:
            model = _DEFAULT_MODEL
        return model