Mirror of https://github.com/csunny/DB-GPT.git, synced 2025-09-06 19:40:13 +00:00.
feat(model): Support claude proxy models (#2155)
This commit is contained in:
@@ -1,8 +1,9 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, List, Optional, Union
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
|
||||
|
||||
from dbgpt.core import ModelRequest, ModelRequestContext
|
||||
from dbgpt.model.parameter import ProxyModelParameters
|
||||
from dbgpt.model.proxy.base import ProxyLLMClient
|
||||
from dbgpt.model.utils.token_utils import ProxyTokenizerWrapper
|
||||
@@ -41,3 +42,30 @@ class ProxyModel:
|
||||
int: token count, -1 if failed
|
||||
"""
|
||||
return self._tokenizer.count_token(messages, model_name)
|
||||
|
||||
|
||||
def parse_model_request(
    params: Dict[str, Any], default_model: str, stream: bool = True
) -> ModelRequest:
    """Build a ``ModelRequest`` from a raw request-parameter mapping.

    Args:
        params (Dict[str, Any]): request params; must contain "messages",
            all other recognized keys are optional
        default_model (str): default model name
        stream (bool, optional): whether stream. Defaults to True.
    """
    # Optional generation settings: absent keys simply become None,
    # matching dict.get's default.
    optional_keys = ("temperature", "max_new_tokens", "stop", "top_p")
    generation_kwargs = {key: params.get(key) for key in optional_keys}
    request_context = ModelRequestContext(
        stream=stream,
        user_name=params.get("user_name"),
        request_id=params.get("request_id"),
    )
    # "messages" is mandatory — a missing key raises KeyError by design.
    return ModelRequest.build_request(
        default_model,
        messages=params["messages"],
        context=request_context,
        **generation_kwargs,
    )
|
||||
|
Reference in New Issue
Block a user