chore: Add pylint for DB-GPT core lib (#1076)

Author: Fangyin Cheng
Date: 2024-01-16 17:36:26 +08:00
Committed by: GitHub
Parent: 3a54d1ef9a
Commit: 40c853575a
79 changed files with 2213 additions and 839 deletions


@@ -86,6 +86,11 @@ class OpenAILLMClient(ProxyLLMClient):
         self._openai_kwargs = openai_kwargs or {}
         super().__init__(model_names=[model_alias], context_length=context_length)
 
+        if self._openai_less_then_v1:
+            from dbgpt.model.utils.chatgpt_utils import _initialize_openai
+
+            _initialize_openai(self._init_params)
+
     @classmethod
     def new_client(
         cls,
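
The gate above depends on whether the installed openai SDK predates its 1.0 rewrite. A minimal, hedged sketch of one way such a flag can be computed (illustrative only; `openai_less_than_v1` is not the DB-GPT helper):

    from importlib.metadata import PackageNotFoundError, version


    def openai_less_than_v1() -> bool:
        # True when the installed openai SDK predates the 1.0 API rewrite.
        try:
            major = int(version("openai").split(".")[0])
        except PackageNotFoundError:
            return False  # no openai installed; assume the modern client
        return major < 1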


@@ -114,7 +114,6 @@ class GeminiLLMClient(ProxyLLMClient):
         self._api_key = api_key if api_key else os.getenv("GEMINI_PROXY_API_KEY")
         self._api_base = api_base if api_base else os.getenv("GEMINI_PROXY_API_BASE")
         self._model = model
-        self.default_model = self._model
 
         if not self._api_key:
             raise RuntimeError("api_key can't be empty")
@@ -148,6 +147,10 @@ class GeminiLLMClient(ProxyLLMClient):
             executor=default_executor,
         )
 
+    @property
+    def default_model(self) -> str:
+        return self._model
+
     def sync_generate_stream(
         self,
         request: ModelRequest,
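
The same refactor recurs in the files that follow: a writable `default_model` attribute assigned in `__init__` is replaced with a read-only property derived from `_model`. A standalone before/after sketch (class name is illustrative):

    class ExampleClient:
        def __init__(self, model: str) -> None:
            self._model = model
            # Before: self.default_model = self._model  (a second, writable copy)

        @property
        def default_model(self) -> str:
            # After: a read-only view that cannot drift from _model
            return self._model


    client = ExampleClient("gemini-pro")
    assert client.default_model == "gemini-pro"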


@@ -8,9 +8,6 @@ from datetime import datetime
 from time import mktime
 from typing import Iterator, Optional
 from urllib.parse import urlencode, urlparse
-from wsgiref.handlers import format_date_time
-
-from websockets.sync.client import connect
 
 from dbgpt.core import MessageConverter, ModelOutput, ModelRequest, ModelRequestContext
 from dbgpt.model.parameter import ProxyModelParameters
@@ -56,6 +53,8 @@ def spark_generate_stream(
 def get_response(request_url, data):
+    from websockets.sync.client import connect
+
     with connect(request_url) as ws:
         ws.send(json.dumps(data, ensure_ascii=False))
         result = ""
@@ -87,6 +86,8 @@ class SparkAPI:
         self.spark_url = spark_url
 
     def gen_url(self):
+        from wsgiref.handlers import format_date_time
+
         # Generate an RFC 1123 formatted timestamp
         now = datetime.now()
         date = format_date_time(mktime(now.timetuple()))
@@ -145,7 +146,6 @@ class SparkLLMClient(ProxyLLMClient):
         if not api_domain:
             api_domain = domain
         self._model = model
-        self.default_model = self._model
         self._model_version = model_version
         self._api_base = api_base
         self._domain = api_domain
@@ -183,6 +183,10 @@ class SparkLLMClient(ProxyLLMClient):
             executor=default_executor,
         )
 
+    @property
+    def default_model(self) -> str:
+        return self._model
+
     def sync_generate_stream(
         self,
         request: ModelRequest,
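
As a note on the import `gen_url` now defers: `format_date_time` produces the RFC 1123 timestamp that Spark's signed request URL requires. A runnable stdlib-only snippet of the same call chain:

    from datetime import datetime
    from time import mktime
    from wsgiref.handlers import format_date_time

    now = datetime.now()
    date = format_date_time(mktime(now.timetuple()))
    print(date)  # e.g. "Tue, 16 Jan 2024 09:36:26 GMT"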


@@ -51,7 +51,6 @@ class TongyiLLMClient(ProxyLLMClient):
         if api_region:
             dashscope.api_region = api_region
         self._model = model
-        self.default_model = self._model
 
         super().__init__(
             model_names=[model, model_alias],
@@ -73,6 +72,10 @@ class TongyiLLMClient(ProxyLLMClient):
             executor=default_executor,
         )
 
+    @property
+    def default_model(self) -> str:
+        return self._model
+
     def sync_generate_stream(
         self,
         request: ModelRequest,


@@ -121,7 +121,6 @@ class WenxinLLMClient(ProxyLLMClient):
         self._api_key = api_key
         self._api_secret = api_secret
         self._model_version = model_version
-        self.default_model = self._model
 
         super().__init__(
             model_names=[model, model_alias],
@@ -145,6 +144,10 @@ class WenxinLLMClient(ProxyLLMClient):
             executor=default_executor,
         )
 
+    @property
+    def default_model(self) -> str:
+        return self._model
+
     def sync_generate_stream(
         self,
         request: ModelRequest,


@@ -54,7 +54,6 @@ class ZhipuLLMClient(ProxyLLMClient):
         if api_key:
             zhipuai.api_key = api_key
         self._model = model
-        self.default_model = self._model
 
         super().__init__(
             model_names=[model, model_alias],
@@ -76,6 +75,10 @@ class ZhipuLLMClient(ProxyLLMClient):
             executor=default_executor,
         )
 
+    @property
+    def default_model(self) -> str:
+        return self._model
+
     def sync_generate_stream(
         self,
         request: ModelRequest,