feat(model): Add support for Azure openAI (#1137)

Co-authored-by: like <406979945@qq.com>
Co-authored-by: csunny <cfqsunny@163.com>
This commit is contained in:
Likename Haojie 2024-02-02 16:41:29 +08:00 committed by GitHub
parent 208d91dea0
commit 3f70da4e1f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 179 additions and 7 deletions

View File

@ -37,6 +37,7 @@ class OpenAIParameters:
api_base: Optional[str] = None
api_key: Optional[str] = None
api_version: Optional[str] = None
api_azure_deployment: Optional[str] = None
full_url: Optional[str] = None
proxies: Optional["ProxiesTypes"] = None
@ -71,6 +72,9 @@ def _initialize_openai_v1(init_params: OpenAIParameters):
)
api_version = api_version or os.getenv("OPENAI_API_VERSION")
api_azure_deployment = init_params.api_azure_deployment or os.getenv(
"API_AZURE_DEPLOYMENT"
)
if not base_url and full_url:
base_url = full_url.split("/chat/completions")[0]
@ -81,11 +85,8 @@ def _initialize_openai_v1(init_params: OpenAIParameters):
if base_url.endswith("/"):
base_url = base_url[:-1]
openai_params = {
"api_key": api_key,
"base_url": base_url,
}
return openai_params, api_type, api_version
openai_params = {"api_key": api_key, "base_url": base_url}
return openai_params, api_type, api_version, api_azure_deployment
def _initialize_openai(params: OpenAIParameters):
@ -127,13 +128,16 @@ def _initialize_openai(params: OpenAIParameters):
def _build_openai_client(init_params: OpenAIParameters) -> Tuple[str, ClientType]:
import httpx
openai_params, api_type, api_version = _initialize_openai_v1(init_params)
openai_params, api_type, api_version, api_azure_deployment = _initialize_openai_v1(
init_params
)
if api_type == "azure":
from openai import AsyncAzureOpenAI
return api_type, AsyncAzureOpenAI(
api_key=openai_params["api_key"],
api_version=api_version,
azure_deployment=api_azure_deployment,
azure_endpoint=openai_params["base_url"],
http_client=httpx.AsyncClient(proxies=init_params.proxies),
)

View File

@ -0,0 +1,164 @@
# ProxyLLMs
DB-GPT can be deployed on servers with lower hardware requirements through proxy LLMs. DB-GPT now supports many proxy LLMs, such as OpenAI, Azure, Wenxin, Tongyi, Zhipu, and so on.
### Proxy model
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
<Tabs
defaultValue="openai"
values={[
{label: 'Open AI', value: 'openai'},
{label: 'Azure', value: 'Azure'},
{label: 'Qwen', value: 'qwen'},
{label: 'ChatGLM', value: 'chatglm'},
{label: 'WenXin', value: 'erniebot'},
]}>
<TabItem value="openai" label="OpenAI">
Install dependencies
```bash
pip install -e ".[openai]"
```
Download embedding model
```bash
cd DB-GPT
mkdir models && cd models
git clone https://huggingface.co/GanymedeNil/text2vec-large-chinese
```
Configure the proxy and modify LLM_MODEL, PROXY_SERVER_URL and PROXY_API_KEY in the `.env` file
```bash
# .env
LLM_MODEL=chatgpt_proxyllm
PROXY_API_KEY={your-openai-sk}
PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions
# If you use gpt-4
# PROXYLLM_BACKEND=gpt-4
```
</TabItem>
<TabItem value="Azure" label="Azure">
Install dependencies
```bash
pip install -e ".[openai]"
```
Download embedding model
```bash
cd DB-GPT
mkdir models && cd models
git clone https://huggingface.co/GanymedeNil/text2vec-large-chinese # change this to other embedding model if needed.
```
Configure the proxy and modify LLM_MODEL, PROXY_API_BASE, PROXY_API_TYPE and PROXY_API_KEY in the `.env` file
```bash
# .env
LLM_MODEL=proxyllm
PROXY_API_KEY=xxxx
PROXY_API_BASE=https://xxxxxx.openai.azure.com/
PROXY_API_TYPE=azure
PROXY_SERVER_URL=xxxx
PROXY_API_VERSION=2023-05-15
PROXYLLM_BACKEND=gpt-35-turbo
API_AZURE_DEPLOYMENT={your-azure-deployment-name}
```
</TabItem>
<TabItem value="qwen" label="通义千问">
Install dependencies
```bash
pip install dashscope
```
Download embedding model
```bash
cd DB-GPT
mkdir models && cd models
# embedding model
git clone https://huggingface.co/GanymedeNil/text2vec-large-chinese
# or
git clone https://huggingface.co/moka-ai/m3e-large
```
Configure the proxy and modify LLM_MODEL, PROXY_SERVER_URL and TONGYI_PROXY_API_KEY in the `.env` file
```bash
# .env
# Aliyun tongyiqianwen
LLM_MODEL=tongyi_proxyllm
TONGYI_PROXY_API_KEY={your-tongyi-sk}
PROXY_SERVER_URL={your_service_url}
```
</TabItem>
<TabItem value="chatglm" label="chatglm" >
Install dependencies
```bash
pip install zhipuai
```
Download embedding model
```bash
cd DB-GPT
mkdir models && cd models
# embedding model
git clone https://huggingface.co/GanymedeNil/text2vec-large-chinese
# or
git clone https://huggingface.co/moka-ai/m3e-large
```
Configure the proxy and modify LLM_MODEL, PROXY_SERVER_URL, ZHIPU_MODEL_VERSION and ZHIPU_PROXY_API_KEY in the `.env` file
```bash
# .env
LLM_MODEL=zhipu_proxyllm
PROXY_SERVER_URL={your_service_url}
ZHIPU_MODEL_VERSION={version}
ZHIPU_PROXY_API_KEY={your-zhipu-sk}
```
</TabItem>
<TabItem value="erniebot" label="文心一言" default>
Download embedding model
```bash
cd DB-GPT
mkdir models && cd models
# embedding model
git clone https://huggingface.co/GanymedeNil/text2vec-large-chinese
# or
git clone https://huggingface.co/moka-ai/m3e-large
```
Configure the proxy and modify LLM_MODEL, WEN_XIN_MODEL_VERSION, WEN_XIN_API_KEY and WEN_XIN_API_SECRET in the `.env` file
```bash
# .env
LLM_MODEL=wenxin_proxyllm
WEN_XIN_MODEL_VERSION={version} # ERNIE-Bot or ERNIE-Bot-turbo
WEN_XIN_API_KEY={your-wenxin-sk}
WEN_XIN_API_SECRET={your-wenxin-sct}
```
</TabItem>
</Tabs>
:::info note
⚠️ Be careful not to overwrite the contents of the `.env` configuration file
:::

View File

@ -162,8 +162,12 @@ const sidebars = {
},
{
type: 'category',
label: 'Adanced Usage',
label: 'Advanced Usage',
items: [
{
type: 'doc',
id: 'installation/advanced_usage/More_proxyllms',
},
{
type: 'doc',
id: 'installation/advanced_usage/vLLM_inference',