mirror of https://github.com/csunny/DB-GPT.git

feat(core): APP use new SDK component (#1050)
@@ -1,8 +1,8 @@
 #!/usr/bin/env python3
 # -*- coding:utf-8 -*-
 
-from typing import List
 import re
+from typing import List
 
 import torch
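Every hunk in this commit applies the same import convention (the isort/black profile): standard-library imports first, then third-party packages, then first-party dbgpt modules, with the groups separated by blank lines and each group sorted alphabetically. A small illustration of the target layout, composed from imports that appear elsewhere in this diff rather than copied from any one file:

# Group 1: standard library
import os
from typing import Dict, List

# Group 2: third-party packages
import torch
from transformers import AutoTokenizer

# Group 3: first-party modules
from dbgpt._private.config import Config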
@@ -1,6 +1,7 @@
-import torch
 from threading import Thread
-from transformers import TextIteratorStreamer, StoppingCriteriaList, StoppingCriteria
+
+import torch
+from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
 
 
 def falcon_generate_output(model, tokenizer, params, device, context_len=2048):
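The Thread/TextIteratorStreamer pair imported here is the usual Hugging Face pattern for token streaming, which falcon_generate_output presumably relies on: generation runs in a background thread while the caller iterates over decoded chunks as they arrive. A minimal sketch of that pattern, assuming a standard transformers model and tokenizer (the function name and parameters are illustrative, not the DB-GPT implementation):

from threading import Thread

from transformers import TextIteratorStreamer


def stream_generate(model, tokenizer, prompt, max_new_tokens=256):
    """Yield decoded text chunks as the model produces them (illustrative sketch)."""
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # skip_prompt=True: emit only newly generated tokens, not the prompt echo.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    kwargs = dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens)
    # model.generate blocks until generation finishes, so run it in a worker
    # thread and consume the streamer from the calling thread.
    Thread(target=model.generate, kwargs=kwargs).start()
    for new_text in streamer:
        yield new_text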
@@ -1,6 +1,7 @@
-import torch
 from threading import Thread
-from transformers import TextIteratorStreamer, StoppingCriteriaList, StoppingCriteria
+
+import torch
+from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
 
 
 def guanaco_generate_output(model, tokenizer, params, device, context_len=2048):
@@ -1,6 +1,7 @@
 import logging
-import torch
 from threading import Thread
+
+import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
 logger = logging.getLogger(__name__)
@@ -1,4 +1,5 @@
 from typing import Dict
+
 import torch
 
 
@@ -2,16 +2,16 @@
 # -*- coding: utf-8 -*-
 import time
 
-from dbgpt.model.proxy.llms.chatgpt import chatgpt_generate_stream
-from dbgpt.model.proxy.llms.bard import bard_generate_stream
-from dbgpt.model.proxy.llms.claude import claude_generate_stream
-from dbgpt.model.proxy.llms.wenxin import wenxin_generate_stream
-from dbgpt.model.proxy.llms.tongyi import tongyi_generate_stream
-from dbgpt.model.proxy.llms.zhipu import zhipu_generate_stream
-from dbgpt.model.proxy.llms.gemini import gemini_generate_stream
-from dbgpt.model.proxy.llms.baichuan import baichuan_generate_stream
-from dbgpt.model.proxy.llms.spark import spark_generate_stream
+from dbgpt.model.proxy.llms.bard import bard_generate_stream
+from dbgpt.model.proxy.llms.chatgpt import chatgpt_generate_stream
+from dbgpt.model.proxy.llms.claude import claude_generate_stream
+from dbgpt.model.proxy.llms.gemini import gemini_generate_stream
+from dbgpt.model.proxy.llms.proxy_model import ProxyModel
+from dbgpt.model.proxy.llms.spark import spark_generate_stream
+from dbgpt.model.proxy.llms.tongyi import tongyi_generate_stream
+from dbgpt.model.proxy.llms.wenxin import wenxin_generate_stream
+from dbgpt.model.proxy.llms.zhipu import zhipu_generate_stream
 
 
 def proxyllm_generate_stream(
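proxyllm_generate_stream fronts all of the proxy providers imported above, and a natural shape for such a facade is a name-to-function dispatch table. The sketch below shows that shape only; the function name, the table keys, and the way the model name is read from params are assumptions for illustration, not the committed DB-GPT code:

from dbgpt.model.proxy.llms.bard import bard_generate_stream
from dbgpt.model.proxy.llms.chatgpt import chatgpt_generate_stream
from dbgpt.model.proxy.llms.claude import claude_generate_stream
from dbgpt.model.proxy.llms.gemini import gemini_generate_stream
from dbgpt.model.proxy.llms.proxy_model import ProxyModel

# Hypothetical name-to-function table; the real proxyllm_generate_stream
# may select its backend differently.
_STREAM_FUNCS = {
    "chatgpt_proxyllm": chatgpt_generate_stream,
    "bard_proxyllm": bard_generate_stream,
    "claude_proxyllm": claude_generate_stream,
    "gemini_proxyllm": gemini_generate_stream,
}


def dispatch_proxy_stream(model: ProxyModel, tokenizer, params, device, context_len=4096):
    # Reading the target name out of params is an assumption for this sketch.
    name = params.get("model")
    func = _STREAM_FUNCS.get(name)
    if func is None:
        raise ValueError(f"Unsupported proxy model: {name}")
    # Each backend function yields streamed chunks; forward them unchanged.
    yield from func(model, tokenizer, params, device, context_len)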
@@ -8,9 +8,9 @@ from urllib.parse import urljoin
 import requests
 from langchain.embeddings.base import Embeddings
 from langchain.llms.base import LLM
-from dbgpt._private.pydantic import BaseModel
 
 from dbgpt._private.config import Config
+from dbgpt._private.pydantic import BaseModel
 
 CFG = Config()
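This file wraps a remote model server behind LangChain's base LLM interface (note the langchain.llms.base.LLM import alongside requests and urljoin). A minimal sketch of such a wrapper under the pre-0.1 LangChain API, with the class name, endpoint path, and JSON payload shape all assumed for illustration:

from typing import List, Optional
from urllib.parse import urljoin

import requests
from langchain.llms.base import LLM


class RemoteLLM(LLM):  # hypothetical name, not the class in this file
    """Calls a model server over HTTP (illustrative sketch)."""

    base_url: str = "http://localhost:5000"  # assumed default endpoint

    @property
    def _llm_type(self) -> str:
        return "remote_llm"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # The /generate path and payload keys are assumptions for the sketch.
        resp = requests.post(
            urljoin(self.base_url, "/generate"),
            json={"prompt": prompt, "stop": stop},
            timeout=60,
        )
        resp.raise_for_status()
        return resp.json()["text"]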
@@ -1,9 +1,9 @@
-from typing import Dict
 import os
-from vllm import AsyncLLMEngine
-from vllm.utils import random_uuid
-from vllm.sampling_params import SamplingParams
+from typing import Dict
+
+from vllm import AsyncLLMEngine
+from vllm.sampling_params import SamplingParams
+from vllm.utils import random_uuid
 
 _IS_BENCHMARK = os.getenv("DB_GPT_MODEL_BENCHMARK", "False").lower() == "true"
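The AsyncLLMEngine/SamplingParams/random_uuid trio is vLLM's async generation API: each request carries a unique id, and results stream back as incremental RequestOutput updates (the _IS_BENCHMARK flag merely toggles a benchmark code path via an environment variable). A minimal sketch of that flow against the vLLM API of this era, with the sampling values chosen arbitrarily:

from vllm import AsyncLLMEngine
from vllm.sampling_params import SamplingParams
from vllm.utils import random_uuid

# Engine construction (model path is a placeholder):
# from vllm.engine.arg_utils import AsyncEngineArgs
# engine = AsyncLLMEngine.from_engine_args(AsyncEngineArgs(model="facebook/opt-125m"))


async def generate(engine: AsyncLLMEngine, prompt: str) -> str:
    params = SamplingParams(temperature=0.7, max_tokens=256)
    request_id = random_uuid()  # ties streamed outputs back to this request
    text = ""
    # engine.generate is an async generator of incremental RequestOutput updates;
    # each update carries the cumulative text generated so far.
    async for output in engine.generate(prompt, params, request_id):
        text = output.outputs[0].text
    return text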