mirror of https://github.com/csunny/DB-GPT.git
synced 2025-07-30 23:28:35 +00:00
rely file local
This commit is contained in:
parent 4ce257e84f
commit 239895f7bc
@@ -1,56 +1,3 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import requests
import json
import time
from urllib.parse import urljoin

import gradio as gr
from pilot.configs.model_config import *  # provides LLM_MODEL

vicuna_base_uri = "http://192.168.31.114:21002/"
vicuna_stream_path = "worker_generate_stream"
vicuna_status_path = "worker_get_status"


def generate(prompt):
    params = {
        "model": "vicuna-13b",
        "prompt": prompt,
        "temperature": 0.7,
        "max_new_tokens": 512,
        "stop": "###",
    }

    # Ping the worker's status endpoint before generating.
    sts_response = requests.post(
        url=urljoin(vicuna_base_uri, vicuna_status_path)
    )
    print(sts_response.text)

    # NOTE: posted without stream=True, so requests buffers the full body.
    response = requests.post(
        url=urljoin(vicuna_base_uri, vicuna_stream_path), data=json.dumps(params)
    )

    skip_echo_len = len(params["prompt"]) + 1 - params["prompt"].count("</s>") * 3
    for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode())
            if data["error_code"] == 0:
                output = data["text"]
                yield output

            time.sleep(0.02)


if __name__ == "__main__":
    print(LLM_MODEL)
    with gr.Blocks() as demo:
        gr.Markdown("数据库SQL生成助手")  # "Database SQL generation assistant"
        with gr.Tab("SQL生成"):  # "SQL generation"
            text_input = gr.TextArea()
            text_output = gr.TextArea()
            text_button = gr.Button("提交")  # "Submit"

        text_button.click(generate, inputs=text_input, outputs=text_output)

    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0")
@@ -1,3 +1,56 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import requests
import json
import time
from urllib.parse import urljoin

import gradio as gr
from pilot.configs.model_config import *  # provides LLM_MODEL

vicuna_base_uri = "http://192.168.31.114:21002/"
vicuna_stream_path = "worker_generate_stream"
vicuna_status_path = "worker_get_status"


def generate(prompt):
    params = {
        "model": "vicuna-13b",
        "prompt": prompt,
        "temperature": 0.7,
        "max_new_tokens": 512,
        "stop": "###",
    }

    # Ping the worker's status endpoint before generating.
    sts_response = requests.post(
        url=urljoin(vicuna_base_uri, vicuna_status_path)
    )
    print(sts_response.text)

    # NOTE: posted without stream=True, so requests buffers the full body.
    response = requests.post(
        url=urljoin(vicuna_base_uri, vicuna_stream_path), data=json.dumps(params)
    )

    skip_echo_len = len(params["prompt"]) + 1 - params["prompt"].count("</s>") * 3
    for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode())
            if data["error_code"] == 0:
                output = data["text"]
                yield output

            time.sleep(0.02)


if __name__ == "__main__":
    print(LLM_MODEL)
    with gr.Blocks() as demo:
        gr.Markdown("数据库SQL生成助手")  # "Database SQL generation assistant"
        with gr.Tab("SQL生成"):  # "SQL generation"
            text_input = gr.TextArea()
            text_output = gr.TextArea()
            text_button = gr.Button("提交")  # "Submit"

        text_button.click(generate, inputs=text_input, outputs=text_output)

    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0")
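The script above computes skip_echo_len but never applies it, and it posts without stream=True, so requests buffers the whole worker response before iter_lines() yields anything. Below is a minimal sketch, not part of this commit, of a genuinely streaming client for the same worker endpoint. It assumes, as the skip_echo_len arithmetic implies, that the worker returns NUL-delimited JSON chunks whose "text" field echoes the prompt before the generation; the trimming is an illustrative assumption.

import json
from urllib.parse import urljoin

import requests


def stream_completion(prompt, base_uri="http://192.168.31.114:21002/"):
    params = {
        "model": "vicuna-13b",
        "prompt": prompt,
        "temperature": 0.7,
        "max_new_tokens": 512,
        "stop": "###",
    }
    # stream=True lets iter_lines() consume chunks as the worker emits them
    # instead of waiting for generation to finish.
    response = requests.post(
        url=urljoin(base_uri, "worker_generate_stream"),
        json=params,
        stream=True,
    )
    skip_echo_len = len(prompt) + 1  # assumed: "text" echoes the prompt
    for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
        if not chunk:
            continue
        data = json.loads(chunk.decode())
        if data["error_code"] == 0:
            # Each chunk carries the cumulative generation so far.
            yield data["text"][skip_echo_len:].strip()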
76 pilot/server/gradio_css.py Normal file
@@ -0,0 +1,76 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

code_highlight_css = (
    """
#chatbot .hll { background-color: #ffffcc }
#chatbot .c { color: #408080; font-style: italic }
#chatbot .err { border: 1px solid #FF0000 }
#chatbot .k { color: #008000; font-weight: bold }
#chatbot .o { color: #666666 }
#chatbot .ch { color: #408080; font-style: italic }
#chatbot .cm { color: #408080; font-style: italic }
#chatbot .cp { color: #BC7A00 }
#chatbot .cpf { color: #408080; font-style: italic }
#chatbot .c1 { color: #408080; font-style: italic }
#chatbot .cs { color: #408080; font-style: italic }
#chatbot .gd { color: #A00000 }
#chatbot .ge { font-style: italic }
#chatbot .gr { color: #FF0000 }
#chatbot .gh { color: #000080; font-weight: bold }
#chatbot .gi { color: #00A000 }
#chatbot .go { color: #888888 }
#chatbot .gp { color: #000080; font-weight: bold }
#chatbot .gs { font-weight: bold }
#chatbot .gu { color: #800080; font-weight: bold }
#chatbot .gt { color: #0044DD }
#chatbot .kc { color: #008000; font-weight: bold }
#chatbot .kd { color: #008000; font-weight: bold }
#chatbot .kn { color: #008000; font-weight: bold }
#chatbot .kp { color: #008000 }
#chatbot .kr { color: #008000; font-weight: bold }
#chatbot .kt { color: #B00040 }
#chatbot .m { color: #666666 }
#chatbot .s { color: #BA2121 }
#chatbot .na { color: #7D9029 }
#chatbot .nb { color: #008000 }
#chatbot .nc { color: #0000FF; font-weight: bold }
#chatbot .no { color: #880000 }
#chatbot .nd { color: #AA22FF }
#chatbot .ni { color: #999999; font-weight: bold }
#chatbot .ne { color: #D2413A; font-weight: bold }
#chatbot .nf { color: #0000FF }
#chatbot .nl { color: #A0A000 }
#chatbot .nn { color: #0000FF; font-weight: bold }
#chatbot .nt { color: #008000; font-weight: bold }
#chatbot .nv { color: #19177C }
#chatbot .ow { color: #AA22FF; font-weight: bold }
#chatbot .w { color: #bbbbbb }
#chatbot .mb { color: #666666 }
#chatbot .mf { color: #666666 }
#chatbot .mh { color: #666666 }
#chatbot .mi { color: #666666 }
#chatbot .mo { color: #666666 }
#chatbot .sa { color: #BA2121 }
#chatbot .sb { color: #BA2121 }
#chatbot .sc { color: #BA2121 }
#chatbot .dl { color: #BA2121 }
#chatbot .sd { color: #BA2121; font-style: italic }
#chatbot .s2 { color: #BA2121 }
#chatbot .se { color: #BB6622; font-weight: bold }
#chatbot .sh { color: #BA2121 }
#chatbot .si { color: #BB6688; font-weight: bold }
#chatbot .sx { color: #008000 }
#chatbot .sr { color: #BB6688 }
#chatbot .s1 { color: #BA2121 }
#chatbot .ss { color: #19177C }
#chatbot .bp { color: #008000 }
#chatbot .fm { color: #0000FF }
#chatbot .vc { color: #19177C }
#chatbot .vg { color: #19177C }
#chatbot .vi { color: #19177C }
#chatbot .vm { color: #19177C }
#chatbot .il { color: #666666 }
"""
)
# .highlight { background: #f8f8f8; }
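The new file defines only a CSS string; nothing in this commit shows it being attached to a UI. As a hedged usage sketch (not part of this diff): Gradio's Blocks accepts a css argument, and every rule above targets the #chatbot element id, so wiring it up could look like this.

import gradio as gr

from pilot.server.gradio_css import code_highlight_css

with gr.Blocks(css=code_highlight_css) as demo:
    # elem_id must be "chatbot" for the #chatbot selectors above to match.
    chatbot = gr.Chatbot(elem_id="chatbot")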
167 pilot/server/gradio_patch.py Normal file
@@ -0,0 +1,167 @@
"""
Fork from https://github.com/lm-sys/FastChat/blob/main/fastchat/serve/gradio_patch.py
"""
from __future__ import annotations

from gradio.components import *
from markdown2 import Markdown


class _Keywords(Enum):
    NO_VALUE = "NO_VALUE"  # Used as a sentinel to determine if nothing is provided as an argument for `value` in `Component.update()`
    FINISHED_ITERATING = "FINISHED_ITERATING"  # Used to skip processing of a component's value (needed for generators + state)


@document("style")
class Chatbot(Changeable, Selectable, IOComponent, JSONSerializable):
    """
    Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
    Preprocessing: this component does *not* accept input.
    Postprocessing: expects function to return a {List[Tuple[str | None | Tuple, str | None | Tuple]]}, a list of tuples with user message and response messages. Messages should be strings, tuples, or Nones. If the message is a string, it can include Markdown. If it is a tuple, it should consist of (string filepath to image/video/audio, [optional string alt text]). Messages that are `None` are not displayed.

    Demos: chatbot_simple, chatbot_multimodal
    """

    def __init__(
        self,
        value: List[Tuple[str | None, str | None]] | Callable | None = None,
        color_map: Dict[str, str] | None = None,  # Parameter moved to Chatbot.style()
        *,
        label: str | None = None,
        every: float | None = None,
        show_label: bool = True,
        visible: bool = True,
        elem_id: str | None = None,
        elem_classes: List[str] | str | None = None,
        **kwargs,
    ):
        """
        Parameters:
            value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.
            label: component name in interface.
            every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
            show_label: if True, will display label.
            visible: If False, component will be hidden.
            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
        """
        if color_map is not None:
            warnings.warn(
                "The 'color_map' parameter has been deprecated.",
            )
        # self.md = utils.get_markdown_parser()
        self.md = Markdown(extras=["fenced-code-blocks", "tables", "break-on-newline"])
        self.select: EventListenerMethod
        """
        Event listener for when the user selects message from Chatbot.
        Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index.
        See EventData documentation on how to use this event data.
        """

        IOComponent.__init__(
            self,
            label=label,
            every=every,
            show_label=show_label,
            visible=visible,
            elem_id=elem_id,
            elem_classes=elem_classes,
            value=value,
            **kwargs,
        )

    def get_config(self):
        return {
            "value": self.value,
            "selectable": self.selectable,
            **IOComponent.get_config(self),
        }

    @staticmethod
    def update(
        value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
        label: str | None = None,
        show_label: bool | None = None,
        visible: bool | None = None,
    ):
        updated_config = {
            "label": label,
            "show_label": show_label,
            "visible": visible,
            "value": value,
            "__type__": "update",
        }
        return updated_config

    def _process_chat_messages(
        self, chat_message: str | Tuple | List | Dict | None
    ) -> str | Dict | None:
        if chat_message is None:
            return None
        elif isinstance(chat_message, (tuple, list)):
            mime_type = processing_utils.get_mimetype(chat_message[0])
            return {
                "name": chat_message[0],
                "mime_type": mime_type,
                "alt_text": chat_message[1] if len(chat_message) > 1 else None,
                "data": None,  # These last two fields are filled in by the frontend
                "is_file": True,
            }
        elif isinstance(
            chat_message, dict
        ):  # This happens for previously processed messages
            return chat_message
        elif isinstance(chat_message, str):
            # return self.md.render(chat_message)
            return str(self.md.convert(chat_message))
        else:
            raise ValueError(f"Invalid message for Chatbot component: {chat_message}")

    def postprocess(
        self,
        y: List[
            Tuple[str | Tuple | List | Dict | None, str | Tuple | List | Dict | None]
        ],
    ) -> List[Tuple[str | Dict | None, str | Dict | None]]:
        """
        Parameters:
            y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
        Returns:
            List of tuples representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information.
        """
        if y is None:
            return []
        processed_messages = []
        for message_pair in y:
            assert isinstance(
                message_pair, (tuple, list)
            ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
            assert (
                len(message_pair) == 2
            ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
            processed_messages.append(
                (
                    # self._process_chat_messages(message_pair[0]),
                    '<pre style="font-family: var(--font)">' +
                    message_pair[0] + "</pre>",
                    self._process_chat_messages(message_pair[1]),
                )
            )
        return processed_messages

    def style(self, height: int | None = None, **kwargs):
        """
        This method can be used to change the appearance of the Chatbot component.
        """
        if height is not None:
            self._style["height"] = height
        if kwargs.get("color_map") is not None:
            warnings.warn("The 'color_map' parameter has been deprecated.")

        Component.style(
            self,
            **kwargs,
        )
        return self
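A minimal usage sketch (not part of this diff) of the patched component, imported under the grChatbot alias that webserver.py adopts below. Per the postprocess() override, user turns are shown verbatim inside a <pre> tag while assistant turns are rendered through the markdown2 converter.

import gradio as gr

from pilot.server.gradio_patch import Chatbot as grChatbot

with gr.Blocks() as demo:
    # User messages: literal <pre> text; assistant replies: Markdown with
    # fenced code blocks, tables, and newline breaks enabled.
    chatbot = grChatbot(elem_id="chatbot").style(height=550)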
@@ -1,50 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-


import json
import torch
import gradio as gr
from fastchat.serve.inference import generate_stream

from transformers import AutoTokenizer, AutoModelForCausalLM

device = "cuda" if torch.cuda.is_available() else "cpu"
BASE_MODE = "/home/magic/workspace/github/DB-GPT/models/vicuna-13b"

tokenizer = AutoTokenizer.from_pretrained(BASE_MODE, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODE,
    low_cpu_mem_usage=True,
    torch_dtype=torch.float16,
    device_map="auto",
)


def generate(prompt):
    model.to(device)
    print(model, tokenizer)
    params = {
        "model": "vicuna-13b",
        # Chinese system prompt, roughly: "This is a conversation between a
        # user and an assistant. The assistant is an expert in databases and
        # gives highly professional answers to database questions. The user's
        # question follows:"
        "prompt": "这是一个用户与助手之间的对话, 助手精通数据库领域的知识, 并能够对数据库领域知识做出非常专业的回答。以下是用户的问题:" + prompt,
        "temperature": 0.7,
        "max_new_tokens": 512,
        "stop": "###",
    }
    output = generate_stream(
        model, tokenizer, params, device, context_len=2048, stream_interval=2)

    for chunk in output:
        yield chunk


if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.Markdown("数据库SQL生成助手")  # "Database SQL generation assistant"
        with gr.Tab("SQL生成"):  # "SQL generation"
            text_input = gr.TextArea()
            text_output = gr.TextArea()
            text_button = gr.Button("提交")  # "Submit"

        text_button.click(generate, inputs=text_input, outputs=text_output)

    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0")
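The deleted script loaded the 13B checkpoint in-process via transformers and wrapped every question in a database system prompt; the surviving HTTP-client script delegates generation to the running worker but drops that wrapping. A hypothetical helper, names assumed and not part of this commit, could restore it in front of the client's generate():

# Hypothetical: the deleted script's system prompt, translated to English.
DB_SYSTEM_PROMPT = (
    "This is a conversation between a user and an assistant. The assistant "
    "is an expert in databases and gives highly professional answers to "
    "database questions. The user's question follows: "
)


def wrap_prompt(user_question: str) -> str:
    # Mirrors the string concatenation the deleted script performed locally.
    return DB_SYSTEM_PROMPT + user_question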
@@ -29,8 +29,8 @@ from fastchat.utils import (
     moderation_msg
 )

-from fastchat.serve.gradio_patch import Chatbot as grChatbot
-from fastchat.serve.gradio_css import code_highlight_css
+from pilot.server.gradio_css import code_highlight_css
+from pilot.server.gradio_patch import Chatbot as grChatbot

 logger = build_logger("webserver", LOGDIR + "webserver.log")
 headers = {"User-Agent": "dbgpt Client"}