Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-08-07 11:23:40 +00:00)
WEB API independent

commit 33c7374168
parent d6b4da7559
@@ -11,6 +11,7 @@ def generate_stream(
     """Fork from fastchat: https://github.com/lm-sys/FastChat/blob/main/fastchat/serve/inference.py"""
     prompt = params["prompt"]
     l_prompt = len(prompt)
+    prompt = prompt.replace("ai:", "assistant:").replace("human:", "user:")
     temperature = float(params.get("temperature", 1.0))
     max_new_tokens = int(params.get("max_new_tokens", 2048))
     stop_str = params.get("stop", None)
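The role-tag mapping now happens inside the server's generate_stream rather than in the client. A minimal sketch of the effect, assuming a two-turn prompt (the prompt text is illustrative, not taken from the commit):

    # Minimal sketch of the server-side role normalization.
    params = {"prompt": "human: show all users\nai: SELECT * FROM users;"}
    prompt = params["prompt"]
    prompt = prompt.replace("ai:", "assistant:").replace("human:", "user:")
    print(prompt)
    # user: show all users
    # assistant: SELECT * FROM users;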
@@ -115,7 +115,7 @@ class BaseChat(ABC):
         payload = {
             "model": self.llm_model,
-            "prompt": self.generate_llm_text().replace("ai:", "assistant:"),
+            "prompt": self.generate_llm_text(),
             "temperature": float(self.prompt_template.temperature),
             "max_new_tokens": int(self.prompt_template.max_new_tokens),
             "stop": self.prompt_template.sep,
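With the replacement moved server-side, BaseChat now ships the raw generated text. A hedged sketch of how such a payload is typically posted to the model server; the endpoint URL, model name, and values are assumptions, not taken from this diff:

    import requests  # assumed available in the client environment

    payload = {
        "model": "vicuna-13b",                     # illustrative model name
        "prompt": "human: list all tables\nai:",   # raw text; the server maps role tags
        "temperature": 0.7,
        "max_new_tokens": 1024,
        "stop": "###",
    }
    # Placeholder URL; the real endpoint is defined elsewhere in the project.
    response = requests.post(
        "http://127.0.0.1:8000/generate_stream", json=payload, stream=True
    )
    for chunk in response.iter_lines():
        if chunk:
            print(chunk.decode("utf-8"))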
@@ -42,8 +42,9 @@ class DbChatOutputParser(BaseOutputParser):
             html_table = df.to_html(index=False, escape=False)
             html = f"<html><head>{table_style}</head><body>{html_table}</body></html>"
         else:
-            html = df.to_html(index=False, escape=False, sparsify=False)
-            html = "".join(html.split())
+            html_table = df.to_html(index=False, escape=False, sparsify=False)
+            table_str = "".join(html_table.split())
+            html = f"""<div class="w-full overflow-auto">{table_str}</table></div>"""
 
         view_text = f"##### {str(speak)}" + "\n" + html.replace("\n", " ")
         return view_text
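A minimal sketch of the new compact-table branch, assuming a small pandas DataFrame; the sample data and speak text are illustrative:

    import pandas as pd

    df = pd.DataFrame({"name": ["alice", "bob"], "age": [30, 25]})
    # sparsify=False repeats index labels instead of blanking duplicates
    html_table = df.to_html(index=False, escape=False, sparsify=False)
    # Removes every whitespace run (between tags, between tag attributes,
    # and inside cell text) so the markup collapses onto one line.
    table_str = "".join(html_table.split())
    html = f"""<div class="w-full overflow-auto">{table_str}</div>"""
    speak = "Here are the results"  # illustrative
    view_text = f"##### {speak}" + "\n" + html.replace("\n", " ")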
@@ -11,7 +11,7 @@ CFG = Config()
 PROMPT_SCENE_DEFINE = None
 
 _DEFAULT_TEMPLATE = """
-You are a SQL expert. Given an input question, create a syntactically correct {dialect} query.
+You are a SQL expert. Given an input question, create a syntactically correct {dialect} sql.
 
 Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results.
 Use as few tables as possible when querying.
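The template carries two placeholders, {dialect} and {top_k}, filled per scene. A hedged sketch of how such a template is typically rendered; the values are illustrative, not the project's actual configuration:

    _DEFAULT_TEMPLATE = (
        "You are a SQL expert. Given an input question, create a syntactically "
        "correct {dialect} sql.\n"
        "Unless the user specifies in his question a specific number of examples "
        "he wishes to obtain, always limit your query to at most {top_k} results.\n"
    )
    # Illustrative values; the real ones come from the scene configuration.
    print(_DEFAULT_TEMPLATE.format(dialect="mysql", top_k=5))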
@@ -51,7 +51,6 @@ prompt = PromptTemplate(
         sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
     ),
     example_selector=sql_data_example,
-    # example_selector=None,
     temperature=PROMPT_TEMPERATURE
 )
 CFG.prompt_templates.update({prompt.template_scene: prompt})
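Each scene registers its PromptTemplate under its scene name in the shared CFG registry. A hedged sketch of the pattern; the class stubs and the scene name are assumptions, not the project's actual definitions:

    # Hedged sketch of the registry pattern; field names beyond those visible
    # in the diff are assumptions.
    class PromptTemplate:
        def __init__(self, template_scene: str, temperature: float):
            self.template_scene = template_scene
            self.temperature = temperature

    class Config:
        def __init__(self):
            self.prompt_templates = {}

    CFG = Config()
    prompt = PromptTemplate(template_scene="chat_with_db_execute", temperature=0.5)
    CFG.prompt_templates.update({prompt.template_scene: prompt})
    # Later, a chat scene looks up its template by name:
    template = CFG.prompt_templates["chat_with_db_execute"]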