mirror of https://github.com/csunny/DB-GPT.git

WEB API independent

parent e8c61c29e2
commit 314920b6e1
```diff
@@ -11,6 +11,7 @@ def generate_stream(
     """Fork from fastchat: https://github.com/lm-sys/FastChat/blob/main/fastchat/serve/inference.py"""
     prompt = params["prompt"]
     l_prompt = len(prompt)
+    prompt = prompt.replace("ai:", "assistant:").replace("human:", "user:")
     temperature = float(params.get("temperature", 1.0))
     max_new_tokens = int(params.get("max_new_tokens", 2048))
     stop_str = params.get("stop", None)
```
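The added line rewrites the role tags in the incoming prompt before inference, so the server no longer depends on the caller's tag convention. A minimal, self-contained sketch of that substitution (the sample prompt text is illustrative, not from the commit):

```python
# Sketch of the role-tag normalization added to generate_stream: the caller
# may send "human:"/"ai:" tags, which are rewritten to "user:"/"assistant:".
params = {"prompt": "human: show me all users\nai: SELECT * FROM users;"}

prompt = params["prompt"]
prompt = prompt.replace("ai:", "assistant:").replace("human:", "user:")

print(prompt)
# user: show me all users
# assistant: SELECT * FROM users;
```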
```diff
@@ -115,7 +115,7 @@ class BaseChat(ABC):
 
         payload = {
             "model": self.llm_model,
-            "prompt": self.generate_llm_text().replace("ai:", "assistant:"),
+            "prompt": self.generate_llm_text(),
             "temperature": float(self.prompt_template.temperature),
             "max_new_tokens": int(self.prompt_template.max_new_tokens),
             "stop": self.prompt_template.sep,
```
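With normalization moved into `generate_stream`, the client side now sends the prompt text unmodified. A sketch of how such a payload might be posted to the model server; the endpoint URL and all values are assumptions for illustration, not taken from this commit:

```python
# Hypothetical client-side sketch: build the generation payload and POST it.
import requests

payload = {
    "model": "vicuna-13b",                   # self.llm_model
    "prompt": "human: hello\nai: hi there",  # self.generate_llm_text(), sent as-is now
    "temperature": 0.7,
    "max_new_tokens": 2048,
    "stop": "###",                           # self.prompt_template.sep
}

# Hypothetical endpoint; the real URL comes from the deployment config.
response = requests.post("http://127.0.0.1:8000/generate", json=payload)
print(response.json())
```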
```diff
@@ -42,8 +42,9 @@ class DbChatOutputParser(BaseOutputParser):
             html_table = df.to_html(index=False, escape=False)
             html = f"<html><head>{table_style}</head><body>{html_table}</body></html>"
         else:
-            html = df.to_html(index=False, escape=False, sparsify=False)
-            html = "".join(html.split())
+            html_table = df.to_html(index=False, escape=False, sparsify=False)
+            table_str = "".join(html_table.split())
+            html = f"""<div class="w-full overflow-auto">{table_str}</table></div>"""
 
         view_text = f"##### {str(speak)}" + "\n" + html.replace("\n", " ")
         return view_text
```
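The new branch renames the intermediates and wraps the compacted table markup in a scrollable `<div>` for the web view. A runnable sketch of that path, assuming a small DataFrame in place of the real query result:

```python
# Sketch of the new table-rendering branch using a toy DataFrame.
import pandas as pd

df = pd.DataFrame({"id": [1, 2], "name": ["alice", "bob"]})
speak = "Here are the results"

html_table = df.to_html(index=False, escape=False, sparsify=False)
# Collapse the markup onto one line by removing all whitespace; note this
# also strips spaces inside cell text, so it only suits single-word values.
table_str = "".join(html_table.split())
# Wrapper string as committed above (it closes </table> before </div>).
html = f"""<div class="w-full overflow-auto">{table_str}</table></div>"""

view_text = f"##### {str(speak)}" + "\n" + html.replace("\n", " ")
print(view_text)
```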
```diff
@@ -11,7 +11,7 @@ CFG = Config()
 PROMPT_SCENE_DEFINE = None
 
 _DEFAULT_TEMPLATE = """
-You are a SQL expert. Given an input question, create a syntactically correct {dialect} query.
+You are a SQL expert. Given an input question, create a syntactically correct {dialect} sql.
 
 Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results.
 Use as few tables as possible when querying.
```
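The `{dialect}` and `{top_k}` placeholders in this template are filled at request time. A minimal sketch of that substitution; the chosen values are illustrative:

```python
# Minimal sketch of filling the prompt-template placeholders.
_DEFAULT_TEMPLATE = """
You are a SQL expert. Given an input question, create a syntactically correct {dialect} sql.

Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results.
Use as few tables as possible when querying.
"""

print(_DEFAULT_TEMPLATE.format(dialect="mysql", top_k=5))
```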
```diff
@@ -51,7 +51,6 @@ prompt = PromptTemplate(
         sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
     ),
     example_selector=sql_data_example,
-    # example_selector=None,
     temperature=PROMPT_TEMPERATURE
 )
 CFG.prompt_templates.update({prompt.template_scene: prompt})
```
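The last line registers the built template in a scene-keyed registry so each chat scene can look up its prompt by name. A sketch of that registry pattern; the `SimpleTemplate` class and scene name below are stand-ins, not DB-GPT's real API:

```python
# Stand-in sketch of the scene -> template registry pattern.
class SimpleTemplate:
    def __init__(self, template_scene, template):
        self.template_scene = template_scene
        self.template = template

prompt_templates = {}  # plays the role of CFG.prompt_templates

prompt = SimpleTemplate("chat_with_db_execute", "You are a SQL expert...")
prompt_templates.update({prompt.template_scene: prompt})

# A chat scene later fetches its prompt by scene name:
print(prompt_templates["chat_with_db_execute"].template)
```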