fix: conflicts

csunny
2023-06-13 21:41:18 +08:00
13 changed files with 119 additions and 77 deletions

View File

@@ -10,5 +10,6 @@ if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
# Load the user's .env file into environment variables
load_dotenv(verbose=True, override=True)
load_dotenv(".plugin_env")
del load_dotenv
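
Note: this hunk relies on python-dotenv's layering behavior. A minimal sketch of the pattern (the `.plugin_env` filename comes from the diff; the environment variable read at the end is a hypothetical example):

import os
from dotenv import load_dotenv

# override=True lets values in the .env file win over variables
# already present in the process environment.
load_dotenv(verbose=True, override=True)
# A second call layers plugin-specific settings on top.
load_dotenv(".plugin_env")
# Remove the name so later code cannot accidentally re-trigger loading.
del load_dotenv

print(os.getenv("PLUGIN_TOKEN"))  # hypothetical variable, for illustration only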

View File

@@ -17,14 +17,10 @@ nltk.data.path = [os.path.join(PILOT_PATH, "nltk_data")] + nltk.data.path
PLUGINS_DIR = os.path.join(ROOT_PATH, "plugins")
FONT_DIR = os.path.join(PILOT_PATH, "fonts")
# Get the current working directory
current_directory = os.getcwd()
print("Current working directory:", current_directory)
# Set the current working directory
new_directory = PILOT_PATH
os.chdir(new_directory)
print("New working directory:", os.getcwd())
DEVICE = (
"cuda"

View File

@@ -44,7 +44,7 @@ lang_dicts = {
"learn_more_markdown": "The service is a research preview intended for non-commercial use only. subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of Vicuna-13B",
"model_control_param": "Model Parameters",
"sql_generate_mode_direct": "Execute directly",
"sql_generate_mode_none": "chat to db",
"sql_generate_mode_none": "db chat",
"max_input_token_size": "Maximum output token size",
"please_choose_database": "Please choose database",
"sql_generate_diagnostics": "SQL Generation & Diagnostics",

View File

@@ -51,7 +51,7 @@ def proxyllm_generate_stream(model, tokenizer, params, device, context_len=2048)
}
)
- # 把最后一个用户的信息移动到末尾
+ # Move the last user's information to the end
temp_his = history[::-1]
last_user_input = None
for m in temp_his:
@@ -66,7 +66,7 @@ def proxyllm_generate_stream(model, tokenizer, params, device, context_len=2048)
"messages": history,
"temperature": params.get("temperature"),
"max_tokens": params.get("max_new_tokens"),
"stream": True
"stream": True,
}
res = requests.post(
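
The requests.post call is cut off at the hunk boundary. Filling it in under the payload shape shown above (a sketch; the URL, headers, history, and params here are placeholders for values defined elsewhere in the file):

import requests

proxy_server_url = "http://127.0.0.1:8000/v1/chat/completions"  # placeholder
headers = {"Content-Type": "application/json"}
history = [{"role": "user", "content": "hello"}]
params = {"temperature": 0.7, "max_new_tokens": 512}

payloads = {
    "messages": history,
    "temperature": params.get("temperature"),
    "max_tokens": params.get("max_new_tokens"),
    "stream": True,  # ask the server for incremental chunks
}
# stream=True on the client keeps the connection open so the
# response body can be consumed line by line as it arrives.
res = requests.post(proxy_server_url, headers=headers, json=payloads, stream=True)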
@@ -78,30 +78,9 @@ def proxyllm_generate_stream(model, tokenizer, params, device, context_len=2048)
if line:
json_data = line.split(b': ', 1)[1]
decoded_line = json_data.decode("utf-8")
- if decoded_line.lower() != '[DONE]'.lower():
+ if decoded_line.lower() != "[DONE]".lower():
obj = json.loads(json_data)
- if obj['choices'][0]['delta'].get('content') is not None:
-     content = obj['choices'][0]['delta']['content']
+ if obj["choices"][0]["delta"].get("content") is not None:
+     content = obj["choices"][0]["delta"]["content"]
text += content
yield text
# native result.
# payloads = {
# "model": "gpt-3.5-turbo", # just for test, remove this later
# "messages": history,
# "temperature": params.get("temperature"),
# "max_tokens": params.get("max_new_tokens"),
# }
#
# res = requests.post(
# CFG.proxy_server_url, headers=headers, json=payloads, stream=True
# )
#
# text = ""
# line = res.content
# if line:
# decoded_line = line.decode("utf-8")
# json_line = json.loads(decoded_line)
# print(json_line)
# text += json_line["choices"][0]["message"]["content"]
# yield text
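
Taken together, the loop above parses an OpenAI-style server-sent-events stream. A self-contained sketch of the same technique (endpoint, headers, and payload are placeholders, and iter_lines is used here in place of whatever iteration the surrounding file does):

import json
import requests

def stream_chat_text(url, headers, payload):
    res = requests.post(url, headers=headers, json=payload, stream=True)
    text = ""
    for line in res.iter_lines():
        if not line:
            continue
        # Each event arrives as b'data: {...}'; keep only the JSON part.
        json_data = line.split(b": ", 1)[1]
        if json_data.decode("utf-8").strip() == "[DONE]":
            break  # the server signals end-of-stream with a literal [DONE]
        obj = json.loads(json_data)
        delta = obj["choices"][0]["delta"]
        if delta.get("content") is not None:
            text += delta["content"]
            yield text  # yield the accumulated text so far, as the diff does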

View File

@@ -1,3 +1,5 @@
+ from chromadb.errors import NoIndexException
from pilot.scene.base_chat import BaseChat, logger, headers
from pilot.scene.base import ChatScene
from pilot.common.sql_database import Database
@@ -46,12 +48,15 @@ class ChatDefaultKnowledge(BaseChat):
)
def generate_input_values(self):
- docs = self.knowledge_embedding_client.similar_search(
-     self.current_user_input, CFG.KNOWLEDGE_SEARCH_TOP_SIZE
- )
- context = [d.page_content for d in docs]
- context = context[:2000]
- input_values = {"context": context, "question": self.current_user_input}
+ try:
+     docs = self.knowledge_embedding_client.similar_search(
+         self.current_user_input, CFG.KNOWLEDGE_SEARCH_TOP_SIZE
+     )
+     context = [d.page_content for d in docs]
+     context = context[:2000]
+     input_values = {"context": context, "question": self.current_user_input}
+ except NoIndexException:
+     raise ValueError("you have no default knowledge store, please execute python knowledge_init.py")
return input_values
def do_with_prompt_response(self, prompt_response):
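
The change in this last hunk wraps the similarity search so a missing vector index surfaces as an actionable error instead of an unhandled exception. A reduced, standalone sketch of the same guard (client, question, and top_k are stand-ins for the attributes used in the diff):

from chromadb.errors import NoIndexException

def build_input_values(client, question, top_k):
    try:
        docs = client.similar_search(question, top_k)
    except NoIndexException:
        # The default knowledge store was never built; tell the user how to fix it.
        raise ValueError(
            "you have no default knowledge store, please execute python knowledge_init.py"
        )
    # Keep at most 2000 entries of page content as context, mirroring the diff.
    context = [d.page_content for d in docs][:2000]
    return {"context": context, "question": question}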