[hotfix] fix typo s/keywrods/keywords etc. (#5429)

digger yu 2024-03-12 11:25:16 +08:00 committed by GitHub
parent da885ed540
commit 385e85afd4
17 changed files with 25 additions and 25 deletions

View File

@@ -117,8 +117,8 @@ class CustomBaseRetrievalQA(BaseRetrievalQA):
                 ) = copy.deepcopy(buffered_history_backup), copy.deepcopy(summarized_history_temp_backup)
 
         # if rejection_trigger_keywords is not given, return the response from LLM directly
-        rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])
-        answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) else None
+        rejection_trigger_keywords = inputs.get('rejection_trigger_keywords', [])
+        answer = answer if all([rej not in answer for rej in rejection_trigger_keywords]) else None
         if answer is None:
             answer = inputs.get('rejection_answer', "抱歉,根据提供的信息无法回答该问题。")
         if self.combine_documents_chain.memory is not None:
@@ -161,8 +161,8 @@ class CustomBaseRetrievalQA(BaseRetrievalQA):
             input_documents=docs, question=question, callbacks=_run_manager.get_child(), **kwargs
         )
         # if rejection_trigger_keywords is not given, return the response from LLM directly
-        rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])
-        answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) or len(rejection_trigger_keywrods)==0 else None
+        rejection_trigger_keywords = inputs.get('rejection_trigger_keywords', [])
+        answer = answer if all([rej not in answer for rej in rejection_trigger_keywords]) or len(rejection_trigger_keywords)==0 else None
         if answer is None:
             answer = inputs.get('rejection_answer', "抱歉,根据提供的信息无法回答该问题。")
             self.combine_documents_chain.memory.save_context({"question": question}, {"output": answer})
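
Both hunks above apply the same rename inside `CustomBaseRetrievalQA`. For reference, a minimal standalone sketch of the filtering they implement (the helper name `apply_rejection_filter` is illustrative, not part of the repository): keep the LLM's answer unless it contains one of the caller-supplied trigger keywords, in which case substitute the rejection answer.

```python
# Hypothetical standalone sketch of the rejection filtering shown in the diff.
def apply_rejection_filter(answer: str, inputs: dict) -> str:
    rejection_trigger_keywords = inputs.get("rejection_trigger_keywords", [])
    # all(...) over an empty list is True, so no keywords means the answer passes.
    if not all(rej not in answer for rej in rejection_trigger_keywords):
        return inputs.get("rejection_answer", "抱歉,根据提供的信息无法回答该问题。")
    return answer
```

Note that the `len(rejection_trigger_keywords) == 0` guard in the second hunk is redundant: `all()` over an empty list is already `True`.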

View File

@@ -75,7 +75,7 @@ Assistant: 我认识一个叫张三的人
 # Below are English retrieval qa prompts
 _EN_RETRIEVAL_QA_PROMPT = """[INST] <<SYS>>Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist content.
-If the answer cannot be infered based on the given context, please say "I cannot answer the question based on the information given.".<</SYS>>
+If the answer cannot be inferred based on the given context, please say "I cannot answer the question based on the information given.".<</SYS>>
 Use the context and chat history to answer the question.
 
 context:
@@ -97,8 +97,8 @@ Chat history:
 Human: I have a friend, Mike. Do you know him?
 Assistant: Yes, I know a person named Mike
-sentence: What's his favorate food?
-disambiguated sentence: What's Mike's favorate food?
+sentence: What's his favorite food?
+disambiguated sentence: What's Mike's favorite food?
 [/INST]
 
 Chat history:
 {chat_history}

View File

@@ -80,7 +80,7 @@ class EnglishRetrievalConversation:
                 self.retrieval_chain.run(
                     query=user_input,
                     stop=[self.memory.human_prefix + ": "],
-                    rejection_trigger_keywrods=["cannot answer the question"],
+                    rejection_trigger_keywords=["cannot answer the question"],
                     rejection_answer="Sorry, this question cannot be answered based on the information provided.",
                 ).split("\n")[0],
                 self.memory,

View File

@@ -103,7 +103,7 @@ class UniversalRetrievalConversation:
                 break
             data_name = input("Enter a short description of the data:")
             separator = input(
-                "Enter a separator to force separating text into chunks, if no separator is given, the defaut separator is '\\n\\n', press ENTER directly to skip:"
+                "Enter a separator to force separating text into chunks, if no separator is given, the default separator is '\\n\\n', press ENTER directly to skip:"
             )
             separator = separator if separator != "" else "\n\n"
             retriever_data = DocumentLoader([[file, data_name.replace(" ", "_")]]).all_data

View File

@@ -87,7 +87,7 @@ class ChineseRetrievalConversation:
                     query=user_input,
                     stop=["</答案>"],
                     doc_prefix="支持文档",
-                    rejection_trigger_keywrods=["无法回答该问题"],
+                    rejection_trigger_keywords=["无法回答该问题"],
                     rejection_answer="抱歉,根据提供的信息无法回答该问题。",
                 ).split("\n")[0],
                 self.memory,

View File

@@ -61,7 +61,7 @@ if __name__ == "__main__":
     information_retriever.add_documents(docs=documents, cleanup="incremental", mode="by_source", embedding=embedding)
 
     prompt_template = """Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
-If the answer cannot be infered based on the given context, please don't share false information.
+If the answer cannot be inferred based on the given context, please don't share false information.
 Use the context and chat history to respond to the human's input at the end or carry on the conversation. You should generate one response only. No following up is needed.
 
 context:

View File

@@ -67,7 +67,7 @@ if __name__ == "__main__":
             break
         data_name = input("Enter a short description of the data:")
         separator = input(
-            "Enter a separator to force separating text into chunks, if no separator is given, the defaut separator is '\\n\\n'. Note that"
+            "Enter a separator to force separating text into chunks, if no separator is given, the default separator is '\\n\\n'. Note that"
             + "we use neural text spliter to split texts into chunks, the seperator only serves as a delimiter to force split long passage into"
             + " chunks before passing to the neural network. Press ENTER directly to skip:"
         )
@@ -112,7 +112,7 @@ if __name__ == "__main__":
         agent_response = retrieval_chain.run(
             query=user_input,
             stop=["Human: "],
-            rejection_trigger_keywrods=EN_RETRIEVAL_QA_TRIGGER_KEYWORDS,
+            rejection_trigger_keywords=EN_RETRIEVAL_QA_TRIGGER_KEYWORDS,
            rejection_answer=EN_RETRIEVAL_QA_REJECTION_ANSWER,
         )
         agent_response = agent_response.split("\n")[0]

View File

@@ -142,7 +142,7 @@ if __name__ == "__main__":
         agent_response = retrieval_chain.run(
             query=user_input,
             stop=["Human: "],
-            rejection_trigger_keywrods=EN_RETRIEVAL_QA_TRIGGER_KEYWORDS,
+            rejection_trigger_keywords=EN_RETRIEVAL_QA_TRIGGER_KEYWORDS,
             rejection_answer=EN_RETRIEVAL_QA_REJECTION_ANSWER,
         )
         agent_response = agent_response.split("\n")[0]

View File

@@ -11,7 +11,7 @@ if __name__ == '__main__':
     parser.add_argument('--sql_file_path', type=str, default=None, help='path to the a empty folder for storing sql files for indexing')
     args = parser.parse_args()
 
-    # Will ask for documents path in runnning time
+    # Will ask for documents path in running time
     session = UniversalRetrievalConversation(files_en=None,
                 files_zh=None,
                 zh_model_path=args.zh_model_path, en_model_path=args.en_model_path,

View File

@@ -107,7 +107,7 @@ if __name__ == "__main__":
             query=user_input,
             stop=["</答案>"],
             doc_prefix="支持文档",
-            rejection_trigger_keywrods=ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS,
+            rejection_trigger_keywords=ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS,
             rejection_answer=ZH_RETRIEVAL_QA_REJECTION_ANSWER,
         )
         print(f"Agent: {agent_response}")

View File

@@ -140,7 +140,7 @@ class RAG_ChatBot:
         result = self.rag_chain.run(
             query=user_input,
             stop=[memory.human_prefix + ": "],
-            rejection_trigger_keywrods=ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS,
+            rejection_trigger_keywords=ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS,
             rejection_answer=ZH_RETRIEVAL_QA_REJECTION_ANSWER,
         )
         return result, memory

View File

@@ -89,7 +89,7 @@ docker pull hpcaitech/colossalai-inference:v2
 docker run -it --gpus all --name ANY_NAME -v $PWD:/workspace -w /workspace hpcaitech/colossalai-inference:v2 /bin/bash
 
 # enter into docker container
-cd /path/to/CollossalAI
+cd /path/to/ColossalAI
 pip install -e .
 ```

View File

@@ -86,7 +86,7 @@ docker pull hpcaitech/colossalai-inference:v2
 docker run -it --gpus all --name ANY_NAME -v $PWD:/workspace -w /workspace hpcaitech/colossalai-inference:v2 /bin/bash
 
 # enter into docker container
-cd /path/to/CollossalAI
+cd /path/to/ColossalAI
 pip install -e .
 
 # install lightllm

View File

@@ -46,7 +46,7 @@ class CaiInferEngine:
         model = LlamaForCausalLM.from_pretrained("your_path_to_model")
         tokenizer = LlamaTokenizer.from_pretrained("/home/lczyh/share/models/llama-7b-hf")
 
-        # assume the model is infered with 2 pipeline stages
+        # assume the model is inferred with 2 pipeline stages
         inferengine = CaiInferEngine(pp_size=2, model=model, model_policy=LlamaModelInferPolicy())
 
         input = ["Introduce a landmark in China ","Introduce a landmark in China "]
@@ -70,7 +70,7 @@ class CaiInferEngine:
         max_input_len: int = 32,
         max_output_len: int = 32,
         verbose: bool = False,
-        # TODO: implement early_stopping, and various gerneration options
+        # TODO: implement early_stopping, and various generation options
         early_stopping: bool = False,
         do_sample: bool = False,
         num_beams: int = 1,

View File

@@ -47,7 +47,7 @@ be optimized jointly to further speed up training.
 2. Model Accuracy
 
 - Communication Efficiency
-  - Reduce Volumn of Comm.
+  - Reduce Volume of Comm.
   - Reduce Frequency of Comm.
 - Memory Efficiency
   - Mix-Precision Training

View File

@@ -164,7 +164,7 @@ class GenerateSchedule(PipelineSchedule):
                 self.timestamps[self.mb_manager.idx].append(time.time())
             assert (
                 "logits" in logits
-            ), f"When first stage in GENERATE phase, the ouput should have attribute `logits`, but has {logits.keys()}"
+            ), f"When first stage in GENERATE phase, the output should have attribute `logits`, but has {logits.keys()}"
             new_token = self._get_token_id(logits["logits"])
             self.mb_manager.step(new_token)
@@ -401,7 +401,7 @@ class GenerateSchedule(PipelineSchedule):
                 self.timestamps[self.mb_manager.idx].append(time.time())
             assert (
                 "logits" in logits
-            ), f"When first stage in GENERATE phase, the ouput should have attribute `logits`, but has {logits.keys()}"
+            ), f"When first stage in GENERATE phase, the output should have attribute `logits`, but has {logits.keys()}"
             new_token = self._get_token_id(logits["logits"])
             self.mb_manager.step(new_token)
             # If the current micro batch is not DONE, go through blocks

View File

@@ -338,7 +338,7 @@ def count_flops_attn(model, _x, y):
 class QKVAttentionLegacy(nn.Module):
     """
-    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
+    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
     """
 
     def __init__(self, n_heads):
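
For context on what this class does, here is a hedged sketch of the legacy-style QKV attention the docstring describes, written as a standalone function. The packed [batch, 3 * n_heads * ch, length] input layout and the heads-first split are assumptions based on common implementations of this module, not necessarily this repository's exact code.

```python
import math
import torch

def qkv_attention_legacy(qkv: torch.Tensor, n_heads: int) -> torch.Tensor:
    # qkv packs query, key, and value along the channel dim:
    # [batch, 3 * n_heads * ch, length] -> output [batch, n_heads * ch, length]
    bs, width, length = qkv.shape
    assert width % (3 * n_heads) == 0
    ch = width // (3 * n_heads)
    # "legacy" shaping: split into heads first, then into q, k, v
    q, k, v = qkv.reshape(bs * n_heads, ch * 3, length).split(ch, dim=1)
    scale = 1 / math.sqrt(math.sqrt(ch))  # apply 1/sqrt(ch) half to q, half to k
    weight = torch.einsum("bct,bcs->bts", q * scale, k * scale)
    weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
    out = torch.einsum("bts,bcs->bct", weight, v)
    return out.reshape(bs, -1, length)
```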