chore: run black against modified code

This commit is contained in:
LBYPatrick 2023-06-16 12:23:28 +08:00
parent 8a049b8af1
commit 0bb0246fb1
3 changed files with 9 additions and 6 deletions

View File

@@ -43,9 +43,8 @@ LLM_MODEL_CONFIG = {
"guanaco-33b-merged": os.path.join(MODEL_PATH, "guanaco-33b-merged"),
"falcon-40b": os.path.join(MODEL_PATH, "falcon-40b"),
"gorilla-7b": os.path.join(MODEL_PATH, "gorilla-7b"),
# TODO Support baichuan-7b
#"baichuan-7b" : os.path.join(MODEL_PATH, "baichuan-7b"),
# "baichuan-7b" : os.path.join(MODEL_PATH, "baichuan-7b"),
"gptj-6b": os.path.join(MODEL_PATH, "ggml-gpt4all-j-v1.3-groovy.bin"),
"proxyllm": "proxyllm",
}

View File

@@ -32,9 +32,14 @@ class BaseLLMAdaper:
return True
def loader(self, model_path: str, from_pretrained_kwargs: dict):
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False,trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(
model_path, use_fast=False, trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
model_path, low_cpu_mem_usage=True, trust_remote_code=True, **from_pretrained_kwargs
model_path,
low_cpu_mem_usage=True,
trust_remote_code=True,
**from_pretrained_kwargs,
)
return model, tokenizer

View File

@@ -90,8 +90,7 @@ class ModelWorker:
ret = {"text": "**GPU OutOfMemory, Please Refresh.**", "error_code": 0}
yield json.dumps(ret).encode() + b"\0"
except Exception as e:
msg = "{}: {}".format(str(e),traceback.format_exc())
msg = "{}: {}".format(str(e), traceback.format_exc())
ret = {
"text": f"**LLMServer Generate Error, Please CheckErrorInfo.**: {msg}",