From e2f18fc187811e93abe5c4874e0781ad3b192639 Mon Sep 17 00:00:00 2001 From: csunny Date: Fri, 19 May 2023 21:44:54 +0800 Subject: [PATCH 01/15] add multi adapter --- pilot/configs/model_config.py | 4 +++- pilot/model/adapter.py | 13 +++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py index 5ab2e4fbe..9699061b7 100644 --- a/pilot/configs/model_config.py +++ b/pilot/configs/model_config.py @@ -21,7 +21,9 @@ LLM_MODEL_CONFIG = { "flan-t5-base": os.path.join(MODEL_PATH, "flan-t5-base"), "vicuna-13b": os.path.join(MODEL_PATH, "vicuna-13b"), "text2vec": os.path.join(MODEL_PATH, "text2vec-large-chinese"), - "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2") + "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2"), + "codegen2-7b": os.path.join(MODEL_PATH, ""), + "codet5p-2b": os.path.join(MODEL_PATH, "codet5p-2b"), } # Load model config diff --git a/pilot/model/adapter.py b/pilot/model/adapter.py index 9afd2c01f..83d8a3717 100644 --- a/pilot/model/adapter.py +++ b/pilot/model/adapter.py @@ -68,6 +68,19 @@ class ChatGLMAdapater(BaseLLMAdaper): model_path, trust_remote_code=True, **from_pretrained_kwargs ).half().cuda() return model, tokenizer + +class ZiYaLLaMaAdapter(BaseLLMAdaper): + # TODO + pass + +class CodeGenAdapter(BaseLLMAdaper): + pass + +class StarCoderAdapter(BaseLLMAdaper): + pass + +class T5CodeAdapter(BaseLLMAdaper): + pass class KoalaLLMAdapter(BaseLLMAdaper): """Koala LLM Adapter which Based LLaMA """ From c0532246afec941dec406e07ff3e74c29d65689a Mon Sep 17 00:00:00 2001 From: csunny Date: Sat, 20 May 2023 15:45:43 +0800 Subject: [PATCH 02/15] llm: add chatglm --- pilot/model/adapter.py | 4 ---- pilot/model/chat.py | 3 --- pilot/model/chatglm_llm.py | 41 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 7 deletions(-) delete mode 100644 pilot/model/chat.py create mode 100644 pilot/model/chatglm_llm.py diff --git a/pilot/model/adapter.py b/pilot/model/adapter.py index 83d8a3717..84cd699ac 100644 --- a/pilot/model/adapter.py +++ b/pilot/model/adapter.py @@ -69,10 +69,6 @@ class ChatGLMAdapater(BaseLLMAdaper): ).half().cuda() return model, tokenizer -class ZiYaLLaMaAdapter(BaseLLMAdaper): - # TODO - pass - class CodeGenAdapter(BaseLLMAdaper): pass diff --git a/pilot/model/chat.py b/pilot/model/chat.py deleted file mode 100644 index 97206f2d5..000000000 --- a/pilot/model/chat.py +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- - diff --git a/pilot/model/chatglm_llm.py b/pilot/model/chatglm_llm.py new file mode 100644 index 000000000..ef54e92d7 --- /dev/null +++ b/pilot/model/chatglm_llm.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- + +import torch + +@torch.inference_mode() +def chatglm_generate_stream(model, tokenizer, params, device, context_len=2048, stream_interval=2): + + """Generate text using chatglm model's chat api """ + messages = params["prompt"] + max_new_tokens = int(params.get("max_new_tokens", 256)) + temperature = float(params.get("temperature", 1.0)) + top_p = float(params.get("top_p", 1.0)) + echo = params.get("echo", True) + + generate_kwargs = { + "max_new_tokens": max_new_tokens, + "do_sample": True if temperature > 1e-5 else False, + "top_p": top_p, + "logits_processor": None + } + + if temperature > 1e-5: + generate_kwargs["temperature"] = temperature + + hist = [] + for i in range(0, len(messages) - 2, 2): + hist.append(messages[i][1], messages[i + 1][1]) + + query = 
messages[-2][1] + output = "" + i = 0 + for i, (response, new_hist) in enumerate(model.stream_chat(tokenizer, query, hist, **generate_kwargs)): + if echo: + output = query + " " + response + else: + output = response + + yield output + + yield output \ No newline at end of file From cbf1d0662a0b9a39eab5d0491859206b7c09c033 Mon Sep 17 00:00:00 2001 From: csunny Date: Sat, 20 May 2023 16:06:32 +0800 Subject: [PATCH 03/15] llms: add models --- .gitignore | 1 + pilot/configs/model_config.py | 5 ++++- pilot/server/chat_adapter.py | 13 +++++++++++++ pilot/server/llmserver.py | 1 - 4 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 pilot/server/chat_adapter.py diff --git a/.gitignore b/.gitignore index 5043f7db0..22bb204db 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,7 @@ lib/ lib64/ parts/ sdist/ +models var/ wheels/ models/ diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py index 9699061b7..265007ae5 100644 --- a/pilot/configs/model_config.py +++ b/pilot/configs/model_config.py @@ -20,10 +20,13 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu" LLM_MODEL_CONFIG = { "flan-t5-base": os.path.join(MODEL_PATH, "flan-t5-base"), "vicuna-13b": os.path.join(MODEL_PATH, "vicuna-13b"), + "vicuna-7b": os.path.join(MODEL_PATH, "vicuna-7b"), "text2vec": os.path.join(MODEL_PATH, "text2vec-large-chinese"), "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2"), - "codegen2-7b": os.path.join(MODEL_PATH, ""), + "codegen2-1b": os.path.join(MODEL_PATH, "codegen2-1B"), "codet5p-2b": os.path.join(MODEL_PATH, "codet5p-2b"), + "chatglm-6b-int4": os.path.join(MODEL_PATH, "chatglm-6b-int4"), + "chatglm-6b": os.path.join(MODEL_PATH, "chatglm-6b"), } # Load model config diff --git a/pilot/server/chat_adapter.py b/pilot/server/chat_adapter.py new file mode 100644 index 000000000..9c32c911d --- /dev/null +++ b/pilot/server/chat_adapter.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + + +class BaseChatAdpter: + """The Base class for chat with llm models. 
it will match the model, + and fetch output from model""" + + def match(self, model_path: str): + return True + + def get_generate_stream_func(self): + pass \ No newline at end of file diff --git a/pilot/server/llmserver.py b/pilot/server/llmserver.py index e1c7556f6..33d3d545d 100644 --- a/pilot/server/llmserver.py +++ b/pilot/server/llmserver.py @@ -30,7 +30,6 @@ model_path = LLM_MODEL_CONFIG[CFG.LLM_MODEL] ml = ModelLoader(model_path=model_path) model, tokenizer = ml.loader(num_gpus=1, load_8bit=ISLOAD_8BIT, debug=ISDEBUG) -#model, tokenizer = load_model(model_path=model_path, device=DEVICE, num_gpus=1, load_8bit=True, debug=False) class ModelWorker: def __init__(self): From 370e327bf3ebb037dc5e275fe58005c175463382 Mon Sep 17 00:00:00 2001 From: csunny Date: Sat, 20 May 2023 16:23:07 +0800 Subject: [PATCH 04/15] add chatglm model --- pilot/model/adapter.py | 1 + pilot/server/chat_adapter.py | 71 +++++++++++++++++++++++++++++++++++- 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/pilot/model/adapter.py b/pilot/model/adapter.py index 84cd699ac..bf0e291ce 100644 --- a/pilot/model/adapter.py +++ b/pilot/model/adapter.py @@ -100,6 +100,7 @@ class GPT4AllAdapter(BaseLLMAdaper): register_llm_model_adapters(VicunaLLMAdapater) +register_llm_model_adapters(ChatGLMAdapater) # TODO Default support vicuna, other model need to tests and Evaluate register_llm_model_adapters(BaseLLMAdaper) \ No newline at end of file diff --git a/pilot/server/chat_adapter.py b/pilot/server/chat_adapter.py index 9c32c911d..ded0a1b19 100644 --- a/pilot/server/chat_adapter.py +++ b/pilot/server/chat_adapter.py @@ -1,6 +1,9 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +from typing import List +from functools import cache +from pilot.model.inference import generate_stream class BaseChatAdpter: """The Base class for chat with llm models. 
it will match the model, @@ -10,4 +13,70 @@ class BaseChatAdpter: return True def get_generate_stream_func(self): - pass \ No newline at end of file + """Return the generate stream handler func""" + pass + + +llm_model_chat_adapters: List[BaseChatAdpter] = [] + + +def register_llm_model_chat_adapter(cls): + """Register a chat adapter""" + llm_model_chat_adapters.append(cls()) + + +@cache +def get_llm_chat_adapter(model_path: str) -> BaseChatAdpter: + """Get a chat generate func for a model""" + for adapter in llm_model_chat_adapters: + if adapter.match(model_path): + return adapter + + raise ValueError(f"Invalid model for chat adapter {model_path}") + + +class VicunaChatAdapter(BaseChatAdpter): + + """ Model chat Adapter for vicuna""" + def match(self, model_path: str): + return "vicuna" in model_path + + def get_generate_stream_func(self): + return generate_stream + + +class ChatGLMChatAdapter(BaseChatAdpter): + """ Model chat Adapter for ChatGLM""" + def match(self, model_path: str): + return "chatglm" in model_path + + def get_generate_stream_func(self): + from pilot.model.chatglm_llm import chatglm_generate_stream + return chatglm_generate_stream + + +class CodeT5ChatAdapter(BaseChatAdpter): + + """ Model chat adapter for CodeT5 """ + def match(self, model_path: str): + return "codet5" in model_path + + def get_generate_stream_func(self): + # TODO + pass + +class CodeGenChatAdapter(BaseChatAdpter): + + """ Model chat adapter for CodeGen """ + def match(self, model_path: str): + return "codegen" in model_path + + def get_generate_stream_func(self): + # TODO + pass + + +register_llm_model_chat_adapter(VicunaChatAdapter) +register_llm_model_chat_adapter(ChatGLMChatAdapter) + +register_llm_model_chat_adapter(BaseChatAdpter) \ No newline at end of file From 8e127b38636c7df742017199951a7330b78fc9cf Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 10:13:59 +0800 Subject: [PATCH 05/15] llms: add chatglm --- pilot/server/chat_adapter.py | 2 +- pilot/server/llmserver.py | 99 ++++++++++++++++++++++++------------ 2 files changed, 67 insertions(+), 34 deletions(-) diff --git a/pilot/server/chat_adapter.py b/pilot/server/chat_adapter.py index ded0a1b19..805cacb3d 100644 --- a/pilot/server/chat_adapter.py +++ b/pilot/server/chat_adapter.py @@ -15,7 +15,7 @@ class BaseChatAdpter: def get_generate_stream_func(self): """Return the generate stream handler func""" pass - + llm_model_chat_adapters: List[BaseChatAdpter] = [] diff --git a/pilot/server/llmserver.py b/pilot/server/llmserver.py index 33d3d545d..fa1da5608 100644 --- a/pilot/server/llmserver.py +++ b/pilot/server/llmserver.py @@ -23,19 +23,67 @@ from pilot.model.inference import generate_output, get_embeddings from pilot.model.loader import ModelLoader from pilot.configs.model_config import * from pilot.configs.config import Config +from pilot.server.chat_adapter import get_llm_chat_adapter CFG = Config() model_path = LLM_MODEL_CONFIG[CFG.LLM_MODEL] - -ml = ModelLoader(model_path=model_path) -model, tokenizer = ml.loader(num_gpus=1, load_8bit=ISLOAD_8BIT, debug=ISDEBUG) +print(model_path) class ModelWorker: - def __init__(self): - pass - # TODO + def __init__(self, model_path, model_name, device, num_gpus=1): + + if model_path.endswith("/"): + model_path = model_path[:-1] + self.model_name = model_name or model_path.split("/")[-1] + self.device = device + + self.ml = ModelLoader(model_path=model_path) + self.model, self.tokenizer = self.ml.loader(num_gpus, load_8bit=ISLOAD_8BIT, debug=ISDEBUG) + + if hasattr(self.model.config, 
"max_sequence_length"): + self.context_len = self.model.config.max_sequence_length + elif hasattr(self.model.config, "max_position_embeddings"): + self.context_len = self.model.config.max_position_embeddings + + else: + self.context_len = 2048 + + self.llm_chat_adapter = get_llm_chat_adapter(model_path) + self.generate_stream_func = self.llm_chat_adapter.get_generate_stream_func() + + def get_queue_length(self): + if model_semaphore is None or model_semaphore._value is None or model_semaphore._waiters is None: + return 0 + else: + CFG.LIMIT_MODEL_CONCURRENCY - model_semaphore._value + len(model_semaphore._waiters) + + def generate_stream_gate(self, params): + try: + for output in self.generate_stream_func( + self.model, + self.tokenizer, + params, + DEVICE, + CFG.MAX_POSITION_EMBEDDINGS + ): + print("output: ", output) + ret = { + "text": output, + "error_code": 0, + } + yield json.dumps(ret).encode() + b"\0" + + except torch.cuda.CudaError: + ret = { + "text": "**GPU OutOfMemory, Please Refresh.**", + "error_code": 0 + } + yield json.dumps(ret).encode() + b"\0" + + def get_embeddings(self, prompt): + return get_embeddings(self.model, self.tokenizer, prompt) app = FastAPI() @@ -60,41 +108,17 @@ def release_model_semaphore(): model_semaphore.release() -def generate_stream_gate(params): - try: - for output in generate_stream( - model, - tokenizer, - params, - DEVICE, - CFG.MAX_POSITION_EMBEDDINGS, - ): - print("output: ", output) - ret = { - "text": output, - "error_code": 0, - } - yield json.dumps(ret).encode() + b"\0" - except torch.cuda.CudaError: - ret = { - "text": "**GPU OutOfMemory, Please Refresh.**", - "error_code": 0 - } - yield json.dumps(ret).encode() + b"\0" - - @app.post("/generate_stream") async def api_generate_stream(request: Request): global model_semaphore, global_counter global_counter += 1 params = await request.json() - print(model, tokenizer, params, DEVICE) if model_semaphore is None: model_semaphore = asyncio.Semaphore(CFG.LIMIT_MODEL_CONCURRENCY) await model_semaphore.acquire() - generator = generate_stream_gate(params) + generator = worker.generate_stream_gate(params) background_tasks = BackgroundTasks() background_tasks.add_task(release_model_semaphore) return StreamingResponse(generator, background=background_tasks) @@ -110,7 +134,7 @@ def generate(prompt_request: PromptRequest): response = [] rsp_str = "" - output = generate_stream_gate(params) + output = worker.generate_stream_gate(params) for rsp in output: # rsp = rsp.decode("utf-8") rsp_str = str(rsp, "utf-8") @@ -124,9 +148,18 @@ def generate(prompt_request: PromptRequest): def embeddings(prompt_request: EmbeddingRequest): params = {"prompt": prompt_request.prompt} print("Received prompt: ", params["prompt"]) - output = get_embeddings(model, tokenizer, params["prompt"]) + output = worker.get_embeddings(params["prompt"]) return {"response": [float(x) for x in output]} if __name__ == "__main__": + + + worker = ModelWorker( + model_path=model_path, + model_name=CFG.LLM_MODEL, + device=DEVICE, + num_gpus=1 + ) + uvicorn.run(app, host="0.0.0.0", port=CFG.MODEL_PORT, log_level="info") \ No newline at end of file From 42b76979a3a22d4b95133513eed8c51648ab9e29 Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 11:22:56 +0800 Subject: [PATCH 06/15] llms: multi model support --- examples/embdserver.py | 4 ++-- pilot/server/llmserver.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/embdserver.py b/examples/embdserver.py index 79140ba66..bb0016f00 100644 --- 
a/examples/embdserver.py +++ b/examples/embdserver.py @@ -12,7 +12,7 @@ from pilot.conversation import conv_qa_prompt_template, conv_templates from langchain.prompts import PromptTemplate -vicuna_stream_path = "generate_stream" +llmstream_stream_path = "generate_stream" CFG = Config() @@ -44,7 +44,7 @@ def generate(query): } response = requests.post( - url=urljoin(CFG.MODEL_SERVER, vicuna_stream_path), data=json.dumps(params) + url=urljoin(CFG.MODEL_SERVER, llmstream_stream_path), data=json.dumps(params) ) skip_echo_len = len(params["prompt"]) + 1 - params["prompt"].count("") * 3 diff --git a/pilot/server/llmserver.py b/pilot/server/llmserver.py index fa1da5608..79b3450d3 100644 --- a/pilot/server/llmserver.py +++ b/pilot/server/llmserver.py @@ -27,8 +27,6 @@ from pilot.server.chat_adapter import get_llm_chat_adapter CFG = Config() -model_path = LLM_MODEL_CONFIG[CFG.LLM_MODEL] -print(model_path) class ModelWorker: @@ -154,6 +152,8 @@ def embeddings(prompt_request: EmbeddingRequest): if __name__ == "__main__": + model_path = LLM_MODEL_CONFIG[CFG.LLM_MODEL] + print(model_path) worker = ModelWorker( model_path=model_path, From 7b454d88670b4f75c799261fe8b3301105631b97 Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 14:08:18 +0800 Subject: [PATCH 07/15] llms: add chatglm model --- examples/embdserver.py | 35 ++++++++++++++++++++++++----------- pilot/model/chatglm_llm.py | 14 ++++++++++---- pilot/server/webserver.py | 10 +++++++++- 3 files changed, 43 insertions(+), 16 deletions(-) diff --git a/examples/embdserver.py b/examples/embdserver.py index bb0016f00..b8525fe15 100644 --- a/examples/embdserver.py +++ b/examples/embdserver.py @@ -5,8 +5,15 @@ import requests import json import time import uuid +import os +import sys from urllib.parse import urljoin import gradio as gr + +ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.append(ROOT_PATH) + + from pilot.configs.config import Config from pilot.conversation import conv_qa_prompt_template, conv_templates from langchain.prompts import PromptTemplate @@ -21,24 +28,24 @@ def generate(query): template_name = "conv_one_shot" state = conv_templates[template_name].copy() - pt = PromptTemplate( - template=conv_qa_prompt_template, - input_variables=["context", "question"] - ) + # pt = PromptTemplate( + # template=conv_qa_prompt_template, + # input_variables=["context", "question"] + # ) - result = pt.format(context="This page covers how to use the Chroma ecosystem within LangChain. It is broken into two parts: installation and setup, and then references to specific Chroma wrappers.", - question=query) + # result = pt.format(context="This page covers how to use the Chroma ecosystem within LangChain. 
It is broken into two parts: installation and setup, and then references to specific Chroma wrappers.", + # question=query) - print(result) + # print(result) - state.append_message(state.roles[0], result) + state.append_message(state.roles[0], query) state.append_message(state.roles[1], None) prompt = state.get_prompt() params = { - "model": "vicuna-13b", + "model": "chatglm-6b", "prompt": prompt, - "temperature": 0.7, + "temperature": 1.0, "max_new_tokens": 1024, "stop": "###" } @@ -48,11 +55,17 @@ def generate(query): ) skip_echo_len = len(params["prompt"]) + 1 - params["prompt"].count("") * 3 + for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"): + if chunk: data = json.loads(chunk.decode()) if data["error_code"] == 0: - output = data["text"][skip_echo_len:].strip() + + if "vicuna" in CFG.LLM_MODEL: + output = data["text"][skip_echo_len:].strip() + else: + output = data["text"].strip() state.messages[-1][-1] = output + "▌" yield(output) diff --git a/pilot/model/chatglm_llm.py b/pilot/model/chatglm_llm.py index ef54e92d7..656252785 100644 --- a/pilot/model/chatglm_llm.py +++ b/pilot/model/chatglm_llm.py @@ -7,10 +7,11 @@ import torch def chatglm_generate_stream(model, tokenizer, params, device, context_len=2048, stream_interval=2): """Generate text using chatglm model's chat api """ - messages = params["prompt"] + prompt = params["prompt"] max_new_tokens = int(params.get("max_new_tokens", 256)) temperature = float(params.get("temperature", 1.0)) top_p = float(params.get("top_p", 1.0)) + stop = params.get("stop", "###") echo = params.get("echo", True) generate_kwargs = { @@ -23,11 +24,16 @@ def chatglm_generate_stream(model, tokenizer, params, device, context_len=2048, if temperature > 1e-5: generate_kwargs["temperature"] = temperature + # TODO, Fix this hist = [] - for i in range(0, len(messages) - 2, 2): - hist.append(messages[i][1], messages[i + 1][1]) - query = messages[-2][1] + messages = prompt.split(stop) + + # Add history chat to hist for model. + for i in range(1, len(messages) - 2, 2): + hist.append((messages[i].split(":")[1], messages[i+1].split(":")[1])) + + query = messages[-2].split(":")[1] output = "" i = 0 for i, (response, new_hist) in enumerate(model.stream_chat(tokenizer, query, hist, **generate_kwargs)): diff --git a/pilot/server/webserver.py b/pilot/server/webserver.py index ea8d8fc6d..2dd2ba9e0 100644 --- a/pilot/server/webserver.py +++ b/pilot/server/webserver.py @@ -364,8 +364,16 @@ def http_bot(state, mode, sql_mode, db_selector, temperature, max_new_tokens, re for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"): if chunk: data = json.loads(chunk.decode()) + + """ TODO Multi mode output handler, rewrite this for multi model, use adapter mode. 
+ """ if data["error_code"] == 0: - output = data["text"][skip_echo_len:].strip() + + if "vicuna" in CFG.LLM_MODEL: + output = data["text"][skip_echo_len:].strip() + else: + output = data["text"].strip() + output = post_process_code(output) state.messages[-1][-1] = output + "▌" yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5 From ce728200854a4eb3159c2eabdaa5d5c2c9003239 Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 14:48:54 +0800 Subject: [PATCH 08/15] llms: add mps support --- pilot/configs/model_config.py | 2 +- pilot/model/llm/monkey_patch.py | 125 ++++++++++++++++++++++++++++++++ pilot/model/loader.py | 67 +++++++++++++---- pilot/server/llmserver.py | 2 +- 4 files changed, 181 insertions(+), 15 deletions(-) create mode 100644 pilot/model/llm/monkey_patch.py diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py index 265007ae5..3199d0004 100644 --- a/pilot/configs/model_config.py +++ b/pilot/configs/model_config.py @@ -16,7 +16,7 @@ DATA_DIR = os.path.join(PILOT_PATH, "data") nltk.data.path = [os.path.join(PILOT_PATH, "nltk_data")] + nltk.data.path -DEVICE = "cuda" if torch.cuda.is_available() else "cpu" +DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu" LLM_MODEL_CONFIG = { "flan-t5-base": os.path.join(MODEL_PATH, "flan-t5-base"), "vicuna-13b": os.path.join(MODEL_PATH, "vicuna-13b"), diff --git a/pilot/model/llm/monkey_patch.py b/pilot/model/llm/monkey_patch.py new file mode 100644 index 000000000..a50481281 --- /dev/null +++ b/pilot/model/llm/monkey_patch.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- + +import math +from typing import Optional, Tuple + +import torch +from torch import nn +import transformers + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2].clone() + x2 = x[..., x.shape[-1] // 2 :].clone() + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids): + gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1] + gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3]) + cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) + sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = ( + self.q_proj(hidden_states) + .view(bsz, q_len, self.num_heads, self.head_dim) + .transpose(1, 2) + ) + key_states = ( + self.k_proj(hidden_states) + .view(bsz, q_len, self.num_heads, self.head_dim) + .transpose(1, 2) + ) + value_states = ( + self.v_proj(hidden_states) + .view(bsz, q_len, self.num_heads, self.head_dim) + .transpose(1, 2) + ) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb( + query_states, key_states, cos, sin, position_ids + ) + # [bsz, nh, t, hd] + + if past_key_value 
is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt( + self.head_dim + ) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + attn_weights = torch.max( + attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min) + ) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to( + query_states.dtype + ) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +def replace_llama_attn_with_non_inplace_operations(): + """Avoid bugs in mps backend by not using in-place operations.""" + transformers.models.llama.modeling_llama.LlamaAttention.forward = forward + +import transformers + + + +def replace_llama_attn_with_non_inplace_operations(): + """Avoid bugs in mps backend by not using in-place operations.""" + transformers.models.llama.modeling_llama.LlamaAttention.forward = forward diff --git a/pilot/model/loader.py b/pilot/model/loader.py index 66d9c733e..1c32939ec 100644 --- a/pilot/model/loader.py +++ b/pilot/model/loader.py @@ -2,11 +2,39 @@ # -*- coding: utf-8 -*- import torch +import sys import warnings from pilot.singleton import Singleton - +from typing import Optional from pilot.model.compression import compress_module from pilot.model.adapter import get_llm_model_adapter +from pilot.utils import get_gpu_memory +from pilot.model.llm.monkey_patch import replace_llama_attn_with_non_inplace_operations + +def raise_warning_for_incompatible_cpu_offloading_configuration( + device: str, load_8bit: bool, cpu_offloading: bool +): + if cpu_offloading: + if not load_8bit: + warnings.warn( + "The cpu-offloading feature can only be used while also using 8-bit-quantization.\n" + "Use '--load-8bit' to enable 8-bit-quantization\n" + "Continuing without cpu-offloading enabled\n" + ) + return False + if not "linux" in sys.platform: + warnings.warn( + "CPU-offloading is only supported on linux-systems due to the limited compatability with the bitsandbytes-package\n" + "Continuing without cpu-offloading enabled\n" + ) + return False + if device != "cuda": + warnings.warn( + "CPU-offloading is only enabled when using CUDA-devices\n" + "Continuing without cpu-offloading enabled\n" + ) + return False + return cpu_offloading class ModelLoader(metaclass=Singleton): @@ -30,26 +58,39 @@ class ModelLoader(metaclass=Singleton): } # TODO multi gpu support - def loader(self, num_gpus, 
load_8bit=False, debug=False): + def loader(self, num_gpus, load_8bit=False, debug=False, cpu_offloading=False, max_gpu_memory: Optional[str]=None): + + cpu_offloading(self.device, load_8bit, cpu_offloading) + if self.device == "cpu": - kwargs = {} + kwargs = {"torch_dtype": torch.float32} elif self.device == "cuda": kwargs = {"torch_dtype": torch.float16} - if num_gpus == "auto": + num_gpus = int(num_gpus) + + if num_gpus != 1: kwargs["device_map"] = "auto" + if max_gpu_memory is None: + kwargs["device_map"] = "sequential" + + available_gpu_memory = get_gpu_memory(num_gpus) + kwargs["max_memory"] = { + i: str(int(available_gpu_memory[i] * 0.85)) + "GiB" + for i in range(num_gpus) + } + else: - num_gpus = int(num_gpus) - if num_gpus != 1: - kwargs.update({ - "device_map": "auto", - "max_memory": {i: "13GiB" for i in range(num_gpus)}, - }) + kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)} + + elif self.device == "mps": + kwargs = kwargs = {"torch_dtype": torch.float16} + replace_llama_attn_with_non_inplace_operations() else: - # Todo Support mps for practise raise ValueError(f"Invalid device: {self.device}") - + # TODO when cpu loading, need use quantization config + llm_adapter = get_llm_model_adapter(self.model_path) model, tokenizer = llm_adapter.loader(self.model_path, kwargs) @@ -61,7 +102,7 @@ class ModelLoader(metaclass=Singleton): else: compress_module(model, self.device) - if (self.device == "cuda" and num_gpus == 1): + if (self.device == "cuda" and num_gpus == 1 and not cpu_offloading) or self.device == "mps": model.to(self.device) if debug: diff --git a/pilot/server/llmserver.py b/pilot/server/llmserver.py index 79b3450d3..2dcdce2ca 100644 --- a/pilot/server/llmserver.py +++ b/pilot/server/llmserver.py @@ -153,7 +153,7 @@ def embeddings(prompt_request: EmbeddingRequest): if __name__ == "__main__": model_path = LLM_MODEL_CONFIG[CFG.LLM_MODEL] - print(model_path) + print(model_path, DEVICE) worker = ModelWorker( model_path=model_path, From f52c7523b5c2d4ee377191ebb5f70955c401cf61 Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 14:54:16 +0800 Subject: [PATCH 09/15] llms: fix --- pilot/model/loader.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pilot/model/loader.py b/pilot/model/loader.py index 1c32939ec..531080314 100644 --- a/pilot/model/loader.py +++ b/pilot/model/loader.py @@ -59,8 +59,6 @@ class ModelLoader(metaclass=Singleton): # TODO multi gpu support def loader(self, num_gpus, load_8bit=False, debug=False, cpu_offloading=False, max_gpu_memory: Optional[str]=None): - - cpu_offloading(self.device, load_8bit, cpu_offloading) if self.device == "cpu": kwargs = {"torch_dtype": torch.float32} From 89970bd71c510f32aa120e62a36b0380dda304da Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 16:05:53 +0800 Subject: [PATCH 10/15] llms: add cpu support --- pilot/model/adapter.py | 19 ++++++++++++++----- pilot/server/llmserver.py | 1 + requirements.txt | 1 + 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/pilot/model/adapter.py b/pilot/model/adapter.py index bf0e291ce..be8980726 100644 --- a/pilot/model/adapter.py +++ b/pilot/model/adapter.py @@ -9,6 +9,8 @@ from transformers import ( AutoModel ) +from pilot.configs.model_config import DEVICE + class BaseLLMAdaper: """The Base class for multi model, in our project. 
We will support those model, which performance resemble ChatGPT """ @@ -61,13 +63,20 @@ class ChatGLMAdapater(BaseLLMAdaper): """LLM Adatpter for THUDM/chatglm-6b""" def match(self, model_path: str): return "chatglm" in model_path - + def loader(self, model_path: str, from_pretrained_kwargs: dict): tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) - model = AutoModel.from_pretrained( - model_path, trust_remote_code=True, **from_pretrained_kwargs - ).half().cuda() - return model, tokenizer + + if DEVICE != "cuda": + model = AutoModel.from_pretrained( + model_path, trust_remote_code=True, **from_pretrained_kwargs + ).float() + return model, tokenizer + else: + model = AutoModel.from_pretrained( + model_path, trust_remote_code=True, **from_pretrained_kwargs + ).half().cuda() + return model, tokenizer class CodeGenAdapter(BaseLLMAdaper): pass diff --git a/pilot/server/llmserver.py b/pilot/server/llmserver.py index 2dcdce2ca..bc227d518 100644 --- a/pilot/server/llmserver.py +++ b/pilot/server/llmserver.py @@ -155,6 +155,7 @@ if __name__ == "__main__": model_path = LLM_MODEL_CONFIG[CFG.LLM_MODEL] print(model_path, DEVICE) + worker = ModelWorker( model_path=model_path, model_name=CFG.LLM_MODEL, diff --git a/requirements.txt b/requirements.txt index 410d3129c..f82d2e2a8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -42,6 +42,7 @@ tenacity==8.2.2 peft pycocoevalcap sentence-transformers +cpm_kernels umap-learn notebook gradio==3.23 From 604d269797bfd6bf81eea7bc416a776b8d5f2a09 Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 16:11:52 +0800 Subject: [PATCH 11/15] fix and update --- pilot/model/chatglm_llm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pilot/model/chatglm_llm.py b/pilot/model/chatglm_llm.py index 656252785..f8279be7f 100644 --- a/pilot/model/chatglm_llm.py +++ b/pilot/model/chatglm_llm.py @@ -8,16 +8,15 @@ def chatglm_generate_stream(model, tokenizer, params, device, context_len=2048, """Generate text using chatglm model's chat api """ prompt = params["prompt"] - max_new_tokens = int(params.get("max_new_tokens", 256)) temperature = float(params.get("temperature", 1.0)) top_p = float(params.get("top_p", 1.0)) stop = params.get("stop", "###") echo = params.get("echo", True) generate_kwargs = { - "max_new_tokens": max_new_tokens, "do_sample": True if temperature > 1e-5 else False, "top_p": top_p, + "repetition_penalty": 1.0, "logits_processor": None } @@ -34,6 +33,7 @@ def chatglm_generate_stream(model, tokenizer, params, device, context_len=2048, hist.append((messages[i].split(":")[1], messages[i+1].split(":")[1])) query = messages[-2].split(":")[1] + print("Query Message: ", query) output = "" i = 0 for i, (response, new_hist) in enumerate(model.stream_chat(tokenizer, query, hist, **generate_kwargs)): From 5ec1f413b637489eaa67eb4ff7091566c663649f Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 16:18:33 +0800 Subject: [PATCH 12/15] update --- pilot/model/chatglm_llm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pilot/model/chatglm_llm.py b/pilot/model/chatglm_llm.py index f8279be7f..b0a3c8296 100644 --- a/pilot/model/chatglm_llm.py +++ b/pilot/model/chatglm_llm.py @@ -11,7 +11,7 @@ def chatglm_generate_stream(model, tokenizer, params, device, context_len=2048, temperature = float(params.get("temperature", 1.0)) top_p = float(params.get("top_p", 1.0)) stop = params.get("stop", "###") - echo = params.get("echo", True) + echo = params.get("echo", False) generate_kwargs = { 
"do_sample": True if temperature > 1e-5 else False, From a3fae0bdf203a2262e9d40601cdd0dd54b1ba831 Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 16:30:03 +0800 Subject: [PATCH 13/15] add chatglm support --- pilot/conversation.py | 3 +++ pilot/model/chatglm_llm.py | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pilot/conversation.py b/pilot/conversation.py index 7f526fb89..0470bc720 100644 --- a/pilot/conversation.py +++ b/pilot/conversation.py @@ -15,6 +15,9 @@ DB_SETTINGS = { "port": CFG.LOCAL_DB_PORT } +ROLE_USER = "USER" +ROLE_ASSISTANT = "Assistant" + class SeparatorStyle(Enum): SINGLE = auto() TWO = auto() diff --git a/pilot/model/chatglm_llm.py b/pilot/model/chatglm_llm.py index b0a3c8296..0f8b74efa 100644 --- a/pilot/model/chatglm_llm.py +++ b/pilot/model/chatglm_llm.py @@ -3,6 +3,8 @@ import torch +from pilot.conversation import ROLE_USER, ROLE_ASSISTANT + @torch.inference_mode() def chatglm_generate_stream(model, tokenizer, params, device, context_len=2048, stream_interval=2): @@ -30,9 +32,9 @@ def chatglm_generate_stream(model, tokenizer, params, device, context_len=2048, # Add history chat to hist for model. for i in range(1, len(messages) - 2, 2): - hist.append((messages[i].split(":")[1], messages[i+1].split(":")[1])) + hist.append((messages[i].split(ROLE_USER + ":")[1], messages[i+1].split(ROLE_ASSISTANT + ":")[1])) - query = messages[-2].split(":")[1] + query = messages[-2].split(ROLE_USER + ":")[1] print("Query Message: ", query) output = "" i = 0 From b22907f25e07007fa50c44d78edcb5516ac70106 Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 17:19:54 +0800 Subject: [PATCH 14/15] llms: support multi large models --- README.md | 7 +++++++ README.zh.md | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/README.md b/README.md index 57ca42fbf..20e1a2017 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,9 @@ Currently, we have released multiple key features, which are listed below to dem - Unified vector storage/indexing of knowledge base - Support for unstructured data such as PDF, Markdown, CSV, and WebURL +- Milti LLMs Support + - Supports multiple large language models, currently supporting Vicuna (7b, 13b), ChatGLM-6b (int4, int8) + ## Demo @@ -175,6 +178,10 @@ Notice: the webserver need to connect llmserver, so you need change the .env f We provide a user interface for Gradio, which allows you to use DB-GPT through our user interface. Additionally, we have prepared several reference articles (written in Chinese) that introduce the code and principles related to our project. - [LLM Practical In Action Series (1) — Combined Langchain-Vicuna Application Practical](https://medium.com/@cfqcsunny/llm-practical-in-action-series-1-combined-langchain-vicuna-application-practical-701cd0413c9f) +### Multi LLMs Usage + +To use multiple models, modify the LLM_MODEL parameter in the .env configuration file to switch between the models. + ## Acknowledgement The achievements of this project are thanks to the technical community, especially the following projects: diff --git a/README.zh.md b/README.zh.md index ed7e3f03c..38a450308 100644 --- a/README.zh.md +++ b/README.zh.md @@ -26,6 +26,9 @@ DB-GPT 是一个开源的以数据库为基础的GPT实验项目,使用本地 - 知识库统一向量存储/索引 - 非结构化数据支持包括PDF、MarkDown、CSV、WebURL +- 多模型支持 + - 支持多种大语言模型, 当前已支持Vicuna(7b,13b), ChatGLM-6b(int4, int8) + ## 效果演示 示例通过 RTX 4090 GPU 演示,[YouTube 地址](https://www.youtube.com/watch?v=1PWI6F89LPo) @@ -178,6 +181,10 @@ $ python webserver.py 2. 
[大模型实战系列(2) —— DB-GPT 阿里云部署指南](https://zhuanlan.zhihu.com/p/629467580) 3. [大模型实战系列(3) —— DB-GPT插件模型原理与使用](https://zhuanlan.zhihu.com/p/629623125) + +### 多模型使用 +在.env 配置文件当中, 修改LLM_MODEL参数来切换使用的模型。 + ## 感谢 项目取得的成果,需要感谢技术社区,尤其以下项目。 From a537ce026af6dee313290e5ea13b52f7abfd603b Mon Sep 17 00:00:00 2001 From: csunny Date: Sun, 21 May 2023 17:22:16 +0800 Subject: [PATCH 15/15] docs: add todo --- README.md | 1 + README.zh.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index 20e1a2017..c783f7e09 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,7 @@ Currently, we have released multiple key features, which are listed below to dem - Milti LLMs Support - Supports multiple large language models, currently supporting Vicuna (7b, 13b), ChatGLM-6b (int4, int8) + - TODO: codegen2, codet5p ## Demo diff --git a/README.zh.md b/README.zh.md index 38a450308..279920cce 100644 --- a/README.zh.md +++ b/README.zh.md @@ -28,6 +28,7 @@ DB-GPT 是一个开源的以数据库为基础的GPT实验项目,使用本地 - 多模型支持 - 支持多种大语言模型, 当前已支持Vicuna(7b,13b), ChatGLM-6b(int4, int8) + - TODO: codet5p, codegen2 ## 效果演示
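
A minimal sketch of how a further model could be wired in by following the pattern this series establishes — a loader adapter for pilot/model/adapter.py plus a chat adapter for pilot/server/chat_adapter.py. It is illustrative only and not part of the patches above: the MyModelAdapter/MyModelChatAdapter names, the "my-model" path fragment, and my_model_generate_stream are placeholders.

# Sketch only -- mirrors the ChatGLM integration added in this series.
from transformers import AutoModel, AutoTokenizer

from pilot.model.adapter import BaseLLMAdaper, register_llm_model_adapters
from pilot.server.chat_adapter import BaseChatAdpter, register_llm_model_chat_adapter


class MyModelAdapter(BaseLLMAdaper):
    """Loads weights and tokenizer when the configured model path matches."""

    def match(self, model_path: str):
        return "my-model" in model_path  # placeholder path fragment

    def loader(self, model_path: str, from_pretrained_kwargs: dict):
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        model = AutoModel.from_pretrained(
            model_path, trust_remote_code=True, **from_pretrained_kwargs
        )
        return model, tokenizer


class MyModelChatAdapter(BaseChatAdpter):
    """Returns the streaming generate function for the same model path."""

    def match(self, model_path: str):
        return "my-model" in model_path

    def get_generate_stream_func(self):
        # Placeholder: implemented alongside the model, like chatglm_generate_stream.
        from pilot.model.my_model_llm import my_model_generate_stream
        return my_model_generate_stream


# In the real modules these registrations go above the catch-all
# BaseLLMAdaper / BaseChatAdpter registrations at the bottom of each file,
# since the chat registry returns the first adapter whose match() succeeds.
register_llm_model_adapters(MyModelAdapter)
register_llm_model_chat_adapter(MyModelChatAdapter)

With both adapters registered and the model path added to LLM_MODEL_CONFIG in pilot/configs/model_config.py, switching to the new model only requires setting LLM_MODEL in the .env file, as the README changes in PATCH 14 describe.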