[Feature] Add document retrieval QA (#5020)

* add langchain

* add langchain

* Add files via upload

* add langchain

* fix style

* fix style: remove extra space

* add pytest; modified retriever

* add pytest; modified retriever

* add tests to build_on_pr.yml

* fix build_on_pr.yml

* fix build on pr; fix environ vars

* separate unit tests for colossalqa from build_on_pr

* fix container setting; fix environ vars

* commented dev code

* add incremental update

* remove stale code

* fix style

* change to sha3 224

* fix retriever; fix style; add unit test for document loader

* fix ci workflow config

* fix ci workflow config

* add set cuda visible device script in ci

* fix doc string

* fix style; update readme; refactored

* add force log info

* change build on pr, ignore colossalqa

* fix docstring, capitalize all initial letters

* fix indexing; fix text-splitter

* remove debug code, update reference

* reset previous commit

* update LICENSE, update README, add key-value mode, fix bugs

* add files back

* revert force push

* remove junk file

* add test files

* fix retriever bug, add intent classification

* change conversation chain design

* rewrite prompt and conversation chain

* add ui v1

* ui v1

* fix avatar

* add header

* Refactor the RAG Code and support Pangu

* Refactor the ColossalQA chain and the UI demo into an object-oriented design.

* resolved conversation. tested scripts under examples. web demo still buggy

* fix ci tests

* Some modifications to add ChatGPT api

* modify llm.py and remove unnecessary files

* Delete applications/ColossalQA/examples/ui/test_frontend_input.json

* Remove OpenAI api key

* add colossalqa

* move files

* move files

* move files

* move files

* fix style

* Add Readme and fix some bugs.

* Add something to readme and modify some code

* modify a directory name for clarity

* remove redundant directory

* Correct a typo in llm.py

* fix AI prefix

* fix test_memory.py

* fix conversation

* fix some errors and typos

* Fix a missing import in RAG_ChatBot.py

* add colossalcloud LLM wrapper, correct issues in code review

---------

Co-authored-by: YeAnbang <anbangy2@outlook.com>
Co-authored-by: Orion-Zheng <zheng_zian@u.nus.edu>
Co-authored-by: Zian(Andy) Zheng <62330719+Orion-Zheng@users.noreply.github.com>
Co-authored-by: Orion-Zheng <zhengzian@u.nus.edu>
This commit is contained in:
YeAnbang
2023-11-23 10:33:48 +08:00
committed by GitHub
parent 3acbf6d496
commit e53e729d8e
69 changed files with 6758 additions and 0 deletions

View File

@@ -0,0 +1,125 @@
"""
LLM wrapper for LLMs running on ColossalCloud Platform
Usage:
os.environ['URL'] = ""
os.environ['HOST'] = ""
gen_config = {
'max_new_tokens': 100,
# 'top_k': 2,
'top_p': 0.9,
'temperature': 0.5,
'repetition_penalty': 2,
}
llm = ColossalCloudLLM(n=1)
llm.set_auth_config()
resp = llm(prompt='What do you call a three-ton kangaroo?', **gen_config)
print(resp) # super-heavyweight awesome-natured yawning Australian creature!
"""
import json
from typing import Any, List, Mapping, Optional
import requests
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class ColossalCloudLLM(LLM):
"""
A custom LLM class that integrates LLMs running on the ColossalCloud Platform
"""
n: int
gen_config: dict = None
auth_config: dict = None
valid_gen_para: list = ['max_new_tokens', 'top_k',
'top_p', 'temperature', 'repetition_penalty']
def __init__(self, gen_config=None, **kwargs):
"""
Args:
gen_config: config for generation,
max_new_tokens: 50 by default
top_k: (1, vocab_size)
top_p: (0, 1) if not None
temperature: (0, inf) if not None
repetition_penalty: (1, inf) if not None
"""
super(ColossalCloudLLM, self).__init__(**kwargs)
if gen_config is None:
self.gen_config = {"max_new_tokens": 50}
else:
assert "max_new_tokens" in gen_config, "max_new_tokens is a compulsory key in the gen config"
self.gen_config = gen_config
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"n": self.n}
@property
def _llm_type(self) -> str:
return 'ColossalCloudLLM'
def set_auth_config(self, **kwargs):
url = get_from_dict_or_env(kwargs, "url", "URL")
host = get_from_dict_or_env(kwargs, "host", "HOST")
auth_config = {}
auth_config['endpoint'] = url
auth_config['Host'] = host
self.auth_config = auth_config
def _call(self, prompt: str, stop=None, **kwargs: Any) -> str:
"""
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered
Returns:
The string generated by the model
"""
# Update the generation arguments
for key, value in kwargs.items():
if key not in self.valid_gen_para:
raise KeyError(f"Invalid generation parameter: '{key}'. Valid keys are: {', '.join(self.valid_gen_para)}")
if key in self.gen_config:
self.gen_config[key] = value
resp_text = self.text_completion(prompt, self.gen_config, self.auth_config)
# TODO: This may result in an excessive token count
if stop is not None:
for stopping_words in stop:
if stopping_words in resp_text:
resp_text = resp_text.split(stopping_words)[0]
return resp_text
def text_completion(self, prompt, gen_config, auth_config):
# Work on copies so repeated calls do not mutate self.gen_config / self.auth_config
gen_config = dict(gen_config)
auth_config = dict(auth_config)
# Compulsory parameters
endpoint = auth_config.pop('endpoint')
max_new_tokens = gen_config.pop('max_new_tokens')
# Optional parameters
optional_params = ['top_k', 'top_p', 'temperature', 'repetition_penalty']
gen_config = {key: gen_config[key] for key in optional_params if key in gen_config}
# Define the data payload
data = {
"max_new_tokens": max_new_tokens,
"history": [
{"instruction": prompt, "response": ""}
],
**gen_config
}
headers = {
"Content-Type": "application/json",
**auth_config,  # includes the 'Host' header
}
# Make the POST request
response = requests.post(endpoint, headers=headers, data=json.dumps(data))
response.raise_for_status()  # Raise an error if the status code is not 200 (success)
# Check the response
return response.text
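
The module docstring above shows a direct call; as a complementary illustration, the hypothetical sketch below wires ColossalCloudLLM into a plain LangChain LLMChain. The endpoint values, prompt template, and question are assumptions rather than part of this commit, and the import path for ColossalCloudLLM depends on where this module lives in the package.

import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
# from ... import ColossalCloudLLM  # import path depends on the package layout

# Hypothetical endpoint values; point these at a real ColossalCloud deployment
os.environ["URL"] = "<inference endpoint>"
os.environ["HOST"] = "<host header value>"

llm = ColossalCloudLLM(n=1)
llm.set_auth_config()  # reads URL and HOST from the environment

prompt = PromptTemplate(input_variables=["question"], template="Answer briefly: {question}")
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run(question="What is retrieval-augmented generation?"))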

View File

@@ -0,0 +1,183 @@
"""
API and LLM wrapper classes for running LLMs locally
Usage:
import os
model_path = os.environ.get("ZH_MODEL_PATH")
model_name = "chatglm2"
colossal_api = ColossalAPI(model_name, model_path)
llm = ColossalLLM(n=1, api=colossal_api)
TEST_PROMPT_CHATGLM="续写文章:惊蛰一过,春寒加剧。先是料料峭峭,继而雨季开始,"
logger.info(llm(TEST_PROMPT_CHATGLM, max_new_tokens=100), verbose=True)
"""
from typing import Any, List, Mapping, Optional
import torch
from colossalqa.local.utils import get_response, post_http_request
from colossalqa.mylogging import get_logger
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from transformers import AutoModelForCausalLM, AutoTokenizer
logger = get_logger()
class ColossalAPI:
"""
API for calling LLM.generate
"""
__instances = dict()
def __init__(self, model_type: str, model_path: str, ckpt_path: str = None) -> None:
"""
Configure the model
"""
if model_type + model_path + (ckpt_path or "") in ColossalAPI.__instances:
return
else:
ColossalAPI.__instances[model_type + model_path + (ckpt_path or "")] = self
self.model_type = model_type
self.model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, trust_remote_code=True)
if ckpt_path is not None:
state_dict = torch.load(ckpt_path)
self.model.load_state_dict(state_dict)
self.model.to(torch.cuda.current_device())
# Configure the tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
self.model.eval()
@staticmethod
def get_api(model_type: str, model_path: str, ckpt_path: str = None):
if model_type + model_path + (ckpt_path or "") in ColossalAPI.__instances:
return ColossalAPI.__instances[model_type + model_path + (ckpt_path or "")]
else:
return ColossalAPI(model_type, model_path, ckpt_path)
def generate(self, input: str, **kwargs) -> str:
"""
Generate response given the prompt
Args:
input: input string
**kwargs: language model keyword type arguments, such as top_k, top_p, temperature, max_new_tokens...
Returns:
output: output string
"""
if self.model_type in ["chatglm", "chatglm2"]:
inputs = {
k: v.to(torch.cuda.current_device()) for k, v in self.tokenizer(input, return_tensors="pt").items()
}
else:
inputs = {
"input_ids": self.tokenizer(input, return_tensors="pt")["input_ids"].to(torch.cuda.current_device())
}
output = self.model.generate(**inputs, **kwargs)
output = output.cpu()
prompt_len = inputs["input_ids"].size(1)
response = output[0, prompt_len:]
output = self.tokenizer.decode(response, skip_special_tokens=True)
return output
class VllmAPI:
def __init__(self, host: str = "localhost", port: int = 8077) -> None:
# Configure the API for a model served over the web
self.host = host
self.port = port
self.url = f"http://{self.host}:{self.port}/generate"
def generate(self, input: str, **kwargs):
output = get_response(post_http_request(input, self.url, **kwargs))[0]
return output[len(input) :]
class ColossalLLM(LLM):
"""
Langchain LLM wrapper for a local LLM
"""
n: int
api: Any
kwargs = {"max_new_tokens": 100}
@property
def _llm_type(self) -> str:
return "custom"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
logger.info(f"kwargs:{kwargs}\nstop:{stop}\nprompt:{prompt}", verbose=self.verbose)
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
generate_args = {k: kwargs[k] for k in kwargs if k not in ["stop", "n"]}
out = self.api.generate(prompt, **generate_args)
if isinstance(stop, list) and len(stop) != 0:
for stopping_words in stop:
if stopping_words in out:
out = out.split(stopping_words)[0]
logger.info(f"{prompt}{out}", verbose=self.verbose)
return out
@property
def _identifying_params(self) -> Mapping[str, int]:
"""Get the identifying parameters."""
return {"n": self.n}
class VllmLLM(LLM):
"""
LangChain LLM wrapper for an LLM served through a vLLM API server
"""
n: int
api: Any
kwargs = {"max_new_tokens": 100}
@property
def _llm_type(self) -> str:
return "custom"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
logger.info(f"kwargs:{kwargs}\nstop:{stop}\nprompt:{prompt}", verbose=self.verbose)
generate_args = {k: kwargs[k] for k in kwargs if k in ["n", "max_tokens", "temperature", "stream"]}
out = self.api.generate(prompt, **generate_args)
if stop is not None and len(stop) != 0:
for stopping_words in stop:
if stopping_words in out:
out = out.split(stopping_words)[0]
logger.info(f"{prompt}{out}", verbose=self.verbose)
return out
def set_host_port(self, host: str = "localhost", port: int = 8077, **kwargs) -> None:
if "max_tokens" not in kwargs:
kwargs["max_tokens"] = 100
self.kwargs = kwargs
self.api = VllmAPI(host=host, port=port)
@property
def _identifying_params(self) -> Mapping[str, int]:
"""Get the identifying parameters."""
return {"n": self.n}

View File

@@ -0,0 +1,150 @@
"""
LLM wrapper for Pangu
Usage:
# URL: In "Pangu Large Model Suite Management", click "Service Management" -> "Model List", then click "Copy Path" for the model you want to use
# USERNAME: In the Huawei Cloud console, "My Credentials" -> "API Credentials" -> "IAM User Name", i.e. the name you use to log in to the IAM account
# PASSWORD: The password of the IAM user
# DOMAIN_NAME: In the Huawei Cloud console, "My Credentials" -> "API Credentials" -> the account name, i.e. the parent account that manages the IAM users
os.environ["URL"] = ""
os.environ["URLNAME"] = ""
os.environ["PASSWORD"] = ""
os.environ["DOMAIN_NAME"] = ""
pg = Pangu(n=1)
pg.set_auth_config()
res = pg('你是谁') # Prompt: "Who are you?"  Example response: "Hello, I am Huawei's Pangu large model. I can help you through interactive conversation. Is there anything you would like to ask me?"
"""
import http.client
import json
from typing import Any, List, Mapping, Optional
import requests
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class Pangu(LLM):
"""
A custom LLM class that integrates Pangu models
"""
n: int
gen_config: dict = None
auth_config: dict = None
def __init__(self, gen_config=None, **kwargs):
super(Pangu, self).__init__(**kwargs)
if gen_config is None:
self.gen_config = {"user": "User", "max_tokens": 50, "temperature": 0.95, "n": 1}
else:
self.gen_config = gen_config
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"n": self.n}
@property
def _llm_type(self) -> str:
return "pangu"
def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
"""
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered
Returns:
The string generated by the model
"""
# Update the generation arguments
for key, value in kwargs.items():
if key in self.gen_config:
self.gen_config[key] = value
response = self.text_completion(prompt, self.gen_config, self.auth_config)
text = response["choices"][0]["text"]
if stop is not None:
for stopping_words in stop:
if stopping_words in text:
text = text.split(stopping_words)[0]
return text
def set_auth_config(self, **kwargs):
url = get_from_dict_or_env(kwargs, "url", "URL")
username = get_from_dict_or_env(kwargs, "username", "USERNAME")
password = get_from_dict_or_env(kwargs, "password", "PASSWORD")
domain_name = get_from_dict_or_env(kwargs, "domain_name", "DOMAIN_NAME")
region = url.split(".")[1]
auth_config = {}
auth_config["endpoint"] = url[url.find("https://") + 8 : url.find(".com") + 4]
auth_config["resource_path"] = url[url.find(".com") + 4 :]
auth_config["auth_token"] = self.get_latest_auth_token(region, username, password, domain_name)
self.auth_config = auth_config
def get_latest_auth_token(self, region, username, password, domain_name):
url = f"https://iam.{region}.myhuaweicloud.com/v3/auth/tokens"
payload = json.dumps(
{
"auth": {
"identity": {
"methods": ["password"],
"password": {"user": {"name": username, "password": password, "domain": {"name": domain_name}}},
},
"scope": {"project": {"name": region}},
}
}
)
headers = {"Content-Type": "application/json"}
response = requests.request("POST", url, headers=headers, data=payload)
return response.headers["X-Subject-Token"]
def text_completion(self, text, gen_config, auth_config):
conn = http.client.HTTPSConnection(auth_config["endpoint"])
payload = json.dumps(
{
"prompt": text,
"user": gen_config["user"],
"max_tokens": gen_config["max_tokens"],
"temperature": gen_config["temperature"],
"n": gen_config["n"],
}
)
headers = {
"X-Auth-Token": auth_config["auth_token"],
"Content-Type": "application/json",
}
conn.request("POST", auth_config["resource_path"], payload, headers)
res = conn.getresponse()
data = res.read()
data = json.loads(data.decode("utf-8"))
return data
def chat_model(self, messages, gen_config, auth_config):
conn = http.client.HTTPSConnection(auth_config["endpoint"])
payload = json.dumps(
{
"messages": messages,
"user": gen_config["user"],
"max_tokens": gen_config["max_tokens"],
"temperature": gen_config["temperature"],
"n": gen_config["n"],
}
)
headers = {
"X-Auth-Token": auth_config["auth_token"],
"Content-Type": "application/json",
}
conn.request("POST", auth_config["resource_path"], payload, headers)
res = conn.getresponse()
data = res.read()
data = json.loads(data.decode("utf-8"))
return data
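
As a hypothetical usage sketch, the snippet below sets a custom gen_config and overrides temperature at call time; the environment values are placeholders that must be replaced with real Huawei Cloud credentials as described in the module docstring, and the prompt is illustrative.

import os
# Placeholders only; fill in real values per the docstring above
os.environ["URL"] = "<copied model path>"
os.environ["USERNAME"] = "<IAM user name>"
os.environ["PASSWORD"] = "<IAM password>"
os.environ["DOMAIN_NAME"] = "<account name>"
pg = Pangu(n=1, gen_config={"user": "User", "max_tokens": 100, "temperature": 0.9, "n": 1})
pg.set_auth_config()  # parses the endpoint from URL and fetches an IAM token
print(pg("Introduce the Pangu model in one sentence.", temperature=0.3))  # call-time kwargs override matching gen_config keys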

View File

@@ -0,0 +1,29 @@
"""
Generation utilities
"""
import json
from typing import List
import requests
def post_http_request(
prompt: str, api_url: str, n: int = 1, max_tokens: int = 100, temperature: float = 0.0, stream: bool = False
) -> requests.Response:
headers = {"User-Agent": "Test Client"}
pload = {
"prompt": prompt,
"n": 1,
"use_beam_search": False,
"temperature": temperature,
"max_tokens": max_tokens,
"stream": stream,
}
response = requests.post(api_url, headers=headers, json=pload, stream=True, timeout=3)
return response
def get_response(response: requests.Response) -> List[str]:
data = json.loads(response.content)
output = data["text"]
return output
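
A hypothetical end-to-end use of these helpers is sketched below; it assumes a vLLM API server is listening at the given URL and returns a JSON body of the form {"text": [...]}, where each entry starts with the echoed prompt, which is how VllmAPI.generate in llm.py consumes it. The URL and prompt are assumptions.

# Hypothetical example; requires a running vLLM API server at this address
api_url = "http://localhost:8077/generate"
prompt = "Briefly explain what document retrieval QA is."
resp = post_http_request(prompt, api_url, max_tokens=64, temperature=0.0)
texts = get_response(resp)
print(texts[0][len(prompt):])  # strip the echoed prompt, mirroring VllmAPI.generate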