Merge pull request #18 from csunny/dev

Dev
magic.chen committed 2023-05-08 09:58:25 +08:00 (committed by GitHub)
24 changed files with 329 additions and 14 deletions

BIN asserts/exeable.png Normal file (binary image, 277 KiB; not shown)

@@ -1,3 +1,2 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-

pilot/agent/agent.py Normal file (+7)

@@ -0,0 +1,7 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
class Agent:
"""Agent class for interacting with DB-GPT """
pass


@@ -0,0 +1,23 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from pilot.singleton import Singleton
class AgentManager(metaclass=Singleton):
"""Agent manager for managing DB-GPT agents"""
def __init__(self) -> None:
self.agents = {}  # TODO: need to define
def create_agent(self):
pass
def message_agent(self):
pass
def list_agents(self):
pass
def delete_agent(self):
pass
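
Because AgentManager uses the Singleton metaclass (added in pilot/singleton.py below), repeated construction yields one shared instance. A minimal sketch of that behavior; the import path is an assumption, since this diff does not show the new module's file name:

# Hypothetical import path for the new AgentManager module.
from pilot.agent.agent_manager import AgentManager

m1 = AgentManager()
m2 = AgentManager()
assert m1 is m2          # the metaclass returns the cached instance
assert m1.agents == {}   # shared registry, still to be defined per the TODO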

pilot/chain/audio.py Normal file (+2)

@@ -0,0 +1,2 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-

pilot/configs/config.py Normal file (+12)

@@ -0,0 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from pilot.singleton import Singleton
class Config(metaclass=Singleton):
"""Configuration class to store the state of bools for different scripts access"""
def __init__(self) -> None:
"""Initialize the Config class"""
pass
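
The body is a stub for now (the AutoGPTPluginTemplate import is not yet used). A minimal sketch of how this singleton config might carry flags; the attribute names are assumptions, not anything defined in this PR:

import os

class Config(metaclass=Singleton):
    def __init__(self) -> None:
        # Hypothetical flags; names are illustrative only.
        self.debug_mode = os.getenv("DEBUG_MODE", "False") == "True"
        self.plugins: list = []

cfg = Config()   # every later Config() call returns this same instance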

pilot/configs/model_config.py

@@ -11,6 +11,7 @@ PILOT_PATH = os.path.join(ROOT_PATH, "pilot")
VECTORE_PATH = os.path.join(PILOT_PATH, "vector_store")
LOGDIR = os.path.join(ROOT_PATH, "logs")
DATASETS_DIR = os.path.join(PILOT_PATH, "datasets")
+DATA_DIR = os.path.join(PILOT_PATH, "data")
nltk.data.path = [os.path.join(PILOT_PATH, "nltk_data")] + nltk.data.path


@@ -0,0 +1,8 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""We need to design a base class. That other connector can Write with this"""
class BaseConnection:
pass
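
BaseConnection is empty so far. A hedged sketch of how a concrete connector could extend it; the subclass and its method are purely illustrative:

class SQLiteConnection(BaseConnection):
    """Hypothetical connector showing the intended extension point."""
    def connect(self, path: str):
        import sqlite3
        return sqlite3.connect(path)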


@@ -0,0 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class ClickHouseConnector:
"""ClickHouseConnector"""
pass

pilot/connections/es.py Normal file (+7)

@@ -0,0 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class ElasticSearchConnector:
"""ElasticSearchConnector"""
pass


@@ -0,0 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class MongoConnector:
"""MongoConnector is a class which connect to mongo and chat with LLM"""
pass

pilot/connections/mysql.py

@@ -4,7 +4,11 @@
import pymysql
class MySQLOperator:
-"""Connect MySQL Database fetch MetaData For LLM Prompt """
+"""Connect to a MySQL database and fetch metadata for the LLM prompt.
+Args:
+Usage:
+"""
default_db = ["information_schema", "performance_schema", "sys", "mysql"]
def __init__(self, user, password, host="localhost", port=3306) -> None:
@@ -26,6 +30,9 @@ class MySQLOperator:
cursor.execute(_sql)
results = cursor.fetchall()
return results
def get_index(self, schema_name):
pass
def get_db_list(self):
with self.conn.cursor() as cursor:
@@ -38,5 +45,7 @@ class MySQLOperator:
dbs = [d["Database"] for d in results if d["Database"] not in self.default_db]
return dbs
def get_meta(self, schema_name):
pass
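
A usage sketch for the expanded operator, with placeholder credentials (real callers pass **DB_SETTINGS, as the Conversation diff below shows):

mo = MySQLOperator(user="root", password="secret", host="localhost", port=3306)
print(mo.get_db_list())  # user databases, with the default_db entries filtered out
# get_index() and get_meta() are stubs in this PR and return None for now.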


@@ -0,0 +1,6 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
class OracleConnector:
"""OracleConnector"""
pass


@@ -0,0 +1,8 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class PostgresConnector:
"""PostgresConnector is a class which Connector to chat with LLM"""
pass


@@ -0,0 +1,7 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
class RedisConnector:
"""RedisConnector"""
pass


@@ -89,7 +89,7 @@ class Conversation:
def gen_sqlgen_conversation(dbname):
-from pilot.connections.mysql_conn import MySQLOperator
+from pilot.connections.mysql import MySQLOperator
mo = MySQLOperator(
**DB_SETTINGS
)

pilot/model/loader.py

@@ -10,7 +10,12 @@ from transformers import (
from fastchat.serve.compression import compress_module
-class ModerLoader:
+class ModelLoader:
+"""ModelLoader loads a model from model_path.
+Args: model_path
+"""
kwargs = {}


@@ -0,0 +1,180 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import json
import transformers
from transformers import LlamaTokenizer, LlamaForCausalLM
from typing import List
from peft import (
LoraConfig,
get_peft_model,
get_peft_model_state_dict,
prepare_model_for_int8_training,
)
import torch
from datasets import load_dataset
import pandas as pd
from pilot.configs.model_config import DATA_DIR, LLM_MODEL, LLM_MODEL_CONFIG
device = "cuda" if torch.cuda.is_available() else "cpu"
CUTOFF_LEN = 50
df = pd.read_csv(os.path.join(DATA_DIR, "BTC_Tweets_Updated.csv"))
def sentiment_score_to_name(score: float):
if score > 0:
return "Positive"
elif score < 0:
return "Negative"
return "Neutral"
dataset_data = [
{
"instruction": "Detect the sentiment of the tweet.",
"input": row_dict["Tweet"],
"output": sentiment_score_to_name(row_dict["New_Sentiment_State"])
}
for row_dict in df.to_dict(orient="records")
]
with open(os.path.join(DATA_DIR, "alpaca-bitcoin-sentiment-dataset.json"), "w") as f:
json.dump(dataset_data, f)
data = load_dataset("json", data_files=os.path.join(DATA_DIR, "alpaca-bitcoin-sentiment-dataset.json"))
print(data["train"])
BASE_MODEL = LLM_MODEL_CONFIG[LLM_MODEL]
model = LlamaForCausalLM.from_pretrained(
BASE_MODEL,
torch_dtype=torch.float16,
device_map="auto",
offload_folder=os.path.join(DATA_DIR, "vicuna-lora")
)
tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)
tokenizer.pad_token_id = 0  # pad with a token distinct from eos
tokenizer.padding_side = "left"
def generate_prompt(data_point):
return f"""Blow is an instruction that describes a task, paired with an input that provide future context.
Write a response that appropriately completes the request. #noqa:
### Instruct:
{data_point["instruction"]}
### Input
{data_point["input"]}
### Response
{data_point["output"]}
"""
def tokenize(prompt, add_eos_token=True):
result = tokenizer(
prompt,
truncation=True,
max_length=CUTOFF_LEN,
padding=False,
return_tensors=None,
)
if (result["input_ids"][-1] != tokenizer.eos_token_id and len(result["input_ids"]) < CUTOFF_LEN and add_eos_token):
result["input_ids"].append(tokenizer.eos_token_id)
result["attention_mask"].append(1)
result["labels"] = result["input_ids"].copy()
return result
def generate_and_tokenize_prompt(data_point):
full_prompt = generate_prompt(data_point)
tokenized_full_prompt = tokenize(full_prompt)
return tokenized_full_prompt
train_val = data["train"].train_test_split(
test_size=200, shuffle=True, seed=42
)
train_data = (
train_val["train"].map(generate_and_tokenize_prompt)
)
val_data = (
train_val["test"].map(generate_and_tokenize_prompt)
)
# Training
LORA_R = 8
LORA_ALPHA = 16
LORA_DROPOUT = 0.05
LORA_TARGET_MODULES = [
"q_proj",
"v_proj",
]
BATCH_SIZE = 128
MICRO_BATCH_SIZE = 4
GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE
LEARNING_RATE = 3e-4
TRAIN_STEPS = 300
OUTPUT_DIR = "experiments"
# We can now prepare model for training
model = prepare_model_for_int8_training(model)
config = LoraConfig(
r = LORA_R,
lora_alpha=LORA_ALPHA,
target_modules=LORA_TARGET_MODULES,
lora_dropout=LORA_DROPOUT,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
model.print_trainable_parameters()
training_arguments = transformers.TrainingArguments(
per_device_train_batch_size=MICRO_BATCH_SIZE,
gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
warmup_steps=100,
max_steps=TRAIN_STEPS,
no_cuda=True,
learning_rate=LEARNING_RATE,
logging_steps=10,
optim="adamw_torch",
evaluation_strategy="steps",
save_strategy="steps",
eval_steps=50,
save_steps=50,
output_dir=OUTPUT_DIR,
save_total_limit=3,
load_best_model_at_end=True,
report_to="tensorboard"
)
data_collator = transformers.DataCollatorForSeq2Seq(
tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
)
trainer = transformers.Trainer(
model=model,
train_dataset=train_data,
eval_dataset=val_data,
args=training_arguments,
data_collator=data_collator  # Trainer's keyword is data_collator; "data_collector" would raise a TypeError
)
model.config.use_cache = False
old_state_dict = model.state_dict
model.state_dict = (
lambda self, *_, **__: get_peft_model_state_dict(
self, old_state_dict()
)
).__get__(model, type(model))
trainer.train()
model.save_pretrained(OUTPUT_DIR)
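
After training, the LoRA adapter saved in OUTPUT_DIR can be reattached to the base model for inference. A minimal sketch, assuming the same BASE_MODEL and peft's PeftModel API; the tweet text is illustrative:

from peft import PeftModel

base = LlamaForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(base, OUTPUT_DIR)   # load the saved adapter weights
prompt = generate_prompt({"instruction": "Detect the sentiment of the tweet.", "input": "BTC to the moon!", "output": ""})
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(out[0], skip_special_tokens=True))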


@@ -13,7 +13,7 @@ from pilot.model.inference import generate_output, get_embeddings
from fastchat.serve.inference import load_model
-from pilot.model.loader import ModerLoader
+from pilot.model.loader import ModelLoader
from pilot.configs.model_config import *
model_path = LLM_MODEL_CONFIG[LLM_MODEL]
@@ -22,7 +22,7 @@ model_path = LLM_MODEL_CONFIG[LLM_MODEL]
global_counter = 0
model_semaphore = None
-ml = ModerLoader(model_path=model_path)
+ml = ModelLoader(model_path=model_path)
model, tokenizer = ml.loader(num_gpus=1, load_8bit=ISLOAD_8BIT, debug=ISDEBUG)
#model, tokenizer = load_model(model_path=model_path, device=DEVICE, num_gpus=1, load_8bit=True, debug=False)


@@ -12,7 +12,7 @@ import requests
from urllib.parse import urljoin
from pilot.configs.model_config import DB_SETTINGS
from pilot.server.vectordb_qa import KnownLedgeBaseQA
-from pilot.connections.mysql_conn import MySQLOperator
+from pilot.connections.mysql import MySQLOperator
from pilot.vector_store.extract_tovec import get_vector_storelist, load_knownledge_from_doc, knownledge_tovec_st
from pilot.configs.model_config import LOGDIR, VICUNA_MODEL_SERVER, LLM_MODEL, DATASETS_DIR

pilot/singleton.py Normal file (+21)

@@ -0,0 +1,21 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""The singleton metaclass for ensuring only one instance of a class."""
import abc
from typing import Any
class Singleton(abc.ABCMeta, type):
""" Singleton metaclass for ensuring only one instance of a class"""
_instances = {}
def __call__(cls, *args: Any, **kwargs: Any) -> Any:
"""Call method for the singleton metaclass"""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class AbstractSingleton(abc.ABC, metaclass=Singleton):
"""Abstract singleton class for ensuring only one instance of a class"""
pass
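
A quick, minimal check of the metaclass behavior (the demo class is hypothetical):

class Cache(metaclass=Singleton):
    def __init__(self) -> None:
        self.items = {}

a = Cache()
b = Cache()
assert a is b   # the second call returns the instance cached in _instances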


@@ -15,6 +15,20 @@ from pilot.configs.model_config import VECTORE_PATH, DATASETS_DIR, LLM_MODEL_CON
class KnownLedge2Vector:
"""KnownLedge2Vector class is order to load document to vector
and persist to vector store.
Args:
- model_name
Usage:
k2v = KnownLedge2Vector()
persist_dir = os.path.join(VECTORE_PATH, ".vectordb")
print(persist_dir)
for s, dc in k2v.query("what is oceanbase?"):
print(s, dc.page_content, dc.metadata)
"""
embeddings: object = None
model_name = LLM_MODEL_CONFIG["sentence-transforms"]
top_k: int = VECTOR_SEARCH_TOP_K
@@ -81,11 +95,4 @@ class KnownLedge2Vector:
dc, s = doc
yield s, dc
-if __name__ == "__main__":
-k2v = KnownLedge2Vector()
-persist_dir = os.path.join(VECTORE_PATH, ".vectordb")
-print(persist_dir)
-for s, dc in k2v.query("什么是OceanBase"):
-print(s, dc.page_content, dc.metadata)