# Mirror of https://github.com/csunny/DB-GPT.git
# Synced 2025-10-22 09:28:42 +00:00 (23 lines, 680 B, Python)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Smoke-test script: load the local Vicuna-13B tokenizer and model, then print them.

Loads the tokenizer and causal-LM weights from ``BASE_MODE`` in half precision
and prints the selected device plus the loaded objects. Intended as a quick
manual check that the local model checkout is usable.
"""

import torch
# generate_stream / compress_module are imported for the (currently disabled)
# compression step below; compress_module stays available if re-enabled.
from fastchat.serve.inference import generate_stream, compress_module
from transformers import AutoModelForCausalLM, AutoTokenizer

# Absolute path to the local Vicuna-13B checkout; adjust for your environment.
BASE_MODE = "/home/magic/workspace/github/DB-GPT/models/vicuna-13b"


def main() -> None:
    """Load tokenizer and model from ``BASE_MODE`` and print a summary."""
    # Report whether a GPU is visible; actual placement is delegated to
    # device_map="auto" below.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # use_fast=False: stick to the slow (SentencePiece) tokenizer, which the
    # original script required for this checkpoint.
    tokenizer = AutoTokenizer.from_pretrained(BASE_MODE, use_fast=False)

    model = AutoModelForCausalLM.from_pretrained(
        BASE_MODE,
        low_cpu_mem_usage=True,   # avoid a full extra in-RAM copy while loading
        torch_dtype=torch.float16,  # half precision halves the memory footprint
        device_map="auto",        # let accelerate shard across available devices
    )

    print(device)
    # compress_module(model, device)  # optional weight compression (disabled)
    print(model, tokenizer)


if __name__ == "__main__":
    main()