#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import gradio as gr
from langchain.agents import load_tools, initialize_agent, AgentType
from llama_index import (
    Document,
    GPTSimpleVectorIndex,
    LLMPredictor,
    LangchainEmbedding,
    ServiceContext,
)

from pilot.model.vicuna_llm import VicunaRequestLLM, VicunaEmbeddingLLM


def agent_demo():
    """Run a zero-shot ReAct agent backed by the Vicuna LLM with a Python REPL tool."""
    llm = VicunaRequestLLM()
    tools = load_tools(["python_repl"], llm=llm)
    agent = initialize_agent(
        tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
    agent.run("Write a SQL script that queries 'select count(1)'.")


def knowledged_qa_demo(text_list):
    """Build an in-memory vector index over the given texts, using Vicuna for both completion and embedding."""
    llm_predictor = LLMPredictor(llm=VicunaRequestLLM())
    hfemb = VicunaEmbeddingLLM()
    embed_model = LangchainEmbedding(hfemb)
    documents = [Document(t) for t in text_list]
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor, embed_model=embed_model
    )
    index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
    return index


def get_answer(q):
    """Answer a question against an index built from the built-in test knowledge."""
    base_knowledge = """ This is a test passage. """
    text_list = [base_knowledge]
    index = knowledged_qa_demo(text_list)
    response = index.query(q)
    return response.response


if __name__ == "__main__":
    # agent_demo()
    with gr.Blocks() as demo:
        gr.Markdown("Database Intelligence Assistant")
        with gr.Tab("Knowledge Q&A"):
            text_input = gr.TextArea()
            text_output = gr.TextArea()
            text_button = gr.Button()
            text_button.click(get_answer, inputs=text_input, outputs=text_output)

    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0")