diff --git a/examples/app.py b/examples/app.py index 52086c20b..07f8a5a51 100644 --- a/examples/app.py +++ b/examples/app.py @@ -5,7 +5,7 @@ import gradio as gr from langchain.agents import AgentType, initialize_agent, load_tools from llama_index import ( Document, - GPTSimpleVectorIndex, + GPTVectorStoreIndex, LangchainEmbedding, LLMPredictor, ServiceContext, @@ -33,7 +33,7 @@ def knowledged_qa_demo(text_list): service_context = ServiceContext.from_defaults( llm_predictor=llm_predictor, embed_model=embed_model ) - index = GPTSimpleVectorIndex.from_documents( + index = GPTVectorStoreIndex.from_documents( documents, service_context=service_context ) return index diff --git a/examples/gpt_index.py b/examples/gpt_index.py index 2a0841a24..a4683af1a 100644 --- a/examples/gpt_index.py +++ b/examples/gpt_index.py @@ -4,7 +4,7 @@ import logging import sys -from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader +from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) @@ -13,7 +13,7 @@ logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) documents = SimpleDirectoryReader("data").load_data() # split the document to chunk, max token size=500, convert chunk to vector -index = GPTSimpleVectorIndex(documents) +index = GPTVectorStoreIndex.from_documents(documents) # save index index.save_to_disk("index.json") diff --git a/examples/t5_example.py b/examples/t5_example.py index ab2b7f2e3..b49e79d4e 100644 --- a/examples/t5_example.py +++ b/examples/t5_example.py @@ -6,7 +6,7 @@ from langchain.embeddings.huggingface import HuggingFaceEmbeddings from langchain.llms.base import LLM from llama_index import ( GPTListIndex, - GPTSimpleVectorIndex, + GPTVectorStoreIndex, LangchainEmbedding, LLMPredictor, PromptHelper,