feat: add storage client cache (#2810)

This commit is contained in:
Aries-ckt 2025-07-05 19:44:47 +08:00 committed by GitHub
parent d27fdb7928
commit 1eea9c8eec
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -1,5 +1,6 @@
"""RAG STORAGE MANAGER manager.""" """RAG STORAGE MANAGER manager."""
import threading
from typing import List, Optional, Type from typing import List, Optional, Type
from dbgpt import BaseComponent from dbgpt import BaseComponent
@ -22,6 +23,8 @@ class StorageManager(BaseComponent):
def __init__(self, system_app: SystemApp):
    """Create a new StorageManager bound to *system_app*.

    Args:
        system_app: Application container used to resolve components
            (embedding factory, worker manager, app config).
    """
    self.system_app = system_app
    # Stores already created by this manager, keyed by index name, so
    # repeated requests for the same index reuse one client instance.
    self._store_cache: dict = {}
    # Serializes store creation so concurrent callers do not each build
    # their own client for the same index.
    self._cache_lock = threading.Lock()
    super().__init__(system_app)
def init_app(self, system_app: SystemApp): def init_app(self, system_app: SystemApp):
@ -62,17 +65,22 @@ class StorageManager(BaseComponent):
"""Create vector store.""" """Create vector store."""
app_config = self.system_app.config.configs.get("app_config") app_config = self.system_app.config.configs.get("app_config")
storage_config = app_config.rag.storage storage_config = app_config.rag.storage
embedding_factory = self.system_app.get_component( if index_name in self._store_cache:
"embedding_factory", EmbeddingFactory return self._store_cache[index_name]
) with self._cache_lock:
embedding_fn = embedding_factory.create() embedding_factory = self.system_app.get_component(
vector_store_config: VectorStoreConfig = storage_config.vector "embedding_factory", EmbeddingFactory
return vector_store_config.create_store( )
name=index_name, embedding_fn = embedding_factory.create()
embedding_fn=embedding_fn, vector_store_config: VectorStoreConfig = storage_config.vector
max_chunks_once_load=vector_store_config.max_chunks_once_load, new_store = vector_store_config.create_store(
max_threads=vector_store_config.max_threads, name=index_name,
) embedding_fn=embedding_fn,
max_chunks_once_load=vector_store_config.max_chunks_once_load,
max_threads=vector_store_config.max_threads,
)
self._store_cache[index_name] = new_store
return new_store
def create_kg_store(
    self, index_name, llm_model: Optional[str] = None
) -> BuiltinKnowledgeGraph:
    """Create a knowledge-graph store for ``index_name``, with caching.

    Args:
        index_name: Name of the KG index; also the cache key.
        llm_model: Optional LLM model name injected into the graph config.

    Returns:
        A ``CommunitySummaryKnowledgeGraph`` when the graph config enables
        summaries, otherwise a ``BuiltinKnowledgeGraph``; cached across calls.
    """
    app_config = self.system_app.config.configs.get("app_config")
    rag_config = app_config.rag
    storage_config = app_config.rag.storage
    # Fast path: lock-free read for the common cache-hit case.
    if index_name in self._store_cache:
        return self._store_cache[index_name]
    with self._cache_lock:
        # Double-checked locking: re-test under the lock so concurrent
        # callers do not each build a store for the same index.
        if index_name in self._store_cache:
            return self._store_cache[index_name]
        worker_manager = self.system_app.get_component(
            ComponentType.WORKER_MANAGER_FACTORY, WorkerManagerFactory
        ).create()
        llm_client = DefaultLLMClient(worker_manager=worker_manager)
        embedding_factory = self.system_app.get_component(
            "embedding_factory", EmbeddingFactory
        )
        embedding_fn = embedding_factory.create()
        if storage_config.graph:
            graph_config = storage_config.graph
            graph_config.llm_model = llm_model
            if (
                hasattr(graph_config, "enable_summary")
                and graph_config.enable_summary
            ):
                from dbgpt_ext.storage.knowledge_graph.community_summary import (
                    CommunitySummaryKnowledgeGraph,
                )

                # BUG FIX: previously the created store was returned without
                # being inserted into the cache, so the cache-hit branch
                # above could never fire for KG stores.
                new_store = CommunitySummaryKnowledgeGraph(
                    config=storage_config.graph,
                    name=index_name,
                    llm_client=llm_client,
                    vector_store_config=storage_config.vector,
                    kg_extract_top_k=rag_config.kg_extract_top_k,
                    kg_extract_score_threshold=rag_config.kg_extract_score_threshold,
                    kg_community_top_k=rag_config.kg_community_top_k,
                    kg_community_score_threshold=rag_config.kg_community_score_threshold,
                    kg_triplet_graph_enabled=rag_config.kg_triplet_graph_enabled,
                    kg_document_graph_enabled=rag_config.kg_document_graph_enabled,
                    kg_chunk_search_top_k=rag_config.kg_chunk_search_top_k,
                    kg_extraction_batch_size=rag_config.kg_extraction_batch_size,
                    kg_community_summary_batch_size=rag_config.kg_community_summary_batch_size,
                    kg_embedding_batch_size=rag_config.kg_embedding_batch_size,
                    kg_similarity_top_k=rag_config.kg_similarity_top_k,
                    kg_similarity_score_threshold=rag_config.kg_similarity_score_threshold,
                    kg_enable_text_search=rag_config.kg_enable_text_search,
                    kg_text2gql_model_enabled=rag_config.kg_text2gql_model_enabled,
                    kg_text2gql_model_name=rag_config.kg_text2gql_model_name,
                    embedding_fn=embedding_fn,
                    kg_max_chunks_once_load=rag_config.max_chunks_once_load,
                    kg_max_threads=rag_config.max_threads,
                )
                self._store_cache[index_name] = new_store
                return new_store
        new_store = BuiltinKnowledgeGraph(
            config=storage_config.graph,
            name=index_name,
            llm_client=llm_client,
        )
        self._store_cache[index_name] = new_store
        return new_store
def create_full_text_store(self, index_name) -> FullTextStoreBase:
    """Create a full-text (Elasticsearch/BM25) store for ``index_name``, with caching.

    Args:
        index_name: Name of the full-text index; also the cache key.

    Returns:
        The ``ElasticDocumentStore`` for ``index_name`` (cached across calls).
    """
    app_config = self.system_app.config.configs.get("app_config")
    rag_config = app_config.rag
    storage_config = app_config.rag.storage
    # Fast path: lock-free read for the common cache-hit case.
    if index_name in self._store_cache:
        return self._store_cache[index_name]
    with self._cache_lock:
        # Double-checked locking: re-test under the lock so concurrent
        # callers do not each build a store for the same index.
        if index_name in self._store_cache:
            return self._store_cache[index_name]
        # BUG FIX: previously the store was returned without being inserted
        # into the cache, so every call rebuilt the Elasticsearch client.
        new_store = ElasticDocumentStore(
            es_config=storage_config.full_text,
            name=index_name,
            k1=rag_config.bm25_k1,
            b=rag_config.bm25_b,
        )
        self._store_cache[index_name] = new_store
        return new_store
@property @property
def get_vector_supported_types(self) -> List[str]: def get_vector_supported_types(self) -> List[str]: