mirror of
				https://github.com/hwchase17/langchain.git
				synced 2025-11-04 02:03:32 +00:00 
			
		
		
		
	Optimize the initialization method of GPTCache (#4522)
Optimize the initialization method of GPTCache, so that users can use GPTCache more quickly.
This commit is contained in:
		@@ -408,25 +408,20 @@
 | 
			
		||||
   "metadata": {},
 | 
			
		||||
   "outputs": [],
 | 
			
		||||
   "source": [
 | 
			
		||||
    "import gptcache\n",
 | 
			
		||||
    "from gptcache import Cache\n",
 | 
			
		||||
    "from gptcache.manager.factory import manager_factory\n",
 | 
			
		||||
    "from gptcache.processor.pre import get_prompt\n",
 | 
			
		||||
    "from gptcache.manager.factory import get_data_manager\n",
 | 
			
		||||
    "from langchain.cache import GPTCache\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "# Avoid multiple caches using the same file, causing different llm model caches to affect each other\n",
 | 
			
		||||
    "i = 0\n",
 | 
			
		||||
    "file_prefix = \"data_map\"\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "def init_gptcache_map(cache_obj: gptcache.Cache):\n",
 | 
			
		||||
    "    global i\n",
 | 
			
		||||
    "    cache_path = f'{file_prefix}_{i}.txt'\n",
 | 
			
		||||
    "def init_gptcache(cache_obj: Cache, llm: str):\n",
 | 
			
		||||
    "    cache_obj.init(\n",
 | 
			
		||||
    "        pre_embedding_func=get_prompt,\n",
 | 
			
		||||
    "        data_manager=get_data_manager(data_path=cache_path),\n",
 | 
			
		||||
    "        data_manager=manager_factory(manager=\"map\", data_dir=f\"map_cache_{llm}\"),\n",
 | 
			
		||||
    "    )\n",
 | 
			
		||||
    "    i += 1\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "langchain.llm_cache = GPTCache(init_gptcache_map)"
 | 
			
		||||
    "langchain.llm_cache = GPTCache(init_gptcache)"
 | 
			
		||||
   ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
@@ -506,37 +501,16 @@
 | 
			
		||||
   "metadata": {},
 | 
			
		||||
   "outputs": [],
 | 
			
		||||
   "source": [
 | 
			
		||||
    "import gptcache\n",
 | 
			
		||||
    "from gptcache.processor.pre import get_prompt\n",
 | 
			
		||||
    "from gptcache.manager.factory import get_data_manager\n",
 | 
			
		||||
    "from langchain.cache import GPTCache\n",
 | 
			
		||||
    "from gptcache.manager import get_data_manager, CacheBase, VectorBase\n",
 | 
			
		||||
    "from gptcache import Cache\n",
 | 
			
		||||
    "from gptcache.embedding import Onnx\n",
 | 
			
		||||
    "from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation\n",
 | 
			
		||||
    "from gptcache.adapter.api import init_similar_cache\n",
 | 
			
		||||
    "from langchain.cache import GPTCache\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "# Avoid multiple caches using the same file, causing different llm model caches to affect each other\n",
 | 
			
		||||
    "i = 0\n",
 | 
			
		||||
    "file_prefix = \"data_map\"\n",
 | 
			
		||||
    "llm_cache = Cache()\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "def init_gptcache(cache_obj: Cache, llm: str):\n",
 | 
			
		||||
    "    init_similar_cache(cache_obj=cache_obj, data_dir=f\"similar_cache_{llm}\")\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "def init_gptcache_map(cache_obj: gptcache.Cache):\n",
 | 
			
		||||
    "    global i\n",
 | 
			
		||||
    "    cache_path = f'{file_prefix}_{i}.txt'\n",
 | 
			
		||||
    "    onnx = Onnx()\n",
 | 
			
		||||
    "    cache_base = CacheBase('sqlite')\n",
 | 
			
		||||
    "    vector_base = VectorBase('faiss', dimension=onnx.dimension)\n",
 | 
			
		||||
    "    data_manager = get_data_manager(cache_base, vector_base, max_size=10, clean_size=2)\n",
 | 
			
		||||
    "    cache_obj.init(\n",
 | 
			
		||||
    "        pre_embedding_func=get_prompt,\n",
 | 
			
		||||
    "        embedding_func=onnx.to_embeddings,\n",
 | 
			
		||||
    "        data_manager=data_manager,\n",
 | 
			
		||||
    "        similarity_evaluation=SearchDistanceEvaluation(),\n",
 | 
			
		||||
    "    )\n",
 | 
			
		||||
    "    i += 1\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "langchain.llm_cache = GPTCache(init_gptcache_map)"
 | 
			
		||||
    "langchain.llm_cache = GPTCache(init_gptcache)"
 | 
			
		||||
   ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
@@ -929,7 +903,7 @@
 | 
			
		||||
 ],
 | 
			
		||||
 "metadata": {
 | 
			
		||||
  "kernelspec": {
 | 
			
		||||
   "display_name": "Python 3 (ipykernel)",
 | 
			
		||||
   "display_name": "Python 3",
 | 
			
		||||
   "language": "python",
 | 
			
		||||
   "name": "python3"
 | 
			
		||||
  },
 | 
			
		||||
@@ -943,7 +917,7 @@
 | 
			
		||||
   "name": "python",
 | 
			
		||||
   "nbconvert_exporter": "python",
 | 
			
		||||
   "pygments_lexer": "ipython3",
 | 
			
		||||
   "version": "3.9.1"
 | 
			
		||||
   "version": "3.8.8"
 | 
			
		||||
  }
 | 
			
		||||
 },
 | 
			
		||||
 "nbformat": 4,
 | 
			
		||||
 
 | 
			
		||||
		Reference in New Issue
	
	Block a user