Deprecate direct access to globals like `debug` and `verbose`. (#11311)

Instead of accessing `langchain.debug`, `langchain.verbose`, or `langchain.llm_cache`, please use the new getter/setter functions in `langchain.globals`:

- `langchain.globals.set_debug()` and `langchain.globals.get_debug()`
- `langchain.globals.set_verbose()` and `langchain.globals.get_verbose()`
- `langchain.globals.set_llm_cache()` and `langchain.globals.get_llm_cache()`

Using the old globals directly will now raise a warning.

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
Commit 9e32120cbb (parent 01b7b46908)
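For orientation before the diff: a minimal sketch of the migration this commit asks for (not taken verbatim from any file below; `InMemoryCache` and `get_debug` are just convenient examples from the public API):

```python
# Old style, now deprecated (reading these module globals emits a warning):
#   import langchain
#   langchain.debug = True
#   langchain.verbose = True
#   langchain.llm_cache = InMemoryCache()

# New style: use the getters/setters in langchain.globals.
from langchain.cache import InMemoryCache
from langchain.globals import get_debug, set_debug, set_llm_cache, set_verbose

set_debug(True)                 # replaces `langchain.debug = True`
set_verbose(True)               # replaces `langchain.verbose = True`
set_llm_cache(InMemoryCache())  # replaces `langchain.llm_cache = ...`

assert get_debug() is True      # values are read back through the matching getters
```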
@@ -135,9 +135,9 @@
 "outputs": [],
 "source": [
 "# We set this so we can see what exactly is going on\n",
-"import langchain\n",
+"from langchain.globals import set_verbose\n",
 "\n",
-"langchain.verbose = True"
+"set_verbose(True)"
 ]
 },
 {

@@ -489,7 +489,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.3"
+"version": "3.10.1"
 }
 },
 "nbformat": 4,
@@ -214,7 +214,7 @@
 "\n",
 "The way the chain is learning that Tom prefers veggetarian meals is via an AutoSelectionScorer that is built into the chain. The scorer will call the LLM again and ask it to evaluate the selection (`ToSelectFrom`) using the information wrapped in (`BasedOn`).\n",
 "\n",
-"You can set `langchain.debug=True` if you want to see the details of the auto-scorer, but you can also define the scoring prompt yourself."
+"You can set `set_debug(True)` if you want to see the details of the auto-scorer, but you can also define the scoring prompt yourself."
 ]
 },
 {

@@ -778,8 +778,9 @@
 ],
 "source": [
 "from langchain.prompts.prompt import PromptTemplate\n",
-"import langchain\n",
-"langchain.debug = True\n",
+"from langchain.globals import set_debug\n",
+"\n",
+"set_debug(True)\n",
 "\n",
 "REWARD_PROMPT_TEMPLATE = \"\"\"\n",
 "\n",

@@ -812,9 +813,9 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "poetry-venv",
+"display_name": "Python 3 (ipykernel)",
 "language": "python",
-"name": "poetry-venv"
+"name": "python3"
 },
 "language_info": {
 "codemirror_mode": {

@@ -826,7 +827,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.1"
+"version": "3.10.1"
 }
 },
 "nbformat": 4,
@@ -14,7 +14,7 @@ For anyone building production-grade LLM applications, we highly recommend using

 

-## `langchain.debug` and `langchain.verbose`
+## `set_debug` and `set_verbose`

 If you're prototyping in Jupyter Notebooks or running Python scripts, it can be helpful to print out the intermediate steps of a Chain run.

@@ -45,15 +45,15 @@ agent.run("Who directed the 2023 film Oppenheimer and what is their age? What is

 </CodeOutputBlock>

-### `langchain.debug = True`
+### `set_debug(True)`

 Setting the global `debug` flag will cause all LangChain components with callback support (chains, models, agents, tools, retrievers) to print the inputs they receive and outputs they generate. This is the most verbose setting and will fully log raw inputs and outputs.


 ```python
-import langchain
+from langchain.globals import set_debug

-langchain.debug = True
+set_debug(True)

 agent.run("Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?")
 ```

@@ -376,15 +376,15 @@ agent.run("Who directed the 2023 film Oppenheimer and what is their age? What is

 </details>

-### `langchain.verbose = True`
+### `set_vebose(True)`

 Setting the `verbose` flag will print out inputs and outputs in a slightly more readable format and will skip logging certain raw outputs (like the token usage stats for an LLM call) so that you can focus on application logic.


 ```python
-import langchain
+from langchain.globals import set_verbose

-langchain.verbose = True
+set_verbose(True)

 agent.run("Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?")
 ```
@@ -30,12 +30,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import langchain\n",
 "from langchain.llms import NIBittensorLLM\n",
 "import json\n",
 "from pprint import pprint\n",
 "\n",
-"langchain.debug = True\n",
+"from langchain.globals import set_debug\n",
+"\n",
+"set_debug(True)\n",
 "\n",
 "# System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model\n",
 "llm_sys = NIBittensorLLM(\n",

@@ -79,11 +80,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import langchain\n",
-"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
+"from langchain.prompts import PromptTemplate\n",
+"from langchain.chains import LLMChain\n",
 "from langchain.llms import NIBittensorLLM\n",
 "\n",
-"langchain.debug = True\n",
+"from langchain.globals import set_debug\n",
+"\n",
+"set_debug(True)\n",
 "\n",
 "template = \"\"\"Question: {question}\n",
 "\n",

@@ -123,7 +126,8 @@
 " AgentExecutor,\n",
 ")\n",
 "from langchain.memory import ConversationBufferMemory\n",
-"from langchain.chains import LLMChain\nfrom langchain.prompts import PromptTemplate\n",
+"from langchain.chains import LLMChain\n",
+"from langchain.prompts import PromptTemplate\n",
 "from langchain.utilities import GoogleSearchAPIWrapper, SerpAPIWrapper\n",
 "from langchain.llms import NIBittensorLLM\n",
 "\n",

@@ -174,7 +178,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.12"
+"version": "3.10.1"
 }
 },
 "nbformat": 4,
@@ -17,7 +17,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import langchain\n",
+"from langchain.globals import set_llm_cache\n",
 "from langchain.llms import OpenAI\n",
 "\n",
 "# To make the caching really obvious, lets use a slower model.\n",

@@ -44,12 +44,12 @@
 "source": [
 "from langchain.cache import InMemoryCache\n",
 "\n",
-"langchain.llm_cache = InMemoryCache()"
+"set_llm_cache(InMemoryCache())"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": 3,
 "id": "64005d1f",
 "metadata": {},
 "outputs": [

@@ -57,17 +57,17 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"CPU times: user 35.9 ms, sys: 28.6 ms, total: 64.6 ms\n",
-"Wall time: 4.83 s\n"
+"CPU times: user 52.2 ms, sys: 15.2 ms, total: 67.4 ms\n",
+"Wall time: 1.19 s\n"
 ]
 },
 {
 "data": {
 "text/plain": [
-"\"\\n\\nWhy couldn't the bicycle stand up by itself? It was...two tired!\""
+"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was...two tired!\""
 ]
 },
-"execution_count": 4,
+"execution_count": 3,
 "metadata": {},
 "output_type": "execute_result"
 }

@@ -80,7 +80,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 4,
 "id": "c8a1cb2b",
 "metadata": {},
 "outputs": [

@@ -88,17 +88,17 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"CPU times: user 238 µs, sys: 143 µs, total: 381 µs\n",
-"Wall time: 1.76 ms\n"
+"CPU times: user 191 µs, sys: 11 µs, total: 202 µs\n",
+"Wall time: 205 µs\n"
 ]
 },
 {
 "data": {
 "text/plain": [
-"\"\\n\\nWhy couldn't the bicycle stand up by itself? It was...two tired!\""
+"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was...two tired!\""
 ]
 },
-"execution_count": 7,
+"execution_count": 4,
 "metadata": {},
 "output_type": "execute_result"
 }

@@ -122,8 +122,8 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
-"id": "3ff65b00",
+"execution_count": null,
+"id": "aefd9d2f",
 "metadata": {},
 "outputs": [],
 "source": [

@@ -132,7 +132,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": 6,
 "id": "5f036236",
 "metadata": {},
 "outputs": [],

@@ -140,12 +140,12 @@
 "# We can do the same thing with a SQLite cache\n",
 "from langchain.cache import SQLiteCache\n",
 "\n",
-"langchain.llm_cache = SQLiteCache(database_path=\".langchain.db\")"
+"set_llm_cache(SQLiteCache(database_path=\".langchain.db\"))"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 7,
 "id": "fa18e3af",
 "metadata": {},
 "outputs": [

@@ -153,8 +153,8 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"CPU times: user 17 ms, sys: 9.76 ms, total: 26.7 ms\n",
-"Wall time: 825 ms\n"
+"CPU times: user 33.2 ms, sys: 18.1 ms, total: 51.2 ms\n",
+"Wall time: 667 ms\n"
 ]
 },
 {

@@ -163,7 +163,7 @@
 "'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'"
 ]
 },
-"execution_count": 11,
+"execution_count": 7,
 "metadata": {},
 "output_type": "execute_result"
 }

@@ -176,7 +176,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": 8,
 "id": "5bf2f6fd",
 "metadata": {
 "scrolled": true

@@ -186,8 +186,8 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"CPU times: user 2.46 ms, sys: 1.23 ms, total: 3.7 ms\n",
-"Wall time: 2.67 ms\n"
+"CPU times: user 4.86 ms, sys: 1.97 ms, total: 6.83 ms\n",
+"Wall time: 5.79 ms\n"
 ]
 },
 {

@@ -196,7 +196,7 @@
 "'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'"
 ]
 },
-"execution_count": 12,
+"execution_count": 8,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -239,7 +239,7 @@
 "from redis import Redis\n",
 "from langchain.cache import RedisCache\n",
 "\n",
-"langchain.llm_cache = RedisCache(redis_=Redis())"
+"set_llm_cache(RedisCache(redis_=Redis()))"
 ]
 },
 {

@@ -324,8 +324,10 @@
 "from langchain.cache import RedisSemanticCache\n",
 "\n",
 "\n",
-"langchain.llm_cache = RedisSemanticCache(\n",
-" redis_url=\"redis://localhost:6379\", embedding=OpenAIEmbeddings()\n",
+"set_llm_cache(\n",
+" RedisSemanticCache(\n",
+" redis_url=\"redis://localhost:6379\", embedding=OpenAIEmbeddings()\n",
+" )\n",
 ")"
 ]
 },

@@ -433,7 +435,7 @@
 " )\n",
 "\n",
 "\n",
-"langchain.llm_cache = GPTCache(init_gptcache)"
+"set_llm_cache(GPTCache(init_gptcache))"
 ]
 },
 {

@@ -528,7 +530,7 @@
 " init_similar_cache(cache_obj=cache_obj, data_dir=f\"similar_cache_{hashed_llm}\")\n",
 "\n",
 "\n",
-"langchain.llm_cache = GPTCache(init_gptcache)"
+"set_llm_cache(GPTCache(init_gptcache))"
 ]
 },
 {

@@ -670,7 +672,7 @@
 "\n",
 "cache_name = \"langchain\"\n",
 "ttl = timedelta(days=1)\n",
-"langchain.llm_cache = MomentoCache.from_client_params(cache_name, ttl)"
+"set_llm_cache(MomentoCache.from_client_params(cache_name, ttl))"
 ]
 },
 {

@@ -760,7 +762,7 @@
 "# from sqlalchemy import create_engine\n",
 "\n",
 "# engine = create_engine(\"postgresql://postgres:postgres@localhost:5432/postgres\")\n",
-"# langchain.llm_cache = SQLAlchemyCache(engine)"
+"# set_llm_cache(SQLAlchemyCache(engine))"
 ]
 },
 {

@@ -808,7 +810,7 @@
 "\n",
 "\n",
 "engine = create_engine(\"postgresql://postgres:postgres@localhost:5432/postgres\")\n",
-"langchain.llm_cache = SQLAlchemyCache(engine, FulltextLLMCache)"
+"set_llm_cache(SQLAlchemyCache(engine, FulltextLLMCache))"
 ]
 },
 {

@@ -895,10 +897,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import langchain\n",
+"from langchain.globals import set_llm_cache\n",
 "from langchain.cache import CassandraCache\n",
 "\n",
-"langchain.llm_cache = CassandraCache(session=session, keyspace=keyspace)"
+"set_llm_cache(CassandraCache(session=session, keyspace=keyspace))"
 ]
 },
 {

@@ -980,8 +982,10 @@
 "source": [
 "from langchain.cache import CassandraSemanticCache\n",
 "\n",
-"langchain.llm_cache = CassandraSemanticCache(\n",
-" session=session, keyspace=keyspace, embedding=embedding, table_name=\"cass_sem_cache\"\n",
+"set_llm_cache(\n",
+" CassandraSemanticCache(\n",
+" session=session, keyspace=keyspace, embedding=embedding, table_name=\"cass_sem_cache\"\n",
+" )\n",
 ")"
 ]
 },

@@ -1283,7 +1287,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.12"
+"version": "3.10.1"
 }
 },
 "nbformat": 4,
@@ -58,7 +58,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import langchain\n",
 "from langchain.chains import LLMChain\n",
 "from langchain.prompts import PromptTemplate\n",
 "from langchain.callbacks.stdout import StdOutCallbackHandler\n",

@@ -67,8 +66,10 @@
 "\n",
 "from langchain.llms import OpaquePrompts\n",
 "\n",
-"langchain.verbose = True\n",
-"langchain.debug = True\n",
+"from langchain.globals import set_debug, set_verbose\n",
+"\n",
+"set_debug(True)\n",
+"set_verbose(True)\n",
 "\n",
 "prompt_template = \"\"\"\n",
 "As an AI assistant, you will answer questions according to given context.\n",

@@ -197,15 +198,22 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "langchain",
+"display_name": "Python 3 (ipykernel)",
 "language": "python",
 "name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",
 "version": 3
 },
 "file_extension": ".py",
 "mimetype": "text/x-python",
 "name": "python",
-"version": "3.10.10"
-},
-"orig_nbformat": 4
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.10.1"
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -1,7 +1,6 @@
 {
 "cells": [
 {
-"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [

@@ -17,7 +16,6 @@
 ]
 },
 {
-"attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [

@@ -43,11 +41,13 @@
 },
 "outputs": [],
 "source": [
-"import langchain\n",
-"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
+"from langchain.prompts import PromptTemplate\n",
+"from langchain.chains import LLMChain\n",
 "from langchain.llms import TextGen\n",
 "\n",
-"langchain.debug = True\n",
+"from langchain.globals import set_debug\n",
+"\n",
+"set_debug(True)\n",
 "\n",
 "template = \"\"\"Question: {question}\n",
 "\n",

@@ -92,12 +92,14 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import langchain\n",
-"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
+"from langchain.prompts import PromptTemplate\n",
+"from langchain.chains import LLMChain\n",
 "from langchain.llms import TextGen\n",
 "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
 "\n",
-"langchain.debug = True\n",
+"from langchain.globals import set_debug\n",
+"\n",
+"set_debug(True)\n",
 "\n",
 "template = \"\"\"Question: {question}\n",
 "\n",

@@ -144,7 +146,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.4"
+"version": "3.10.1"
 }
 },
 "nbformat": 4,
@@ -29,7 +29,7 @@ Then, set it up with the following code:
 ```python
 from datetime import timedelta
 from momento import CacheClient, Configurations, CredentialProvider
-import langchain
+from langchain.globals import set_llm_cache

 # Instantiate the Momento client
 cache_client = CacheClient(

@@ -41,7 +41,7 @@ cache_client = CacheClient(
 cache_name = "langchain"

 # Instantiate the LLM cache
-langchain.llm_cache = MomentoCache(cache_client, cache_name)
+set_llm_cache(MomentoCache(cache_client, cache_name))
 ```

 ## Memory
@@ -41,8 +41,9 @@ Once again this is done through the SQLAlchemy wrapper.

 ```
 import sqlalchemy
+from langchain.globals import set_llm_cache
 eng = sqlalchemy.create_engine(conn_str)
-langchain.llm_cache = SQLAlchemyCache(engine=eng)
+set_llm_cache(SQLAlchemyCache(engine=eng))
 ```

 From here, see the [LLM Caching](/docs/modules/model_io/models/llms/how_to/llm_caching) documentation on how to use.
@@ -63,11 +63,11 @@ from langchain.cache import RedisCache

 To use this cache with your LLMs:
 ```python
-import langchain
+from langchain.globals import set_llm_cache
 import redis

 redis_client = redis.Redis.from_url(...)
-langchain.llm_cache = RedisCache(redis_client)
+set_llm_cache(RedisCache(redis_client))
 ```

 #### Semantic Cache

@@ -80,7 +80,7 @@ from langchain.cache import RedisSemanticCache

 To use this cache with your LLMs:
 ```python
-import langchain
+from langchain.globals import set_llm_cache
 import redis

 # use any embedding provider...

@@ -88,10 +88,10 @@ from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings

 redis_url = "redis://localhost:6379"

-langchain.llm_cache = RedisSemanticCache(
+set_llm_cache(RedisSemanticCache(
 embedding=FakeEmbeddings(),
 redis_url=redis_url
-)
+))
 ```

 ### VectorStore
@@ -158,9 +158,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import langchain\n",
+"from langchain.globals import set_debug\n",
 "\n",
-"langchain.debug = True"
+"set_debug(True)"
 ]
 },
 {

@@ -411,7 +411,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.12"
+"version": "3.10.1"
 }
 },
 "nbformat": 4,
@@ -106,9 +106,9 @@
 "outputs": [],
 "source": [
 "# Do this so we can see exactly what's going on under the hood\n",
-"import langchain\n",
+"from langchain.globals import set_debug\n",
 "\n",
-"langchain.debug = True"
+"set_debug(True)"
 ]
 },
 {
@@ -1,5 +1,5 @@
 ```python
-import langchain
+from langchain.globals import set_llm_cache
 from langchain.chat_models import ChatOpenAI

 llm = ChatOpenAI()

@@ -10,7 +10,7 @@ llm = ChatOpenAI()

 ```python
 from langchain.cache import InMemoryCache
-langchain.llm_cache = InMemoryCache()
+set_llm_cache(InMemoryCache())

 # The first time, it is not yet in cache, so it should take longer
 llm.predict("Tell me a joke")

@@ -57,7 +57,7 @@ rm .langchain.db
 ```python
 # We can do the same thing with a SQLite cache
 from langchain.cache import SQLiteCache
-langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
+set_llm_cache(SQLiteCache(database_path=".langchain.db"))
 ```

@@ -1,5 +1,5 @@
 ```python
-import langchain
+from langchain.globals import set_llm_cache
 from langchain.llms import OpenAI

 # To make the caching really obvious, lets use a slower model.

@@ -11,7 +11,7 @@ llm = OpenAI(model_name="text-davinci-002", n=2, best_of=2)

 ```python
 from langchain.cache import InMemoryCache
-langchain.llm_cache = InMemoryCache()
+set_llm_cache(InMemoryCache())

 # The first time, it is not yet in cache, so it should take longer
 llm.predict("Tell me a joke")

@@ -58,7 +58,7 @@ rm .langchain.db
 ```python
 # We can do the same thing with a SQLite cache
 from langchain.cache import SQLiteCache
-langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
+set_llm_cache(SQLiteCache(database_path=".langchain.db"))
 ```

@@ -2,14 +2,10 @@
 """Main entrypoint into package."""
 import warnings
 from importlib import metadata
-from typing import TYPE_CHECKING, Any, Optional
+from typing import Any, Optional

 from langchain._api.deprecation import surface_langchain_deprecation_warnings

-if TYPE_CHECKING:
-    from langchain.schema import BaseCache
-
-
 try:
     __version__ = metadata.version(__package__)
 except metadata.PackageNotFoundError:

@@ -17,10 +13,6 @@ except metadata.PackageNotFoundError:
     __version__ = ""
 del metadata  # optional, avoids polluting the results of dir(__package__)

-verbose: bool = False
-debug: bool = False
-llm_cache: Optional["BaseCache"] = None
-

 def _is_interactive_env() -> bool:
     """Determine if running within IPython or Jupyter."""

@@ -29,7 +21,7 @@ def _is_interactive_env() -> bool:
     return hasattr(sys, "ps2")


-def _warn_on_import(name: str) -> None:
+def _warn_on_import(name: str, replacement: Optional[str] = None) -> None:
     """Warn on import of deprecated module."""
     if _is_interactive_env():
         # No warnings for interactive environments.

@@ -37,9 +29,16 @@ def _warn_on_import(name: str) -> None:
         # where users rely on auto-complete and may trigger this warning
         # even if they are not using any deprecated modules
         return
-    warnings.warn(
-        f"Importing {name} from langchain root module is no longer supported."
-    )
+
+    if replacement:
+        warnings.warn(
+            f"Importing {name} from langchain root module is no longer supported. "
+            f"Please use {replacement} instead."
+        )
+    else:
+        warnings.warn(
+            f"Importing {name} from langchain root module is no longer supported."
+        )


 # Surfaces Deprecation and Pending Deprecation warnings from langchain.

@@ -328,6 +327,39 @@ def __getattr__(name: str) -> Any:
         _warn_on_import(name)

         return SerpAPIWrapper
+    elif name == "verbose":
+        from langchain.globals import _verbose
+
+        _warn_on_import(
+            name,
+            replacement=(
+                "langchain.globals.set_verbose() / langchain.globals.get_verbose()"
+            ),
+        )
+
+        return _verbose
+    elif name == "debug":
+        from langchain.globals import _debug
+
+        _warn_on_import(
+            name,
+            replacement=(
+                "langchain.globals.set_debug() / langchain.globals.get_debug()"
+            ),
+        )
+
+        return _debug
+    elif name == "llm_cache":
+        from langchain.globals import _llm_cache
+
+        _warn_on_import(
+            name,
+            replacement=(
+                "langchain.globals.set_llm_cache() / langchain.globals.get_llm_cache()"
+            ),
+        )
+
+        return _llm_cache
     else:
         raise AttributeError(f"Could not find: {name}")
@@ -27,7 +27,6 @@ from uuid import UUID

 from tenacity import RetryCallState

-import langchain
 from langchain.callbacks.base import (
     BaseCallbackHandler,
     BaseCallbackManager,

@@ -88,7 +87,9 @@ run_collector_var: ContextVar[


 def _get_debug() -> bool:
-    return langchain.debug
+    from langchain.globals import get_debug
+
+    return get_debug()


 @contextmanager
@@ -10,7 +10,6 @@ from typing import Any, Dict, List, Optional, Type, Union

 import yaml

-import langchain
 from langchain.callbacks.base import BaseCallbackManager
 from langchain.callbacks.manager import (
     AsyncCallbackManager,

@@ -34,7 +33,9 @@ logger = logging.getLogger(__name__)


 def _get_verbosity() -> bool:
-    return langchain.verbose
+    from langchain.globals import get_verbose
+
+    return get_verbose()


 class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):

@@ -108,10 +109,10 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):

     memory: Optional[BaseMemory] = None
     """Optional memory object. Defaults to None.
-    Memory is a class that gets called at the start
+    Memory is a class that gets called at the start
     and at the end of every chain. At the start, memory loads variables and passes
     them along in the chain. At the end, it saves any returned variables.
-    There are many different types of memory - please see memory docs
+    There are many different types of memory - please see memory docs
     for the full catalog."""
     callbacks: Callbacks = Field(default=None, exclude=True)
     """Optional list of callback handlers (or callback manager). Defaults to None.

@@ -123,7 +124,8 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
     """Deprecated, use `callbacks` instead."""
     verbose: bool = Field(default_factory=_get_verbosity)
     """Whether or not run in verbose mode. In verbose mode, some intermediate logs
-    will be printed to the console. Defaults to `langchain.verbose` value."""
+    will be printed to the console. Defaults to the global `verbose` value,
+    accessible via `langchain.globals.get_verbose()`."""
     tags: Optional[List[str]] = None
     """Optional list of tags associated with the chain. Defaults to None.
     These tags will be associated with each call to this chain,
@@ -56,7 +56,8 @@ def create_extraction_chain(
         llm: The language model to use.
         prompt: The prompt to use for extraction.
         verbose: Whether to run in verbose mode. In verbose mode, some intermediate
-            logs will be printed to the console. Defaults to `langchain.verbose` value.
+            logs will be printed to the console. Defaults to the global `verbose` value,
+            accessible via `langchain.globals.get_verbose()`.

     Returns:
         Chain that can be used to extract information from a passage.

@@ -88,7 +89,8 @@ def create_extraction_chain_pydantic(
         llm: The language model to use.
         prompt: The prompt to use for extraction.
         verbose: Whether to run in verbose mode. In verbose mode, some intermediate
-            logs will be printed to the console. Defaults to `langchain.verbose` value.
+            logs will be printed to the console. Defaults to the global `verbose` value,
+            accessible via `langchain.globals.get_verbose()`

     Returns:
         Chain that can be used to extract information from a passage.
@@ -50,7 +50,9 @@ from langchain.schema.runnable import RunnableConfig


 def _get_verbosity() -> bool:
-    return langchain.verbose
+    from langchain.globals import get_verbose
+
+    return get_verbose()


 def _generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
libs/langchain/langchain/globals/__init__.py (new file, 178 lines)
@@ -0,0 +1,178 @@
+"""Global values and configuration that apply to all of LangChain."""
+import warnings
+from typing import TYPE_CHECKING, Optional
+
+if TYPE_CHECKING:
+    from langchain.schema import BaseCache
+
+
+# DO NOT USE THESE VALUES DIRECTLY!
+# Use them only via `get_<X>()` and `set_<X>()` below,
+# or else your code may behave unexpectedly with other uses of these global settings:
+# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+_verbose: bool = False
+_debug: bool = False
+_llm_cache: Optional["BaseCache"] = None
+
+
+def set_verbose(value: bool) -> None:
+    """Set a new value for the `verbose` global setting."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message=(
+                "Importing verbose from langchain root module is no longer supported"
+            ),
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.verbose` is no longer supported, and once all users
+        # have migrated to using `set_verbose()` here.
+        langchain.verbose = value
+
+    global _verbose
+    _verbose = value
+
+
+def get_verbose() -> bool:
+    """Get the value of the `verbose` global setting."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message=(
+                "Importing verbose from langchain root module is no longer supported"
+            ),
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.verbose` is no longer supported, and once all users
+        # have migrated to using `set_verbose()` here.
+        #
+        # In the meantime, the `verbose` setting is considered True if either the old
+        # or the new value are True. This accommodates users who haven't migrated
+        # to using `set_verbose()` yet. Those users are getting deprecation warnings
+        # directing them to use `set_verbose()` when they import `langhchain.verbose`.
+        old_verbose = langchain.verbose
+
+    global _verbose
+    return _verbose or old_verbose
+
+
+def set_debug(value: bool) -> None:
+    """Set a new value for the `debug` global setting."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message="Importing debug from langchain root module is no longer supported",
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.debug` is no longer supported, and once all users
+        # have migrated to using `set_debug()` here.
+        langchain.debug = value
+
+    global _debug
+    _debug = value
+
+
+def get_debug() -> bool:
+    """Get the value of the `debug` global setting."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message="Importing debug from langchain root module is no longer supported",
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.debug` is no longer supported, and once all users
+        # have migrated to using `set_debug()` here.
+        #
+        # In the meantime, the `debug` setting is considered True if either the old
+        # or the new value are True. This accommodates users who haven't migrated
+        # to using `set_debug()` yet. Those users are getting deprecation warnings
+        # directing them to use `set_debug()` when they import `langhchain.debug`.
+        old_debug = langchain.debug
+
+    global _debug
+    return _debug or old_debug
+
+
+def set_llm_cache(value: "BaseCache") -> None:
+    """Set a new LLM cache, overwriting the previous value, if any."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message=(
+                "Importing llm_cache from langchain root module is no longer supported"
+            ),
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.llm_cache` is no longer supported, and
+        # once all users have migrated to using `set_llm_cache()` here.
+        langchain.llm_cache = value
+
+    global _llm_cache
+    _llm_cache = value
+
+
+def get_llm_cache() -> "BaseCache":
+    """Get the value of the `llm_cache` global setting."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message=(
+                "Importing llm_cache from langchain root module is no longer supported"
+            ),
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.llm_cache` is no longer supported, and
+        # once all users have migrated to using `set_llm_cache()` here.
+        #
+        # In the meantime, the `llm_cache` setting returns whichever of
+        # its two backing sources is truthy (not `None` and non-empty),
+        # or the old value if both are falsy. This accommodates users
+        # who haven't migrated to using `set_llm_cache()` yet.
+        # Those users are getting deprecation warnings directing them
+        # to use `set_llm_cache()` when they import `langhchain.llm_cache`.
+        old_llm_cache = langchain.llm_cache
+
+    global _llm_cache
+    return _llm_cache or old_llm_cache
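Worth noting from the getters above: during the deprecation window the new module-level flags are OR-ed with the legacy root-module attributes, so code that still assigns `langchain.debug` keeps working. A small illustrative sketch (not part of the commit):

```python
import langchain
from langchain.globals import get_debug, set_debug

set_debug(False)          # new API: clears both the new flag and langchain.debug
langchain.debug = True    # legacy assignment, still honored for now

# get_debug() returns `_debug or langchain.debug`, so the legacy value wins here.
assert get_debug() is True

set_debug(False)          # reset both sources again
assert get_debug() is False
```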
@@ -66,7 +66,9 @@ logger = logging.getLogger(__name__)


 def _get_verbosity() -> bool:
-    return langchain.verbose
+    from langchain.globals import get_verbose
+
+    return get_verbose()


 @functools.lru_cache
libs/langchain/tests/unit_tests/test_globals.py (new file, 121 lines)
@@ -0,0 +1,121 @@
+from langchain.globals import get_debug, get_verbose, set_debug, set_verbose
+
+
+def test_debug_is_settable_directly() -> None:
+    import langchain
+    from langchain.callbacks.manager import _get_debug
+
+    previous_value = langchain.debug
+    previous_fn_reading = _get_debug()
+    assert previous_value == previous_fn_reading
+
+    # Flip the value of the flag.
+    langchain.debug = not previous_value
+
+    new_value = langchain.debug
+    new_fn_reading = _get_debug()
+
+    try:
+        # We successfully changed the value of `debug`.
+        assert new_value != previous_value
+
+        # If we access `debug` via a function used elsewhere in langchain,
+        # it also sees the same new value.
+        assert new_value == new_fn_reading
+
+        # If we access `debug` via `get_debug()` we also get the same value.
+        assert new_value == get_debug()
+    finally:
+        # Make sure we don't alter global state, even if the test fails.
+        # Always reset `debug` to the value it had before.
+        set_debug(previous_value)
+
+
+def test_debug_is_settable_via_setter() -> None:
+    from langchain import globals
+    from langchain.callbacks.manager import _get_debug
+
+    previous_value = globals._debug
+    previous_fn_reading = _get_debug()
+    assert previous_value == previous_fn_reading
+
+    # Flip the value of the flag.
+    set_debug(not previous_value)
+
+    new_value = globals._debug
+    new_fn_reading = _get_debug()
+
+    try:
+        # We successfully changed the value of `debug`.
+        assert new_value != previous_value
+
+        # If we access `debug` via a function used elsewhere in langchain,
+        # it also sees the same new value.
+        assert new_value == new_fn_reading
+
+        # If we access `debug` via `get_debug()` we also get the same value.
+        assert new_value == get_debug()
+    finally:
+        # Make sure we don't alter global state, even if the test fails.
+        # Always reset `debug` to the value it had before.
+        set_debug(previous_value)
+
+
+def test_verbose_is_settable_directly() -> None:
+    import langchain
+    from langchain.chains.base import _get_verbosity
+
+    previous_value = langchain.verbose
+    previous_fn_reading = _get_verbosity()
+    assert previous_value == previous_fn_reading
+
+    # Flip the value of the flag.
+    langchain.verbose = not previous_value
+
+    new_value = langchain.verbose
+    new_fn_reading = _get_verbosity()
+
+    try:
+        # We successfully changed the value of `verbose`.
+        assert new_value != previous_value
+
+        # If we access `verbose` via a function used elsewhere in langchain,
+        # it also sees the same new value.
+        assert new_value == new_fn_reading
+
+        # If we access `verbose` via `get_verbose()` we also get the same value.
+        assert new_value == get_verbose()
+    finally:
+        # Make sure we don't alter global state, even if the test fails.
+        # Always reset `verbose` to the value it had before.
+        set_verbose(previous_value)
+
+
+def test_verbose_is_settable_via_setter() -> None:
+    from langchain import globals
+    from langchain.chains.base import _get_verbosity
+
+    previous_value = globals._verbose
+    previous_fn_reading = _get_verbosity()
+    assert previous_value == previous_fn_reading
+
+    # Flip the value of the flag.
+    set_verbose(not previous_value)
+
+    new_value = globals._verbose
+    new_fn_reading = _get_verbosity()
+
+    try:
+        # We successfully changed the value of `verbose`.
+        assert new_value != previous_value
+
+        # If we access `verbose` via a function used elsewhere in langchain,
+        # it also sees the same new value.
+        assert new_value == new_fn_reading
+
+        # If we access `verbose` via `get_verbose()` we also get the same value.
+        assert new_value == get_verbose()
+    finally:
+        # Make sure we don't alter global state, even if the test fails.
+        # Always reset `verbose` to the value it had before.
+        set_verbose(previous_value)