Mirror of https://github.com/csunny/DB-GPT.git, synced 2025-09-11 13:58:58 +00:00
Resolve warnings of logger library (#2681)
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
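Background: Logger.warn has been a deprecated alias of Logger.warning in Python's standard logging module since Python 3.3, and calling it emits a DeprecationWarning at runtime. A minimal standalone sketch (plain CPython, not DB-GPT code) showing the warning this commit resolves:

# Minimal standalone sketch (assumption: CPython >= 3.4, not repository code)
# demonstrating the DeprecationWarning that logger.warn() triggers.
import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("demo")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("deprecated alias")     # emits DeprecationWarning
    logger.warning("supported method")  # no warning

for w in caught:
    print(w.category.__name__, "-", w.message)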
@@ -68,7 +68,7 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
             logger.debug(f"Created directory: {directory_path}")
             return True
         except OSError as e:
-            logger.warn(f"Error creating directory {directory_path}: {e}")
+            logger.warning(f"Error creating directory {directory_path}: {e}")
             return False
     else:
         logger.info(f"Directory {directory_path} already exists")
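Aside: if only "create if missing" semantics are needed, the try/except OSError pattern above can be collapsed with os.makedirs(..., exist_ok=True), which also avoids the race between the existence check and the creation. A sketch (the helper name is illustrative, not the repository's API):

# Illustrative alternative (hypothetical helper, not DB-GPT code).
import logging
import os

logger = logging.getLogger(__name__)

def ensure_directory(directory_path: str) -> bool:
    try:
        os.makedirs(directory_path, exist_ok=True)  # no error if it exists
        return True
    except OSError as e:
        logger.warning(f"Error creating directory {directory_path}: {e}")
        return False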
@@ -1065,7 +1065,7 @@ def _get_graph(dag: DAG):
     try:
         from graphviz import Digraph
     except ImportError:
-        logger.warn("Can't import graphviz, skip visualize DAG")
+        logger.warning("Can't import graphviz, skip visualize DAG")
         return None, None
     dot = Digraph(name=dag.dag_id)
     mermaid_str = "graph TD;\n"  # Initialize Mermaid graph definition
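For context, _get_graph builds two representations of the DAG side by side: a Graphviz Digraph (an optional dependency, hence the guarded import) and a Mermaid text definition. A rough sketch of the Mermaid half; the edge-list input is illustrative and DB-GPT's real DAG API may differ:

# Rough sketch: building a Mermaid "graph TD" definition from an edge list
# (the edge representation here is an assumption, not the repository's API).
def to_mermaid(edges: list[tuple[str, str]]) -> str:
    mermaid_str = "graph TD;\n"
    for upstream, downstream in edges:
        mermaid_str += f"    {upstream} --> {downstream};\n"
    return mermaid_str

print(to_mermaid([("load_data", "embed"), ("embed", "store")]))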
@@ -25,13 +25,13 @@ def _hf_check_quantization(model_params: HFLLMDeployModelParameters):
     has_quantization = model_params.quantization is not None
     if has_quantization:
         if model_params.real_device != "cuda":
-            logger.warn(
+            logger.warning(
                 "8-bit quantization and 4-bit quantization just supported by cuda"
             )
             return False
         elif "chatglm" in model_name:
             if "int4" not in model_name:
-                logger.warn(
+                logger.warning(
                     "chatglm or chatglm2 not support quantization now, see: "
                     "https://github.com/huggingface/transformers/issues/25228"
                 )
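The check above gates bitsandbytes-style 8-bit/4-bit quantization on the target device. A condensed, hypothetical version of the same guard (the function and its signature are illustrative, not the repository's exact logic):

# Hypothetical condensed guard (not the repository's exact code): 8-bit and
# 4-bit quantization are only supported on CUDA devices.
import logging

logger = logging.getLogger(__name__)

def quantization_supported(real_device: str, model_name: str) -> bool:
    if real_device != "cuda":
        logger.warning("8-bit/4-bit quantization is only supported on cuda")
        return False
    if "chatglm" in model_name and "int4" not in model_name:
        logger.warning("chatglm does not support runtime quantization")
        return False
    return True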
@@ -389,7 +389,7 @@ def load_huggingface_quantization_model(
             tokenizer.bos_token_id = 1
             tokenizer.pad_token_id = 0
         except Exception as e:
-            logger.warn(f"{str(e)}")
+            logger.warning(f"{str(e)}")
     else:
         logger.info(
             "Current model type is not LlamaForCausalLM, load tokenizer by "
@@ -80,7 +80,7 @@ async def _async_heartbeat_sender(
         try:
             await send_heartbeat_func(worker_run_data)
         except Exception as e:
-            logger.warn(f"Send heartbeat func error: {str(e)}")
+            logger.warning(f"Send heartbeat func error: {str(e)}")
         finally:
             await asyncio.sleep(heartbeat_interval)
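_async_heartbeat_sender is the classic periodic-task loop: do the work, log failures instead of raising, and always sleep before the next tick so one bad heartbeat cannot kill the sender. A self-contained sketch of that pattern (the names and the stop event are illustrative, not the repository's signature):

# Self-contained sketch of the periodic heartbeat pattern (hypothetical names).
import asyncio
import logging

logger = logging.getLogger(__name__)

async def heartbeat_loop(send_heartbeat, interval: float, stop: asyncio.Event):
    while not stop.is_set():
        try:
            await send_heartbeat()
        except Exception as e:
            # A failed beat is logged, never raised: the loop must keep running.
            logger.warning(f"Send heartbeat func error: {str(e)}")
        finally:
            await asyncio.sleep(interval)

async def main():
    stop = asyncio.Event()

    async def beat():
        print("beat")

    task = asyncio.create_task(heartbeat_loop(beat, 0.1, stop))
    await asyncio.sleep(0.35)
    stop.set()
    await task

asyncio.run(main())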
@@ -145,7 +145,7 @@ class TiktokenProxyTokenizer(ProxyTokenizer):
             )
         except ImportError:
             self._support_encoding = False
-            logger.warn("tiktoken not installed, cannot count tokens")
+            logger.warning("tiktoken not installed, cannot count tokens")
             return None
         try:
             if not model_name:
@@ -71,7 +71,7 @@ class ProxyTokenizerWrapper:
             )
         except ImportError:
             self._support_encoding = False
-            logger.warn("tiktoken not installed, cannot count tokens, returning -1")
+            logger.warning("tiktoken not installed, cannot count tokens, returning -1")
             return -1
         try:
             if not model_name:
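Both tokenizer wrappers above fall back to "cannot count tokens" when tiktoken is absent. When it is installed, the counting itself is short; a sketch of the happy path these guards protect (the model name is an example, and the KeyError fallback for unknown models is standard tiktoken behavior):

# Sketch of the tiktoken happy path (example model name, not repository code).
try:
    import tiktoken

    def count_tokens(text: str, model_name: str = "gpt-3.5-turbo") -> int:
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            # Unknown model: fall back to a general-purpose encoding.
            encoding = tiktoken.get_encoding("cl100k_base")
        return len(encoding.encode(text))
except ImportError:
    def count_tokens(text: str, model_name: str = "") -> int:
        return -1  # mirrors the wrapper's "cannot count tokens" fallback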
@@ -167,7 +167,7 @@ def initialize_cache(
             persist_dir, mem_table_buffer_mb=max_memory_mb
         )
     except ImportError as e:
-        logger.warn(
+        logger.warning(
             f"Can't import DiskCacheStorage, use MemoryCacheStorage, import error "
             f"message: {str(e)}"
         )
@@ -12,7 +12,7 @@ def _clear_model_cache(device="cuda"):
 
         _clear_torch_cache(device)
     except ImportError:
-        logger.warn("Torch not installed, skip clear torch cache")
+        logger.warning("Torch not installed, skip clear torch cache")
     # TODO clear other cache
@@ -30,7 +30,7 @@ def _clear_torch_cache(device="cuda"):
 
             empty_cache()
         except Exception as e:
-            logger.warn(f"Clear mps torch cache error, {str(e)}")
+            logger.warning(f"Clear mps torch cache error, {str(e)}")
     elif (hasattr(backends, "cuda") and backends.cuda.is_built()) or torch.has_cuda:
         device_count = torch.cuda.device_count()
         for device_id in range(device_count):
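A note on the surrounding code: torch.has_cuda is deprecated in recent torch releases in favor of torch.cuda.is_available(). A sketch of the same device-aware cache clearing against a current torch API (assumption: torch >= 2.0 for torch.mps.empty_cache):

# Sketch of device-aware cache clearing with a current torch API
# (assumption: torch >= 2.0; not the repository's exact code).
import torch

def clear_torch_cache() -> None:
    if torch.backends.mps.is_available():
        torch.mps.empty_cache()  # release cached MPS (Apple Silicon) memory
    elif torch.cuda.is_available():
        for device_id in range(torch.cuda.device_count()):
            with torch.cuda.device(device_id):
                torch.cuda.empty_cache()  # release cached CUDA allocator blocks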
@@ -86,6 +86,6 @@ class ElevenLabsSpeech(VoiceBase):
             os.remove("speech.mpeg")
             return True
         else:
-            logger.warn("Request failed with status code:", response.status_code)
+            logger.warning("Request failed with status code:", response.status_code)
             logger.info("Response content:", response.content)
             return False
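One caveat survives the rename in this last hunk: logging does not take print-style positional arguments. Extra arguments are treated as %-format values, and since the message string has no placeholder, the call ends in a "--- Logging error ---" traceback instead of the intended output. The idiomatic form uses lazy %s formatting, as in this sketch (the status code is an example value standing in for response.status_code, and this is not part of the commit):

# Sketch: logging uses lazy %-formatting, not print-style arguments.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")
status_code = 401  # example value standing in for response.status_code

logger.warning("Request failed with status code: %s", status_code)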