Resolve logger library deprecation warnings (#2681)

Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
Author: Emmanuel Ferdman, 2025-05-15 10:55:16 +03:00 (committed by GitHub)
parent 6a82d9122b
commit d300a4d412
12 changed files with 20 additions and 20 deletions
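
For context: in Python's standard logging module, Logger.warn is a deprecated alias of Logger.warning; it emits a DeprecationWarning and then delegates to warning(). A minimal sketch (standard library only) of the warning this commit removes:

import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("old spelling")     # deprecated alias, still logs
    logger.warning("new spelling")  # the supported method

# Only the deprecated alias emits a DeprecationWarning.
assert any(issubclass(w.category, DeprecationWarning) for w in caught)

Both calls produce the same log record, so the rename below is behavior-preserving.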

View File

@@ -58,7 +58,7 @@ class KnowledgeApiClient(ApiClient):
return self._post("/knowledge/space/add", data=request)
except Exception as e:
if "have already named" in str(e):
logger.warn(f"you have already named {request.name}")
logger.warning(f"you have already named {request.name}")
else:
raise e
@@ -143,7 +143,7 @@ def knowledge_init(
)
except Exception as ex:
if overwrite and "have already named" in str(ex):
-logger.warn(
+logger.warning(
f"Document {filename} already exist in space {space.name}, "
"overwrite it"
)
@@ -172,7 +172,7 @@ def knowledge_init(
return doc_id
except Exception as e:
if skip_wrong_doc:
logger.warn(f"Upload {filename} to {space.name} failed: {str(e)}")
logger.warning(f"Upload {filename} to {space.name} failed: {str(e)}")
else:
raise e
@@ -195,7 +195,7 @@ def knowledge_init(
doc_ids = [r.result() for r in as_completed(tasks)]
doc_ids = list(filter(lambda x: x, doc_ids))
if not doc_ids:
logger.warn("Warning: no document to sync")
logger.warning("Warning: no document to sync")
return
@@ -374,7 +374,7 @@ def knowledge_delete(
.lower()
)
if user_input != "yes":
logger.warn("Delete operation cancelled.")
logger.warning("Delete operation cancelled.")
return
client.space_delete(space)
logger.info("Delete the whole knowledge space successfully!")
@@ -390,7 +390,7 @@ def knowledge_delete(
.lower()
)
if user_input != "yes":
logger.warn("Delete operation cancelled.")
logger.warning("Delete operation cancelled.")
return
client.document_delete(space_name, KnowledgeDocumentRequest(doc_name=doc_name))
logger.info(

View File

@@ -610,7 +610,7 @@ class BaseChat(ABC):
return prompt_define_response
def _blocking_stream_call(self):
-logger.warn(
+logger.warning(
"_blocking_stream_call is only temporarily used in webserver and will be "
"deleted soon, please use stream_call to replace it for higher performance"
)

View File

@@ -68,7 +68,7 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
logger.debug(f"Created directory: {directory_path}")
return True
except OSError as e:
logger.warn(f"Error creating directory {directory_path}: {e}")
logger.warning(f"Error creating directory {directory_path}: {e}")
return False
else:
logger.info(f"Directory {directory_path} already exists")

View File

@@ -1065,7 +1065,7 @@ def _get_graph(dag: DAG):
try:
from graphviz import Digraph
except ImportError:
logger.warn("Can't import graphviz, skip visualize DAG")
logger.warning("Can't import graphviz, skip visualize DAG")
return None, None
dot = Digraph(name=dag.dag_id)
mermaid_str = "graph TD;\n" # Initialize Mermaid graph definition

View File

@@ -25,13 +25,13 @@ def _hf_check_quantization(model_params: HFLLMDeployModelParameters):
has_quantization = model_params.quantization is not None
if has_quantization:
if model_params.real_device != "cuda":
-logger.warn(
+logger.warning(
"8-bit quantization and 4-bit quantization just supported by cuda"
)
return False
elif "chatglm" in model_name:
if "int4" not in model_name:
-logger.warn(
+logger.warning(
"chatglm or chatglm2 not support quantization now, see: "
"https://github.com/huggingface/transformers/issues/25228"
)
@@ -389,7 +389,7 @@ def load_huggingface_quantization_model(
tokenizer.bos_token_id = 1
tokenizer.pad_token_id = 0
except Exception as e:
logger.warn(f"{str(e)}")
logger.warning(f"{str(e)}")
else:
logger.info(
"Current model type is not LlamaForCausalLM, load tokenizer by "

View File

@@ -80,7 +80,7 @@ async def _async_heartbeat_sender(
try:
await send_heartbeat_func(worker_run_data)
except Exception as e:
logger.warn(f"Send heartbeat func error: {str(e)}")
logger.warning(f"Send heartbeat func error: {str(e)}")
finally:
await asyncio.sleep(heartbeat_interval)

View File

@@ -145,7 +145,7 @@ class TiktokenProxyTokenizer(ProxyTokenizer):
)
except ImportError:
self._support_encoding = False
logger.warn("tiktoken not installed, cannot count tokens")
logger.warning("tiktoken not installed, cannot count tokens")
return None
try:
if not model_name:

View File

@@ -71,7 +71,7 @@ class ProxyTokenizerWrapper:
)
except ImportError:
self._support_encoding = False
logger.warn("tiktoken not installed, cannot count tokens, returning -1")
logger.warning("tiktoken not installed, cannot count tokens, returning -1")
return -1
try:
if not model_name:
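
Both tiktoken hunks above use the same optional-dependency pattern: import lazily, and on ImportError flag encoding as unsupported, warn once, and return a sentinel. A minimal sketch of that pattern (standard library plus an optional tiktoken install; the class and method names here are illustrative, not the project's):

import logging

logger = logging.getLogger(__name__)

class TokenCounter:
    def __init__(self):
        self._support_encoding = True  # assume support until the import fails

    def count_tokens(self, text: str, model_name: str = "gpt-3.5-turbo") -> int:
        try:
            import tiktoken
        except ImportError:
            self._support_encoding = False
            logger.warning("tiktoken not installed, cannot count tokens, returning -1")
            return -1
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            encoding = tiktoken.get_encoding("cl100k_base")  # generic fallback
        return len(encoding.encode(text))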

View File

@@ -167,7 +167,7 @@ def initialize_cache(
persist_dir, mem_table_buffer_mb=max_memory_mb
)
except ImportError as e:
-logger.warn(
+logger.warning(
f"Can't import DiskCacheStorage, use MemoryCacheStorage, import error "
f"message: {str(e)}"
)

View File

@@ -12,7 +12,7 @@ def _clear_model_cache(device="cuda"):
_clear_torch_cache(device)
except ImportError:
logger.warn("Torch not installed, skip clear torch cache")
logger.warning("Torch not installed, skip clear torch cache")
# TODO clear other cache
@@ -30,7 +30,7 @@ def _clear_torch_cache(device="cuda"):
empty_cache()
except Exception as e:
logger.warn(f"Clear mps torch cache error, {str(e)}")
logger.warning(f"Clear mps torch cache error, {str(e)}")
elif (hasattr(backends, "cuda") and backends.cuda.is_built()) or torch.has_cuda:
device_count = torch.cuda.device_count()
for device_id in range(device_count):

View File

@@ -86,6 +86,6 @@ class ElevenLabsSpeech(VoiceBase):
os.remove("speech.mpeg")
return True
else:
logger.warn("Request failed with status code:", response.status_code)
logger.warning("Request failed with status code:", response.status_code)
logger.info("Response content:", response.content)
return False
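
An aside on this hunk: with the standard logging module, extra positional arguments are merged into the message via %-formatting when the record is rendered, so a message without a %s placeholder cannot display the argument (the handler reports a formatting error instead of the status code). A corrected sketch, assuming a stdlib logger and a hypothetical response object:

import logging

logger = logging.getLogger(__name__)

class _FakeResponse:  # hypothetical stand-in for the real HTTP response
    status_code = 500
    content = b"server error"

response = _FakeResponse()

# Placeholders let logging merge the arguments into the messages lazily.
logger.warning("Request failed with status code: %s", response.status_code)
logger.info("Response content: %s", response.content)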

View File

@@ -88,7 +88,7 @@ class DBSummaryClient:
self.db_summary_embedding(item["db_name"], item["db_type"])
except Exception as e:
message = traceback.format_exc()
-logger.warn(
+logger.warning(
f"{item['db_name']}, {item['db_type']} summary error!{str(e)}, "
f"detail: {message}"
)