chore:v0.5.8 tag (#1632)

Author: Aries-ckt (committed by GitHub)
Date: 2024-06-13 17:53:46 +08:00
Parent: 0541d1494c
Commit: 57f7b6d64c
5 changed files with 4 additions and 4 deletions

Binary image file changed (not shown): 116 KiB before, 117 KiB after.


@@ -1 +1 @@
-version = "0.5.7"
+version = "0.5.8"


@@ -294,7 +294,7 @@ class HuggingFaceBgeEmbeddings(BaseModel, Embeddings):
         kwargs["client"] = sentence_transformers.SentenceTransformer(
             kwargs.get("model_name"),
             cache_folder=kwargs.get("cache_folder"),
-            **kwargs.get("model_kwargs"),
+            **(kwargs.get("model_kwargs") or {}),
         )
         super().__init__(**kwargs)
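Why the `or {}` guard matters: `kwargs.get("model_kwargs")` returns None when the caller never passes that argument, and unpacking None with `**` raises a TypeError, so the embedding client could not be built without an explicit (possibly empty) model_kwargs dict. A minimal sketch of the failure mode and the fix, using a hypothetical fake_transformer stand-in rather than the real SentenceTransformer:

    def build_client(**kwargs):
        # Hypothetical stand-in that just echoes what SentenceTransformer would receive.
        def fake_transformer(model_name, cache_folder=None, **model_kwargs):
            return {"model": model_name, "cache": cache_folder, "extra": model_kwargs}

        # Before the fix: **kwargs.get("model_kwargs") is **None when the key is absent,
        # which raises "TypeError: ... argument after ** must be a mapping".
        # After the fix: fall back to an empty dict.
        return fake_transformer(
            kwargs.get("model_name"),
            cache_folder=kwargs.get("cache_folder"),
            **(kwargs.get("model_kwargs") or {}),
        )

    print(build_client(model_name="BAAI/bge-small-en"))  # now works without model_kwargs
    print(build_client(model_name="BAAI/bge-small-en", model_kwargs={"device": "cpu"}))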


@@ -17,7 +17,7 @@ class CrossEncoderRerankEmbeddings(BaseModel, RerankEmbeddings):
     client: Any  #: :meta private:
     model_name: str = "BAAI/bge-reranker-base"
-    max_length: int
+    max_length: int = None  # type: ignore
     """Max length for input sequences. Longer sequences will be truncated. If None, max
     length of the model will be used"""
     """Model name to use."""


@@ -19,7 +19,7 @@ with open("README.md", mode="r", encoding="utf-8") as fh:
 IS_DEV_MODE = os.getenv("IS_DEV_MODE", "true").lower() == "true"
 # If you modify the version, please modify the version in the following files:
 # dbgpt/_version.py
-DB_GPT_VERSION = os.getenv("DB_GPT_VERSION", "0.5.7")
+DB_GPT_VERSION = os.getenv("DB_GPT_VERSION", "0.5.8")
 BUILD_NO_CACHE = os.getenv("BUILD_NO_CACHE", "true").lower() == "true"
 LLAMA_CPP_GPU_ACCELERATION = (
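The version constant in setup.py follows the usual environment-override pattern: a build job can export DB_GPT_VERSION to pin a different value without editing the file, otherwise the baked-in default applies. A minimal sketch of that behaviour (names other than DB_GPT_VERSION are illustrative):

    import os

    # Environment variable wins; otherwise fall back to the release default.
    version = os.getenv("DB_GPT_VERSION", "0.5.8")
    print(version)  # "0.5.8" unless e.g. DB_GPT_VERSION=0.5.9rc0 was exported beforehand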