doc:ollama document (#1512)

This commit is contained in:
Aries-ckt 2024-05-11 17:13:25 +08:00 committed by GitHub
parent d3131552d3
commit f389a0c32d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 47 additions and 2 deletions

View File

@ -19,6 +19,7 @@ logger = logging.getLogger(__name__)
CHROMA_COLLECTION_NAME = "langchain"
@register_resource(
_("Chroma Vector Store"),
"chroma_vector_store",
@ -152,7 +153,6 @@ class ChromaStore(VectorStoreBase):
files = list(filter(lambda f: f != "chroma.sqlite3", files))
return len(files) > 0
def load_document(self, chunks: List[Chunk]) -> List[str]:
"""Load document to vector store."""
logger.info("ChromaStore load document")

View File

@ -66,7 +66,7 @@ class PGVectorStore(VectorStoreBase):
embedding_function=self.embeddings,
collection_name=self.collection_name,
connection_string=self.connection_string,
) # mypy: ignore
) # mypy: ignore
def similar_search(
self, text: str, topk: int, filters: Optional[MetadataFilters] = None

View File

@ -0,0 +1,41 @@
# ollama
Ollama is a model serving platform that lets you deploy and run models locally in a few seconds.
### Install ollama
If your system is Linux, run:
```bash
curl -fsSL https://ollama.com/install.sh | sh
```
For other environments, please refer to the [official ollama website](https://ollama.com/).
### Pull models
1. Pull the LLM:
```bash
ollama pull qwen:0.5b
```
2. Pull the embedding model:
```bash
ollama pull nomic-embed-text
```
3. Install the ollama Python package:
```bash
pip install ollama
```
### Use the ollama proxy model in the DB-GPT `.env` file
```bash
LLM_MODEL=ollama_proxyllm
PROXY_SERVER_URL=http://127.0.0.1:11434
PROXYLLM_BACKEND="qwen:0.5b"
PROXY_API_KEY=not_used
EMBEDDING_MODEL=proxy_ollama
proxy_ollama_proxy_server_url=http://127.0.0.1:11434
proxy_ollama_proxy_backend="nomic-embed-text:latest"
```
### Run the DB-GPT server
```bash
python dbgpt/app/dbgpt_server.py
```

View File

@ -237,6 +237,10 @@ const sidebars = {
type: 'doc',
id: 'installation/advanced_usage/More_proxyllms',
},
{
type: 'doc',
id: 'installation/advanced_usage/ollama',
},
{
type: 'doc',
id: 'installation/advanced_usage/vLLM_inference',