community: Bugfix - correct Ollama API path to avoid HTTP 307 (#17895)

Sets the correct /api/generate path, without a trailing /, to avoid the extra
HTTP request caused by Ollama's 307 redirect.

Reference:

https://github.com/ollama/ollama/blob/efe040f8/docs/api.md#generate-request-streaming

Before:

    DEBUG: Starting new HTTP connection (1): localhost:11434
    DEBUG: http://localhost:11434 "POST /api/generate/ HTTP/1.1" 307 0
    DEBUG: http://localhost:11434 "POST /api/generate HTTP/1.1" 200 None

After:

    DEBUG: Starting new HTTP connection (1): localhost:11434
    DEBUG: http://localhost:11434 "POST /api/generate HTTP/1.1" 200 None
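
The redirect can also be reproduced outside LangChain with a plain requests
call (a minimal sketch; it assumes a local Ollama server on localhost:11434
and uses "llama2" only as an example model name):

    import requests

    payload = {"model": "llama2", "prompt": "Hello", "stream": False}

    # Trailing slash: Ollama answers with a 307 redirect first, costing an
    # extra round trip before the real request is served.
    resp = requests.post(
        "http://localhost:11434/api/generate/",
        json=payload,
        allow_redirects=False,
    )
    print(resp.status_code)  # 307

    # No trailing slash: the request hits the endpoint directly.
    resp = requests.post("http://localhost:11434/api/generate", json=payload)
    print(resp.status_code)  # 200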

Author: Brad Erickson
Date: 2024-02-22 08:59:55 -08:00 (committed by GitHub)
Parent: a53370a060
Commit: ecd72d26cf

2 changed files with 8 additions and 8 deletions

@@ -65,7 +65,7 @@ class _OllamaCommon(BaseLanguageModel):
     CPU cores your system has (as opposed to the logical number of cores)."""
 
     num_predict: Optional[int] = None
-    """Maximum number of tokens to predict when generating text. 
+    """Maximum number of tokens to predict when generating text.
    (Default: 128, -1 = infinite generation, -2 = fill context)"""
 
     repeat_last_n: Optional[int] = None
@@ -159,7 +159,7 @@ class _OllamaCommon(BaseLanguageModel):
         yield from self._create_stream(
             payload=payload,
             stop=stop,
-            api_url=f"{self.base_url}/api/generate/",
+            api_url=f"{self.base_url}/api/generate",
             **kwargs,
         )
@@ -174,7 +174,7 @@ class _OllamaCommon(BaseLanguageModel):
         async for item in self._acreate_stream(
             payload=payload,
             stop=stop,
-            api_url=f"{self.base_url}/api/generate/",
+            api_url=f"{self.base_url}/api/generate",
             **kwargs,
         ):
             yield item