Update ollama.py with optional raw setting. (#21486)
Ollama now supports a `raw` option (see https://github.com/ollama/ollama/blob/main/docs/api.md). This change adds an optional `raw` setting to the community Ollama wrapper and forwards it alongside the other request parameters.

---------

Co-authored-by: Isaac Francisco <78627776+isahers1@users.noreply.github.com>
Co-authored-by: isaac hershenson <ihershenson@hmc.edu>
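For quick review context, a minimal sketch of how the new setting is expected to be used from the community wrapper (the model name and the pre-templated prompt are illustrative):

```python
from langchain_community.llms import Ollama

# raw=True asks Ollama to skip its own prompt templating and pass the
# prompt to the model exactly as given, so the caller supplies any
# chat/instruction formatting themselves.
llm = Ollama(model="llama2", raw=True)
print(llm.invoke("[INST] Tell me a joke. [/INST]"))
```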
parent 9944ad7f5f
commit 570d45b2a1
@@ -112,15 +112,16 @@ class _OllamaCommon(BaseLanguageModel):
     """Timeout for the request stream"""

     keep_alive: Optional[Union[int, str]] = None
     """How long the model will stay loaded into memory.

     The parameter (Default: 5 minutes) can be set to:
     1. a duration string in Golang (such as "10m" or "24h");
     2. a number in seconds (such as 3600);
     3. any negative number which will keep the model loaded \
         in memory (e.g. -1 or "-1m");
     4. 0 which will unload the model immediately after generating a response;

     See the [Ollama documents](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-keep-a-model-loaded-in-memory-or-make-it-unload-immediately)"""

+    raw: Optional[bool] = None
+    """raw or not."""
+
     headers: Optional[dict] = None
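Per the linked Ollama API docs, `raw` controls whether Ollama applies the model's prompt template: with `raw: true` the prompt is sent to the model verbatim. A small sketch of the effect at the HTTP level (endpoint and model name assume a default local Ollama install):

```python
import requests

# Generate with templating disabled; the caller supplies the full prompt,
# including any instruction formatting the model expects.
resp = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "llama2",
        "prompt": "[INST] Why is the sky blue? [/INST]",
        "raw": True,
        "stream": False,
    },
    timeout=60,
)
print(resp.json()["response"])
```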
@@ -154,6 +155,7 @@ class _OllamaCommon(BaseLanguageModel):
             "system": self.system,
             "template": self.template,
             "keep_alive": self.keep_alive,
+            "raw": self.raw,
         }

     @property
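For context, a simplified sketch (not the library's exact code) of the merge pattern behind a defaults dict like this one: instance-level defaults are combined with per-call overrides, and the per-call values win:

```python
from typing import Any, Dict


def merge_params(defaults: Dict[str, Any], **call_kwargs: Any) -> Dict[str, Any]:
    # Later entries win, so per-call keyword arguments override the defaults.
    return {**defaults, **call_kwargs}


defaults = {"template": None, "keep_alive": None, "raw": None}
assert merge_params(defaults, raw=True, keep_alive="10m") == {
    "template": None,
    "keep_alive": "10m",
    "raw": True,
}
```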
@@ -227,7 +229,6 @@ class _OllamaCommon(BaseLanguageModel):
             "images": payload.get("images", []),
             **params,
         }

         response = requests.post(
             url=api_url,
             headers={
@@ -369,12 +370,9 @@ class _OllamaCommon(BaseLanguageModel):

 class Ollama(BaseLLM, _OllamaCommon):
     """Ollama locally runs large language models.

     To use, follow the instructions at https://ollama.ai/.

     Example:
         .. code-block:: python

             from langchain_community.llms import Ollama
             ollama = Ollama(model="llama2")
     """
@@ -398,17 +396,13 @@ class Ollama(BaseLLM, _OllamaCommon):
         **kwargs: Any,
     ) -> LLMResult:
         """Call out to Ollama's generate endpoint.

         Args:
             prompt: The prompt to pass into the model.
             stop: Optional list of stop words to use when generating.

         Returns:
             The string generated by the model.

         Example:
             .. code-block:: python

                 response = ollama("Tell me a joke.")
         """
         # TODO: add caching here.
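A short usage sketch of the batch API this method backs, reusing the `ollama` instance from the class docstring example (the prompts and stop sequences are illustrative):

```python
result = ollama.generate(
    prompts=["Tell me a joke.", "Tell me another."],
    stop=["\n\n"],
)
print(result.generations[0][0].text)
```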
@@ -434,17 +428,13 @@ class Ollama(BaseLLM, _OllamaCommon):
         **kwargs: Any,
     ) -> LLMResult:
         """Call out to Ollama's generate endpoint.

         Args:
             prompt: The prompt to pass into the model.
             stop: Optional list of stop words to use when generating.

         Returns:
             The string generated by the model.

         Example:
             .. code-block:: python

                 response = ollama("Tell me a joke.")
         """
         # TODO: add caching here.
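This second, otherwise identical hunk appears to be the async counterpart of the same docstring; driving it looks like this (again assuming the `ollama` instance from the docstring example):

```python
import asyncio


async def main() -> None:
    result = await ollama.agenerate(prompts=["Tell me a joke."])
    print(result.generations[0][0].text)


asyncio.run(main())
```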
@@ -101,6 +101,7 @@ def test_handle_kwargs_top_level_parameters(monkeypatch: MonkeyPatch) -> None:
             "system": "Test system prompt",
             "template": None,
             "keep_alive": None,
+            "raw": None,
         }
         assert stream is True
         assert timeout == 300
@@ -149,6 +150,7 @@ def test_handle_kwargs_with_unknown_param(monkeypatch: MonkeyPatch) -> None:
             "system": None,
             "template": None,
             "keep_alive": None,
+            "raw": None,
         }
         assert stream is True
         assert timeout == 300
@@ -181,6 +183,7 @@ def test_handle_kwargs_with_options(monkeypatch: MonkeyPatch) -> None:
             "system": None,
             "template": None,
             "keep_alive": None,
+            "raw": None,
         }
         assert stream is True
         assert timeout == 300
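Given these three assertion updates, a sketch of an additional unit test in the same monkeypatch style that checks `raw` is actually forwarded to the endpoint; the fake response shape is an assumption based on the client streaming newline-delimited JSON, not code from the repository:

```python
import json
from typing import Any, Iterator

from pytest import MonkeyPatch

from langchain_community.llms import Ollama


class _FakeResponse:
    """Minimal stand-in for requests.Response as used by the Ollama client."""

    status_code = 200

    def iter_lines(self, decode_unicode: bool = False) -> Iterator[str]:
        # A single streamed chunk that is also the final "done" message.
        yield json.dumps({"response": "ok", "done": True})


def test_raw_is_forwarded(monkeypatch: MonkeyPatch) -> None:
    seen: dict = {}

    def fake_post(url: str, **kwargs: Any) -> _FakeResponse:
        seen.update(kwargs.get("json", {}))
        return _FakeResponse()

    monkeypatch.setattr("requests.post", fake_post)

    llm = Ollama(model="test-model", raw=True)
    assert llm.invoke("hello") == "ok"
    assert seen.get("raw") is True
```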