mirror of https://github.com/hwchase17/langchain.git
synced 2026-03-18 11:07:36 +00:00

Merge branch 'master' into dev2049/fmt_nbs
@@ -1,7 +1,6 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -9,7 +8,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -17,7 +15,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -31,7 +28,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -39,7 +35,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -52,14 +47,13 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install comet_ml\n",
"!pip install langchain\n",
"!pip install openai\n",
"!pip install google-search-results"
"%pip install comet_ml langchain openai google-search-results spacy textstat pandas\n",
"\n",
"import sys\n",
"!{sys.executable} -m spacy download en_core_web_sm"
]
},
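A note on the consolidated install cell above: `%pip` is the IPython magic that installs into the environment of the running kernel, whereas `!pip` shells out and may target a different interpreter; the same reasoning motivates the `!{sys.executable} -m spacy ...` form. A hedged equivalent outside a notebook (same packages, plain Python):

    import subprocess
    import sys

    # Install into the interpreter that is actually running, mirroring %pip.
    packages = ["comet_ml", "langchain", "openai", "google-search-results",
                "spacy", "textstat", "pandas"]
    subprocess.check_call([sys.executable, "-m", "pip", "install", *packages])
    subprocess.check_call([sys.executable, "-m", "spacy", "download", "en_core_web_sm"])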
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -67,7 +61,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -86,7 +79,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -94,7 +86,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -109,12 +100,12 @@
"source": [
"import os\n",
"\n",
"%env OPENAI_API_KEY=\"...\"\n",
"%env SERPAPI_API_KEY=\"...\""
"os.environ[\"OPENAI_API_KEY\"] = \"...\"\n",
"#os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"...\""
]
},
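The updated cell assigns keys through `os.environ` instead of the `%env` magic; `%env VAR="..."` keeps the quote characters as part of the stored value, while direct assignment stores the exact string. A minimal sketch (key values are placeholders, as in the diff):

    import os

    os.environ["OPENAI_API_KEY"] = "..."   # placeholder, not a real key
    os.environ["SERPAPI_API_KEY"] = "..."  # placeholder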
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -149,7 +140,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -185,12 +175,11 @@
"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)\n",
"\n",
"test_prompts = [{\"title\": \"Documentary about Bigfoot in Paris\"}]\n",
"synopsis_chain.apply(test_prompts)\n",
"print(synopsis_chain.apply(test_prompts))\n",
"comet_callback.flush_tracker(synopsis_chain, finish=True)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -232,7 +221,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -240,7 +228,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -256,7 +243,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install rouge-score"
"%pip install rouge-score"
]
},
{
@@ -336,16 +323,29 @@
" \"\"\"\n",
" }\n",
"]\n",
"synopsis_chain.apply(test_prompts)\n",
"print(synopsis_chain.apply(test_prompts))\n",
"comet_callback.flush_tracker(synopsis_chain, finish=True)"
]
}
],
"metadata": {
"language_info": {
"name": "python"
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"orig_nbformat": 4
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
}
},
"nbformat": 4,
"nbformat_minor": 2

@@ -101,7 +101,7 @@
"source": [
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(\"../state_of_the_union.txt\")"
"loader = TextLoader(\"../state_of_the_union.txt\", encoding='utf8')"
]
},
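The `encoding='utf8'` argument added above prevents a `UnicodeDecodeError` on platforms whose default locale encoding is not UTF-8 (for example cp1252 on Windows). A minimal sketch of the updated loader call:

    from langchain.document_loaders import TextLoader

    # Explicit encoding so the file reads identically on every platform.
    loader = TextLoader("../state_of_the_union.txt", encoding="utf8")
    documents = loader.load()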
{

@@ -16,7 +16,7 @@
"In order to add a memory with an external message store to an agent we are going to do the following steps:\n",
"\n",
"1. We are going to create a `RedisChatMessageHistory` to connect to an external database to store the messages in.\n",
"2. We are going to create an `LLMChain` useing that chat history as memory.\n",
"2. We are going to create an `LLMChain` using that chat history as memory.\n",
"3. We are going to use that `LLMChain` to create a custom Agent.\n",
"\n",
"For the purposes of this exercise, we are going to create a simple custom Agent that has access to a search tool and utilizes the `ConversationBufferMemory` class."

@@ -115,7 +115,7 @@
"id": "a2d76826",
"metadata": {},
"source": [
"**The above request should now appear on your [PromptLayer dashboard](https://ww.promptlayer.com).**"
"**The above request should now appear on your [PromptLayer dashboard](https://www.promptlayer.com).**"
]
},
{

@@ -23,3 +23,4 @@ Query Understanding: GPT-4 processes user queries, grasping the context and extr

The full tutorial is available below.
- [Twitter the-algorithm codebase analysis with Deep Lake](code/twitter-the-algorithm-analysis-deeplake.ipynb): A notebook walking through how to parse github source code and run conversational queries.
- [LangChain codebase analysis with Deep Lake](code/code-analysis-deeplake.ipynb): A notebook walking through how to analyze and do question answering over THIS code base.

@@ -34,12 +34,10 @@ def _get_experiment(
) -> Any:
    comet_ml = import_comet_ml()

    experiment = comet_ml.config.get_global_experiment()
    if experiment is None:
        experiment = comet_ml.Experiment(  # type: ignore
            workspace=workspace,
            project_name=project_name,
        )
    experiment = comet_ml.Experiment(  # type: ignore
        workspace=workspace,
        project_name=project_name,
    )

    return experiment
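After this change `_get_experiment` no longer reuses `comet_ml.config.get_global_experiment()`: every call constructs a fresh `Experiment`, so separate chains stop logging into one shared run. The resulting function, reconstructed from the hunk (`import_comet_ml` is the module's own helper; the parameter list is inferred from the body):

    from typing import Any, Optional

    def _get_experiment(
        workspace: Optional[str] = None, project_name: Optional[str] = None
    ) -> Any:
        comet_ml = import_comet_ml()

        # Always start a new experiment rather than reusing a global one.
        experiment = comet_ml.Experiment(  # type: ignore
            workspace=workspace,
            project_name=project_name,
        )

        return experiment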

@@ -132,7 +130,7 @@ class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
        warning = (
            "The comet_ml callback is currently in beta and is subject to change "
            "based on updates to `langchain`. Please report any issues to "
            "https://github.com/comet_ml/issue_tracking/issues with the tag "
            "https://github.com/comet-ml/issue_tracking/issues with the tag "
            "`langchain`."
        )
        comet_ml.LOGGER.warning(warning)

@@ -1,5 +1,6 @@
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.azure_openai import AzureChatOpenAI
from langchain.chat_models.openai import ChatOpenAI
from langchain.chat_models.promptlayer_openai import PromptLayerChatOpenAI

__all__ = ["ChatOpenAI", "AzureChatOpenAI", "PromptLayerChatOpenAI"]
__all__ = ["ChatOpenAI", "AzureChatOpenAI", "PromptLayerChatOpenAI", "ChatAnthropic"]
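With `ChatAnthropic` added to `__all__`, the new chat model is importable from the package root like the OpenAI wrappers (sketch; requires the `anthropic` package and `ANTHROPIC_API_KEY`):

    from langchain.chat_models import ChatAnthropic

    chat = ChatAnthropic()  # defaults to model="claude-v1" per _AnthropicCommon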

langchain/chat_models/anthropic.py (new file, 139 lines)
@@ -0,0 +1,139 @@
from typing import Any, Dict, List, Optional

from pydantic import Extra

from langchain.chat_models.base import BaseChatModel
from langchain.llms.anthropic import _AnthropicCommon
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatGeneration,
    ChatMessage,
    ChatResult,
    HumanMessage,
    SystemMessage,
)


class ChatAnthropic(BaseChatModel, _AnthropicCommon):
    r"""Wrapper around Anthropic's large language model.

    To use, you should have the ``anthropic`` python package installed, and the
    environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python
            import anthropic
            from langchain.chat_models import ChatAnthropic
            model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
    """

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "anthropic-chat"

    def _convert_one_message_to_text(self, message: BaseMessage) -> str:
        if isinstance(message, ChatMessage):
            message_text = f"\n\n{message.role.capitalize()}: {message.content}"
        elif isinstance(message, HumanMessage):
            message_text = f"{self.HUMAN_PROMPT} {message.content}"
        elif isinstance(message, AIMessage):
            message_text = f"{self.AI_PROMPT} {message.content}"
        elif isinstance(message, SystemMessage):
            message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>"
        else:
            raise ValueError(f"Got unknown type {message}")
        return message_text

    def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str:
        """Format a list of strings into a single string with necessary newlines.

        Args:
            messages (List[BaseMessage]): List of BaseMessage to combine.

        Returns:
            str: Combined string with necessary newlines.
        """
        return "".join(
            self._convert_one_message_to_text(message) for message in messages
        )

    def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
        """Format a list of messages into a full prompt for the Anthropic model.

        Args:
            messages (List[BaseMessage]): List of BaseMessage to combine.

        Returns:
            str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
        """
        if not self.AI_PROMPT:
            raise NameError("Please ensure the anthropic package is loaded")

        if not isinstance(messages[-1], AIMessage):
            messages.append(AIMessage(content=""))
        text = self._convert_messages_to_text(messages)
        return (
            text.rstrip()
        )  # trim off the trailing ' ' that might come from the "Assistant: "

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        prompt = self._convert_messages_to_prompt(messages)
        params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
        if stop:
            params["stop_sequences"] = stop

        if self.streaming:
            completion = ""
            stream_resp = self.client.completion_stream(**params)
            for data in stream_resp:
                delta = data["completion"][len(completion) :]
                completion = data["completion"]
                self.callback_manager.on_llm_new_token(
                    delta,
                    verbose=self.verbose,
                )
        else:
            response = self.client.completion(**params)
            completion = response["completion"]
        message = AIMessage(content=completion)
        return ChatResult(generations=[ChatGeneration(message=message)])

    async def _agenerate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        prompt = self._convert_messages_to_prompt(messages)
        params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
        if stop:
            params["stop_sequences"] = stop

        if self.streaming:
            completion = ""
            stream_resp = await self.client.acompletion_stream(**params)
            async for data in stream_resp:
                delta = data["completion"][len(completion) :]
                completion = data["completion"]
                if self.callback_manager.is_async:
                    await self.callback_manager.on_llm_new_token(
                        delta,
                        verbose=self.verbose,
                    )
                else:
                    self.callback_manager.on_llm_new_token(
                        delta,
                        verbose=self.verbose,
                    )
        else:
            response = await self.client.acompletion(**params)
            completion = response["completion"]
        message = AIMessage(content=completion)
        return ChatResult(generations=[ChatGeneration(message=message)])
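A hedged usage sketch for the class above (the prompt is illustrative; a valid `ANTHROPIC_API_KEY` must be set):

    from langchain.chat_models import ChatAnthropic
    from langchain.schema import HumanMessage

    chat = ChatAnthropic(model="claude-v1")
    # The messages are flattened into "\n\nHuman: ...\n\nAssistant:" by
    # _convert_messages_to_prompt before the API call.
    response = chat([HumanMessage(content="Translate 'hello' into French.")])
    print(response.content)  # the AIMessage text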

@@ -54,6 +54,10 @@ class GitLoader(BaseLoader):

            file_path = os.path.join(self.repo_path, item.path)

            ignored_files = repo.ignored([file_path])
            if len(ignored_files):
                continue

            # uses filter to skip files
            if self.file_filter and not self.file_filter(file_path):
                continue
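The check added above asks GitPython whether each path is matched by `.gitignore` (`repo.ignored`) and skips it before the existing `file_filter` test runs. A usage sketch (repo path and filter are illustrative):

    from langchain.document_loaders import GitLoader

    # Ignored files are now skipped automatically; file_filter narrows further.
    loader = GitLoader(
        repo_path="./example_repo",
        file_filter=lambda file_path: file_path.endswith(".py"),
    )
    docs = loader.load()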

@@ -1,15 +1,100 @@
"""Wrapper around Anthropic APIs."""
import re
from typing import Any, Dict, Generator, List, Mapping, Optional
from typing import Any, Callable, Dict, Generator, List, Mapping, Optional

from pydantic import Extra, root_validator
from pydantic import BaseModel, Extra, root_validator

from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env


class Anthropic(LLM):
    r"""Wrapper around Anthropic large language models.
class _AnthropicCommon(BaseModel):
    client: Any = None  #: :meta private:
    model: str = "claude-v1"
    """Model name to use."""

    max_tokens_to_sample: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: Optional[float] = None
    """A non-negative float that tunes the degree of randomness in generation."""

    top_k: Optional[int] = None
    """Number of most likely tokens to consider at each step."""

    top_p: Optional[float] = None
    """Total probability mass of tokens to consider at each step."""

    streaming: bool = False
    """Whether to stream the results."""

    anthropic_api_key: Optional[str] = None

    HUMAN_PROMPT: Optional[str] = None
    AI_PROMPT: Optional[str] = None
    count_tokens: Optional[Callable[[str], int]] = None

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        anthropic_api_key = get_from_dict_or_env(
            values, "anthropic_api_key", "ANTHROPIC_API_KEY"
        )
        try:
            import anthropic

            values["client"] = anthropic.Client(anthropic_api_key)
            values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT
            values["AI_PROMPT"] = anthropic.AI_PROMPT
            values["count_tokens"] = anthropic.count_tokens
        except ImportError:
            raise ValueError(
                "Could not import anthropic python package. "
                "Please install it with `pip install anthropic`."
            )
        return values

    @property
    def _default_params(self) -> Mapping[str, Any]:
        """Get the default parameters for calling Anthropic API."""
        d = {
            "max_tokens_to_sample": self.max_tokens_to_sample,
            "model": self.model,
        }
        if self.temperature is not None:
            d["temperature"] = self.temperature
        if self.top_k is not None:
            d["top_k"] = self.top_k
        if self.top_p is not None:
            d["top_p"] = self.top_p
        return d

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{}, **self._default_params}

    def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
            raise NameError("Please ensure the anthropic package is loaded")

        if stop is None:
            stop = []

        # Never want model to invent new turns of Human / Assistant dialog.
        stop.extend([self.HUMAN_PROMPT])

        return stop

    def get_num_tokens(self, text: str) -> int:
        """Calculate number of tokens."""
        if not self.count_tokens:
            raise NameError("Please ensure the anthropic package is loaded")
        return self.count_tokens(text)
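`_AnthropicCommon` now holds everything shared by the completion and chat wrappers: client setup, sampling parameters, stop-sequence handling, and token counting. The shape of the refactor across this diff, reduced to a skeleton:

    # Skeleton only; bodies elided. All three classes appear in this diff.
    class _AnthropicCommon(BaseModel):
        ...  # client, model, sampling params, validate_environment, get_num_tokens

    class Anthropic(LLM, _AnthropicCommon):
        ...  # string-in/string-out completion interface

    class ChatAnthropic(BaseChatModel, _AnthropicCommon):
        ...  # message-based chat interface (langchain/chat_models/anthropic.py)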

class Anthropic(LLM, _AnthropicCommon):
    r"""Wrapper around Anthropic's large language models.

    To use, you should have the ``anthropic`` python package installed, and the
    environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
@@ -32,73 +117,15 @@ class Anthropic(LLM):
            response = model(prompt)
    """

    client: Any  #: :meta private:
    model: str = "claude-v1"
    """Model name to use."""

    max_tokens_to_sample: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: float = 1.0
    """A non-negative float that tunes the degree of randomness in generation."""

    top_k: int = 0
    """Number of most likely tokens to consider at each step."""

    top_p: float = 1
    """Total probability mass of tokens to consider at each step."""

    streaming: bool = False
    """Whether to stream the results."""

    anthropic_api_key: Optional[str] = None

    HUMAN_PROMPT: Optional[str] = None
    AI_PROMPT: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        anthropic_api_key = get_from_dict_or_env(
            values, "anthropic_api_key", "ANTHROPIC_API_KEY"
        )
        try:
            import anthropic

            values["client"] = anthropic.Client(anthropic_api_key)
            values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT
            values["AI_PROMPT"] = anthropic.AI_PROMPT
        except ImportError:
            raise ValueError(
                "Could not import anthropic python package. "
                "Please install it with `pip install anthropic`."
            )
        return values

    @property
    def _default_params(self) -> Mapping[str, Any]:
        """Get the default parameters for calling Anthropic API."""
        return {
            "max_tokens_to_sample": self.max_tokens_to_sample,
            "temperature": self.temperature,
            "top_k": self.top_k,
            "top_p": self.top_p,
        }

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "anthropic"
        return "anthropic-llm"

    def _wrap_prompt(self, prompt: str) -> str:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
@@ -115,18 +142,6 @@ class Anthropic(LLM):
        # As a last resort, wrap the prompt ourselves to emulate instruct-style.
        return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"

    def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
            raise NameError("Please ensure the anthropic package is loaded")

        if stop is None:
            stop = []

        # Never want model to invent new turns of Human / Assistant dialog.
        stop.extend([self.HUMAN_PROMPT, self.AI_PROMPT])

        return stop

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        r"""Call out to Anthropic's completion endpoint.

@@ -148,10 +163,8 @@ class Anthropic(LLM):
        stop = self._get_anthropic_stop(stop)
        if self.streaming:
            stream_resp = self.client.completion_stream(
                model=self.model,
                prompt=self._wrap_prompt(prompt),
                stop_sequences=stop,
                stream=True,
                **self._default_params,
            )
            current_completion = ""
@@ -163,7 +176,6 @@ class Anthropic(LLM):
            )
            return current_completion
        response = self.client.completion(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,
@@ -175,10 +187,8 @@ class Anthropic(LLM):
        stop = self._get_anthropic_stop(stop)
        if self.streaming:
            stream_resp = await self.client.acompletion_stream(
                model=self.model,
                prompt=self._wrap_prompt(prompt),
                stop_sequences=stop,
                stream=True,
                **self._default_params,
            )
            current_completion = ""
@@ -195,7 +205,6 @@ class Anthropic(LLM):
            )
            return current_completion
        response = await self.client.acompletion(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,
@@ -227,7 +236,6 @@ class Anthropic(LLM):
        """
        stop = self._get_anthropic_stop(stop)
        return self.client.completion_stream(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,

@@ -114,20 +114,7 @@ async def acompletion_with_retry(


class BaseOpenAI(BaseLLM):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            openai = OpenAI(model_name="text-davinci-003")
    """
    """Wrapper around OpenAI large language models."""

    client: Any  #: :meta private:
    model_name: str = "text-davinci-003"
@@ -541,7 +528,20 @@ class BaseOpenAI(BaseLLM):


class OpenAI(BaseOpenAI):
    """Generic OpenAI class that uses model name."""
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            openai = OpenAI(model_name="text-davinci-003")
    """

    @property
    def _invocation_params(self) -> Dict[str, Any]:
@@ -549,7 +549,20 @@ class OpenAI(BaseOpenAI):


class AzureOpenAI(BaseOpenAI):
    """Azure specific OpenAI class that uses deployment name."""
    """Wrapper around Azure-specific OpenAI large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import AzureOpenAI
            openai = AzureOpenAI(model_name="text-davinci-003")
    """

    deployment_name: str = ""
    """Deployment name to use."""

poetry.lock (generated)
@@ -9035,4 +9035,4 @@ qdrant = ["qdrant-client"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "373f68ef16e7f3d5d9cde8b81c5f261096cc537ddca4f6a36711d7215b63f226"
content-hash = "7e343fa8e31d8fcf1023cbda592f64c05e80015c4e0e23c1d387d2e9671ce995"

@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain"
version = "0.0.139"
version = "0.0.140"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
@@ -36,7 +36,7 @@ pinecone-text = {version = "^0.4.2", optional = true}
weaviate-client = {version = "^3", optional = true}
google-api-python-client = {version = "2.70.0", optional = true}
wolframalpha = {version = "5.0.0", optional = true}
anthropic = {version = "^0.2.4", optional = true}
anthropic = {version = "^0.2.6", optional = true}
qdrant-client = {version = "^1.1.2", optional = true, python = ">=3.8.1,<3.12"}
dataclasses-json = "^0.5.7"
tensorflow-text = {version = "^2.11.0", optional = true, python = "^3.10, <3.12"}

tests/integration_tests/chat_models/test_anthropic.py (new file, 83 lines)
@@ -0,0 +1,83 @@
"""Test Anthropic API wrapper."""
from typing import List

import pytest

from langchain.callbacks.base import CallbackManager
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatGeneration,
    HumanMessage,
    LLMResult,
)
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler


def test_anthropic_call() -> None:
    """Test valid call to anthropic."""
    chat = ChatAnthropic(model="test")
    message = HumanMessage(content="Hello")
    response = chat([message])
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, str)


def test_anthropic_streaming() -> None:
    """Test streaming tokens from anthropic."""
    chat = ChatAnthropic(model="test", streaming=True)
    message = HumanMessage(content="Hello")
    response = chat([message])
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, str)


def test_anthropic_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = ChatAnthropic(
        model="test",
        streaming=True,
        callback_manager=callback_manager,
        verbose=True,
    )
    message = HumanMessage(content="Write me a sentence with 10 words.")
    chat([message])
    assert callback_handler.llm_streams > 1


@pytest.mark.asyncio
async def test_anthropic_async_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = ChatAnthropic(
        model="test",
        streaming=True,
        callback_manager=callback_manager,
        verbose=True,
    )
    chat_messages: List[BaseMessage] = [
        HumanMessage(content="How many toes do dogs have?")
    ]
    result: LLMResult = await chat.agenerate([chat_messages])
    assert callback_handler.llm_streams > 1
    assert isinstance(result, LLMResult)
    for response in result.generations[0]:
        assert isinstance(response, ChatGeneration)
        assert isinstance(response.text, str)
        assert response.text == response.message.content


def test_formatting() -> None:
    chat = ChatAnthropic()

    chat_messages: List[BaseMessage] = [HumanMessage(content="Hello")]
    result = chat._convert_messages_to_prompt(chat_messages)
    assert result == "\n\nHuman: Hello\n\nAssistant:"

    chat_messages = [HumanMessage(content="Hello"), AIMessage(content="Answer:")]
    result = chat._convert_messages_to_prompt(chat_messages)
    assert result == "\n\nHuman: Hello\n\nAssistant: Answer:"
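These tests call the live Anthropic API, so `ANTHROPIC_API_KEY` must be set in the environment. One way to run just this module from Python, equivalent to invoking `pytest` on the path (sketch):

    import pytest

    # Runs only the new Anthropic chat-model integration tests.
    pytest.main(["-q", "tests/integration_tests/chat_models/test_anthropic.py"])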

@@ -32,7 +32,6 @@ def test_anthropic_streaming_callback() -> None:
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    llm = Anthropic(
        model="claude-v1",
        streaming=True,
        callback_manager=callback_manager,
        verbose=True,
@@ -55,7 +54,6 @@ async def test_anthropic_async_streaming_callback() -> None:
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    llm = Anthropic(
        model="claude-v1",
        streaming=True,
        callback_manager=callback_manager,
        verbose=True,