mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-03 03:59:42 +00:00)
add anthropic example (#1041)
Co-authored-by: Ivan Vendrov <ivendrov@gmail.com>
Co-authored-by: Sasmitha Manathunga <70096033+mmz-001@users.noreply.github.com>
@@ -19,6 +19,8 @@ The examples here are all "how-to" guides for how to integrate with various LLM
 
 `PromptLayer OpenAI <./integrations/promptlayer_openai.html>`_: Covers how to use `PromptLayer <https://promptlayer.com>`_ with Langchain.
 
+`Anthropic <./integrations/anthropic_example.html>`_: Covers how to use Anthropic models with Langchain.
+
 .. toctree::
    :maxdepth: 1
docs/modules/llms/integrations/anthropic_example.ipynb (new file, 110 lines)
@@ -0,0 +1,110 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "9597802c",
   "metadata": {},
   "source": [
    "# Anthropic\n",
    "This example goes over how to use LangChain to interact with Anthropic models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6fb585dd",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms import Anthropic\n",
    "from langchain import PromptTemplate, LLMChain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "035dea0f",
   "metadata": {},
   "outputs": [],
   "source": [
    "template = \"\"\"Question: {question}\n",
    "\n",
    "Answer: Let's think step by step.\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3f3458d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = Anthropic()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a641dbd9",
   "metadata": {},
   "outputs": [],
   "source": [
    "llm_chain = LLMChain(prompt=prompt, llm=llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "9f844993",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\" Step 1: Justin Beiber was born on March 1, 1994\\nStep 2: The NFL season ends with the Super Bowl in January/February\\nStep 3: Therefore, the Super Bowl that occurred closest to Justin Beiber's birth would be Super Bowl XXIX in 1995\\nStep 4: The San Francisco 49ers won Super Bowl XXIX in 1995\\n\\nTherefore, the answer is the San Francisco 49ers won the Super Bowl in the year Justin Beiber was born.\""
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
    "\n",
    "llm_chain.run(question)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4797d719",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
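For reference, the notebook boils down to a short script. A minimal sketch of the same flow, assuming the `anthropic` package is installed and an Anthropic API key is configured for it (key setup is not shown in this diff):

    # Plain-script version of the notebook above (minimal sketch; assumes the
    # `anthropic` package is installed and an API key is configured for it).
    from langchain.llms import Anthropic
    from langchain import PromptTemplate, LLMChain

    template = "Question: {question}\n\nAnswer: Let's think step by step."
    prompt = PromptTemplate(template=template, input_variables=["question"])

    llm = Anthropic()  # after this commit, defaults to the claude-v1 model
    llm_chain = LLMChain(prompt=prompt, llm=llm)

    question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
    print(llm_chain.run(question))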
@@ -136,15 +136,22 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3.9.12 ('palm')",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
   "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
    "name": "python",
-   "version": "3.9.12"
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.1"
   },
-  "orig_nbformat": 4,
   "vscode": {
    "interpreter": {
     "hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
@@ -1,4 +1,5 @@
 """Wrapper around Anthropic APIs."""
+import re
 from typing import Any, Dict, Generator, List, Mapping, Optional
 
 from pydantic import BaseModel, Extra, root_validator
@@ -32,7 +33,7 @@ class Anthropic(LLM, BaseModel):
     """
 
     client: Any  #: :meta private:
-    model: Optional[str] = None
+    model: str = "claude-v1"
     """Model name to use."""
 
     max_tokens_to_sample: int = 256
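With this change a bare `Anthropic()` resolves to `claude-v1` rather than leaving the model unset. A quick sketch using only the two fields visible in this hunk (the 512 value is just an illustrative override):

    from langchain.llms import Anthropic

    llm = Anthropic()                                             # model="claude-v1"
    llm = Anthropic(model="claude-v1", max_tokens_to_sample=512)  # explicit override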
@@ -99,10 +100,17 @@ class Anthropic(LLM, BaseModel):
     def _wrap_prompt(self, prompt: str) -> str:
         if not self.HUMAN_PROMPT or not self.AI_PROMPT:
             raise NameError("Please ensure the anthropic package is loaded")
 
         if prompt.startswith(self.HUMAN_PROMPT):
             return prompt  # Already wrapped.
-        else:
-            return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"
+
+        # Guard against common errors in specifying wrong number of newlines.
+        corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt)
+        if n_subs == 1:
+            return corrected_prompt
+
+        # As a last resort, wrap the prompt ourselves to emulate instruct-style.
+        return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"
 
     def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
         if not self.HUMAN_PROMPT or not self.AI_PROMPT: