mirror of
https://github.com/hwchase17/langchain.git
synced 2025-07-01 10:54:15 +00:00
Simplify HumanMessages in the quick start guide (#4026)
In the section `Get Message Completions from a Chat Model` of the quick start guide, the HumanMessage doesn't need to include `Translate this sentence from English to French.` when there is a system message. Simplifying the HumanMessages in these examples can further demonstrate the power of LLMs.
This commit is contained in:
parent
087a4bd2b8
commit
8af25867cb
@ -316,7 +316,7 @@ You can also pass in multiple messages for OpenAI's gpt-3.5-turbo and gpt-4 mode
|
|||||||
```python
|
```python
|
||||||
messages = [
|
messages = [
|
||||||
SystemMessage(content="You are a helpful assistant that translates English to French."),
|
SystemMessage(content="You are a helpful assistant that translates English to French."),
|
||||||
HumanMessage(content="Translate this sentence from English to French. I love programming.")
|
HumanMessage(content="I love programming.")
|
||||||
]
|
]
|
||||||
chat(messages)
|
chat(messages)
|
||||||
# -> AIMessage(content="J'aime programmer.", additional_kwargs={})
|
# -> AIMessage(content="J'aime programmer.", additional_kwargs={})
|
||||||
@ -327,22 +327,22 @@ You can go one step further and generate completions for multiple sets of messag
|
|||||||
batch_messages = [
|
batch_messages = [
|
||||||
[
|
[
|
||||||
SystemMessage(content="You are a helpful assistant that translates English to French."),
|
SystemMessage(content="You are a helpful assistant that translates English to French."),
|
||||||
HumanMessage(content="Translate this sentence from English to French. I love programming.")
|
HumanMessage(content="I love programming.")
|
||||||
],
|
],
|
||||||
[
|
[
|
||||||
SystemMessage(content="You are a helpful assistant that translates English to French."),
|
SystemMessage(content="You are a helpful assistant that translates English to French."),
|
||||||
HumanMessage(content="Translate this sentence from English to French. I love artificial intelligence.")
|
HumanMessage(content="I love artificial intelligence.")
|
||||||
],
|
],
|
||||||
]
|
]
|
||||||
result = chat.generate(batch_messages)
|
result = chat.generate(batch_messages)
|
||||||
result
|
result
|
||||||
# -> LLMResult(generations=[[ChatGeneration(text="J'aime programmer.", generation_info=None, message=AIMessage(content="J'aime programmer.", additional_kwargs={}))], [ChatGeneration(text="J'aime l'intelligence artificielle.", generation_info=None, message=AIMessage(content="J'aime l'intelligence artificielle.", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 71, 'completion_tokens': 18, 'total_tokens': 89}})
|
# -> LLMResult(generations=[[ChatGeneration(text="J'aime programmer.", generation_info=None, message=AIMessage(content="J'aime programmer.", additional_kwargs={}))], [ChatGeneration(text="J'aime l'intelligence artificielle.", generation_info=None, message=AIMessage(content="J'aime l'intelligence artificielle.", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}})
|
||||||
```
|
```
|
||||||
|
|
||||||
You can recover things like token usage from this LLMResult:
|
You can recover things like token usage from this LLMResult:
|
||||||
```
|
```
|
||||||
result.llm_output['token_usage']
|
result.llm_output['token_usage']
|
||||||
# -> {'prompt_tokens': 71, 'completion_tokens': 18, 'total_tokens': 89}
|
# -> {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
@ -107,7 +107,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"messages = [\n",
|
"messages = [\n",
|
||||||
" SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
|
" SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
|
||||||
" HumanMessage(content=\"Translate this sentence from English to French. I love programming.\")\n",
|
" HumanMessage(content=\"I love programming.\")\n",
|
||||||
"]\n",
|
"]\n",
|
||||||
"chat(messages)"
|
"chat(messages)"
|
||||||
]
|
]
|
||||||
@ -131,7 +131,7 @@
|
|||||||
{
|
{
|
||||||
"data": {
|
"data": {
|
||||||
"text/plain": [
|
"text/plain": [
|
||||||
"LLMResult(generations=[[ChatGeneration(text=\"J'aime programmer.\", generation_info=None, message=AIMessage(content=\"J'aime programmer.\", additional_kwargs={}))], [ChatGeneration(text=\"J'aime l'intelligence artificielle.\", generation_info=None, message=AIMessage(content=\"J'aime l'intelligence artificielle.\", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 71, 'completion_tokens': 18, 'total_tokens': 89}})"
|
"LLMResult(generations=[[ChatGeneration(text=\"J'aime programmer.\", generation_info=None, message=AIMessage(content=\"J'aime programmer.\", additional_kwargs={}))], [ChatGeneration(text=\"J'aime l'intelligence artificielle.\", generation_info=None, message=AIMessage(content=\"J'aime l'intelligence artificielle.\", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}})"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"execution_count": 5,
|
"execution_count": 5,
|
||||||
@ -143,11 +143,11 @@
|
|||||||
"batch_messages = [\n",
|
"batch_messages = [\n",
|
||||||
" [\n",
|
" [\n",
|
||||||
" SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
|
" SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
|
||||||
" HumanMessage(content=\"Translate this sentence from English to French. I love programming.\")\n",
|
" HumanMessage(content=\"I love programming.\")\n",
|
||||||
" ],\n",
|
" ],\n",
|
||||||
" [\n",
|
" [\n",
|
||||||
" SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
|
" SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
|
||||||
" HumanMessage(content=\"Translate this sentence from English to French. I love artificial intelligence.\")\n",
|
" HumanMessage(content=\"I love artificial intelligence.\")\n",
|
||||||
" ],\n",
|
" ],\n",
|
||||||
"]\n",
|
"]\n",
|
||||||
"result = chat.generate(batch_messages)\n",
|
"result = chat.generate(batch_messages)\n",
|
||||||
@ -171,9 +171,9 @@
|
|||||||
{
|
{
|
||||||
"data": {
|
"data": {
|
||||||
"text/plain": [
|
"text/plain": [
|
||||||
"{'token_usage': {'prompt_tokens': 71,\n",
|
"{'token_usage': {'prompt_tokens': 57,\n",
|
||||||
" 'completion_tokens': 18,\n",
|
" 'completion_tokens': 20,\n",
|
||||||
" 'total_tokens': 89}}"
|
" 'total_tokens': 77}}"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"execution_count": 6,
|
"execution_count": 6,
|
||||||
|
Loading…
Reference in New Issue
Block a user