fmt
@@ -295,7 +295,7 @@
 "    Chroma,\n",
 "    # This is the number of examples to produce.\n",
 "    k=1,\n",
-"    input_keys=[\"question\"]\n",
+"    input_keys=[\"question\"],\n",
 ")\n",
 "\n",
 "# Select the most similar example to the input.\n",
@@ -345,11 +345,16 @@
 "    example_selector=example_selector,\n",
 "    example_prompt=example_prompt,\n",
 "    suffix=\"Person question is about: {input}\",\n",
-"    input_variables=[\"input\",\"question\"],\n",
+"    input_variables=[\"input\", \"question\"],\n",
 ")\n",
 "\n",
 "print(\n",
-"    prompt.invoke({\"question\": \"Who was the father of Mary Ball Washington?\",\"input\":\"Mary Ball Washington\"}).to_string()\n",
+"    prompt.invoke(\n",
+"        {\n",
+"            \"question\": \"Who was the father of Mary Ball Washington?\",\n",
+"            \"input\": \"Mary Ball Washington\",\n",
+"        }\n",
+"    ).to_string()\n",
 ")"
 ]
 },
235  libs/partners/ollama/testing.ipynb  Normal file
@@ -0,0 +1,235 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_ollama import ChatOllama\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"\n",
"chat = ChatOllama(model=\"mistral:v0.3\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel, Field\n",
"from typing import Literal\n",
"\n",
"class Weather(BaseModel):\n",
"    \"\"\"Format for searching for the weather\"\"\"\n",
"    location: str = Field(description=\"Where to search for the weather\")\n",
"    format: Literal['celsius', 'fahrenheit'] = Field(description=\"What temperature format to use: the options are fahrenheit or celsius\")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain_core.output_parsers import JsonOutputParser\n",
"\n",
"chat = ChatOpenAI()\n",
"# chat = chat.with_structured_output(Weather)\n",
"chat_with_parser = chat | JsonOutputParser()\n",
"# ans = chat.invoke([SystemMessage(\"you are a helpful assistant\"), HumanMessage(\"what is the weather in SF?\")])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Hello| there|!| I|'|m| just| a| computer| program|,| so| I| don|'|t| have| feelings| or| emotions|.| But| I|'|m| here| to| help| you| with| any| questions| you| might| have|.| How| can| I| assist| you| today|?||"
]
}
],
"source": [
"chunks = []\n",
"for chunk in chat.stream(\"hello! How are you doing\"):\n",
"    chunks.append(chunk)\n",
"    print(chunk.content, end=\"|\", flush=True)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{}\n",
"{'temperature': ''}\n",
"{'temperature': '61'}\n",
"{'temperature': '61°F'}\n",
"{'temperature': '61°F', 'wind_speed': ''}\n",
"{'temperature': '61°F', 'wind_speed': '5'}\n",
"{'temperature': '61°F', 'wind_speed': '5 mph'}\n"
]
}
],
"source": [
"async for chunk in chat_with_parser.astream(\"what is the weather in SF? please respond in a JSON object with the keys temperature and wind_speed\"):\n",
"    print(chunk)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_ollama import OllamaLLM\n",
"\n",
"llm = OllamaLLM(model=\"llama3\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I\\'m just an AI, I don\\'t have a physical presence or a specific location, so I don\\'t have access to real-time information about the current time. However, I can suggest some ways for you to find out what time it is:\\n\\n1. Check your phone or watch: If you have a phone or watch, you can check the time on them.\\n2. Ask Siri or Google Assistant: You can ask Siri or Google Assistant, \"What\\'s the current time?\" and they\\'ll tell you.\\n3. Look at a clock online: There are many websites that display the current time, such as WorldTimeBuddy or TimeAndDate.com.\\n4. Check your computer: If you\\'re using a computer, you can check the time on the top right corner of your screen.\\n\\nI hope this helps!'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ans = llm.invoke(\"what time is it\")\n",
"ans"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[AIMessage(content='HELLO, WORLD!', response_metadata={'model': 'llama3', 'created_at': '2024-07-04T03:55:07.315396Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 1696745458, 'load_duration': 1505000, 'prompt_eval_count': 8, 'prompt_eval_duration': 111627000, 'eval_count': 6, 'eval_duration': 185181000}, id='run-da6c7562-e25a-4a44-987a-2c83cd8c2686-0'),\n",
" AIMessage(content=\"It's been a blast chatting with you! Say goodbye to the world for me, and don't forget to come back and visit us again soon!\", response_metadata={'model': 'llama3', 'created_at': '2024-07-04T03:55:07.018076Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 1399391083, 'load_duration': 1187417, 'prompt_eval_count': 20, 'prompt_eval_duration': 230349000, 'eval_count': 31, 'eval_duration': 1166047000}, id='run-96cad530-6f3e-4cf9-86b4-e0f8abba4cdb-0')]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_ollama import ChatOllama\n",
"\n",
"llm = ChatOllama(model=\"llama3\")\n",
"\n",
"messages = [\n",
"    (\"human\", \"Say hello world!\"),\n",
"    (\"human\", \"Say goodbye world!\"),\n",
"]\n",
"await llm.abatch(messages)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Collecting pillow\n",
"  Using cached pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl.metadata (9.2 kB)\n",
"Using cached pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl (3.4 MB)\n",
"Installing collected packages: pillow\n",
"Successfully installed pillow-10.4.0\n",
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip3 install --upgrade pip\u001b[0m\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install pillow"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm just a language model, I don't have feelings or emotions like humans do. However, I am functioning properly and ready to assist you with any questions or tasks you may have. How can I help you today?\", response_metadata={'model': 'llama3', 'created_at': '2024-07-04T05:34:30.279971Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 7172346125, 'load_duration': 5309712833, 'prompt_eval_count': 13, 'prompt_eval_duration': 187567000, 'eval_count': 46, 'eval_duration': 1673373000}, id='run-ebbd096a-2376-4229-994a-7bc5983ab925-0')"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_ollama import ChatOllama\n",
"\n",
"llm = ChatOllama(model=\"llama3\")\n",
"await llm.ainvoke(\"how are you\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}