auto format

Chester Curme 2025-04-25 15:11:48 -04:00
parent 92bb93c9f0
commit 3a6c34be93
2 changed files with 51 additions and 44 deletions

View File

@@ -252,9 +252,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import base64\n",
+"\n",
 "from langchain_core.messages import HumanMessage\n",
 "from langchain_google_genai import ChatGoogleGenerativeAI\n",
-"import base64\n",
 "\n",
 "# Example using a public URL (remains the same)\n",
 "message_url = HumanMessage(\n",
@@ -273,18 +274,12 @@
 "image_file_path = \"/Users/philschmid/projects/google-gemini/langchain/docs/static/img/agents_vs_chains.png\"\n",
 "\n",
 "with open(image_file_path, \"rb\") as image_file:\n",
-"    encoded_image = base64.b64encode(image_file.read()).decode('utf-8')\n",
+"    encoded_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n",
 "\n",
 "message_local = HumanMessage(\n",
 "    content=[\n",
-"        {\n",
-"            \"type\": \"text\",\n",
-"            \"text\": \"Describe the local image.\"\n",
-"        },\n",
-"        {\n",
-"            \"type\": \"image_url\",\n",
-"            \"image_url\": f\"data:image/png;base64,{encoded_image}\"\n",
-"        }\n",
+"        {\"type\": \"text\", \"text\": \"Describe the local image.\"},\n",
+"        {\"type\": \"image_url\", \"image_url\": f\"data:image/png;base64,{encoded_image}\"},\n",
 "    ]\n",
 ")\n",
 "result_local = llm.invoke([message_local])\n",
@@ -312,32 +307,30 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_core.messages import HumanMessage\n",
 "import base64\n",
 "\n",
+"from langchain_core.messages import HumanMessage\n",
+"\n",
 "# Ensure you have an audio file named 'example_audio.mp3' or provide the correct path.\n",
 "audio_file_path = \"example_audio.mp3\"\n",
 "audio_mime_type = \"audio/mpeg\"\n",
 "\n",
 "\n",
 "with open(audio_file_path, \"rb\") as audio_file:\n",
-"    encoded_audio = base64.b64encode(audio_file.read()).decode('utf-8')\n",
+"    encoded_audio = base64.b64encode(audio_file.read()).decode(\"utf-8\")\n",
 "\n",
 "message = HumanMessage(\n",
 "    content=[\n",
-"        {\n",
-"            \"type\": \"text\",\n",
-"            \"text\": \"Transcribe the audio.\"\n",
-"        },\n",
+"        {\"type\": \"text\", \"text\": \"Transcribe the audio.\"},\n",
 "        {\n",
 "            \"type\": \"media\",\n",
 "            \"data\": encoded_audio,  # Use base64 string directly\n",
 "            \"mime_type\": audio_mime_type,\n",
-"        }\n",
+"        },\n",
 "    ]\n",
 ")\n",
 "response = llm.invoke([message])  # Uncomment to run\n",
-"print(f\"Response for audio: {response.content}\")\n"
+"print(f\"Response for audio: {response.content}\")"
 ]
},
{
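
Audio (above) and video (below) use the same base64 "media" content-part shape, so the repeated boilerplate could be folded into a helper; media_part is a hypothetical name, not part of this commit:

import base64


def media_part(path: str, mime_type: str) -> dict:
    """Read a local file and wrap it as a base64 'media' content part."""
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    return {"type": "media", "data": encoded, "mime_type": mime_type}
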
@@ -357,9 +350,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import base64\n",
+"\n",
 "from langchain_core.messages import HumanMessage\n",
 "from langchain_google_genai import ChatGoogleGenerativeAI\n",
-"import base64\n",
 "\n",
 "# Ensure you have a video file named 'example_video.mp4' or provide the correct path.\n",
 "video_file_path = \"example_video.mp4\"\n",
@@ -367,24 +361,20 @@
 "\n",
 "\n",
 "with open(video_file_path, \"rb\") as video_file:\n",
-"    encoded_video = base64.b64encode(video_file.read()).decode('utf-8')\n",
+"    encoded_video = base64.b64encode(video_file.read()).decode(\"utf-8\")\n",
 "\n",
 "message = HumanMessage(\n",
 "    content=[\n",
-"        {\n",
-"            \"type\": \"text\",\n",
-"            \"text\": \"Describe the first few frames of the video.\"\n",
-"        },\n",
+"        {\"type\": \"text\", \"text\": \"Describe the first few frames of the video.\"},\n",
 "        {\n",
 "            \"type\": \"media\",\n",
 "            \"data\": encoded_video,  # Use base64 string directly\n",
 "            \"mime_type\": video_mime_type,\n",
-"        }\n",
+"        },\n",
 "    ]\n",
 ")\n",
 "response = llm.invoke([message])  # Uncomment to run\n",
-"print(f\"Response for video: {response.content}\")\n",
-"    "
+"print(f\"Response for video: {response.content}\")"
 ]
},
{
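
With that hypothetical helper, the video cell collapses to the following (llm being the ChatGoogleGenerativeAI instance defined earlier in the notebook):

from langchain_core.messages import HumanMessage

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe the first few frames of the video."},
        media_part("example_video.mp4", "video/mp4"),  # assumed local file
    ]
)
response = llm.invoke([message])
print(f"Response for video: {response.content}")
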
@@ -575,14 +565,16 @@
 }
 ],
 "source": [
-"from langchain_google_genai import ChatGoogleGenerativeAI\n",
 "from langchain_core.tools import tool\n",
+"from langchain_google_genai import ChatGoogleGenerativeAI\n",
+"\n",
 "\n",
 "# Define the tool\n",
 "@tool(description=\"Get the current weather in a given location\")\n",
 "def get_weather(location: str) -> str:\n",
 "    return \"It's sunny.\"\n",
 "\n",
+"\n",
 "# Initialize the model and bind the tool\n",
 "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
 "llm_with_tools = llm.bind_tools([get_weather])\n",
@@ -596,8 +588,12 @@
 "\n",
 "# Example tool call message would be needed here if you were actually running the tool\n",
 "from langchain_core.messages import ToolMessage\n",
-"tool_message = ToolMessage(content=get_weather(*ai_msg.tool_calls[0]['args']), tool_call_id=ai_msg.tool_calls[0]['id'])\n",
-"llm_with_tools.invoke([ai_msg, tool_message]) # Example of passing tool result back\n"
+"\n",
+"tool_message = ToolMessage(\n",
+"    content=get_weather(*ai_msg.tool_calls[0][\"args\"]),\n",
+"    tool_call_id=ai_msg.tool_calls[0][\"id\"],\n",
+")\n",
+"llm_with_tools.invoke([ai_msg, tool_message])  # Example of passing tool result back"
 ]
},
{
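
Note that the reflowed call still star-unpacks the args dict, which passes the dict's keys rather than its values. A sketch of the same round trip using the tool's Runnable .invoke method, which accepts the args dict directly, would be (the prompt is an assumption):

from langchain_core.messages import HumanMessage, ToolMessage

human_msg = HumanMessage(content="What's the weather in San Francisco?")
ai_msg = llm_with_tools.invoke([human_msg])

if ai_msg.tool_calls:  # the model may or may not emit a tool call
    tool_call = ai_msg.tool_calls[0]
    tool_message = ToolMessage(
        content=get_weather.invoke(tool_call["args"]),  # pass the args dict directly
        tool_call_id=tool_call["id"],
    )
    # Pass the tool result back for a final answer
    print(llm_with_tools.invoke([human_msg, ai_msg, tool_message]).content)
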
@@ -628,19 +624,24 @@
 "from langchain_core.pydantic_v1 import BaseModel, Field\n",
 "from langchain_google_genai import ChatGoogleGenerativeAI\n",
 "\n",
+"\n",
 "# Define the desired structure\n",
 "class Person(BaseModel):\n",
-"    '''Information about a person.'''\n",
+"    \"\"\"Information about a person.\"\"\"\n",
+"\n",
 "    name: str = Field(..., description=\"The person's name\")\n",
 "    height_m: float = Field(..., description=\"The person's height in meters\")\n",
 "\n",
+"\n",
 "# Initialize the model\n",
 "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", temperature=0)\n",
 "structured_llm = llm.with_structured_output(Person)\n",
 "\n",
 "# Invoke the model with a query asking for structured information\n",
-"result = structured_llm.invoke(\"Who was the 16th president of the USA, and how tall was he in meters?\")\n",
-"print(result)\n"
+"result = structured_llm.invoke(\n",
+"    \"Who was the 16th president of the USA, and how tall was he in meters?\"\n",
+")\n",
+"print(result)"
 ]
},
{
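
For reference, the reformatted cell is equivalent to this self-contained sketch; the pydantic import below is a substitution for the notebook's deprecated langchain_core.pydantic_v1, since recent LangChain also accepts plain pydantic v2 models:

from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic import BaseModel, Field


class Person(BaseModel):
    """Information about a person."""

    name: str = Field(..., description="The person's name")
    height_m: float = Field(..., description="The person's height in meters")


llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
structured_llm = llm.with_structured_output(Person)

result = structured_llm.invoke(
    "Who was the 16th president of the USA, and how tall was he in meters?"
)
print(result)  # e.g. Person(name='Abraham Lincoln', height_m=1.93)
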
@@ -763,9 +764,9 @@
 "\n",
 "for c in resp.content:\n",
 "    if isinstance(c, dict):\n",
-"        if c[\"type\"] == 'code_execution_result':\n",
+"        if c[\"type\"] == \"code_execution_result\":\n",
 "            print(f\"Code execution result: {c['code_execution_result']}\")\n",
-"        elif c[\"type\"] == 'executable_code':\n",
+"        elif c[\"type\"] == \"executable_code\":\n",
 "            print(f\"Executable code: {c['executable_code']}\")\n",
 "        else:\n",
 "            print(c)"
@@ -813,6 +814,7 @@
 "\n",
 "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
 "\n",
+"\n",
 "async def run_async_calls():\n",
 "    # Async invoke\n",
 "    result_ainvoke = await llm.ainvoke(\"Why is the sky blue?\")\n",
@@ -820,7 +822,9 @@
 "\n",
 "    # Async stream\n",
 "    print(\"\\nAsync Stream Result:\")\n",
-"    async for chunk in llm.astream(\"Write a short poem about asynchronous programming.\"):\n",
+"    async for chunk in llm.astream(\n",
+"        \"Write a short poem about asynchronous programming.\"\n",
+"    ):\n",
 "        print(chunk.content, end=\"\", flush=True)\n",
 "    print(\"\\n\")\n",
 "\n",
@@ -828,6 +832,7 @@
 "    results_abatch = await llm.abatch([\"What is 1+1?\", \"What is 2+2?\"])\n",
 "    print(\"Async Batch Results:\", [res.content for res in results_abatch])\n",
 "\n",
+"\n",
 "await run_async_calls()"
 ]
},
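
The notebook relies on top-level await; outside Jupyter the same cell needs an explicit event loop, roughly:

import asyncio

from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")


async def run_async_calls():
    # Async invoke
    result_ainvoke = await llm.ainvoke("Why is the sky blue?")
    print("Async Invoke Result:", result_ainvoke.content)

    # Async stream
    async for chunk in llm.astream(
        "Write a short poem about asynchronous programming."
    ):
        print(chunk.content, end="", flush=True)
    print()

    # Async batch
    results_abatch = await llm.abatch(["What is 1+1?", "What is 2+2?"])
    print("Async Batch Results:", [res.content for res in results_abatch])


if __name__ == "__main__":
    asyncio.run(run_async_calls())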

View File

@@ -274,12 +274,14 @@
 ")\n",
 "\n",
 "q_embed = query_embeddings.embed_query(\"What is the capital of France?\")\n",
-"d_embed = doc_embeddings.embed_documents([\"The capital of France is Paris.\", \"Philipp is likes to eat pizza.\"])\n",
+"d_embed = doc_embeddings.embed_documents(\n",
+"    [\"The capital of France is Paris.\", \"Philipp is likes to eat pizza.\"]\n",
+")\n",
 "\n",
 "for i, d in enumerate(d_embed):\n",
 "    print(f\"Document {i+1}:\")\n",
 "    print(f\"Cosine similarity with query: {cosine_similarity([q_embed], [d])[0][0]}\")\n",
 "    print(\"---\")"
 ]
},
{
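
Made self-contained, the reflowed embedding cell looks like the sketch below; the model name and task_type values are assumptions based on the Google embeddings API, and cosine_similarity is scikit-learn's:

from langchain_google_genai import GoogleGenerativeAIEmbeddings
from sklearn.metrics.pairwise import cosine_similarity

# Separate task types for queries vs. documents (assumed setup)
query_embeddings = GoogleGenerativeAIEmbeddings(
    model="models/embedding-001", task_type="retrieval_query"
)
doc_embeddings = GoogleGenerativeAIEmbeddings(
    model="models/embedding-001", task_type="retrieval_document"
)

q_embed = query_embeddings.embed_query("What is the capital of France?")
d_embed = doc_embeddings.embed_documents(
    ["The capital of France is Paris.", "Philipp is likes to eat pizza."]
)

for i, d in enumerate(d_embed):
    print(f"Document {i + 1}:")
    print(f"Cosine similarity with query: {cosine_similarity([q_embed], [d])[0][0]}")
    print("---")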