Mirror of https://github.com/hwchase17/langchain.git, synced 2025-08-02 01:23:07 +00:00
auto format

This commit is contained in:
parent 92bb93c9f0
commit 3a6c34be93
@@ -252,9 +252,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import base64\n",
+"\n",
 "from langchain_core.messages import HumanMessage\n",
 "from langchain_google_genai import ChatGoogleGenerativeAI\n",
-"import base64\n",
 "\n",
 "# Example using a public URL (remains the same)\n",
 "message_url = HumanMessage(\n",
@@ -273,18 +274,12 @@
 "image_file_path = \"/Users/philschmid/projects/google-gemini/langchain/docs/static/img/agents_vs_chains.png\"\n",
 "\n",
 "with open(image_file_path, \"rb\") as image_file:\n",
-"    encoded_image = base64.b64encode(image_file.read()).decode('utf-8')\n",
+"    encoded_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n",
 "\n",
 "message_local = HumanMessage(\n",
 "    content=[\n",
-"        {\n",
-"            \"type\": \"text\",\n",
-"            \"text\": \"Describe the local image.\"\n",
-"        },\n",
-"        {\n",
-"            \"type\": \"image_url\",\n",
-"            \"image_url\": f\"data:image/png;base64,{encoded_image}\"\n",
-"        }\n",
+"        {\"type\": \"text\", \"text\": \"Describe the local image.\"},\n",
+"        {\"type\": \"image_url\", \"image_url\": f\"data:image/png;base64,{encoded_image}\"},\n",
 "    ]\n",
 ")\n",
 "result_local = llm.invoke([message_local])\n",
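Put together, the formatted image cell runs as a plain script. A minimal sketch, assuming `langchain-google-genai` is installed and `GOOGLE_API_KEY` is set; the author's absolute path is swapped for a placeholder `./example.png`, and `llm` is initialized here because the notebook defines it in an earlier cell:

```python
import base64

from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

# Defined in an earlier notebook cell; model name taken from later hunks.
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")

# Placeholder path; the notebook points at a local screenshot instead.
image_file_path = "./example.png"

with open(image_file_path, "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode("utf-8")

message_local = HumanMessage(
    content=[
        {"type": "text", "text": "Describe the local image."},
        {"type": "image_url", "image_url": f"data:image/png;base64,{encoded_image}"},
    ]
)
result_local = llm.invoke([message_local])
print(result_local.content)
```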
@@ -312,32 +307,30 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_core.messages import HumanMessage\n",
 "import base64\n",
 "\n",
+"from langchain_core.messages import HumanMessage\n",
+"\n",
 "# Ensure you have an audio file named 'example_audio.mp3' or provide the correct path.\n",
 "audio_file_path = \"example_audio.mp3\"\n",
-"audio_mime_type=\"audio/mpeg\"\n",
+"audio_mime_type = \"audio/mpeg\"\n",
 "\n",
 "\n",
 "with open(audio_file_path, \"rb\") as audio_file:\n",
-"    encoded_audio = base64.b64encode(audio_file.read()).decode('utf-8')\n",
+"    encoded_audio = base64.b64encode(audio_file.read()).decode(\"utf-8\")\n",
 "\n",
 "message = HumanMessage(\n",
 "    content=[\n",
-"        {\n",
-"            \"type\": \"text\",\n",
-"            \"text\": \"Transcribe the audio.\"\n",
-"        },\n",
+"        {\"type\": \"text\", \"text\": \"Transcribe the audio.\"},\n",
 "        {\n",
 "            \"type\": \"media\",\n",
-"            \"data\": encoded_audio, # Use base64 string directly\n",
+"            \"data\": encoded_audio,  # Use base64 string directly\n",
 "            \"mime_type\": audio_mime_type,\n",
-"        }\n",
+"        },\n",
 "    ]\n",
 ")\n",
-"response = llm.invoke([message]) # Uncomment to run\n",
-"print(f\"Response for audio: {response.content}\")\n"
+"response = llm.invoke([message])  # Uncomment to run\n",
+"print(f\"Response for audio: {response.content}\")"
 ]
 },
 {
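The audio cell assembles the same way. A sketch assuming `llm` from an earlier cell and an `example_audio.mp3` on disk, which is the placeholder path the cell itself uses:

```python
import base64

from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")  # from an earlier cell

audio_file_path = "example_audio.mp3"  # placeholder path from the cell
audio_mime_type = "audio/mpeg"

with open(audio_file_path, "rb") as audio_file:
    encoded_audio = base64.b64encode(audio_file.read()).decode("utf-8")

message = HumanMessage(
    content=[
        {"type": "text", "text": "Transcribe the audio."},
        {
            "type": "media",
            "data": encoded_audio,  # base64 string, not raw bytes
            "mime_type": audio_mime_type,
        },
    ]
)
response = llm.invoke([message])
print(f"Response for audio: {response.content}")
```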
@@ -357,34 +350,31 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import base64\n",
+"\n",
 "from langchain_core.messages import HumanMessage\n",
 "from langchain_google_genai import ChatGoogleGenerativeAI\n",
-"import base64\n",
 "\n",
 "# Ensure you have a video file named 'example_video.mp4' or provide the correct path.\n",
 "video_file_path = \"example_video.mp4\"\n",
-"video_mime_type=\"video/mp4\"\n",
+"video_mime_type = \"video/mp4\"\n",
 "\n",
 "\n",
 "with open(video_file_path, \"rb\") as video_file:\n",
-"    encoded_video = base64.b64encode(video_file.read()).decode('utf-8')\n",
+"    encoded_video = base64.b64encode(video_file.read()).decode(\"utf-8\")\n",
 "\n",
 "message = HumanMessage(\n",
 "    content=[\n",
-"        {\n",
-"            \"type\": \"text\",\n",
-"            \"text\": \"Describe the first few frames of the video.\"\n",
-"        },\n",
+"        {\"type\": \"text\", \"text\": \"Describe the first few frames of the video.\"},\n",
 "        {\n",
 "            \"type\": \"media\",\n",
-"            \"data\": encoded_video, # Use base64 string directly\n",
+"            \"data\": encoded_video,  # Use base64 string directly\n",
 "            \"mime_type\": video_mime_type,\n",
-"        }\n",
+"        },\n",
 "    ]\n",
 ")\n",
-"response = llm.invoke([message]) # Uncomment to run\n",
-"print(f\"Response for video: {response.content}\")\n",
-"    "
+"response = llm.invoke([message])  # Uncomment to run\n",
+"print(f\"Response for video: {response.content}\")"
 ]
 },
 {
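The video cell is the identical pattern with a different payload and MIME type; assembled under the same assumptions:

```python
import base64

from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")  # from an earlier cell

with open("example_video.mp4", "rb") as video_file:  # placeholder path from the cell
    encoded_video = base64.b64encode(video_file.read()).decode("utf-8")

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe the first few frames of the video."},
        {"type": "media", "data": encoded_video, "mime_type": "video/mp4"},
    ]
)
response = llm.invoke([message])
print(f"Response for video: {response.content}")
```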
@@ -575,14 +565,16 @@
 }
 ],
 "source": [
-"from langchain_google_genai import ChatGoogleGenerativeAI\n",
 "from langchain_core.tools import tool\n",
+"from langchain_google_genai import ChatGoogleGenerativeAI\n",
 "\n",
+"\n",
 "# Define the tool\n",
 "@tool(description=\"Get the current weather in a given location\")\n",
 "def get_weather(location: str) -> str:\n",
 "    return \"It's sunny.\"\n",
 "\n",
+"\n",
 "# Initialize the model and bind the tool\n",
 "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
 "llm_with_tools = llm.bind_tools([get_weather])\n",
@@ -596,8 +588,12 @@
 "\n",
 "# Example tool call message would be needed here if you were actually running the tool\n",
 "from langchain_core.messages import ToolMessage\n",
-"tool_message = ToolMessage(content=get_weather(*ai_msg.tool_calls[0]['args']), tool_call_id=ai_msg.tool_calls[0]['id'])\n",
-"llm_with_tools.invoke([ai_msg, tool_message]) # Example of passing tool result back\n"
+"\n",
+"tool_message = ToolMessage(\n",
+"    content=get_weather(*ai_msg.tool_calls[0][\"args\"]),\n",
+"    tool_call_id=ai_msg.tool_calls[0][\"id\"],\n",
+")\n",
+"llm_with_tools.invoke([ai_msg, tool_message])  # Example of passing tool result back"
 ]
 },
 {
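End to end, the two tool-calling hunks compose into the sketch below. Two hedges: the user query is a stand-in (the hunks never show it), and the tool result is produced with `get_weather.invoke(...)`, the current recommended way to run a `@tool`-decorated function, instead of the cell's direct `get_weather(*...)` call:

```python
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_core.tools import tool
from langchain_google_genai import ChatGoogleGenerativeAI


# Define the tool
@tool(description="Get the current weather in a given location")
def get_weather(location: str) -> str:
    return "It's sunny."


# Initialize the model and bind the tool
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
llm_with_tools = llm.bind_tools([get_weather])

query = "What's the weather in London?"  # stand-in query
ai_msg = llm_with_tools.invoke(query)

# Run the requested tool and pass the result back to the model
tool_message = ToolMessage(
    content=get_weather.invoke(ai_msg.tool_calls[0]["args"]),
    tool_call_id=ai_msg.tool_calls[0]["id"],
)
final = llm_with_tools.invoke([HumanMessage(query), ai_msg, tool_message])
print(final.content)
```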
@@ -628,19 +624,24 @@
 "from langchain_core.pydantic_v1 import BaseModel, Field\n",
 "from langchain_google_genai import ChatGoogleGenerativeAI\n",
 "\n",
+"\n",
 "# Define the desired structure\n",
 "class Person(BaseModel):\n",
-"    '''Information about a person.'''\n",
+"    \"\"\"Information about a person.\"\"\"\n",
+"\n",
 "    name: str = Field(..., description=\"The person's name\")\n",
 "    height_m: float = Field(..., description=\"The person's height in meters\")\n",
 "\n",
+"\n",
 "# Initialize the model\n",
 "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", temperature=0)\n",
 "structured_llm = llm.with_structured_output(Person)\n",
 "\n",
 "# Invoke the model with a query asking for structured information\n",
-"result = structured_llm.invoke(\"Who was the 16th president of the USA, and how tall was he in meters?\")\n",
-"print(result)\n"
+"result = structured_llm.invoke(\n",
+"    \"Who was the 16th president of the USA, and how tall was he in meters?\"\n",
+")\n",
+"print(result)"
 ]
 },
 {
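The formatted structured-output cell is runnable as a script once the imports resolve; it still uses the `langchain_core.pydantic_v1` shim, exactly as the cell does:

```python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_google_genai import ChatGoogleGenerativeAI


# Define the desired structure
class Person(BaseModel):
    """Information about a person."""

    name: str = Field(..., description="The person's name")
    height_m: float = Field(..., description="The person's height in meters")


llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
structured_llm = llm.with_structured_output(Person)

result = structured_llm.invoke(
    "Who was the 16th president of the USA, and how tall was he in meters?"
)
print(result)  # expect something like: name='Abraham Lincoln' height_m=1.93
```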
@@ -763,9 +764,9 @@
 "\n",
 "for c in resp.content:\n",
 "    if isinstance(c, dict):\n",
-"        if c[\"type\"] == 'code_execution_result':\n",
+"        if c[\"type\"] == \"code_execution_result\":\n",
 "            print(f\"Code execution result: {c['code_execution_result']}\")\n",
-"        elif c[\"type\"] == 'executable_code':\n",
+"        elif c[\"type\"] == \"executable_code\":\n",
 "            print(f\"Executable code: {c['executable_code']}\")\n",
 "    else:\n",
 "        print(c)"
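This hunk only touches quoting inside the result-parsing loop. The `resp.content` it walks is a list mixing plain strings with typed dicts; a self-contained sketch against a mocked payload (field names follow the loop itself; a real `resp` comes from invoking the model with Gemini's code-execution tool enabled):

```python
# Mocked stand-in for resp.content; a real one is produced by the model.
resp_content = [
    {"type": "executable_code", "executable_code": "print(2 + 2)"},
    {"type": "code_execution_result", "code_execution_result": "4\n"},
    "The result of 2 + 2 is 4.",
]

for c in resp_content:
    if isinstance(c, dict):
        if c["type"] == "code_execution_result":
            print(f"Code execution result: {c['code_execution_result']}")
        elif c["type"] == "executable_code":
            print(f"Executable code: {c['executable_code']}")
    else:
        print(c)  # plain text chunks pass through unchanged
```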
@@ -813,6 +814,7 @@
 "\n",
 "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
 "\n",
+"\n",
 "async def run_async_calls():\n",
 "    # Async invoke\n",
 "    result_ainvoke = await llm.ainvoke(\"Why is the sky blue?\")\n",
@@ -820,7 +822,9 @@
 "\n",
 "    # Async stream\n",
 "    print(\"\\nAsync Stream Result:\")\n",
-"    async for chunk in llm.astream(\"Write a short poem about asynchronous programming.\"):\n",
+"    async for chunk in llm.astream(\n",
+"        \"Write a short poem about asynchronous programming.\"\n",
+"    ):\n",
 "        print(chunk.content, end=\"\", flush=True)\n",
 "    print(\"\\n\")\n",
 "\n",
@@ -828,6 +832,7 @@
 "    results_abatch = await llm.abatch([\"What is 1+1?\", \"What is 2+2?\"])\n",
 "    print(\"Async Batch Results:\", [res.content for res in results_abatch])\n",
 "\n",
+"\n",
 "await run_async_calls()"
 ]
 },
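The three preceding hunks all format one async cell. Assembled, it reads as below; the `ainvoke` print line falls outside the hunks, so the one here is a stand-in, and `asyncio.run` replaces the notebook's top-level `await`:

```python
import asyncio

from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")


async def run_async_calls():
    # Async invoke
    result_ainvoke = await llm.ainvoke("Why is the sky blue?")
    print("Async Invoke Result:", result_ainvoke.content)  # stand-in print

    # Async stream
    print("\nAsync Stream Result:")
    async for chunk in llm.astream(
        "Write a short poem about asynchronous programming."
    ):
        print(chunk.content, end="", flush=True)
    print("\n")

    # Async batch
    results_abatch = await llm.abatch(["What is 1+1?", "What is 2+2?"])
    print("Async Batch Results:", [res.content for res in results_abatch])


asyncio.run(run_async_calls())  # the notebook awaits run_async_calls() directly
```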
@@ -274,12 +274,14 @@
 ")\n",
 "\n",
 "q_embed = query_embeddings.embed_query(\"What is the capital of France?\")\n",
-"d_embed = doc_embeddings.embed_documents([\"The capital of France is Paris.\", \"Philipp is likes to eat pizza.\"])\n",
+"d_embed = doc_embeddings.embed_documents(\n",
+"    [\"The capital of France is Paris.\", \"Philipp is likes to eat pizza.\"]\n",
+")\n",
 "\n",
 "for i, d in enumerate(d_embed):\n",
 "    print(f\"Document {i+1}:\")\n",
 "    print(f\"Cosine similarity with query: {cosine_similarity([q_embed], [d])[0][0]}\")\n",
-"    print(\"---\")\n"
+"    print(\"---\")"
 ]
 },
 {
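This last hunk belongs to the embeddings notebook touched by the same commit. The cell's setup sits outside the hunk, so the constructor arguments below (model name and task types) are assumptions suggested by the variable names; `cosine_similarity` is scikit-learn's:

```python
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from sklearn.metrics.pairwise import cosine_similarity

# Assumed setup: task-specific embedders for query vs. document text.
query_embeddings = GoogleGenerativeAIEmbeddings(
    model="models/embedding-001", task_type="RETRIEVAL_QUERY"
)
doc_embeddings = GoogleGenerativeAIEmbeddings(
    model="models/embedding-001", task_type="RETRIEVAL_DOCUMENT"
)

q_embed = query_embeddings.embed_query("What is the capital of France?")
d_embed = doc_embeddings.embed_documents(
    ["The capital of France is Paris.", "Philipp is likes to eat pizza."]
)

for i, d in enumerate(d_embed):
    print(f"Document {i + 1}:")
    print(f"Cosine similarity with query: {cosine_similarity([q_embed], [d])[0][0]}")
    print("---")
```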