Mirror of https://github.com/hwchase17/langchain.git
(synced 2026-03-16 01:53:24 +00:00)
fix(openai): add test for CSV and accommodate breaking changes in file url inputs (#35454)
This commit is contained in:
@@ -1,5 +1,6 @@
|
||||
"""Test Responses API usage."""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
from typing import Annotated, Any, Literal, cast
|
||||
@@ -1200,3 +1201,69 @@ def test_compaction_streaming(output_version: Literal["responses/v1", "v1"]) ->
|
||||
messages.append(third_message)
|
||||
third_response = llm.invoke(messages)
|
||||
assert third_response.text
|
||||
|
||||
|
||||
def test_csv_input() -> None:
    """Test CSV file input with both LangChain standard and OpenAI native formats.

    Builds a small in-memory CSV with three rows, sends it to the model in each
    of the two supported content-block shapes, and checks that:

    1. ``_get_request_payload`` converts the block to an OpenAI ``input_file``
       content part, and
    2. the model's answer mentions the row count ("3" or "three").

    Requires a live OpenAI connection (``llm.invoke``); ``MODEL_NAME``,
    ``ChatOpenAI``, and ``AIMessage`` come from this module's imports.
    """
    # Create sample CSV content: three people, so the expected answer is "3".
    csv_content = (
        "name,age,city\nAlice,30,New York\nBob,25,Los Angeles\nCarol,35,Chicago"
    )
    csv_bytes = csv_content.encode("utf-8")
    base64_string = base64.b64encode(csv_bytes).decode("utf-8")

    llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True)

    def _assert_csv_roundtrip(message: dict) -> None:
        # Shared check for both input formats: payload conversion + live answer.
        payload = llm._get_request_payload([message])
        # content[0] is the text prompt; content[1] is the file block.
        block = payload["input"][0]["content"][1]
        assert block["type"] == "input_file"

        response = llm.invoke([message])
        assert isinstance(response, AIMessage)
        assert response.content
        # Accept either the digit or the spelled-out number.
        text = str(response.content).lower()
        assert "3" in text or "three" in text

    # Test LangChain standard format
    _assert_csv_roundtrip(
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "How many people are in this CSV file?",
                },
                {
                    "type": "file",
                    "base64": base64_string,
                    "mime_type": "text/csv",
                    "filename": "people.csv",
                },
            ],
        }
    )

    # Test OpenAI native format
    _assert_csv_roundtrip(
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "How many people are in this CSV file?",
                },
                {
                    "type": "input_file",
                    "filename": "people.csv",
                    "file_data": f"data:text/csv;base64,{base64_string}",
                },
            ],
        }
    )
|
||||
|
||||
@@ -59,7 +59,7 @@ class TestOpenAIResponses(TestOpenAIStandard):
|
||||
"""Test that the model can process PDF inputs."""
|
||||
super().test_openai_pdf_inputs(model)
|
||||
# Responses API additionally supports files via URL
|
||||
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
|
||||
url = "https://www.berkshirehathaway.com/letters/2024ltr.pdf"
|
||||
|
||||
message = HumanMessage(
|
||||
[
|
||||
|
||||
Reference in New Issue
Block a user