Chester Curme 2025-07-10 14:34:39 -04:00
parent 36dbb20da7
commit 20a6bdf510

@@ -370,21 +370,26 @@ def test_computer_calls() -> None:
 def test_file_search() -> None:
     pytest.skip() # TODO: set up infra
-    llm = ChatOpenAI(model=MODEL_NAME)
+    llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True)
     tool = {
         "type": "file_search",
         "vector_store_ids": [os.environ["OPENAI_VECTOR_STORE_ID"]],
     }
-    response = llm.invoke("What is deep research by OpenAI?", tools=[tool])
+    input_message = {"role": "user", "content": "What is deep research by OpenAI?"}
+    response = llm.invoke([input_message], tools=[tool])
     _check_response(response)
     full: Optional[BaseMessageChunk] = None
-    for chunk in llm.stream("What is deep research by OpenAI?", tools=[tool]):
+    for chunk in llm.stream([input_message], tools=[tool]):
         assert isinstance(chunk, AIMessageChunk)
         full = chunk if full is None else full + chunk
     assert isinstance(full, AIMessageChunk)
     _check_response(full)
+    next_message = {"role": "user", "content": "Thank you."}
+    _ = llm.invoke([input_message, full, next_message])


 @pytest.mark.default_cassette("test_stream_reasoning_summary.yaml.gz")
 @pytest.mark.vcr
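
For context, a minimal sketch of the pattern this commit adopts: built-in tools such as file_search are passed as plain dicts, inputs are sent as message dicts rather than bare strings, and the assistant's reply is threaded back into the history for a follow-up turn. The model name below is illustrative, and OPENAI_VECTOR_STORE_ID is assumed to reference an existing vector store:

import os

from langchain_openai import ChatOpenAI

# Route requests through OpenAI's Responses API, as the updated test does.
llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)

# Built-in file_search tool, bound to an existing vector store.
tool = {
    "type": "file_search",
    "vector_store_ids": [os.environ["OPENAI_VECTOR_STORE_ID"]],
}

# Inputs are message dicts rather than bare strings.
input_message = {"role": "user", "content": "What is deep research by OpenAI?"}
response = llm.invoke([input_message], tools=[tool])

# Passing the AIMessage back into the history preserves Responses API
# item references, so the follow-up turn can build on the tool output.
next_message = {"role": "user", "content": "Thank you."}
_ = llm.invoke([input_message, response, next_message])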