Skip to content

Commit 947fff8

Browse files
fix(prompt): omit tool_choice when prompt manages built-in tools (#2464)
Co-authored-by: Kazuhiro Sera <seratch@openai.com>
1 parent 0793a8c commit 947fff8

File tree

2 files changed

+81
-2
lines changed

2 files changed

+81
-2
lines changed

src/agents/models/openai_responses.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -272,9 +272,15 @@ async def _fetch_response(
272272
should_omit_model = prompt is not None and not self._model_is_explicit
273273
model_param: str | ChatModel | Omit = self.model if not should_omit_model else omit
274274
should_omit_tools = prompt is not None and len(converted_tools_payload) == 0
275+
# In prompt-managed tool flows without local tools payload, omit only named tool choices
276+
# that must match an explicit tool list. Keep control literals like "none"/"required".
277+
should_omit_tool_choice = should_omit_tools and isinstance(tool_choice, dict)
275278
tools_param: list[ToolParam] | Omit = (
276279
converted_tools_payload if not should_omit_tools else omit
277280
)
281+
tool_choice_param: response_create_params.ToolChoice | Omit = (
282+
tool_choice if not should_omit_tool_choice else omit
283+
)
278284

279285
include_set: set[str] = set(converted_tools.includes)
280286
if model_settings.response_include is not None:
@@ -301,7 +307,7 @@ async def _fetch_response(
301307
f"{input_json}\n"
302308
f"Tools:\n{tools_json}\n"
303309
f"Stream: {stream}\n"
304-
f"Tool choice: {tool_choice}\n"
310+
f"Tool choice: {tool_choice_param}\n"
305311
f"Response format: {response_format}\n"
306312
f"Previous response id: {previous_response_id}\n"
307313
f"Conversation id: {conversation_id}\n"
@@ -331,7 +337,7 @@ async def _fetch_response(
331337
top_p=self._non_null_or_omit(model_settings.top_p),
332338
truncation=self._non_null_or_omit(model_settings.truncation),
333339
max_output_tokens=self._non_null_or_omit(model_settings.max_tokens),
334-
tool_choice=tool_choice,
340+
tool_choice=tool_choice_param,
335341
parallel_tool_calls=parallel_tool_calls,
336342
stream=cast(Any, stream_param),
337343
extra_headers=self._merge_headers(model_settings),

tests/test_openai_responses.py

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -135,3 +135,76 @@ def __init__(self):
135135
)
136136

137137
assert called_kwargs["tools"] is omit
138+
139+
140+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_prompt_id_omits_tool_choice_when_no_tools_configured():
    """A prompt-managed run with no local tools must omit both `tools` and `tool_choice`.

    When a stored prompt manages the built-in tools, a named tool choice
    (here "web_search_preview") has no matching entry in the locally
    converted tool list, so the model should send neither parameter.
    """
    captured: dict[str, Any] = {}

    class _RecordingResponses:
        async def create(self, **kwargs):
            # Record exactly what the model would send to the Responses API.
            captured.clear()
            captured.update(kwargs)
            return get_response_obj([])

    class _RecordingClient:
        def __init__(self):
            self.responses = _RecordingResponses()

    model = OpenAIResponsesModel(
        model="gpt-4",
        openai_client=_RecordingClient(),  # type: ignore[arg-type]
        model_is_explicit=False,
    )

    await model.get_response(
        system_instructions=None,
        input="hi",
        model_settings=ModelSettings(tool_choice="web_search_preview"),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        prompt={"id": "pmpt_123"},
    )

    # A named tool choice requires an explicit tool list; with none, both go away.
    assert captured["tools"] is omit
    assert captured["tool_choice"] is omit
175+
176+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
@pytest.mark.parametrize("tool_choice", ["none", "required"])
async def test_prompt_id_keeps_literal_tool_choice_without_local_tools(tool_choice: str):
    """Control literals like "none"/"required" survive prompt-managed tool flows.

    Unlike named tool choices, these literals do not reference a specific
    tool, so they are forwarded unchanged even when `tools` is omitted.
    """
    captured: dict[str, Any] = {}

    class _RecordingResponses:
        async def create(self, **kwargs):
            # Record exactly what the model would send to the Responses API.
            captured.clear()
            captured.update(kwargs)
            return get_response_obj([])

    class _RecordingClient:
        def __init__(self):
            self.responses = _RecordingResponses()

    model = OpenAIResponsesModel(
        model="gpt-4",
        openai_client=_RecordingClient(),  # type: ignore[arg-type]
        model_is_explicit=False,
    )

    await model.get_response(
        system_instructions=None,
        input="hi",
        model_settings=ModelSettings(tool_choice=tool_choice),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        prompt={"id": "pmpt_123"},
    )

    # Tools are still omitted, but the literal tool_choice is passed through.
    assert captured["tools"] is omit
    assert captured["tool_choice"] == tool_choice

0 commit comments

Comments (0)