Skip to content

Commit f84ef7f

Browse files
authored
fix: normalize compacted Responses user inputs before session reuse (#2925)
1 parent b7ba446 commit f84ef7f

File tree

2 files changed

+298
-14
lines changed

2 files changed

+298
-14
lines changed

src/agents/memory/openai_responses_compaction_session.py

Lines changed: 98 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,11 @@
22

33
import logging
44
from collections.abc import Callable
5-
from typing import TYPE_CHECKING, Any, Literal
5+
from typing import TYPE_CHECKING, Any, Literal, cast
66

77
from openai import AsyncOpenAI
88

9+
from ..items import TResponseInputItem
910
from ..models._openai_shared import get_default_openai_client
1011
from ..run_internal.items import normalize_input_items_for_api
1112
from .openai_conversations_session import OpenAIConversationsSession
@@ -16,7 +17,6 @@
1617
)
1718

1819
if TYPE_CHECKING:
19-
from ..items import TResponseInputItem
2020
from .session import Session
2121

2222
logger = logging.getLogger("openai-agents.openai.compaction")
@@ -213,19 +213,8 @@ async def run_compaction(self, args: OpenAIResponsesCompactionArgs | None = None
213213

214214
compacted = await self.client.responses.compact(**compact_kwargs)
215215

216+
output_items = _normalize_compaction_output_items(compacted.output or [])
216217
await self.underlying_session.clear_session()
217-
output_items: list[TResponseInputItem] = []
218-
if compacted.output:
219-
for item in compacted.output:
220-
if isinstance(item, dict):
221-
output_items.append(item)
222-
else:
223-
# Suppress Pydantic literal warnings: responses.compact can return
224-
# user-style input_text content inside ResponseOutputMessage.
225-
output_items.append(
226-
item.model_dump(exclude_unset=True, warnings=False) # type: ignore
227-
)
228-
229218
output_items = _strip_orphaned_assistant_ids(output_items)
230219

231220
if output_items:
@@ -339,6 +328,101 @@ def _strip_orphaned_assistant_ids(
339328
return cleaned
340329

341330

331+
def _normalize_compaction_output_items(items: list[Any]) -> list[TResponseInputItem]:
    """Normalize compacted output into replay-safe Responses input items.

    Dict items pass through as-is; model objects are dumped to dicts first.
    User messages get their content parts normalized so they can be replayed
    as Responses API input; everything else is forwarded unchanged.
    """
    normalized: list[TResponseInputItem] = []
    for raw in items:
        if isinstance(raw, dict):
            as_dict = raw
        else:
            # Suppress Pydantic literal warnings: responses.compact can return
            # user-style input_text content inside ResponseOutputMessage.
            as_dict = raw.model_dump(exclude_unset=True, warnings=False)

        is_user_message = (
            isinstance(as_dict, dict)
            and as_dict.get("type") == "message"
            and as_dict.get("role") == "user"
        )
        if is_user_message:
            normalized.append(_normalize_compaction_user_message(as_dict))
        else:
            normalized.append(cast(TResponseInputItem, as_dict))
    return normalized
352+
353+
354+
def _normalize_compaction_user_message(item: dict[str, Any]) -> TResponseInputItem:
    """Normalize compacted user message content before it is reused as input.

    Only list-shaped content is rewritten; any other content shape is
    returned untouched. Image and file parts are routed to their dedicated
    normalizers, all other parts pass through unchanged.
    """
    content = item.get("content")
    if not isinstance(content, list):
        return cast(TResponseInputItem, item)

    rewritten: list[Any] = []
    for part in content:
        if isinstance(part, dict):
            part_type = part.get("type")
            if part_type == "input_image":
                part = _normalize_compaction_input_image(part)
            elif part_type == "input_file":
                part = _normalize_compaction_input_file(part)
        rewritten.append(part)

    # Copy the message so the caller's original dict is never mutated.
    replacement = {**item, "content": rewritten}
    return cast(TResponseInputItem, replacement)
377+
378+
379+
def _normalize_compaction_input_image(content_item: dict[str, Any]) -> dict[str, Any]:
380+
"""Return a valid replay shape for a compacted Responses image input."""
381+
normalized = {"type": "input_image"}
382+
383+
image_url = content_item.get("image_url")
384+
file_id = content_item.get("file_id")
385+
if isinstance(image_url, str) and image_url:
386+
normalized["image_url"] = image_url
387+
elif isinstance(file_id, str) and file_id:
388+
normalized["file_id"] = file_id
389+
else:
390+
raise ValueError("Compaction input_image item missing image_url or file_id.")
391+
392+
detail = content_item.get("detail")
393+
if isinstance(detail, str) and detail:
394+
normalized["detail"] = detail
395+
396+
return normalized
397+
398+
399+
def _normalize_compaction_input_file(content_item: dict[str, Any]) -> dict[str, Any]:
400+
"""Return a valid replay shape for a compacted Responses file input."""
401+
normalized = {"type": "input_file"}
402+
403+
file_data = content_item.get("file_data")
404+
file_url = content_item.get("file_url")
405+
file_id = content_item.get("file_id")
406+
if isinstance(file_data, str) and file_data:
407+
normalized["file_data"] = file_data
408+
elif isinstance(file_url, str) and file_url:
409+
normalized["file_url"] = file_url
410+
elif isinstance(file_id, str) and file_id:
411+
normalized["file_id"] = file_id
412+
else:
413+
raise ValueError("Compaction input_file item missing file_data, file_url, or file_id.")
414+
415+
filename = content_item.get("filename")
416+
if isinstance(filename, str) and filename:
417+
normalized["filename"] = filename
418+
419+
detail = content_item.get("detail")
420+
if isinstance(detail, str) and detail:
421+
normalized["detail"] = detail
422+
423+
return normalized
424+
425+
342426
def _normalize_compaction_session_items(
343427
items: list[TResponseInputItem],
344428
) -> list[TResponseInputItem]:

tests/memory/test_openai_responses_compaction_session.py

Lines changed: 200 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -544,6 +544,206 @@ def model_dump(
544544
model="gpt-4.1",
545545
)
546546

547+
    @pytest.mark.asyncio
    async def test_run_compaction_normalizes_compacted_user_image_messages(self) -> None:
        """Compacted user image content is cleaned before being stored back.

        The compact endpoint echoes an input_image part with an explicit
        ``"file_id": None``; the normalizer must strip that null field while
        keeping ``image_url`` and ``detail`` intact.
        """
        mock_session = self.create_mock_session()
        mock_session.get_items.return_value = []

        # Simulated responses.compact output containing a user message whose
        # image part carries a null file_id alongside a valid image_url.
        mock_compact_response = MagicMock()
        mock_compact_response.output = [
            {
                "type": "message",
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "analyze this input"},
                    {
                        "type": "input_image",
                        "image_url": "https://example.com/image.png",
                        "file_id": None,
                        "detail": "auto",
                    },
                ],
            }
        ]

        mock_client = MagicMock()
        mock_client.responses.compact = AsyncMock(return_value=mock_compact_response)

        session = OpenAIResponsesCompactionSession(
            session_id="test",
            underlying_session=mock_session,
            client=mock_client,
            compaction_mode="input",
        )

        await session.run_compaction({"force": True, "compaction_mode": "input"})

        # The items written back to the underlying session must omit the
        # null file_id so they replay cleanly as Responses API input.
        stored_items = mock_session.add_items.call_args[0][0]
        assert stored_items == [
            {
                "type": "message",
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "analyze this input"},
                    {
                        "type": "input_image",
                        "image_url": "https://example.com/image.png",
                        "detail": "auto",
                    },
                ],
            }
        ]
596+
597+
    @pytest.mark.asyncio
    async def test_run_compaction_normalizes_compacted_user_file_messages(self) -> None:
        """Compacted user file content is cleaned before being stored back.

        The compact endpoint echoes an input_file part with an explicit
        ``"file_id": None``; the normalizer must strip that null field while
        keeping ``file_url``, ``filename``, and ``detail`` intact.
        """
        mock_session = self.create_mock_session()
        mock_session.get_items.return_value = []

        # Simulated responses.compact output containing a user message whose
        # file part carries a null file_id alongside a valid file_url.
        mock_compact_response = MagicMock()
        mock_compact_response.output = [
            {
                "type": "message",
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "analyze this input"},
                    {
                        "type": "input_file",
                        "file_url": "https://example.com/report.pdf",
                        "file_id": None,
                        "filename": "report.pdf",
                        "detail": "high",
                    },
                ],
            }
        ]

        mock_client = MagicMock()
        mock_client.responses.compact = AsyncMock(return_value=mock_compact_response)

        session = OpenAIResponsesCompactionSession(
            session_id="test",
            underlying_session=mock_session,
            client=mock_client,
            compaction_mode="input",
        )

        await session.run_compaction({"force": True, "compaction_mode": "input"})

        # The stored item must omit the null file_id but keep the metadata.
        stored_items = mock_session.add_items.call_args[0][0]
        assert stored_items == [
            {
                "type": "message",
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "analyze this input"},
                    {
                        "type": "input_file",
                        "file_url": "https://example.com/report.pdf",
                        "filename": "report.pdf",
                        "detail": "high",
                    },
                ],
            }
        ]
648+
649+
    @pytest.mark.asyncio
    async def test_run_compaction_normalizes_file_id_inputs_and_preserves_metadata(self) -> None:
        """A file referenced by file_id keeps its id and metadata after cleanup.

        Mirror case of the file_url test: here ``file_url`` is the explicit
        null and ``file_id`` is the populated source field.
        """
        mock_session = self.create_mock_session()
        mock_session.get_items.return_value = []

        # Simulated responses.compact output: the file part is addressed by
        # file_id, with a null file_url plus filename/detail metadata.
        mock_compact_response = MagicMock()
        mock_compact_response.output = [
            {
                "type": "message",
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "analyze this input"},
                    {
                        "type": "input_file",
                        "file_id": "file_123",
                        "file_url": None,
                        "filename": "report.pdf",
                        "detail": "low",
                    },
                ],
            }
        ]

        mock_client = MagicMock()
        mock_client.responses.compact = AsyncMock(return_value=mock_compact_response)

        session = OpenAIResponsesCompactionSession(
            session_id="test",
            underlying_session=mock_session,
            client=mock_client,
            compaction_mode="input",
        )

        await session.run_compaction({"force": True, "compaction_mode": "input"})

        # The stored item must drop the null file_url and preserve file_id,
        # filename, and detail.
        stored_items = mock_session.add_items.call_args[0][0]
        assert stored_items == [
            {
                "type": "message",
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "analyze this input"},
                    {
                        "type": "input_file",
                        "file_id": "file_123",
                        "filename": "report.pdf",
                        "detail": "low",
                    },
                ],
            }
        ]
700+
701+
    @pytest.mark.asyncio
    async def test_run_compaction_preserves_history_when_output_normalization_fails(self) -> None:
        """Normalization failures must not destroy the existing session history.

        The compacted output contains an input_image part with no image_url
        and no file_id, which the normalizer rejects with ValueError. Because
        normalization happens before the session is cleared, the original
        history must still be readable afterwards.
        """
        history = [
            {
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "hello"}],
            },
            {
                "type": "message",
                "role": "assistant",
                "status": "completed",
                "content": [{"type": "output_text", "text": "world"}],
            },
        ]
        underlying = SimpleListSession(history=cast(list[TResponseInputItem], history))

        # Simulated responses.compact output with an unrecoverable image part:
        # neither image_url nor file_id is present.
        mock_compact_response = MagicMock()
        mock_compact_response.output = [
            {
                "type": "message",
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "hello"},
                    {"type": "input_image", "detail": "auto"},
                ],
            }
        ]

        mock_client = MagicMock()
        mock_client.responses.compact = AsyncMock(return_value=mock_compact_response)

        session = OpenAIResponsesCompactionSession(
            session_id="test",
            underlying_session=underlying,
            client=mock_client,
            compaction_mode="input",
        )

        with pytest.raises(
            ValueError, match="Compaction input_image item missing image_url or file_id."
        ):
            await session.run_compaction({"force": True, "compaction_mode": "input"})

        # History is intact: the failure occurred before clear_session ran.
        assert await session.get_items() == history
746+
547747
@pytest.mark.asyncio
548748
async def test_compaction_runs_during_runner_flow(self) -> None:
549749
"""Ensure Runner triggers compaction when using a compaction-aware session."""

0 commit comments

Comments
 (0)