from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Any, Callable, Literal

from openai import AsyncOpenAI

from ..models._openai_shared import get_default_openai_client
from ..run_internal.items import normalize_input_items_for_api
from .openai_conversations_session import OpenAIConversationsSession
from .session import (
    OpenAIResponsesCompactionArgs,
    OpenAIResponsesCompactionAwareSession,
    SessionABC,
    SessionHistoryRewriteArgs,
    apply_session_history_mutations,
    is_session_history_rewrite_aware_session,
)

if TYPE_CHECKING:
    from ..items import TResponseInputItem
    from .session import Session

logger = logging.getLogger("openai-agents.openai.compaction")

DEFAULT_COMPACTION_THRESHOLD = 10

OpenAIResponsesCompactionMode = Literal["previous_response_id", "input", "auto"]


def _is_user_message_item(item: TResponseInputItem) -> bool:
    if not isinstance(item, dict):
        return False
    if item.get("type") == "message":
        return item.get("role") == "user"
    return item.get("role") == "user" and "content" in item


def select_compaction_candidate_items(
    items: list[TResponseInputItem],
) -> list[TResponseInputItem]:
    """Select compaction candidate items.

    Excludes user messages and compaction items.
    """
    return [
        item
        for item in items
        if not (
            _is_user_message_item(item)
            or (isinstance(item, dict) and item.get("type") == "compaction")
        )
    ]
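

# Illustrative example (not part of this module): user messages and prior "compaction"
# items are excluded, so only assistant/tool output counts toward the trigger threshold.
#
#     history = [
#         {"role": "user", "content": "hi"},
#         {"type": "message", "role": "assistant", "content": "hello"},
#         {"type": "compaction", "content": "..."},
#     ]
#     select_compaction_candidate_items(history)  # -> [the assistant message only]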


def default_should_trigger_compaction(context: dict[str, Any]) -> bool:
    """Default decision: compact when >= 10 candidate items exist."""
    return len(context["compaction_candidate_items"]) >= DEFAULT_COMPACTION_THRESHOLD
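

# Illustrative example (not part of this module): a custom decision hook that compacts
# only once the candidate list grows past 50 items. The context dict passed to the hook
# carries "response_id", "compaction_mode", "compaction_candidate_items", and
# "session_items", as built in OpenAIResponsesCompactionSession.run_compaction below.
#
#     def compact_after_fifty(context: dict[str, Any]) -> bool:
#         return len(context["compaction_candidate_items"]) >= 50
#
# Pass it as should_trigger_compaction= when constructing the session.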


def is_openai_model_name(model: str) -> bool:
    """Validate model name follows OpenAI conventions."""
    trimmed = model.strip()
    if not trimmed:
        return False
    # Handle fine-tuned models: ft:gpt-4.1:org:proj:suffix
    without_ft_prefix = trimmed[3:] if trimmed.startswith("ft:") else trimmed
    root = without_ft_prefix.split(":", 1)[0]
    # Allow gpt-* and o* models
    if root.startswith("gpt-"):
        return True
    if root.startswith("o") and root[1:2].isdigit():
        return True
    return False
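

# For reference, the check above accepts names such as "gpt-4.1", "o3-mini", and
# "ft:gpt-4.1:org:proj:suffix", and rejects non-OpenAI names such as "claude-3".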


class OpenAIResponsesCompactionSession(SessionABC, OpenAIResponsesCompactionAwareSession):
    """Session decorator that triggers responses.compact when stored history grows.

    Works with OpenAI Responses API models only. Wraps any Session (except
    OpenAIConversationsSession) and automatically calls the OpenAI responses.compact
    API after each turn when the decision hook returns True.
    """

    def __init__(
        self,
        session_id: str,
        underlying_session: Session,
        *,
        client: AsyncOpenAI | None = None,
        model: str = "gpt-4.1",
        compaction_mode: OpenAIResponsesCompactionMode = "auto",
        should_trigger_compaction: Callable[[dict[str, Any]], bool] | None = None,
    ):
        """Initialize the compaction session.

        Args:
            session_id: Identifier for this session.
            underlying_session: Session store that holds the compacted history. Cannot be
                OpenAIConversationsSession.
            client: OpenAI client for responses.compact API calls. Defaults to
                get_default_openai_client() or a new AsyncOpenAI().
            model: Model to use for responses.compact. Defaults to "gpt-4.1". Must be an
                OpenAI model name (gpt-*, o*, or ft:gpt-*).
            compaction_mode: Controls how the compaction request provides conversation
                history. "auto" (default) sends the full input when the last response was
                not stored or no response_id is available, and otherwise reuses
                previous_response_id.
            should_trigger_compaction: Custom decision hook. Defaults to triggering when
                10+ compaction candidates exist.
        """
        if isinstance(underlying_session, OpenAIConversationsSession):
            raise ValueError(
                "OpenAIResponsesCompactionSession cannot wrap OpenAIConversationsSession "
                "because it manages its own history on the server."
            )
        if not is_openai_model_name(model):
            raise ValueError(f"Unsupported model for OpenAI responses compaction: {model}")
        self.session_id = session_id
        self.underlying_session = underlying_session
        self._client = client
        self.model = model
        self.compaction_mode = compaction_mode
        self.should_trigger_compaction = (
            should_trigger_compaction or default_should_trigger_compaction
        )
        # Caches for incremental candidate tracking.
        self._compaction_candidate_items: list[TResponseInputItem] | None = None
        self._session_items: list[TResponseInputItem] | None = None
        self._response_id: str | None = None
        self._deferred_response_id: str | None = None
        self._last_store: bool | None = None
        self._has_pending_local_history_rewrite = False
        self._local_history_rewrite_response_id: str | None = None
        self._has_unacknowledged_local_session_adds = False

    @property
    def client(self) -> AsyncOpenAI:
        if self._client is None:
            self._client = get_default_openai_client() or AsyncOpenAI()
        return self._client

    def _resolve_compaction_mode(
        self,
        *,
        requested_mode: OpenAIResponsesCompactionMode,
        response_id: str | None,
        store: bool | None,
        turn_has_local_adds_without_new_response_id: bool,
    ) -> _ResolvedCompactionMode:
        resolved_mode = _resolve_compaction_mode(
            requested_mode,
            response_id=response_id,
            store=store,
        )
        if turn_has_local_adds_without_new_response_id and resolved_mode == "previous_response_id":
            self._has_unacknowledged_local_session_adds = False
            self._mark_local_history_rewrite()
            logger.debug(
                "compact: forcing input mode after local session delta without new response id"
            )
            return "input"
        if not self._has_pending_local_history_rewrite:
            return resolved_mode
        if resolved_mode == "previous_response_id":
            if self._local_history_rewrite_response_id is None and response_id is not None:
                self._local_history_rewrite_response_id = response_id
            logger.debug("compact: forcing input mode after local history rewrite")
            return "input"
        return resolved_mode

    def _resolve_store_tracking(
        self,
        *,
        response_id: str | None,
        previous_response_id: str | None,
        store: bool | None,
        store_was_provided: bool,
    ) -> bool | None:
        """Resolve the effective store setting for the current response id.

        Reuse `_last_store` only while compaction still refers to the same response. A new
        response id with no explicit `store` falls back to the Responses API default behavior.
        """
        if store_was_provided:
            self._last_store = store
            return store
        if response_id is not None and response_id != previous_response_id:
            self._last_store = None
            return None
        return self._last_store
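
    # For example (illustrative): two back-to-back compactions against the same response
    # id, with store=False supplied only on the first call, both resolve to store=False;
    # a new response id without an explicit store resets the tracking to None.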

    def _get_effective_store_for_response_id(
        self,
        *,
        response_id: str | None,
        store: bool | None,
    ) -> bool | None:
        """Return the effective store setting without mutating response tracking."""
        if store is not None:
            return store
        if response_id is not None and response_id != self._response_id:
            return None
        return self._last_store

    async def run_compaction(self, args: OpenAIResponsesCompactionArgs | None = None) -> None:
        """Run compaction using responses.compact API."""
        previous_response_id = self._response_id
        if args and args.get("response_id"):
            self._response_id = args["response_id"]
        requested_mode = args.get("compaction_mode") if args else None
        store_was_provided = bool(args and "store" in args)
        requested_store: bool | None = (
            args["store"] if args is not None and "store" in args else None
        )
        store = self._resolve_store_tracking(
            response_id=self._response_id,
            previous_response_id=previous_response_id,
            store=requested_store,
            store_was_provided=store_was_provided,
        )
        turn_has_local_adds_without_new_response_id = (
            self._has_unacknowledged_local_session_adds
            and (args is None or args.get("response_id") in {None, previous_response_id})
        )
        if (
            args
            and args.get("response_id") is not None
            and args["response_id"] != previous_response_id
        ):
            self._has_unacknowledged_local_session_adds = False
        resolved_mode = self._resolve_compaction_mode(
            response_id=self._response_id,
            store=store,
            requested_mode=requested_mode or self.compaction_mode,
            turn_has_local_adds_without_new_response_id=(
                turn_has_local_adds_without_new_response_id
            ),
        )
        if resolved_mode == "previous_response_id" and not self._response_id:
            raise ValueError(
                "OpenAIResponsesCompactionSession.run_compaction requires a response_id "
                "when using previous_response_id compaction."
            )
        compaction_candidate_items, session_items = await self._ensure_compaction_candidates()
        force = args.get("force", False) if args else False
        should_compact = force or self.should_trigger_compaction(
            {
                "response_id": self._response_id,
                "compaction_mode": resolved_mode,
                "compaction_candidate_items": compaction_candidate_items,
                "session_items": session_items,
            }
        )
        if not should_compact:
            logger.debug(
                f"skip: decision hook declined compaction for {self._response_id} "
                f"(mode={resolved_mode})"
            )
            return
        frontier_unresolved_function_calls = _find_frontier_unresolved_function_calls(session_items)
        if frontier_unresolved_function_calls:
            logger.debug(
                "compact: blocked unresolved function calls for %s: %s",
                self._response_id,
                frontier_unresolved_function_calls,
            )
            return
        self._deferred_response_id = None
        logger.debug(
            f"compact: start for {self._response_id} using {self.model} (mode={resolved_mode})"
        )
        compact_kwargs: dict[str, Any] = {"model": self.model}
        if resolved_mode == "previous_response_id":
            compact_kwargs["previous_response_id"] = self._response_id
        else:
            compact_kwargs["input"] = session_items
        compacted = await self.client.responses.compact(**compact_kwargs)
        await self.underlying_session.clear_session()
        output_items: list[TResponseInputItem] = []
        if compacted.output:
            for item in compacted.output:
                if isinstance(item, dict):
                    output_items.append(item)
                else:
                    # Suppress Pydantic literal warnings: responses.compact can return
                    # user-style input_text content inside ResponseOutputMessage.
                    output_items.append(
                        item.model_dump(exclude_unset=True, warnings=False)  # type: ignore
                    )
        output_items = _strip_orphaned_assistant_ids(output_items)
        if output_items:
            await self.underlying_session.add_items(output_items)
        self._compaction_candidate_items = select_compaction_candidate_items(output_items)
        self._session_items = output_items
        if resolved_mode == "input":
            self._clear_pending_local_history_rewrite()
        logger.debug(
            f"compact: done for {self._response_id} "
            f"(mode={resolved_mode}, output={len(output_items)}, "
            f"candidates={len(self._compaction_candidate_items)})"
        )
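
    # Illustrative call (not part of this module): force compaction regardless of the
    # decision hook, anchored to the id of the most recent stored response.
    #
    #     await session.run_compaction({"response_id": response.id, "force": True})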

    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
        return await self.underlying_session.get_items(limit)

    async def apply_history_mutations(self, args: SessionHistoryRewriteArgs) -> None:
        """Rewrite persisted history and keep compaction caches aligned with the new transcript."""
        mutations = list(args.get("mutations", []))
        if not mutations:
            return
        if is_session_history_rewrite_aware_session(self.underlying_session):
            await self.underlying_session.apply_history_mutations({"mutations": mutations})
            await self._refresh_caches_from_underlying_session()
            self._mark_local_history_rewrite()
            return
        rewritten_items = apply_session_history_mutations(
            await self.underlying_session.get_items(),
            mutations,
        )
        await self.underlying_session.clear_session()
        if rewritten_items:
            await self.underlying_session.add_items(rewritten_items)
        self._session_items = rewritten_items
        self._compaction_candidate_items = select_compaction_candidate_items(rewritten_items)
        self._mark_local_history_rewrite()

    async def _defer_compaction(self, response_id: str, store: bool | None = None) -> None:
        if self._deferred_response_id is not None:
            return
        compaction_candidate_items, session_items = await self._ensure_compaction_candidates()
        resolved_mode = _resolve_compaction_mode(
            self.compaction_mode,
            response_id=response_id,
            store=self._get_effective_store_for_response_id(response_id=response_id, store=store),
        )
        should_compact = self.should_trigger_compaction(
            {
                "response_id": response_id,
                "compaction_mode": resolved_mode,
                "compaction_candidate_items": compaction_candidate_items,
                "session_items": session_items,
            }
        )
        if should_compact:
            self._deferred_response_id = response_id

    def _get_deferred_compaction_response_id(self) -> str | None:
        return self._deferred_response_id

    def _clear_deferred_compaction(self) -> None:
        self._deferred_response_id = None

    async def add_items(self, items: list[TResponseInputItem]) -> None:
        if not items:
            return
        await self.underlying_session.add_items(items)
        self._has_unacknowledged_local_session_adds = True
        if self._compaction_candidate_items is not None:
            new_items = _normalize_compaction_session_items(items)
            new_candidates = select_compaction_candidate_items(new_items)
            if new_candidates:
                self._compaction_candidate_items.extend(new_candidates)
        if self._session_items is not None:
            self._session_items.extend(_normalize_compaction_session_items(items))

    async def pop_item(self) -> TResponseInputItem | None:
        popped = await self.underlying_session.pop_item()
        if popped:
            self._compaction_candidate_items = None
            self._session_items = None
        return popped

    async def clear_session(self) -> None:
        await self.underlying_session.clear_session()
        self._compaction_candidate_items = []
        self._session_items = []
        self._deferred_response_id = None
        self._has_pending_local_history_rewrite = False
        self._local_history_rewrite_response_id = None
        self._has_unacknowledged_local_session_adds = False
        self._last_store = None

    async def _refresh_caches_from_underlying_session(self) -> None:
        history = await self.underlying_session.get_items()
        self._session_items = history
        self._compaction_candidate_items = select_compaction_candidate_items(history)

    async def _ensure_compaction_candidates(
        self,
    ) -> tuple[list[TResponseInputItem], list[TResponseInputItem]]:
        """Lazy-load and cache compaction candidates."""
        if self._compaction_candidate_items is not None and self._session_items is not None:
            return (self._compaction_candidate_items[:], self._session_items[:])
        history = _normalize_compaction_session_items(await self.underlying_session.get_items())
        candidates = select_compaction_candidate_items(history)
        self._compaction_candidate_items = candidates
        self._session_items = history
        logger.debug(
            f"candidates: initialized (history={len(history)}, candidates={len(candidates)})"
        )
        return (candidates[:], history[:])

    def _mark_local_history_rewrite(self) -> None:
        self._has_pending_local_history_rewrite = True
        self._local_history_rewrite_response_id = self._response_id

    def _clear_pending_local_history_rewrite(self) -> None:
        self._has_pending_local_history_rewrite = False
        self._local_history_rewrite_response_id = None


def _strip_orphaned_assistant_ids(
    items: list[TResponseInputItem],
) -> list[TResponseInputItem]:
    """Remove ``id`` from assistant messages when their paired reasoning items are missing.

    Some models (e.g. gpt-5.4) return compacted output that retains assistant
    message IDs even after stripping the reasoning items those IDs reference.
    Sending these orphaned IDs back to ``responses.create`` causes a 400 error
    because the API expects the paired reasoning item for each assistant message
    ID. This function detects and removes those orphaned IDs so the compacted
    history can be used safely.
    """
    if not items:
        return items
    has_reasoning = any(
        isinstance(item, dict) and item.get("type") == "reasoning" for item in items
    )
    if has_reasoning:
        return items
    cleaned: list[TResponseInputItem] = []
    for item in items:
        if isinstance(item, dict) and item.get("role") == "assistant" and "id" in item:
            item = {k: v for k, v in item.items() if k != "id"}  # type: ignore[assignment]
        cleaned.append(item)
    return cleaned


def _normalize_compaction_session_items(
    items: list[TResponseInputItem],
) -> list[TResponseInputItem]:
    """Normalize compaction input so SDK-only metadata never reaches responses.compact."""
    return normalize_input_items_for_api(list(items))


_ResolvedCompactionMode = Literal["previous_response_id", "input"]


def _find_frontier_unresolved_function_calls(items: list[TResponseInputItem]) -> list[str]:
    """Return unresolved function-call ids that remain in the active conversation frontier.

    Once a later user message appears, earlier unresolved tool calls are considered abandoned and
    should no longer block future compaction for the session.
    """
    function_call_indices: dict[str, int] = {}
    resolved_call_ids: set[str] = set()
    last_user_message_index = -1
    for index, item in enumerate(items):
        if _is_user_message_item(item):
            last_user_message_index = index
        if isinstance(item, dict):
            item_type = item.get("type")
            call_id = item.get("call_id")
        else:
            item_type = getattr(item, "type", None)
            call_id = getattr(item, "call_id", None)
        if not isinstance(call_id, str):
            continue
        if item_type == "function_call":
            function_call_indices[call_id] = index
        elif item_type == "function_call_output":
            resolved_call_ids.add(call_id)
    return [
        call_id
        for call_id, index in function_call_indices.items()
        if call_id not in resolved_call_ids and index > last_user_message_index
    ]
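

# Illustrative example (not part of this module): the call below returns ["call_2"].
# call_1 is resolved by its output, and call_0, although unresolved, precedes a later
# user message, so it is treated as abandoned rather than blocking compaction.
#
#     _find_frontier_unresolved_function_calls([
#         {"type": "function_call", "call_id": "call_0"},
#         {"role": "user", "content": "never mind"},
#         {"type": "function_call", "call_id": "call_1"},
#         {"type": "function_call_output", "call_id": "call_1"},
#         {"type": "function_call", "call_id": "call_2"},
#     ])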


def _resolve_compaction_mode(
    requested_mode: OpenAIResponsesCompactionMode,
    *,
    response_id: str | None,
    store: bool | None,
) -> _ResolvedCompactionMode:
    if requested_mode != "auto":
        return requested_mode
    if store is False:
        return "input"
    if not response_id:
        return "input"
    return "previous_response_id"