Skip to content

Commit 195b53d

Browse files
authored
refactor: Extract duplicate Gemini tool call ID cleanup logic (#2438)
1 parent a86b8ac commit 195b53d

File tree

3 files changed

+25
-13
lines changed

3 files changed

+25
-13
lines changed

src/agents/extensions/models/litellm_model.py

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@
4242
from ...logger import logger
4343
from ...model_settings import ModelSettings
4444
from ...models.chatcmpl_converter import Converter
45-
from ...models.chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE
45+
from ...models.chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE, ChatCmplHelpers
4646
from ...models.chatcmpl_stream_handler import ChatCmplStreamHandler
4747
from ...models.fake_id import FAKE_RESPONSES_ID
4848
from ...models.interface import Model, ModelTracing
@@ -819,12 +819,7 @@ def convert_tool_call_to_openai(
819819
) -> ChatCompletionMessageFunctionToolCall:
820820
# Clean up litellm's addition of __thought__ suffix to tool_call.id for
821821
# Gemini models. See: https://github.com/BerriAI/litellm/pull/16895
822-
# This suffix is redundant since we can get thought_signature from
823-
# provider_specific_fields, and this hack causes validation errors when
824-
# cross-model passing to other models.
825-
tool_call_id = tool_call.id
826-
if model and "gemini" in model.lower() and "__thought__" in tool_call_id:
827-
tool_call_id = tool_call_id.split("__thought__")[0]
822+
tool_call_id = ChatCmplHelpers.clean_gemini_tool_call_id(tool_call.id, model)
828823

829824
# Convert litellm's tool call format to chat completion message format
830825
base_tool_call = ChatCompletionMessageFunctionToolCall(

src/agents/models/chatcmpl_helpers.py

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,3 +98,25 @@ def convert_logprobs_for_text_delta(
9898
)
9999
)
100100
return converted
101+
102+
@classmethod
103+
def clean_gemini_tool_call_id(cls, tool_call_id: str, model: str | None = None) -> str:
104+
"""Clean up litellm's __thought__ suffix from Gemini tool call IDs.
105+
106+
LiteLLM adds a "__thought__" suffix to Gemini tool call IDs to track thought
107+
signatures. This suffix is redundant since we can get thought_signature from
108+
provider_specific_fields, and this hack causes validation errors when cross-model
109+
passing to other models.
110+
111+
See: https://github.com/BerriAI/litellm/pull/16895
112+
113+
Args:
114+
tool_call_id: The tool call ID to clean.
115+
model: The model name (used to check if it's a Gemini model).
116+
117+
Returns:
118+
The cleaned tool call ID with "__thought__" suffix removed if present.
119+
"""
120+
if model and "gemini" in model.lower() and "__thought__" in tool_call_id:
121+
return tool_call_id.split("__thought__")[0]
122+
return tool_call_id

src/agents/models/chatcmpl_stream_handler.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -394,12 +394,7 @@ async def handle_stream(
394394
if tc_delta.id:
395395
# Clean up litellm's addition of __thought__ suffix to tool_call.id for
396396
# Gemini models. See: https://github.com/BerriAI/litellm/pull/16895
397-
# This suffix is redundant since we can get thought_signature from
398-
# provider_specific_fields, and this hack causes validation errors when
399-
# cross-model passing to other models.
400-
tool_call_id = tc_delta.id
401-
if model and "gemini" in model.lower() and "__thought__" in tool_call_id:
402-
tool_call_id = tool_call_id.split("__thought__")[0]
397+
tool_call_id = ChatCmplHelpers.clean_gemini_tool_call_id(tc_delta.id, model)
403398

404399
state.function_calls[tc_delta.index].call_id = tool_call_id
405400

0 commit comments

Comments (0)