Skip to content

Commit 3fc2f99

Browse files
committed
fix: honor bridge default backend when model is omitted
- derive the request model from the configured default backend
- add regression coverage for omitted-model requests against the HTTP server
1 parent e266a62 commit 3fc2f99

File tree

2 files changed

+59
-2
lines changed

2 files changed

+59
-2
lines changed

examples/subscription_bridge/server.py

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,19 @@ def resolve_backend(model: str | None, default_backend: str = "codex") -> str:
4343
return default_backend
4444

4545

46+
def default_model_for_backend(backend: str) -> str:
    """Return the fallback model identifier for *backend*.

    "claude" maps to its sonnet default; every other backend name falls
    through to the codex default.
    """
    claude_default = "claude/claude-sonnet-4-6"
    codex_default = "codex/gpt-5.4"
    return claude_default if backend == "claude" else codex_default


def resolve_request_model(payload: dict[str, Any], *, default_backend: str) -> str:
    """Pick the model for an incoming request payload.

    A non-empty string under the "model" key wins (surrounding whitespace
    stripped); otherwise the configured default backend's default model
    is used.
    """
    candidate = payload.get("model")
    if not isinstance(candidate, str):
        return default_model_for_backend(default_backend)
    candidate = candidate.strip()
    if candidate:
        return candidate
    return default_model_for_backend(default_backend)
57+
58+
4659
def _model_flag_value(model: str | None) -> str | None:
4760
if not model:
4861
return None
@@ -686,8 +699,9 @@ def do_GET(self) -> None: # noqa: N802
686699
def do_POST(self) -> None: # noqa: N802
687700
try:
688701
payload = self._read_json()
689-
model = str(payload.get("model") or "codex/gpt-5.4")
690-
backend = resolve_backend(model, default_backend=self.server.default_backend) # type: ignore[attr-defined]
702+
default_backend = self.server.default_backend # type: ignore[attr-defined]
703+
model = resolve_request_model(payload, default_backend=default_backend)
704+
backend = resolve_backend(model, default_backend=default_backend)
691705
workdir = Path(self.server.workdir) # type: ignore[attr-defined]
692706
request_id = f"bridge_{uuid.uuid4().hex}"
693707

tests/examples/test_subscription_bridge.py

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,9 @@
22

33
import json
44
import subprocess
5+
import threading
6+
import time
7+
import urllib.request
58
from pathlib import Path
69
from typing import Any, cast
710

@@ -16,6 +19,46 @@ def test_resolve_backend_prefers_model_prefix() -> None:
1619
assert server.resolve_backend("gpt-5.4", default_backend="codex") == "codex"
1720

1821

22+
def test_resolve_request_model_uses_backend_default_when_model_is_omitted() -> None:
    """An omitted model falls back to the default backend's model."""
    # No "model" key: each backend supplies its own default.
    expected_defaults = {
        "codex": "codex/gpt-5.4",
        "claude": "claude/claude-sonnet-4-6",
    }
    for backend, model in expected_defaults.items():
        assert server.resolve_request_model({}, default_backend=backend) == model
    # An explicit model always wins over the backend default.
    explicit = {"model": "claude/claude-sonnet-4-6"}
    assert (
        server.resolve_request_model(explicit, default_backend="codex")
        == "claude/claude-sonnet-4-6"
    )
29+
30+
31+
def test_http_server_uses_backend_default_model_when_request_omits_model(
    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
) -> None:
    """End-to-end: a POST without "model" resolves via the default backend."""

    def fake_run_backend(*, backend: str, prompt: str, model: str | None, workdir: Path) -> str:
        # Echo the dispatch arguments so the response body proves what ran.
        return f"backend={backend};model={model};workdir={workdir.name}"

    monkeypatch.setattr(server, "run_backend", fake_run_backend)

    httpd = server.make_server("127.0.0.1", 0, default_backend="claude", workdir=tmp_path)
    worker = threading.Thread(target=httpd.serve_forever, daemon=True)
    worker.start()
    time.sleep(0.05)  # brief pause so serve_forever reaches its accept loop
    try:
        port = httpd.server_address[1]
        body = json.dumps({"messages": [{"role": "user", "content": "Say hi."}]})
        request = urllib.request.Request(
            f"http://127.0.0.1:{port}/v1/chat/completions",
            data=body.encode("utf-8"),
            headers={"Content-Type": "application/json"},
        )
        with urllib.request.urlopen(request) as response:
            result = json.loads(response.read().decode("utf-8"))
    finally:
        httpd.shutdown()
        httpd.server_close()
        worker.join(timeout=2)

    assert result["model"] == "claude/claude-sonnet-4-6"
    content = result["choices"][0]["message"]["content"]
    assert content.startswith("backend=claude;model=claude/claude-sonnet-4-6")
60+
61+
1962
def test_build_chat_prompt_from_messages_preserves_roles() -> None:
2063
payload = {
2164
"messages": [

0 commit comments

Comments
 (0)