Skip to content

Commit e266a62

Browse files
committed
feat: add subscription bridge example harness
- add a documented subscription bridge example with local demo agent - verify Codex and Claude CLI-backed tool loops through the bridge - cover prompt shaping, structured output parsing, and demo helpers with tests - keep current limits explicit: non-streaming focus, partial Responses API parity, no handoff validation yet
1 parent da82b2c commit e266a62

File tree

6 files changed

+1423
-0
lines changed

6 files changed

+1423
-0
lines changed
Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
# Subscription bridge
2+
3+
This example shows how to drive `openai-agents-python` through a local OpenAI-compatible bridge that routes requests into the authenticated vendor CLIs instead of raw provider APIs:
4+
5+
- `codex/...` -> `codex exec`
6+
- `claude/...` or `anthropic/...` -> `claude -p`
7+
8+
This is useful when you want local agent loops to land on ChatGPT/Codex or Claude Max CLI-backed usage where supported.
9+
10+
Important limitation: this does not make arbitrary raw OpenAI or Anthropic API calls bill to those app plans. The working path is:
11+
12+
`openai-agents-python` -> local bridge -> `codex` / `claude` CLI
13+
14+
## What is included
15+
16+
- `server.py`
17+
- exposes `/health`, `/v1/chat/completions`, and `/v1/responses`
18+
- supports non-streaming text responses and structured tool-call loops
19+
- `demo_agent.py`
20+
- starts an embedded local bridge or connects to an existing one
21+
- runs a simple tool-using `Agent` through `OpenAIChatCompletionsModel`
22+
23+
## Quick start
24+
25+
Run the embedded demo with Codex:
26+
27+
```bash
28+
uv run --python 3.11 python examples/subscription_bridge/demo_agent.py --backend codex
29+
```
30+
31+
Run the embedded demo with Claude:
32+
33+
```bash
34+
uv run --python 3.11 python examples/subscription_bridge/demo_agent.py --backend claude
35+
```
36+
37+
Use an already-running bridge instead of starting an embedded one:
38+
39+
```bash
40+
uv run --python 3.11 python examples/subscription_bridge/server.py --backend codex --port 8787
41+
uv run --python 3.11 python examples/subscription_bridge/demo_agent.py --backend codex --base-url http://127.0.0.1:8787
42+
```
43+
44+
Override the model or prompt:
45+
46+
```bash
47+
uv run --python 3.11 python examples/subscription_bridge/demo_agent.py \
48+
--backend codex \
49+
--model codex/gpt-5.4 \
50+
--prompt "What is the weather in Tokyo?"
51+
```
52+
53+
## Expected behavior
54+
55+
The demo agent includes a simple `get_weather(city)` function tool. A working run should:
56+
57+
1. ask the bridge for the next assistant turn
58+
2. emit a tool call
59+
3. execute the local tool
60+
4. call the bridge again
61+
5. return plain final assistant text
62+
63+
Example final outputs observed during a local verification run (on the host "ATHAME"):
64+
65+
- Codex: `The weather in Tokyo is sunny and 72 F.`
66+
- Claude: `The weather in Tokyo is currently sunny with a temperature of 72°F.`
67+
68+
## Verification
69+
70+
Run the targeted tests:
71+
72+
```bash
73+
uv run --python 3.11 pytest tests/examples/test_subscription_bridge.py tests/examples/test_subscription_bridge_demo_agent.py -q
74+
```
75+
76+
## Current limits
77+
78+
Validated:
79+
- chat-completions-compatible tool loop through the bridge
80+
- Codex-backed local tool loop
81+
- Claude-backed local tool loop
82+
83+
Not yet validated:
84+
- streaming responses
85+
- full Responses API parity beyond the current minimal implementation
86+
- multi-agent handoffs and handoff semantics
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
from . import demo_agent
2+
from .server import main, make_server
3+
4+
__all__ = ["demo_agent", "main", "make_server"]
Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,138 @@
1+
from __future__ import annotations
2+
3+
import argparse
4+
import asyncio
5+
import threading
6+
import time
7+
from pathlib import Path
8+
from typing import Literal
9+
10+
from openai import AsyncOpenAI
11+
12+
from agents import Agent, OpenAIChatCompletionsModel, Runner, function_tool, set_tracing_disabled
13+
14+
try:
15+
from .server import make_server
16+
except ImportError: # pragma: no cover - script execution path
17+
import sys
18+
19+
sys.path.append(str(Path(__file__).resolve().parents[2]))
20+
from examples.subscription_bridge.server import make_server
21+
22+
Backend = Literal["codex", "claude"]
23+
24+
25+
def default_model_for_backend(backend: Backend) -> str:
    """Return the default bridge model identifier for *backend*.

    Unknown backend values fall back to the Codex default.
    """
    defaults = {"claude": "claude/claude-sonnet-4-6"}
    return defaults.get(backend, "codex/gpt-5.4")
29+
30+
31+
def resolve_model(backend: Backend, model: str | None) -> str:
    """Return *model* when it is a non-empty string, else the backend default."""
    if model:
        return model
    return default_model_for_backend(backend)
33+
34+
35+
def normalize_api_base_url(base_url: str) -> str:
    """Normalize *base_url* so it ends with exactly one ``/v1`` path segment.

    Trailing slashes are removed first, so ``http://h:1``, ``http://h:1/``,
    ``http://h:1/v1`` and ``http://h:1/v1/`` all normalize to ``http://h:1/v1``.
    """
    trimmed = base_url.rstrip("/")
    return trimmed if trimmed.endswith("/v1") else trimmed + "/v1"
40+
41+
42+
@function_tool
def get_weather(city: str) -> str:
    """Demo function tool: report a canned sunny forecast for *city*."""
    return "The weather in " + city + " is sunny and 72 F."
45+
46+
47+
async def run_demo(*, prompt: str, backend: Backend, model: str | None, api_base_url: str) -> str:
    """Run one tool-using agent turn through the bridge and return the final text.

    Tracing is disabled because requests go to a local bridge rather than a
    real OpenAI endpoint; the API key is a placeholder for the same reason.
    """
    set_tracing_disabled(disabled=True)
    bridge_client = AsyncOpenAI(
        base_url=normalize_api_base_url(api_base_url),
        api_key="dummy",
    )
    bridge_model = OpenAIChatCompletionsModel(
        model=resolve_model(backend, model),
        openai_client=bridge_client,
    )
    demo = Agent(
        name="Subscription Bridge Demo",
        instructions="Use tools when useful, then answer clearly.",
        model=bridge_model,
        tools=[get_weather],
    )
    run_result = await Runner.run(demo, prompt)
    return str(run_result.final_output)
61+
62+
63+
def _build_parser() -> argparse.ArgumentParser:
64+
parser = argparse.ArgumentParser(
65+
description="Run a local openai-agents-python demo through the subscription bridge."
66+
)
67+
parser.add_argument(
68+
"--backend",
69+
choices=["codex", "claude"],
70+
default="codex",
71+
help="Which CLI-backed bridge backend to use.",
72+
)
73+
parser.add_argument(
74+
"--model",
75+
default=None,
76+
help="Optional full model name override, e.g. codex/gpt-5.4 or claude/claude-sonnet-4-6.",
77+
)
78+
parser.add_argument(
79+
"--prompt",
80+
default="What is the weather in Tokyo?",
81+
help="Prompt to send to the demo agent.",
82+
)
83+
parser.add_argument("--host", default="127.0.0.1", help="Bridge host for local embedded mode.")
84+
parser.add_argument(
85+
"--port", type=int, default=8787, help="Bridge port for local embedded mode."
86+
)
87+
parser.add_argument(
88+
"--base-url",
89+
default=None,
90+
help="Use an already-running bridge instead of starting a local embedded bridge.",
91+
)
92+
parser.add_argument(
93+
"--workdir",
94+
default=str(Path.cwd()),
95+
help="Working directory to pass to the local embedded bridge.",
96+
)
97+
return parser
98+
99+
100+
async def _main_async(args: argparse.Namespace) -> str:
    """Run the demo against an existing bridge, or spin up an embedded one.

    When ``--base-url`` is given, the demo talks to that bridge directly.
    Otherwise a local bridge server is started on a daemon thread for the
    duration of the run and torn down afterwards.

    Returns the agent's final output text.
    """
    if args.base_url:
        return await run_demo(
            prompt=args.prompt,
            backend=args.backend,
            model=args.model,
            api_base_url=args.base_url,
        )

    httpd = make_server(
        args.host,
        args.port,
        default_backend=args.backend,
        workdir=Path(args.workdir).resolve(),
    )
    thread = threading.Thread(target=httpd.serve_forever, daemon=True)
    thread.start()
    try:
        # Brief grace period for the server thread to start serving. Use
        # asyncio.sleep, not time.sleep: a blocking sleep would stall the
        # event loop this coroutine runs on.
        await asyncio.sleep(0.2)
        return await run_demo(
            prompt=args.prompt,
            backend=args.backend,
            model=args.model,
            api_base_url=f"http://{args.host}:{args.port}",
        )
    finally:
        # Tear down the embedded bridge even if the run (or the sleep above)
        # raised or was cancelled.
        httpd.shutdown()
        httpd.server_close()
        thread.join(timeout=2)
129+
130+
131+
def main() -> None:
    """CLI entry point: parse arguments, run the demo, print the final output."""
    args = _build_parser().parse_args()
    final_output = asyncio.run(_main_async(args))
    print(final_output)


if __name__ == "__main__":
    main()

0 commit comments

Comments
 (0)