Skip to content

Commit 9b12f3f

Browse files
Copilot and tma authored
Remove local health port binding, check upstream directly, and fix Docker PR CI (#5)
* Initial plan * fix: tolerate default health port conflict Co-authored-by: tma <4719+tma@users.noreply.github.com> * refactor: make health checks internal Co-authored-by: tma <4719+tma@users.noreply.github.com> * chore: address review follow-ups Co-authored-by: tma <4719+tma@users.noreply.github.com> * chore: slow docker healthcheck cadence Co-authored-by: tma <4719+tma@users.noreply.github.com> * fix: avoid ghcr push on pr builds Co-authored-by: tma <4719+tma@users.noreply.github.com> * fix: simplify docker workflow cache logic Co-authored-by: tma <4719+tma@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: tma <4719+tma@users.noreply.github.com>
1 parent a057383 commit 9b12f3f

File tree

9 files changed

+93
-40
lines changed

9 files changed

+93
-40
lines changed

.github/workflows/docker.yml

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,9 @@ jobs:
1717
permissions:
1818
contents: read
1919
packages: write
20+
env:
21+
CACHE_FROM: ${{ github.event_name == 'pull_request' && 'type=gha' || format('type=registry,ref=ghcr.io/{0}:buildcache', github.repository) }}
22+
CACHE_TO: ${{ github.event_name == 'pull_request' && 'type=gha,mode=max' || format('type=registry,ref=ghcr.io/{0}:buildcache,mode=max', github.repository) }}
2023

2124
steps:
2225
- uses: actions/checkout@v6
@@ -55,5 +58,5 @@ jobs:
5558
push: ${{ github.event_name != 'pull_request' }}
5659
tags: ${{ steps.meta.outputs.tags }}
5760
labels: ${{ steps.meta.outputs.labels }}
58-
cache-from: type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:buildcache
59-
cache-to: type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:buildcache,mode=max
61+
cache-from: ${{ env.CACHE_FROM }}
62+
cache-to: ${{ env.CACHE_TO }}

.gitignore

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
# Build artifacts
2-
mbproxy
2+
/mbproxy
33
*.exe
44

55
# Go specific

Dockerfile

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,9 +31,7 @@ FROM scratch
3131

3232
COPY --from=builder /app/mbproxy /mbproxy
3333

34-
EXPOSE 8080
35-
36-
HEALTHCHECK --interval=5s --timeout=3s --start-period=10s --retries=3 \
34+
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
3735
CMD ["/mbproxy", "-health"]
3836

3937
ENTRYPOINT ["/mbproxy"]

README.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,8 @@ All configuration is via environment variables:
3939
| `MODBUS_SHUTDOWN_TIMEOUT` | Graceful shutdown timeout | `30s` |
4040
| `LOG_LEVEL` | Log level: `INFO`, `DEBUG` | `INFO` |
4141

42+
`/mbproxy -health` performs an internal upstream connectivity check and does not open a separate local TCP health port.
43+
4244
### Read-Only Modes
4345

4446
- `false`: Full read/write passthrough to upstream device

SPEC.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -108,6 +108,8 @@ Three modes:
108108
| `MODBUS_SHUTDOWN_TIMEOUT` | Graceful shutdown timeout | `30s` | `10s`, `60s` |
109109
| `LOG_LEVEL` | Log level | `INFO` | `INFO`, `DEBUG` |
110110

111+
The container health check runs `mbproxy -health`, which performs an internal upstream connectivity check without binding a separate local TCP port.
112+
111113
## Implementation Details
112114

113115
### Dependencies

cmd/mbproxy/main.go

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,10 @@ import (
88
"os"
99
"os/signal"
1010
"syscall"
11-
"time"
1211

1312
"github.com/tma/mbproxy/internal/config"
14-
"github.com/tma/mbproxy/internal/health"
1513
"github.com/tma/mbproxy/internal/logging"
14+
"github.com/tma/mbproxy/internal/modbus"
1615
"github.com/tma/mbproxy/internal/proxy"
1716
)
1817

@@ -21,8 +20,7 @@ func main() {
2120
flag.Parse()
2221

2322
if *healthCheck {
24-
addr := config.GetEnv("HEALTH_LISTEN", ":8080")
25-
if err := health.CheckHealth(addr); err != nil {
23+
if err := runHealthCheck(); err != nil {
2624
fmt.Fprintln(os.Stderr, err)
2725
os.Exit(1)
2826
}
@@ -50,19 +48,6 @@ func main() {
5048
os.Exit(1)
5149
}
5250

53-
// Start health server
54-
hs := health.NewServer(cfg.HealthListen, p, logger)
55-
hsLn, err := hs.Listen()
56-
if err != nil {
57-
logger.Error("failed to start health server", "error", err)
58-
os.Exit(1)
59-
}
60-
go func() {
61-
if err := hs.Serve(hsLn); err != nil {
62-
logger.Error("health server error", "error", err)
63-
}
64-
}()
65-
6651
// Start proxy in background
6752
errCh := make(chan error, 1)
6853
go func() {
@@ -83,16 +68,31 @@ func main() {
8368
// Graceful shutdown
8469
cancel()
8570

86-
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
87-
defer shutdownCancel()
88-
if err := hs.Shutdown(shutdownCtx); err != nil {
89-
logger.Error("health server shutdown error", "error", err)
90-
}
91-
9271
if err := p.Shutdown(cfg.ShutdownTimeout); err != nil {
9372
logger.Error("shutdown error", "error", err)
9473
os.Exit(1)
9574
}
9675

9776
logger.Info("shutdown complete")
9877
}
78+
79+
func runHealthCheck() error {
80+
cfg, err := config.Load()
81+
if err != nil {
82+
return err
83+
}
84+
85+
return checkUpstreamHealth(cfg, logging.New(cfg.LogLevel))
86+
}
87+
88+
func checkUpstreamHealth(cfg *config.Config, logger *slog.Logger) (err error) {
89+
client := modbus.NewClient(cfg.Upstream, cfg.Timeout, cfg.RequestDelay, cfg.ConnectDelay, logger)
90+
defer func() {
91+
closeErr := client.Close()
92+
if err == nil && closeErr != nil {
93+
err = closeErr
94+
}
95+
}()
96+
97+
return client.Connect()
98+
}

cmd/mbproxy/main_test.go

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
package main
2+
3+
import (
4+
"io"
5+
"log/slog"
6+
"net"
7+
"testing"
8+
"time"
9+
10+
"github.com/tma/mbproxy/internal/config"
11+
)
12+
13+
func newTestLogger() *slog.Logger {
14+
return slog.New(slog.NewTextHandler(io.Discard, nil))
15+
}
16+
17+
func TestCheckUpstreamHealth_Success(t *testing.T) {
18+
ln, err := net.Listen("tcp", "127.0.0.1:0")
19+
if err != nil {
20+
t.Fatalf("failed to listen: %v", err)
21+
}
22+
defer ln.Close()
23+
24+
acceptDone := make(chan struct{})
25+
go func() {
26+
defer close(acceptDone)
27+
conn, err := ln.Accept()
28+
if err == nil {
29+
conn.Close()
30+
}
31+
}()
32+
33+
cfg := &config.Config{
34+
Upstream: ln.Addr().String(),
35+
Timeout: time.Second,
36+
}
37+
if err := checkUpstreamHealth(cfg, newTestLogger()); err != nil {
38+
t.Fatalf("expected health check to succeed, got %v", err)
39+
}
40+
41+
<-acceptDone
42+
}
43+
44+
func TestCheckUpstreamHealth_Failure(t *testing.T) {
45+
ln, err := net.Listen("tcp", "127.0.0.1:0")
46+
if err != nil {
47+
t.Fatalf("failed to reserve port: %v", err)
48+
}
49+
addr := ln.Addr().String()
50+
ln.Close()
51+
52+
cfg := &config.Config{
53+
Upstream: addr,
54+
Timeout: 100 * time.Millisecond,
55+
}
56+
if err := checkUpstreamHealth(cfg, newTestLogger()); err == nil {
57+
t.Fatal("expected health check to fail")
58+
}
59+
}

internal/config/config.go

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@ type Config struct {
3030
RequestDelay time.Duration
3131
ConnectDelay time.Duration
3232
ShutdownTimeout time.Duration
33-
HealthListen string
3433
LogLevel string
3534
}
3635

@@ -47,7 +46,6 @@ func Load() (*Config, error) {
4746
RequestDelay: 0,
4847
ConnectDelay: 0,
4948
ShutdownTimeout: 30 * time.Second,
50-
HealthListen: GetEnv("HEALTH_LISTEN", ":8080"),
5149
LogLevel: GetEnv("LOG_LEVEL", "INFO"),
5250
}
5351

internal/config/config_test.go

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@ func TestLoad_Defaults(t *testing.T) {
1919
os.Unsetenv("MODBUS_READONLY")
2020
os.Unsetenv("MODBUS_TIMEOUT")
2121
os.Unsetenv("MODBUS_SHUTDOWN_TIMEOUT")
22-
os.Unsetenv("HEALTH_LISTEN")
2322
os.Unsetenv("LOG_LEVEL")
2423

2524
cfg, err := Load()
@@ -57,9 +56,6 @@ func TestLoad_Defaults(t *testing.T) {
5756
if cfg.ShutdownTimeout != 30*time.Second {
5857
t.Errorf("expected 30s shutdown timeout, got %v", cfg.ShutdownTimeout)
5958
}
60-
if cfg.HealthListen != ":8080" {
61-
t.Errorf("expected :8080, got %s", cfg.HealthListen)
62-
}
6359
if cfg.LogLevel != "INFO" {
6460
t.Errorf("expected INFO log level, got %s", cfg.LogLevel)
6561
}
@@ -85,7 +81,6 @@ func TestLoad_CustomValues(t *testing.T) {
8581
os.Setenv("MODBUS_REQUEST_DELAY", "100ms")
8682
os.Setenv("MODBUS_CONNECT_DELAY", "200ms")
8783
os.Setenv("MODBUS_SHUTDOWN_TIMEOUT", "60s")
88-
os.Setenv("HEALTH_LISTEN", ":9090")
8984
os.Setenv("LOG_LEVEL", "DEBUG")
9085

9186
defer func() {
@@ -99,7 +94,6 @@ func TestLoad_CustomValues(t *testing.T) {
9994
os.Unsetenv("MODBUS_REQUEST_DELAY")
10095
os.Unsetenv("MODBUS_CONNECT_DELAY")
10196
os.Unsetenv("MODBUS_SHUTDOWN_TIMEOUT")
102-
os.Unsetenv("HEALTH_LISTEN")
10397
os.Unsetenv("LOG_LEVEL")
10498
}()
10599

@@ -135,9 +129,6 @@ func TestLoad_CustomValues(t *testing.T) {
135129
if cfg.ShutdownTimeout != 60*time.Second {
136130
t.Errorf("expected 60s shutdown timeout, got %v", cfg.ShutdownTimeout)
137131
}
138-
if cfg.HealthListen != ":9090" {
139-
t.Errorf("expected :9090, got %s", cfg.HealthListen)
140-
}
141132
if cfg.LogLevel != "DEBUG" {
142133
t.Errorf("expected DEBUG log level, got %s", cfg.LogLevel)
143134
}

0 commit comments

Comments (0)