Files
drover-go/internal/gui/app.go
T
root 168596bcb5
Build / test (push) Failing after 33s
Build / build-windows (push) Has been skipped
Release / release (push) Failing after 3m22s
sboxrun: domain+IP-CIDR rules + remove voice-quality test
Three follow-up fixes after the WinDivert→sing-box pivot:

1. Discord updater now routes through upstream. Previously only the
   process-name rule matched, but sing-box's TUN-side process
   detection on Windows mis-attributes the in-process Rust updater's
   TLS connection to e.g. steam.exe — the connection went direct and
   hit RKN block. Adding domain_suffix + ip_cidr rules for Cloudflare
   (162.159/16, 104.16/13, 172.64/13) and Fastly (199.232/16,
   151.101/16) catches updates.discord.com regardless of which PID
   the kernel claims sent it. Verified via curl through mihomo:
   updates.discord.com responds 400 in 393ms (i.e. TLS handshake
   succeeds, only the path is wrong — proves the routing reaches it).

2. DiscordSystemHelper.exe added to TargetProcs alongside Update.exe
   (modern Discord builds use it for elevated updates).

3. UDP voice quality test removed from the checker. The STUN-via-
   relay burst measured private mihomo BND.ADDR (192.168.1.132)
   which is unroutable from external clients, so the test reported
   100% loss every time despite voice actually working through
   sing-box's TUN+SOCKS5. The remaining 6 checks (TCP/greet/auth/
   connect/UDP/api) cover what's actionable; voice quality is
   verified empirically by joining a Discord call.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-01 23:21:50 +03:00

279 lines
8.7 KiB
Go

// Package gui hosts the Wails app: the App struct (whose exported methods
// become the JS API for the frontend) and the Run() helper invoked from
// cmd/drover/main.go when the user double-clicks the binary.
package gui
import (
"context"
"fmt"
"log"
"math/rand"
"sync"
"time"
"git.okcu.io/root/drover-go/internal/checker"
"git.okcu.io/root/drover-go/internal/sboxrun"
"github.com/wailsapp/wails/v2/pkg/runtime"
)
// App is the Wails-bound struct. Every exported method is callable from JS
// via the auto-generated wailsjs/go/main/App.* bindings.
//
// NOTE(review): the earlier "everything except the proxy form is a stub"
// comment was stale — RunCheck drives the real internal/checker pipeline
// and StartEngine/StopEngine drive the real sboxrun engine; only the
// statsLoop numbers are still randomized placeholders (see its P2.4 note).
type App struct {
	ctx     context.Context // Wails runtime context; set once in Startup, used for EventsEmit
	version string          // build version string shown in the custom title bar

	// mu guards eng and startedAt (taken by StartEngine, StopEngine,
	// GetStatus, and statsLoop).
	mu        sync.Mutex
	eng       *sboxrun.Engine // nil when no engine is constructed (or after StopEngine)
	startedAt time.Time       // when the engine last started; zero before the first start

	// muCheck guards cancelCheck and checkDone.
	// cancelCheck is the cancel func of the in-flight checker.Run context (nil
	// when no check is running). checkDone is closed by the runner goroutine
	// once it has drained the result channel — RunCheck waits on it before
	// starting a new run, so we never have two emitter goroutines alive.
	muCheck     sync.Mutex
	cancelCheck context.CancelFunc
	checkDone   chan struct{}
}
// NewApp constructs an App carrying the binary's build version, which the
// GUI later reads (via Version) to display in the title bar.
func NewApp(version string) *App {
	app := &App{version: version}
	return app
}
// Version reports the build version string (e.g. "0.2.0", "test-local",
// or "dev"); the frontend calls it on mount to fill the custom title bar.
func (a *App) Version() string {
	return a.version
}
// Startup is called by Wails right after the window is created and the
// JS runtime is ready. We grab the context for runtime.EventsEmit calls
// from any subsequent method.
//
// The a.ctx assignment happens before the go statement, so statsLoop is
// guaranteed to observe a non-nil ctx without extra synchronization.
func (a *App) Startup(ctx context.Context) {
	a.ctx = ctx
	go a.statsLoop() // runs for the process lifetime; only emits while the engine is active
}
// Config is the proxy/auth payload the frontend sends back from the form.
// It is mapped onto checker.Config (RunCheck) and sboxrun.Config
// (StartEngine).
type Config struct {
	Host     string `json:"host"`     // SOCKS5 proxy host
	Port     int    `json:"port"`     // SOCKS5 proxy port
	Auth     bool   `json:"auth"`     // whether username/password auth is enabled
	Login    string `json:"login"`    // proxy username (paired with Auth)
	Password string `json:"password"` // proxy password (paired with Auth)
}
// CheckResult is one row in the diagnostic table; the frontend listens
// for them on the "check:result" event. Mirrors checker.Result but with
// Duration converted to milliseconds (int) for the JS side.
type CheckResult struct {
	ID       string `json:"id"`     // tcp / greet / auth / connect / udp / api (voice-quality was removed from the checker)
	Status   string `json:"status"` // running | passed | warn | failed | skipped
	Metric   string `json:"metric,omitempty"`      // forwarded from checker.Result.Metric
	Error    string `json:"error,omitempty"`       // failure message; empty on success
	Hint     string `json:"hint,omitempty"`        // remediation hint surfaced in the UI row
	RawHex   string `json:"rawHex,omitempty"`      // raw protocol bytes for debugging
	Duration int64  `json:"duration_ms,omitempty"` // checker.Result.Duration in milliseconds
	Attempt  int    `json:"attempt,omitempty"`     // retry attempt number forwarded from the checker
}
// RunCheck runs the real SOCKS5 diagnostic via internal/checker (currently
// six checks: tcp / greet / auth / connect / udp / api). Each Result from
// the checker channel is forwarded to the frontend as a "check:result"
// event; when the channel closes (run finished, or context cancelled) we
// emit "check:done" with the {total, passed, failed} summary.
//
// If a previous check is still in flight, its context is cancelled and we
// wait for the previous goroutine to finish before launching the new one
// — this guarantees event ordering (no two emitters alive simultaneously).
func (a *App) RunCheck(cfg Config) {
	// Claim the "current run" slot atomically. The previous code read
	// cancelCheck/checkDone, unlocked, waited, then re-locked to install
	// the new run — two concurrent RunCheck calls could both pass the wait
	// and both install, leaving two emitter goroutines alive. Looping until
	// checkDone is nil and installing while still holding muCheck closes
	// that race. (We must NOT hold muCheck while waiting on prevDone: the
	// runner goroutine takes muCheck shortly before closing done, so that
	// would deadlock.)
	a.muCheck.Lock()
	for a.checkDone != nil {
		prevCancel := a.cancelCheck
		prevDone := a.checkDone
		a.muCheck.Unlock()
		if prevCancel != nil {
			prevCancel()
		}
		<-prevDone
		a.muCheck.Lock()
	}
	ctx, cancel := context.WithCancel(a.ctx)
	done := make(chan struct{})
	a.cancelCheck = cancel
	a.checkDone = done
	a.muCheck.Unlock()

	ckCfg := checker.Config{
		ProxyHost:     cfg.Host,
		ProxyPort:     cfg.Port,
		UseAuth:       cfg.Auth,
		ProxyLogin:    cfg.Login,
		ProxyPassword: cfg.Password,
		// Leave PerTestTimeout / MaxRetries / RetryBackoff /
		// DiscordGateway / DiscordAPI / StunServer at zero so the
		// checker package applies its own defaults.
	}

	go func() {
		defer close(done)
		var passed, failed int
		for r := range checker.Run(ctx, ckCfg) {
			// Always emit on a.ctx, never on the per-check ctx — the
			// per-check ctx may already be cancelled when the final
			// "cancelled" result arrives, which would silently drop it.
			runtime.EventsEmit(a.ctx, "check:result", CheckResult{
				ID:       r.ID,
				Status:   string(r.Status),
				Metric:   r.Metric,
				Error:    r.Error,
				Hint:     r.Hint,
				RawHex:   r.RawHex,
				Duration: r.Duration.Milliseconds(),
				Attempt:  r.Attempt,
			})
			switch r.Status {
			case checker.StatusPassed, checker.StatusWarn:
				// Warn is a "soft pass" — counted as passed for the
				// final summary, but the row still surfaces the hint.
				passed++
			case checker.StatusFailed:
				failed++
			}
		}
		runtime.EventsEmit(a.ctx, "check:done", map[string]int{
			"total":  passed + failed,
			"passed": passed,
			"failed": failed,
		})
		// Clear cancel/done only if we're still the current run (a newer
		// run may have replaced them by the time we get here, in which
		// case leave those alone).
		a.muCheck.Lock()
		if a.checkDone == done {
			a.cancelCheck = nil
			a.checkDone = nil
		}
		a.muCheck.Unlock()
	}()
}
// CancelCheck aborts the in-flight diagnostic, if one exists. Calling it
// with no check running is a harmless no-op.
func (a *App) CancelCheck() {
	a.muCheck.Lock()
	cancel := a.cancelCheck
	a.muCheck.Unlock()
	// context cancel funcs are safe to invoke outside the lock (and are
	// idempotent), so the critical section stays minimal.
	if cancel != nil {
		cancel()
	}
}
// StartEngine initializes and brings up the engine with the given config.
//
// No-op when the engine is already active. On any construction or start
// failure the error is both returned to the JS caller and pushed to the
// frontend via the "engine:status" event.
func (a *App) StartEngine(cfg Config) error {
	log.Printf("gui: StartEngine called host=%s port=%d auth=%v", cfg.Host, cfg.Port, cfg.Auth)
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.eng != nil && a.eng.Status() == sboxrun.StatusActive {
		log.Printf("gui: StartEngine no-op (already active)")
		return nil
	}
	// NOTE(review): if a.eng is non-nil but not active (e.g. a previously
	// failed engine), it is replaced below without calling Stop() on it —
	// confirm sboxrun.Engine requires no explicit cleanup in that state.
	e, err := sboxrun.New(sboxrun.Config{
		ProxyHost: cfg.Host,
		ProxyPort: cfg.Port,
		UseAuth:   cfg.Auth,
		Login:     cfg.Login,
		Password:  cfg.Password,
		// Process names whose traffic the engine should route through
		// the proxy.
		TargetProcs: []string{
			"Discord.exe",
			"DiscordCanary.exe",
			"DiscordPTB.exe",
			"DiscordSystemHelper.exe", // elevated updater (modern builds)
			"Update.exe",              // legacy Squirrel updater (older builds)
		},
	})
	if err != nil {
		log.Printf("gui: sboxrun.New failed: %v", err)
		runtime.EventsEmit(a.ctx, "engine:status", map[string]any{"running": false, "error": err.Error()})
		return err
	}
	if err := e.Start(a.ctx); err != nil {
		log.Printf("gui: sboxrun.Start failed: %v", err)
		runtime.EventsEmit(a.ctx, "engine:status", map[string]any{"running": false, "error": err.Error()})
		return err
	}
	a.eng = e
	a.startedAt = time.Now()
	log.Printf("gui: engine started, status=%s", e.Status())
	runtime.EventsEmit(a.ctx, "engine:status", map[string]any{"running": true})
	return nil
}
// StopEngine tears down the running engine and notifies the frontend.
// A nil engine makes this a no-op returning nil.
func (a *App) StopEngine() error {
	a.mu.Lock()
	defer a.mu.Unlock()
	eng := a.eng
	if eng == nil {
		return nil
	}
	a.eng = nil
	stopErr := eng.Stop()
	runtime.EventsEmit(a.ctx, "engine:status", map[string]any{"running": false})
	return stopErr
}
// GetStatus returns the current engine state and uptime.
//
// Keys: "running" (bool), "uptimeS" (int, 0 unless the engine is active),
// plus "state" and optionally "error" when an engine object exists.
func (a *App) GetStatus() map[string]any {
	a.mu.Lock()
	defer a.mu.Unlock()
	running := a.eng != nil && a.eng.Status() == sboxrun.StatusActive
	// Fix: previously uptime was computed unconditionally from startedAt,
	// so before the first start (zero time.Time) the frontend received an
	// absurd multi-decade uptime. Report 0 unless the engine is running.
	uptime := 0
	if running && !a.startedAt.IsZero() {
		uptime = int(time.Since(a.startedAt).Seconds())
	}
	res := map[string]any{
		"running": running,
		"uptimeS": uptime,
	}
	if a.eng != nil {
		res["state"] = string(a.eng.Status())
		if err := a.eng.LastError(); err != nil {
			res["error"] = err.Error()
		}
	}
	return res
}
// statsLoop emits a "stats:update" event once per second while the engine
// is active. The traffic numbers are randomized placeholders that look
// plausible; P2.4 will swap in real counters from the engine.
func (a *App) statsLoop() {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for range ticker.C {
		a.mu.Lock()
		active := a.eng != nil && a.eng.Status() == sboxrun.StatusActive && a.ctx != nil
		uptime := int(time.Since(a.startedAt).Seconds())
		a.mu.Unlock()
		if !active {
			continue
		}
		runtime.EventsEmit(a.ctx, "stats:update", map[string]any{
			"up":      rng.Intn(50_000) + 5_000,    // bytes/sec out
			"down":    rng.Intn(500_000) + 50_000,  // bytes/sec in
			"tcp":     rng.Intn(8) + 1,
			"udp":     0, // P2.1 scope: no UDP yet
			"uptimeS": uptime,
		})
	}
}
// Greet is kept as a smoke test proving the JS bindings pipeline still
// works after the transition; wire it to a debug button if ever needed.
func (a *App) Greet(name string) string {
	msg := fmt.Sprintf("Hello %s — Drover-Go GUI is alive.", name)
	return msg
}