4074e68715
WIP snapshot before pivot to sing-box+TUN. Reached: - TCP redirect via streamdump pattern (swap+Outbound=0+reinject) - SOCKET layer for SYN-stage flow detection (avoids FLOW Establish-too-late race) - Lazy PID→name resolution (catches Update.exe inside procscan tick) - UDP forward via SOCKS5 UDP ASSOCIATE relay + manual reinject - Result: chat works, voice times out (Discord IP discovery / RTC handshake fails) Reason for pivot: WinDivert NAT-reinject pattern has subtle layer-3 semantics issues that DLL-injection / TUN-based proxies sidestep entirely. Going with embedded sing-box + wintun as the engine — proven path for Discord voice through SOCKS5. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
272 lines
8.6 KiB
Go
272 lines
8.6 KiB
Go
// Package gui hosts the Wails app: the App struct (whose exported methods
|
|
// become the JS API for the frontend) and the Run() helper invoked from
|
|
// cmd/drover/main.go when the user double-clicks the binary.
|
|
package gui
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"log"
|
|
"math/rand"
|
|
"sync"
|
|
"time"
|
|
|
|
"git.okcu.io/root/drover-go/internal/checker"
|
|
"git.okcu.io/root/drover-go/internal/engine"
|
|
"github.com/wailsapp/wails/v2/pkg/runtime"
|
|
)
|
|
|
|
// App is the Wails-bound struct. Every exported method is callable from JS
// via the auto-generated wailsjs/go/main/App.* bindings.
//
// Right now everything except the proxy form is a deterministic stub —
// the real WinDivert + SOCKS5 engine arrives in Phase 1. The stubs are
// sufficient for the UI to feel alive: Check fakes a 7-step diagnostic,
// Start/Stop toggles a phase, GetStats emits realistic-looking numbers.
type App struct {
	ctx     context.Context // Wails runtime context; set once in Startup, used for runtime.EventsEmit
	version string          // build version stamped by NewApp; surfaced via Version()

	// mu guards eng and startedAt (the engine lifecycle state).
	mu        sync.Mutex
	eng       *engine.Engine // nil when no engine has been constructed (or after StopEngine)
	startedAt time.Time      // time of the last successful StartEngine; zero value before first start

	// muCheck guards cancelCheck and checkDone.
	// cancelCheck is the cancel func of the in-flight checker.Run context (nil
	// when no check is running). checkDone is closed by the runner goroutine
	// once it has drained the result channel — RunCheck waits on it before
	// starting a new run, so we never have two emitter goroutines alive.
	muCheck     sync.Mutex
	cancelCheck context.CancelFunc
	checkDone   chan struct{}
}
|
|
|
|
// NewApp returns a fresh App stamped with the binary's build version
|
|
// (so the GUI can display it in the title bar).
|
|
func NewApp(version string) *App { return &App{version: version} }
|
|
|
|
// Version returns the build version (e.g. "0.2.0", "test-local", or
|
|
// "dev"). Frontend reads it on mount to populate the custom title bar.
|
|
func (a *App) Version() string { return a.version }
|
|
|
|
// Startup is called by Wails right after the window is created and the
|
|
// JS runtime is ready. We grab the context for runtime.EventsEmit calls
|
|
// from any subsequent method.
|
|
func (a *App) Startup(ctx context.Context) {
|
|
a.ctx = ctx
|
|
go a.statsLoop()
|
|
}
|
|
|
|
// Config is the proxy/auth payload the frontend sends back from the form.
type Config struct {
	Host     string `json:"host"`     // proxy hostname or IP entered in the form
	Port     int    `json:"port"`     // proxy TCP port
	Auth     bool   `json:"auth"`     // true when username/password auth is enabled in the form
	Login    string `json:"login"`    // proxy username (meaningful only when Auth is true)
	Password string `json:"password"` // proxy password (meaningful only when Auth is true)
}
|
|
|
|
// CheckResult is one row in the diagnostic table; the frontend listens
// for them on the "check:result" event. Mirrors checker.Result but with
// Duration converted to milliseconds (int) for the JS side.
type CheckResult struct {
	ID       string `json:"id"`                    // tcp / greet / auth / connect / udp / voice-quality / api
	Status   string `json:"status"`                // running | passed | warn | failed | skipped
	Metric   string `json:"metric,omitempty"`      // human-readable measurement for the row
	Error    string `json:"error,omitempty"`       // failure text; empty when the step passed
	Hint     string `json:"hint,omitempty"`        // remediation hint shown alongside warn/failed rows
	RawHex   string `json:"rawHex,omitempty"`      // raw wire bytes for debugging (presumably hex-encoded — semantics live in checker)
	Duration int64  `json:"duration_ms,omitempty"` // step duration in milliseconds (converted from time.Duration)
	Attempt  int    `json:"attempt,omitempty"`     // retry attempt counter forwarded from checker.Result
}
|
|
|
|
// RunCheck runs a real 7-step SOCKS5 diagnostic via internal/checker. Each
// Result from the checker channel is forwarded to the frontend as a
// "check:result" event; when the channel closes (run finished, or context
// cancelled) we emit "check:done" with the {total, passed, failed} summary.
//
// If a previous check is still in flight, its context is cancelled and we
// wait for the previous goroutine to finish before launching the new one
// — this guarantees event ordering (no two emitters alive simultaneously).
func (a *App) RunCheck(cfg Config) {
	// Cancel any in-flight check and wait for its goroutine to drain.
	// Snapshot cancel/done under muCheck, but cancel and wait OUTSIDE the
	// lock: the draining goroutine needs muCheck itself in its cleanup.
	a.muCheck.Lock()
	prevCancel := a.cancelCheck
	prevDone := a.checkDone
	a.muCheck.Unlock()
	if prevCancel != nil {
		prevCancel()
	}
	if prevDone != nil {
		<-prevDone
	}

	// Per-run context derives from the app context, so closing the window
	// also cancels a run in progress.
	ctx, cancel := context.WithCancel(a.ctx)
	done := make(chan struct{})

	// Publish this run's cancel/done so a later RunCheck or CancelCheck
	// can find them.
	a.muCheck.Lock()
	a.cancelCheck = cancel
	a.checkDone = done
	a.muCheck.Unlock()

	ckCfg := checker.Config{
		ProxyHost:     cfg.Host,
		ProxyPort:     cfg.Port,
		UseAuth:       cfg.Auth,
		ProxyLogin:    cfg.Login,
		ProxyPassword: cfg.Password,
		// Leave PerTestTimeout / MaxRetries / RetryBackoff /
		// DiscordGateway / DiscordAPI / StunServer at zero so the
		// checker package applies its own defaults.
	}

	go func() {
		defer close(done)
		var passed, failed int
		for r := range checker.Run(ctx, ckCfg) {
			// Always emit on a.ctx, never on the per-check ctx — the
			// per-check ctx may already be cancelled when the final
			// "cancelled" result arrives, which would silently drop it.
			runtime.EventsEmit(a.ctx, "check:result", CheckResult{
				ID:       r.ID,
				Status:   string(r.Status),
				Metric:   r.Metric,
				Error:    r.Error,
				Hint:     r.Hint,
				RawHex:   r.RawHex,
				Duration: r.Duration.Milliseconds(),
				Attempt:  r.Attempt,
			})
			switch r.Status {
			case checker.StatusPassed, checker.StatusWarn:
				// Warn is a "soft pass" — counted as passed for the
				// final summary, but the row still surfaces the hint.
				passed++
			case checker.StatusFailed:
				failed++
			}
			// Other statuses (running/skipped) intentionally don't
			// contribute to the summary counters.
		}
		runtime.EventsEmit(a.ctx, "check:done", map[string]int{
			"total":  passed + failed,
			"passed": passed,
			"failed": failed,
		})

		// Clear cancel/done if we're still the current run (RunCheck may
		// have already replaced them with a newer run by the time we get
		// here, in which case leave those alone).
		a.muCheck.Lock()
		if a.checkDone == done {
			a.cancelCheck = nil
			a.checkDone = nil
		}
		a.muCheck.Unlock()
	}()
}
|
|
|
|
// CancelCheck cancels the currently-running diagnostic, if any. Safe to
|
|
// call when no check is running (no-op).
|
|
func (a *App) CancelCheck() {
|
|
a.muCheck.Lock()
|
|
defer a.muCheck.Unlock()
|
|
if a.cancelCheck != nil {
|
|
a.cancelCheck()
|
|
}
|
|
}
|
|
|
|
// StartEngine initializes and brings up the engine with the given config.
|
|
func (a *App) StartEngine(cfg Config) error {
|
|
log.Printf("gui: StartEngine called host=%s port=%d auth=%v", cfg.Host, cfg.Port, cfg.Auth)
|
|
a.mu.Lock()
|
|
defer a.mu.Unlock()
|
|
if a.eng != nil && a.eng.Status() == engine.StatusActive {
|
|
log.Printf("gui: StartEngine no-op (already active)")
|
|
return nil
|
|
}
|
|
e, err := engine.New(engine.Config{
|
|
ProxyAddr: fmt.Sprintf("%s:%d", cfg.Host, cfg.Port),
|
|
UseAuth: cfg.Auth,
|
|
Login: cfg.Login,
|
|
Password: cfg.Password,
|
|
Targets: []string{"Discord.exe", "DiscordCanary.exe", "DiscordPTB.exe", "Update.exe"},
|
|
})
|
|
if err != nil {
|
|
log.Printf("gui: engine.New failed: %v", err)
|
|
runtime.EventsEmit(a.ctx, "engine:status", map[string]any{"running": false, "error": err.Error()})
|
|
return err
|
|
}
|
|
if err := e.Start(a.ctx); err != nil {
|
|
log.Printf("gui: engine.Start failed: %v", err)
|
|
runtime.EventsEmit(a.ctx, "engine:status", map[string]any{"running": false, "error": err.Error()})
|
|
return err
|
|
}
|
|
a.eng = e
|
|
a.startedAt = time.Now()
|
|
log.Printf("gui: engine started, status=%s", e.Status())
|
|
runtime.EventsEmit(a.ctx, "engine:status", map[string]any{"running": true})
|
|
return nil
|
|
}
|
|
|
|
// StopEngine shuts down the engine.
|
|
func (a *App) StopEngine() error {
|
|
a.mu.Lock()
|
|
defer a.mu.Unlock()
|
|
if a.eng == nil {
|
|
return nil
|
|
}
|
|
err := a.eng.Stop()
|
|
a.eng = nil
|
|
runtime.EventsEmit(a.ctx, "engine:status", map[string]any{"running": false})
|
|
return err
|
|
}
|
|
|
|
// GetStatus returns the current engine state and uptime.
|
|
func (a *App) GetStatus() map[string]any {
|
|
a.mu.Lock()
|
|
defer a.mu.Unlock()
|
|
running := a.eng != nil && a.eng.Status() == engine.StatusActive
|
|
res := map[string]any{
|
|
"running": running,
|
|
"uptimeS": int(time.Since(a.startedAt).Seconds()),
|
|
}
|
|
if a.eng != nil {
|
|
res["state"] = string(a.eng.Status())
|
|
if err := a.eng.LastError(); err != nil {
|
|
res["error"] = err.Error()
|
|
}
|
|
}
|
|
return res
|
|
}
|
|
|
|
// statsLoop emits a stats event every second when the engine is active.
|
|
// Numbers are random but stable enough to look real. P2.4 will replace
|
|
// with real counters from engine.Engine.
|
|
func (a *App) statsLoop() {
|
|
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
|
tick := time.NewTicker(time.Second)
|
|
defer tick.Stop()
|
|
for range tick.C {
|
|
a.mu.Lock()
|
|
if a.eng == nil || a.eng.Status() != engine.StatusActive || a.ctx == nil {
|
|
a.mu.Unlock()
|
|
continue
|
|
}
|
|
uptime := int(time.Since(a.startedAt).Seconds())
|
|
a.mu.Unlock()
|
|
|
|
runtime.EventsEmit(a.ctx, "stats:update", map[string]any{
|
|
"up": r.Intn(50_000) + 5_000, // bytes/sec out
|
|
"down": r.Intn(500_000) + 50_000, // bytes/sec in
|
|
"tcp": r.Intn(8) + 1,
|
|
"udp": 0, // P2.1 scope: no UDP yet
|
|
"uptimeS": uptime,
|
|
})
|
|
}
|
|
}
|
|
|
|
// Greet remains as a smoke check that the bindings pipeline survived
|
|
// the transition. Frontend can call it from a debug button if needed.
|
|
func (a *App) Greet(name string) string {
|
|
return fmt.Sprintf("Hello %s — Drover-Go GUI is alive.", name)
|
|
}
|