pivot: replace WinDivert engine with embedded sing-box + wintun
After 5+ hours of WinDivert NETWORK-layer NAT-rewrite debugging
(streamdump pattern, SOCKET-layer SYN preemption, lazy PID resolution,
UDP ASSOCIATE relay + manual reinject), Discord voice still wouldn't
connect. The fundamental issue: UDP packets reinjected by WinDivert
don't reliably reach connect()-bound application sockets, because the
kernel's socket demux happens at a layer above the reinject point.
dvp/force-proxy avoids this entirely via DLL injection (operating above
the kernel demux). We avoid it the other way: embed sing-box and let it
run a TUN inbound, a per-process routing rule, and a SOCKS5 outbound.
sing-box reads TUN packets from the kernel as a normal flow, and the
application socket sees a normal flow back. No reinject hairpin, no SYN
race, no spoofing concerns.
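
For context, here is a minimal sketch of the kind of config sboxrun
generates. The field names follow sing-box's documented tun/route/socks
schema, but the package, helper name, tags, and TUN subnet are
illustrative assumptions, not the literal generated output:

// Illustrative sketch of the generated sing-box config (not the literal
// output of internal/sboxrun): a TUN inbound, a process_name route rule
// sending only the target apps to the SOCKS5 upstream, and a direct
// outbound as the default for everything else.
package sboxcfg

import "encoding/json"

// BuildConfig marshals a config like the one sboxrun writes to disk.
// Host/port/targets come from the GUI form; all names here are assumed.
func BuildConfig(proxyHost string, proxyPort int, targets []string) ([]byte, error) {
    cfg := map[string]any{
        "inbounds": []any{map[string]any{
            "type":       "tun",
            "tag":        "tun-in",
            "address":    []string{"172.19.0.1/30"}, // illustrative TUN subnet
            "auto_route": true,                      // install routes so traffic enters the TUN
        }},
        "outbounds": []any{
            map[string]any{
                "type":        "socks",
                "tag":         "proxy",
                "server":      proxyHost,
                "server_port": proxyPort,
            },
            map[string]any{"type": "direct", "tag": "direct"},
        },
        "route": map[string]any{
            "rules": []any{map[string]any{
                "process_name": targets, // e.g. Discord.exe and friends
                "outbound":     "proxy",
            }},
            "final": "direct", // everything not matched stays direct
        },
    }
    return json.MarshalIndent(cfg, "", "  ")
}

Anything not matched by the process_name rule falls through to the
direct outbound, which is how everything outside the target list keeps
bypassing the proxy.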
What this commit does:
- Drops internal/divert, internal/engine, internal/redirect,
internal/socks5, internal/procscan, plus cmd/drover/{proxy,
debugflow}_*.go subcommands (all WinDivert-only).
- Adds internal/sboxrun: embeds sing-box.exe (1.12.25) + wintun.dll
  (0.14.1) via //go:embed, installs them to %PROGRAMDATA%\Drover\sboxrun\
  with SHA256 verification, generates the JSON config from the form,
  spawns sing-box as a subprocess, and manages its lifecycle (sketched
  after this list).
- Wires sboxrun into internal/gui/app.go: StartEngine/StopEngine now
  drive sboxrun.Engine instead of the WinDivert engine.
- Fixes the Wails binding: StartEngine(cfg) now receives the form
  config (it was zero-arg, so it silently tripped the ProxyHost-required
  validation).
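
A sketch of the embed/install/spawn flow from the sboxrun bullet above.
Helper names and the exact layout are assumptions rather than the real
internal/sboxrun API; `sing-box run -c <config>` is the stock CLI
entrypoint:

// Sketch of the install-and-spawn flow; helper names and layout are
// assumptions, not the real internal/sboxrun API.
package sboxrun

import (
    "crypto/sha256"
    _ "embed"
    "os"
    "os/exec"
    "path/filepath"
)

//go:embed sing-box.exe
var singboxExe []byte

//go:embed wintun.dll
var wintunDLL []byte

// installAsset writes the embedded payload only if the on-disk copy is
// missing or its SHA256 differs from the embedded bytes.
func installAsset(dir, name string, payload []byte) (string, error) {
    path := filepath.Join(dir, name)
    if existing, err := os.ReadFile(path); err == nil &&
        sha256.Sum256(existing) == sha256.Sum256(payload) {
        return path, nil // already installed and verified
    }
    if err := os.MkdirAll(dir, 0o755); err != nil {
        return "", err
    }
    return path, os.WriteFile(path, payload, 0o755)
}

// Start installs both assets under %PROGRAMDATA%\Drover\sboxrun and
// spawns sing-box against the given config file.
func Start(cfgPath string) (*exec.Cmd, error) {
    dir := filepath.Join(os.Getenv("ProgramData"), "Drover", "sboxrun")
    exe, err := installAsset(dir, "sing-box.exe", singboxExe)
    if err != nil {
        return nil, err
    }
    if _, err := installAsset(dir, "wintun.dll", wintunDLL); err != nil {
        return nil, err
    }
    cmd := exec.Command(exe, "run", "-c", cfgPath) // stock sing-box CLI
    cmd.Dir = dir // run from the install dir, next to wintun.dll
    return cmd, cmd.Start()
}

With this in place, StartEngine/StopEngine reduce to writing a fresh
config from the form and starting or killing the child process.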
Manual test: Discord chat + voice work end-to-end through the mihomo
upstream. Yandex Music, svchost, etc. continue to go direct via the
process_name routing rule.
The binary grew from 12 MB to 49 MB (37 MB of embedded sing-box), but
it now ships fully self-contained. It is also AV-friendly: wintun is
Microsoft-signed, and there is no DLL injection.
The WinDivert work is preserved on the experimental/windivert branch in
case we ever want to come back to that path.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
@@ -1,12 +0,0 @@
-//go:build !windows
-
-package main
-
-import (
-    "context"
-    "fmt"
-)
-
-func runDebugFlow(_ context.Context) error {
-    return fmt.Errorf("debug-flow requires Windows")
-}
@@ -1,64 +0,0 @@
-//go:build windows
-
-package main
-
-import (
-    "context"
-    "log"
-    "time"
-
-    "git.okcu.io/root/drover-go/internal/divert"
-)
-
-// runDebugFlow opens a WinDivert FLOW handle with the broadest possible
-// filter ("tcp") and logs every flow-establish/delete event for up to
-// 30 seconds. This is the simplest possible test that the FLOW layer
-// is delivering events to our handle.
-//
-// If we see events here but our process-targeted handle in `proxy`
-// stays silent, the bug is in our processId filter clause. If we see
-// nothing here, the FLOW layer is broken on this machine.
-func runDebugFlow(parent context.Context) error {
-    if _, err := divert.InstallDriver(); err != nil {
-        return err
-    }
-
-    ctx, cancel := context.WithTimeout(parent, 30*time.Second)
-    defer cancel()
-
-    log.Printf("debug-flow: opening FLOW handle with filter \"true\" (capture all flows)")
-    h, err := divert.OpenFlow("true")
-    if err != nil {
-        log.Printf("debug-flow: OpenFlow failed: %v", err)
-        return err
-    }
-    defer h.Close()
-    log.Printf("debug-flow: handle open, listening for 30s")
-
-    go func() {
-        <-ctx.Done()
-        _ = h.Close() // unblock RecvFlow
-    }()
-
-    count := 0
-    for {
-        ev, err := h.RecvFlow()
-        if err != nil {
-            if ctx.Err() != nil {
-                log.Printf("debug-flow: done — captured %d events in 30s", count)
-                return nil
-            }
-            log.Printf("debug-flow: RecvFlow err: %v", err)
-            return err
-        }
-        count++
-        log.Printf("debug-flow: event #%d est=%v pid=%d proto=%d %v:%d → %v:%d rawLocal=%x rawRemote=%x",
-            count, ev.Established, ev.ProcessID, ev.Protocol,
-            ev.SrcAddr, ev.SrcPort, ev.DstAddr, ev.DstPort,
-            ev.LocalRaw, ev.RemoteRaw)
-        if count >= 20 {
-            log.Printf("debug-flow: hit 20-event cap, stopping")
-            return nil
-        }
-    }
-}
@@ -127,52 +127,10 @@ func newRootCmd() *cobra.Command {
     root.AddCommand(newUpdateCmd())
     root.AddCommand(newServiceCmd())
     root.AddCommand(newGUICmd())
-    root.AddCommand(newProxyCmd())
-    root.AddCommand(newDebugFlowCmd())
 
     return root
 }
-
-// newDebugFlowCmd opens a WinDivert FLOW handle with filter "tcp"
-// (capture all TCP flow events from any process) and logs every event
-// for 30 seconds. Useful to verify the FLOW layer is working at all
-// without process-targeting interference.
-func newDebugFlowCmd() *cobra.Command {
-    return &cobra.Command{
-        Use:    "debug-flow",
-        Short:  "[debug] open broad FLOW handle, log events for 30s",
-        Hidden: true,
-        RunE: func(cmd *cobra.Command, args []string) error {
-            return runDebugFlow(cmd.Context())
-        },
-    }
-}
-
-// newProxyCmd is the headless engine-only mode: no Wails, no tray —
-// just spin up the WinDivert + SOCKS5 pipeline against the configured
-// upstream and block on Ctrl+C. Useful for debugging without the GUI
-// stack in the way; everything still goes to %LOCALAPPDATA%\Drover\debug.log.
-func newProxyCmd() *cobra.Command {
-    var host, login, password string
-    var port int
-    var auth bool
-    cmd := &cobra.Command{
-        Use:   "proxy",
-        Short: "Run the WinDivert+SOCKS5 engine in headless mode (no GUI, blocks until Ctrl+C)",
-        RunE: func(cmd *cobra.Command, args []string) error {
-            return runProxy(cmd.Context(), host, port, auth, login, password)
-        },
-    }
-    cmd.Flags().StringVar(&host, "host", "", "upstream SOCKS5 host (required)")
-    cmd.Flags().IntVar(&port, "port", 0, "upstream SOCKS5 port (required)")
-    cmd.Flags().BoolVar(&auth, "auth", false, "enable user/pass auth")
-    cmd.Flags().StringVar(&login, "login", "", "SOCKS5 login (when --auth)")
-    cmd.Flags().StringVar(&password, "password", "", "SOCKS5 password (when --auth)")
-    _ = cmd.MarkFlagRequired("host")
-    _ = cmd.MarkFlagRequired("port")
-    return cmd
-}
 
 func newGUICmd() *cobra.Command {
     return &cobra.Command{
         Use: "gui",
@@ -1,14 +0,0 @@
-//go:build !windows
-
-package main
-
-import (
-    "context"
-    "fmt"
-)
-
-// runProxy stub for non-Windows builds (drover only ships for Windows;
-// this stub keeps `go build ./...` clean on Linux dev/CI machines).
-func runProxy(_ context.Context, _ string, _ int, _ bool, _, _ string) error {
-    return fmt.Errorf("the proxy subcommand requires Windows (WinDivert is Windows-only)")
-}
@@ -1,80 +0,0 @@
-//go:build windows
-
-package main
-
-import (
-    "context"
-    "fmt"
-    "log"
-    "os"
-    "os/signal"
-    "syscall"
-    "time"
-
-    "git.okcu.io/root/drover-go/internal/engine"
-)
-
-// runProxy is the body of the `drover proxy` subcommand. It builds an
-// engine.Engine from the supplied flags, calls Start, and blocks until
-// the process receives SIGINT (Ctrl+C) or SIGTERM. On signal, it
-// gracefully Stops the engine and exits.
-//
-// All output is mirrored to stderr (visible when launched from a
-// console session) AND %LOCALAPPDATA%\Drover\debug.log. setupDebugLog
-// in main.go has already wired the log package to write to both.
-func runProxy(parent context.Context, host string, port int, auth bool, login, password string) error {
-    if host == "" || port == 0 {
-        return fmt.Errorf("--host and --port are required")
-    }
-
-    ctx, cancel := signal.NotifyContext(parent, os.Interrupt, syscall.SIGTERM)
-    defer cancel()
-
-    cfg := engine.Config{
-        ProxyAddr: fmt.Sprintf("%s:%d", host, port),
-        UseAuth:   auth,
-        Login:     login,
-        Password:  password,
-        Targets:   []string{"Discord.exe", "DiscordCanary.exe", "DiscordPTB.exe", "Update.exe"},
-    }
-
-    log.Printf("proxy: building engine (proxy=%s auth=%v targets=%v)", cfg.ProxyAddr, cfg.UseAuth, cfg.Targets)
-    e, err := engine.New(cfg)
-    if err != nil {
-        return fmt.Errorf("engine.New: %w", err)
-    }
-    startCtx, startCancel := context.WithTimeout(ctx, 15*time.Second)
-    defer startCancel()
-    if err := e.Start(startCtx); err != nil {
-        log.Printf("proxy: Start failed: %v", err)
-        return fmt.Errorf("engine.Start: %w", err)
-    }
-    log.Printf("proxy: engine status=%s — press Ctrl+C to stop", e.Status())
-
-    // Periodic status ping so the user sees the engine is alive.
-    statusTk := time.NewTicker(10 * time.Second)
-    defer statusTk.Stop()
-
-    for {
-        select {
-        case <-ctx.Done():
-            log.Printf("proxy: signal received, shutting down")
-            if err := e.Stop(); err != nil {
-                log.Printf("proxy: Stop returned: %v", err)
-            }
-            log.Printf("proxy: bye")
-            return nil
-        case <-statusTk.C:
-            if le := e.LastError(); le != nil {
-                log.Printf("proxy: heartbeat status=%s lastErr=%v", e.Status(), le)
-            } else {
-                log.Printf("proxy: heartbeat status=%s", e.Status())
-            }
-            if e.Status() == engine.StatusFailed {
-                log.Printf("proxy: engine entered Failed state, exiting")
-                _ = e.Stop()
-                return fmt.Errorf("engine failed: %v", e.LastError())
-            }
-        }
-    }
-}