internal/checker: 7-step Run orchestrator + integration tests
Build / test (push) Failing after 29s
Build / build-windows (push) Has been skipped

Public Run(ctx, cfg) <-chan Result streams diagnostic events for the seven
tests (tcp, greet, auth?, connect, udp, stun, api) wired through the
SOCKS5 primitives, STUN codec, retry classification and RU hints.

- Per-test attempt loop with running/passed/failed events, transient-only
  retries (per-attempt timeout treated as transient, parent ctx cancel as
  permanent), context-aware backoff sleep.
- Connection lifecycle: tcpConn shared across greet/auth/connect (closed
  and redialed on retry); separate udpConn2 control channel for UDP
  ASSOCIATE kept alive for the duration of the stun test.
- STUN-via-SOCKS5: builds 10-byte SOCKS5 UDP header + STUN binding
  request, decodes reply with ATYP-aware header strip (1/3/4).
- runAPI plugs SOCKS5 dial into http.Transport.DialContext; passes on
  HTTP 200 OR 401.
- Skip semantics: dependency-failed tests emit single skipped result;
  cancellation latches and propagates as cancelled-failed (current) +
  cancelled-skipped (remaining).
- Defaults applied to a copy of cfg; UseAuth=false suppresses any "auth"
  result entirely.

Tests: 10 TestRun_* covering happy/auth-rejected/all-rejected/
connect-refused/udp-unsupported/timeout-then-ok/cancelled-mid-flight/
defaults plus extractRawHex unit. Fake SOCKS5 proxy + UDP relay echoing
synthetic STUN binding success responses; httptest stub for API splice.

Combined coverage 84.3% (>=80% target). go test -race clean.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-05-01 16:08:36 +03:00
parent acd5291604
commit 4b985bb7f0
2 changed files with 1620 additions and 0 deletions
+739
View File
@@ -0,0 +1,739 @@
package checker
import (
"context"
"crypto/tls"
"encoding/binary"
"errors"
"fmt"
"net"
"net/http"
"regexp"
"strconv"
"time"
)
// Status represents the lifecycle state of a single test.
type Status string
// Result statuses emitted on the channel.
const (
StatusRunning Status = "running"
StatusPassed Status = "passed"
StatusFailed Status = "failed"
StatusSkipped Status = "skipped"
)
// Result is one event in the diagnostic stream. Multiple Results may be
// emitted per test (one per attempt: running + passed/failed; on retry,
// running again then passed/failed).
type Result struct {
ID string `json:"id"`
Status Status `json:"status"`
Metric string `json:"metric,omitempty"`
Error string `json:"error,omitempty"`
Hint string `json:"hint,omitempty"`
RawHex string `json:"raw_hex,omitempty"`
Duration time.Duration `json:"duration_ms"`
Attempt int `json:"attempt"`
}
// Config drives Run. Zero-value fields receive defaults via applyDefaults.
type Config struct {
ProxyHost string
ProxyPort int
UseAuth bool
ProxyLogin string
ProxyPassword string
PerTestTimeout time.Duration
MaxRetries int
RetryBackoff time.Duration
DiscordGateway string
DiscordAPI string
StunServer string
}
// applyDefaults returns a copy of cfg with zero-valued knobs filled in.
func applyDefaults(cfg Config) Config {
if cfg.PerTestTimeout <= 0 {
cfg.PerTestTimeout = 5 * time.Second
}
if cfg.MaxRetries < 0 {
cfg.MaxRetries = 0
}
if cfg.MaxRetries == 0 {
// Distinguish "explicit 0" from "unset" — spec says default is 1.
// applyDefaults runs on a copy of the caller's Config; we treat
// a literal zero as "use default" so a fresh `Config{}` works.
cfg.MaxRetries = 1
}
if cfg.RetryBackoff < 0 {
cfg.RetryBackoff = 500 * time.Millisecond
}
if cfg.RetryBackoff == 0 {
cfg.RetryBackoff = 500 * time.Millisecond
}
if cfg.DiscordGateway == "" {
cfg.DiscordGateway = "gateway.discord.gg:443"
}
if cfg.DiscordAPI == "" {
cfg.DiscordAPI = "https://discord.com/api/v9/gateway"
}
if cfg.StunServer == "" {
cfg.StunServer = "stun.l.google.com:19302"
}
return cfg
}
// Run executes the 7-step diagnostic and streams Results on the returned
// channel. The channel is closed when the run finishes (or is cancelled).
//
// Cancel ctx to abort: the in-flight test emits a Failed Result with
// Error="cancelled", and remaining tests each emit a single Skipped Result.
func Run(ctx context.Context, cfg Config) <-chan Result {
cfg = applyDefaults(cfg)
ch := make(chan Result, 16)
go func() {
defer close(ch)
e := &executor{ctx: ctx, cfg: cfg, ch: ch}
defer e.cleanup()
e.runTCP()
e.runGreet()
if cfg.UseAuth {
e.runAuth()
}
e.runConnect()
e.runUDP()
e.runStun()
e.runAPI()
}()
return ch
}
// executor carries shared state across the 7 test methods.
type executor struct {
ctx context.Context
cfg Config
ch chan<- Result
// tcpConn is opened in runTCP and reused by greet/auth/connect.
tcpConn net.Conn
// udpConn2 is the SECOND TCP control channel opened in runUDP.
// Must stay alive until stun finishes — the SOCKS5 spec requires
// the control TCP connection to remain up for the relay to be
// valid.
udpConn2 net.Conn
// udpRelay is the UDP relay endpoint announced by the proxy in
// the UDP ASSOCIATE reply.
udpRelay *net.UDPAddr
// udpClient is our local UDP socket used to talk to the relay.
udpClient net.PacketConn
// Step gating: each xOK is set true on success.
tcpOK, greetOK, authOK, connectOK, udpOK bool
// Cancellation latch. Once any test emits a "cancelled" failure,
// remaining tests emit a single Skipped result with the same reason.
cancelled bool
}
// cleanup closes any state opened during the run.
func (e *executor) cleanup() {
if e.tcpConn != nil {
_ = e.tcpConn.Close()
}
if e.udpConn2 != nil {
_ = e.udpConn2.Close()
}
if e.udpClient != nil {
_ = e.udpClient.Close()
}
}
// emit sends a Result on the channel, respecting ctx so a stalled consumer
// doesn't block us forever.
func (e *executor) emit(r Result) {
select {
case e.ch <- r:
case <-e.ctx.Done():
// Best-effort: try once more so we don't drop user-visible
// information just because cancel raced the send.
select {
case e.ch <- r:
default:
}
}
}
// emitSkipped pushes a single skipped Result with a constant reason.
func (e *executor) emitSkipped(id, reason string) {
e.emit(Result{ID: id, Status: StatusSkipped, Error: reason})
}
// emitCancelled pushes a single failed Result with Error="cancelled".
func (e *executor) emitCancelled(id string, attempt int, dur time.Duration) {
e.cancelled = true
e.emit(Result{
ID: id,
Status: StatusFailed,
Error: "cancelled",
Hint: hintFor(id, context.Canceled),
Attempt: attempt,
Duration: dur,
})
}
// shouldSkip checks high-level guard conditions and emits the appropriate
// pre-test Result if we shouldn't run. Returns true if the caller should
// abort the test.
func (e *executor) shouldSkip(id string, depOK bool) bool {
if e.cancelled {
e.emitSkipped(id, "cancelled")
return true
}
if !depOK {
e.emitSkipped(id, skipReason)
return true
}
if err := e.ctx.Err(); err != nil {
e.emitCancelled(id, 1, 0)
return true
}
return false
}
const skipReason = "depends on previous failed step"
// rawHexRE pulls "...(raw=DEADBEEF)" out of a wrapped error string.
var rawHexRE = regexp.MustCompile(`\(raw=([0-9a-fA-F]+)\)`)
// extractRawHex pulls the hex payload out of our `(raw=XX...)` error
// wrapping convention. Returns "" if absent.
func extractRawHex(s string) string {
m := rawHexRE.FindStringSubmatch(s)
if len(m) == 2 {
return m[1]
}
return ""
}
// runAttempt is the inner loop shared by all tests. It handles emitting
// running/passed/failed results, retry classification and backoff.
//
// run does the actual work for one attempt and returns metric + err.
func (e *executor) runAttempt(id string, run func(ctx context.Context) (string, error)) (ok bool) {
maxAttempts := 1 + e.cfg.MaxRetries
for attempt := 1; attempt <= maxAttempts; attempt++ {
if err := e.ctx.Err(); err != nil {
e.emitCancelled(id, attempt, 0)
return false
}
// Emit running for this attempt.
e.emit(Result{ID: id, Status: StatusRunning, Attempt: attempt})
attemptCtx, cancel := context.WithTimeout(e.ctx, e.cfg.PerTestTimeout)
start := time.Now()
metric, err := run(attemptCtx)
dur := time.Since(start)
cancel()
if err == nil {
e.emit(Result{
ID: id,
Status: StatusPassed,
Metric: metric,
Attempt: attempt,
Duration: dur,
})
return true
}
// Parent-ctx cancelled? Emit cancelled and stop (no retry
// into a cancelled context). We check the PARENT ctx, not
// attemptCtx (which always expires after PerTestTimeout).
if e.ctx.Err() != nil {
e.emitCancelled(id, attempt, dur)
return false
}
// Per-attempt deadline expired (PerTestTimeout fired) —
// treat as a transient timeout. We need to override
// classifyError here because err's chain contains
// context.DeadlineExceeded (joinCtxErr embeds attemptCtx.Err)
// which classifyError treats as permanent. The semantic
// distinction is "our per-test budget vs caller cancel" —
// the former is exactly what retries are for.
var class Classification
if isContextErr(err) {
// Parent ctx is fine (checked above), so this is a
// per-attempt deadline = transient.
class = ClassificationTransient
} else {
class = classifyError(err)
}
canRetry := class == ClassificationTransient && attempt < maxAttempts
if canRetry {
// Failed-but-will-retry: still emit Failed for the
// observer (so they see the attempt happened), but
// loop. Some consumers only show the LAST failure;
// emitting every attempt is the more transparent
// option. Spec says "emit running + passed/failed
// per attempt".
e.emit(Result{
ID: id,
Status: StatusFailed,
Error: err.Error(),
Hint: hintFor(id, err),
RawHex: extractRawHex(err.Error()),
Attempt: attempt,
Duration: dur,
})
// Sleep with cancel awareness.
select {
case <-time.After(e.cfg.RetryBackoff):
case <-e.ctx.Done():
// Caller cancelled during backoff — stop without retry.
return false
}
continue
}
// Final failure (permanent or out of retries).
e.emit(Result{
ID: id,
Status: StatusFailed,
Error: err.Error(),
Hint: hintFor(id, err),
RawHex: extractRawHex(err.Error()),
Attempt: attempt,
Duration: dur,
})
return false
}
return false
}
// proxyAddr returns the SOCKS5 proxy host:port string.
func (e *executor) proxyAddr() string {
return net.JoinHostPort(e.cfg.ProxyHost, strconv.Itoa(e.cfg.ProxyPort))
}
// runTCP — Test 1: dial the proxy.
func (e *executor) runTCP() {
if e.cancelled {
e.emitSkipped("tcp", "cancelled")
return
}
if err := e.ctx.Err(); err != nil {
e.emitCancelled("tcp", 1, 0)
return
}
ok := e.runAttempt("tcp", func(ctx context.Context) (string, error) {
// Close any prior conn from a previous attempt.
if e.tcpConn != nil {
_ = e.tcpConn.Close()
e.tcpConn = nil
}
var d net.Dialer
start := time.Now()
conn, err := d.DialContext(ctx, "tcp", e.proxyAddr())
if err != nil {
return "", err
}
e.tcpConn = conn
ms := time.Since(start).Milliseconds()
return fmt.Sprintf("%dms", ms), nil
})
e.tcpOK = ok
}
// runGreet — Test 2: SOCKS5 method negotiation.
func (e *executor) runGreet() {
if e.shouldSkip("greet", e.tcpOK) {
return
}
ok := e.runAttempt("greet", func(ctx context.Context) (string, error) {
// Each attempt needs a fresh conn — the previous attempt
// may have written bytes that left the proxy mid-handshake.
if err := e.redialTCPIfNeeded(ctx); err != nil {
return "", err
}
method, _, err := socks5Greeting(ctx, e.tcpConn, e.cfg.UseAuth)
if err != nil {
// Force redial on next attempt.
_ = e.tcpConn.Close()
e.tcpConn = nil
return "", err
}
switch method {
case 0x00:
return "no auth", nil
case 0x02:
return "auth required", nil
default:
return fmt.Sprintf("method=0x%02X", method), nil
}
})
e.greetOK = ok
}
// redialTCPIfNeeded drops and re-opens tcpConn. This is called at the
// start of each greet/auth/connect attempt after the first to give every
// attempt a fresh connection — the proxy may have advanced state on the
// previous attempt that we can't roll back.
//
// On the FIRST attempt for greet, we expect tcpConn to already be open
// (from runTCP). The simple rule: if tcpConn==nil, redial; otherwise
// keep it. The retry path closes tcpConn before re-running this loop.
func (e *executor) redialTCPIfNeeded(ctx context.Context) error {
if e.tcpConn != nil {
return nil
}
var d net.Dialer
conn, err := d.DialContext(ctx, "tcp", e.proxyAddr())
if err != nil {
return err
}
e.tcpConn = conn
return nil
}
// runAuth — Test 3: user/pass sub-negotiation. Only emitted when UseAuth.
func (e *executor) runAuth() {
if e.shouldSkip("auth", e.greetOK) {
return
}
ok := e.runAttempt("auth", func(ctx context.Context) (string, error) {
// On retry: drop the conn and start fresh from greet+auth.
// (We can't replay only auth — the proxy has already moved
// past method negotiation.)
// retry detection: if we have nil tcpConn here, we lost it
// in a prior failed attempt and need to redial+regreet.
if e.tcpConn == nil {
var d net.Dialer
conn, derr := d.DialContext(ctx, "tcp", e.proxyAddr())
if derr != nil {
return "", derr
}
e.tcpConn = conn
if _, _, gerr := socks5Greeting(ctx, e.tcpConn, true); gerr != nil {
return "", gerr
}
}
_, err := socks5Auth(ctx, e.tcpConn, e.cfg.ProxyLogin, e.cfg.ProxyPassword)
if err != nil {
// Force redial+regreet on next attempt.
_ = e.tcpConn.Close()
e.tcpConn = nil
return "", err
}
return "ok", nil
})
e.authOK = ok
}
// runConnect — Test 4: SOCKS5 CONNECT to Discord gateway.
func (e *executor) runConnect() {
dep := e.greetOK && (!e.cfg.UseAuth || e.authOK)
if e.shouldSkip("connect", dep) {
return
}
host, portStr, splitErr := net.SplitHostPort(e.cfg.DiscordGateway)
if splitErr != nil {
e.emit(Result{
ID: "connect",
Status: StatusFailed,
Error: fmt.Sprintf("bad DiscordGateway %q: %s", e.cfg.DiscordGateway, splitErr.Error()),
Hint: hintFor("connect", splitErr),
Attempt: 1,
})
return
}
port64, perr := strconv.ParseUint(portStr, 10, 16)
if perr != nil {
e.emit(Result{
ID: "connect",
Status: StatusFailed,
Error: fmt.Sprintf("bad DiscordGateway port %q: %s", portStr, perr.Error()),
Hint: hintFor("connect", perr),
Attempt: 1,
})
return
}
port := uint16(port64)
ok := e.runAttempt("connect", func(ctx context.Context) (string, error) {
// On retry: redial+greet+(auth) before re-CONNECT.
if e.tcpConn == nil {
var d net.Dialer
conn, derr := d.DialContext(ctx, "tcp", e.proxyAddr())
if derr != nil {
return "", derr
}
e.tcpConn = conn
if _, _, gerr := socks5Greeting(ctx, e.tcpConn, e.cfg.UseAuth); gerr != nil {
return "", gerr
}
if e.cfg.UseAuth {
if _, aerr := socks5Auth(ctx, e.tcpConn, e.cfg.ProxyLogin, e.cfg.ProxyPassword); aerr != nil {
return "", aerr
}
}
}
_, err := socks5Connect(ctx, e.tcpConn, host, port)
if err != nil {
_ = e.tcpConn.Close()
e.tcpConn = nil
return "", err
}
return "REP=00", nil
})
e.connectOK = ok
}
// runUDP — Test 5: open second TCP control channel and UDP ASSOCIATE.
func (e *executor) runUDP() {
dep := e.greetOK && (!e.cfg.UseAuth || e.authOK)
if e.shouldSkip("udp", dep) {
return
}
ok := e.runAttempt("udp", func(ctx context.Context) (string, error) {
// Always use a fresh control channel for UDP ASSOCIATE.
if e.udpConn2 != nil {
_ = e.udpConn2.Close()
e.udpConn2 = nil
}
var d net.Dialer
conn, err := d.DialContext(ctx, "tcp", e.proxyAddr())
if err != nil {
return "", err
}
e.udpConn2 = conn
if _, _, gerr := socks5Greeting(ctx, conn, e.cfg.UseAuth); gerr != nil {
return "", gerr
}
if e.cfg.UseAuth {
if _, aerr := socks5Auth(ctx, conn, e.cfg.ProxyLogin, e.cfg.ProxyPassword); aerr != nil {
return "", aerr
}
}
relay, _, uerr := socks5UDPAssociate(ctx, conn)
if uerr != nil {
return "", uerr
}
e.udpRelay = relay
return fmt.Sprintf("relay %s:%d", relay.IP.String(), relay.Port), nil
})
e.udpOK = ok
}
// runStun — Test 6: STUN through the SOCKS5 UDP relay.
func (e *executor) runStun() {
if e.shouldSkip("stun", e.udpOK) {
return
}
host, portStr, splitErr := net.SplitHostPort(e.cfg.StunServer)
if splitErr != nil {
e.emit(Result{
ID: "stun",
Status: StatusFailed,
Error: fmt.Sprintf("bad StunServer %q: %s", e.cfg.StunServer, splitErr.Error()),
Hint: hintFor("stun", splitErr),
Attempt: 1,
})
return
}
port64, perr := strconv.ParseUint(portStr, 10, 16)
if perr != nil {
e.emit(Result{
ID: "stun",
Status: StatusFailed,
Error: fmt.Sprintf("bad StunServer port %q: %s", portStr, perr.Error()),
Hint: hintFor("stun", perr),
Attempt: 1,
})
return
}
stunPort := uint16(port64)
e.runAttempt("stun", func(ctx context.Context) (string, error) {
// Resolve STUN host to an IPv4. We don't support IPv6 STUN.
ips, err := (&net.Resolver{}).LookupIP(ctx, "ip4", host)
if err != nil {
return "", fmt.Errorf("stun: lookup %s: %w", host, err)
}
var stunIP4 net.IP
for _, ip := range ips {
if v4 := ip.To4(); v4 != nil {
stunIP4 = v4
break
}
}
if stunIP4 == nil {
return "", errors.New("stun: no IPv4 for STUN server")
}
// Open a fresh local UDP socket per attempt.
if e.udpClient != nil {
_ = e.udpClient.Close()
e.udpClient = nil
}
pc, err := net.ListenPacket("udp", ":0")
if err != nil {
return "", fmt.Errorf("stun: listen udp: %w", err)
}
e.udpClient = pc
if dl, ok := ctx.Deadline(); ok {
_ = pc.SetDeadline(dl)
}
// Build SOCKS5 UDP datagram: RSV(2)=0 FRAG=0 ATYP=01 IP(4) PORT(2) STUN(20)
txID, err := NewTransactionID()
if err != nil {
return "", err
}
stunReq := EncodeBindingRequest(txID)
dgram := make([]byte, 0, 10+len(stunReq))
dgram = append(dgram, 0x00, 0x00, 0x00, 0x01)
dgram = append(dgram, stunIP4...)
var portBuf [2]byte
binary.BigEndian.PutUint16(portBuf[:], stunPort)
dgram = append(dgram, portBuf[:]...)
dgram = append(dgram, stunReq...)
start := time.Now()
if _, werr := pc.WriteTo(dgram, e.udpRelay); werr != nil {
return "", fmt.Errorf("stun: write to relay: %w", werr)
}
readBuf := make([]byte, 1500)
n, _, rerr := pc.ReadFrom(readBuf)
if rerr != nil {
return "", fmt.Errorf("stun: read from relay: %w", rerr)
}
rtt := time.Since(start)
if n < 10 {
return "", fmt.Errorf("stun: relay reply too short (%d bytes)", n)
}
// Validate SOCKS5 UDP wrapper: RSV=00 00, FRAG=00, ATYP=01.
if readBuf[0] != 0x00 || readBuf[1] != 0x00 || readBuf[2] != 0x00 {
return "", fmt.Errorf("stun: bad SOCKS5 UDP header (raw=%x)", readBuf[:10])
}
// We sent IPv4, expect IPv4 reply.
var hdrLen int
switch readBuf[3] {
case 0x01:
hdrLen = 10
case 0x04:
hdrLen = 22
case 0x03:
if n < 5 {
return "", fmt.Errorf("stun: truncated SOCKS5 UDP domain header")
}
hdrLen = 4 + 1 + int(readBuf[4]) + 2
default:
return "", fmt.Errorf("stun: unknown SOCKS5 UDP ATYP=0x%02X", readBuf[3])
}
if n < hdrLen {
return "", fmt.Errorf("stun: relay reply truncated (%d < %d)", n, hdrLen)
}
stunReply := readBuf[hdrLen:n]
_, _, perr := ParseBindingResponse(stunReply, txID)
if perr != nil {
return "", perr
}
return fmt.Sprintf("%dms RTT", rtt.Milliseconds()), nil
})
}
// runAPI — Test 7: HTTP GET Discord API gateway URL through the proxy.
func (e *executor) runAPI() {
if e.shouldSkip("api", e.connectOK) {
return
}
e.runAttempt("api", func(ctx context.Context) (string, error) {
transport := &http.Transport{
DialContext: func(ctx context.Context, _network, addr string) (net.Conn, error) {
return e.dialThroughProxy(ctx, addr)
},
TLSClientConfig: &tls.Config{},
DisableKeepAlives: true,
ResponseHeaderTimeout: e.cfg.PerTestTimeout,
}
client := &http.Client{
Transport: transport,
Timeout: e.cfg.PerTestTimeout,
}
req, err := http.NewRequestWithContext(ctx, "GET", e.cfg.DiscordAPI, nil)
if err != nil {
return "", err
}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode == 200 || resp.StatusCode == 401 {
return fmt.Sprintf("HTTP %d", resp.StatusCode), nil
}
return "", fmt.Errorf("api: HTTP %d", resp.StatusCode)
})
}
// dialThroughProxy is the http.Transport.DialContext used by runAPI. It
// opens a TCP connection to the SOCKS5 proxy, performs greet+(auth)+CONNECT
// to addr, then returns the established conn.
func (e *executor) dialThroughProxy(ctx context.Context, addr string) (net.Conn, error) {
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
return nil, fmt.Errorf("api: split %q: %w", addr, err)
}
port64, err := strconv.ParseUint(portStr, 10, 16)
if err != nil {
return nil, fmt.Errorf("api: bad port %q: %w", portStr, err)
}
port := uint16(port64)
var d net.Dialer
conn, err := d.DialContext(ctx, "tcp", e.proxyAddr())
if err != nil {
return nil, err
}
if _, _, gerr := socks5Greeting(ctx, conn, e.cfg.UseAuth); gerr != nil {
_ = conn.Close()
return nil, gerr
}
if e.cfg.UseAuth {
if _, aerr := socks5Auth(ctx, conn, e.cfg.ProxyLogin, e.cfg.ProxyPassword); aerr != nil {
_ = conn.Close()
return nil, aerr
}
}
if _, cerr := socks5Connect(ctx, conn, host, port); cerr != nil {
_ = conn.Close()
return nil, cerr
}
// Clear the deadline socks5* primitives applied — http.Transport
// manages timing past this point.
_ = conn.SetDeadline(time.Time{})
return conn, nil
}
+881
View File
@@ -0,0 +1,881 @@
package checker
import (
"context"
"encoding/binary"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// fakeProxy is a test SOCKS5 server with per-scenario behaviour. It also
// optionally runs a UDP relay that echoes STUN-shaped responses crafted
// to look like Binding Success Responses with XOR-MAPPED-ADDRESS pointing
// back at the client's source IP.
//
// The TCP-side splice for the API test detects CONNECT requests targeting
// apiTargetHost:apiTargetPort and, instead of sending a synthetic reply,
// dials apiTargetAddr and bridges the two conns. This lets a real
// httptest.NewServer be used as the API endpoint.
type fakeProxy struct {
t *testing.T
addr string
scenario string
udpRelayAddr *net.UDPAddr // announced in UDP ASSOCIATE reply
// API-passthrough hook: when a CONNECT targets this host:port,
// the proxy dials apiTargetAddr and splices the conns instead of
// sending a fake REP=00 + close.
apiTargetHost string
apiTargetPort uint16
apiTargetAddr string
// timeoutFirstAttempt stalls the first connection on greet to
// drive a timeout. Subsequent connections behave normally.
timeoutFirstAttempt atomic.Int32
}
// newFakeProxy starts a TCP listener and a UDP relay (if relevant for
// the scenario). Both are torn down via t.Cleanup.
func newFakeProxy(t *testing.T, scenario string) *fakeProxy {
t.Helper()
fp := &fakeProxy{t: t, scenario: scenario}
// Start UDP relay for scenarios that need it.
if needsUDPRelay(scenario) {
ua, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
require.NoError(t, err)
uconn, err := net.ListenUDP("udp", ua)
require.NoError(t, err)
fp.udpRelayAddr = uconn.LocalAddr().(*net.UDPAddr)
t.Cleanup(func() { _ = uconn.Close() })
go fp.runRelay(uconn)
}
// Start TCP listener.
ln, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
fp.addr = ln.Addr().String()
if scenario == "timeout_then_ok" {
fp.timeoutFirstAttempt.Store(1)
}
t.Cleanup(func() { _ = ln.Close() })
go fp.serve(ln)
return fp
}
func needsUDPRelay(scenario string) bool {
switch scenario {
case "happy_no_auth", "happy_with_auth", "udp_unsupported", "connect_refused", "timeout_then_ok":
return true
default:
return false
}
}
// serve accepts connections forever until the listener is closed.
func (fp *fakeProxy) serve(ln net.Listener) {
for {
conn, err := ln.Accept()
if err != nil {
return
}
go fp.handle(conn)
}
}
func (fp *fakeProxy) handle(conn net.Conn) {
defer conn.Close()
_ = conn.SetDeadline(time.Now().Add(10 * time.Second))
// First-attempt-timeout scenario: read greet, then sleep past
// the per-test timeout to force a deadline error.
if fp.timeoutFirstAttempt.CompareAndSwap(1, 0) {
buf := make([]byte, 1024)
_, _ = conn.Read(buf)
time.Sleep(2 * time.Second)
return
}
br := newPeekReader(conn)
// Step 1: greeting.
greet, err := readGreeting(br)
if err != nil {
return
}
switch fp.scenario {
case "all_methods_rejected":
_, _ = conn.Write([]byte{0x05, 0xFF})
return
case "auth_rejected":
// Server picks user/pass.
_, _ = conn.Write([]byte{0x05, 0x02})
// Read auth.
_ = readAuth(br)
_, _ = conn.Write([]byte{0x01, 0x01}) // status=fail
return
}
// Method selection: scenarios that involve auth force 0x02 if
// offered; otherwise prefer 0x00.
preferAuth := fp.scenario == "happy_with_auth"
chosen := byte(0xFF)
if preferAuth {
for _, m := range greet.methods {
if m == 0x02 {
chosen = 0x02
break
}
}
}
if chosen == 0xFF {
for _, m := range greet.methods {
if m == 0x00 {
chosen = 0x00
break
}
}
}
if chosen == 0xFF {
for _, m := range greet.methods {
if m == 0x02 {
chosen = 0x02
break
}
}
}
if chosen == 0xFF {
_, _ = conn.Write([]byte{0x05, 0xFF})
return
}
_, _ = conn.Write([]byte{0x05, chosen})
if chosen == 0x02 {
if err := readAuth(br); err != nil {
return
}
_, _ = conn.Write([]byte{0x01, 0x00}) // success
}
// Step 2: read CMD request.
cmdReq, err := readSocks5Request(br)
if err != nil {
return
}
switch cmdReq.cmd {
case 0x01: // CONNECT
switch fp.scenario {
case "connect_refused":
_, _ = conn.Write([]byte{0x05, 0x05, 0x00, 0x01, 0, 0, 0, 0, 0, 0})
return
}
// API passthrough?
if fp.apiTargetHost != "" && cmdReq.host == fp.apiTargetHost && cmdReq.port == fp.apiTargetPort {
// Dial real target, splice.
target, derr := net.Dial("tcp", fp.apiTargetAddr)
if derr != nil {
_, _ = conn.Write([]byte{0x05, 0x05, 0x00, 0x01, 0, 0, 0, 0, 0, 0})
return
}
_, _ = conn.Write([]byte{0x05, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 0})
// Clear deadline for the splice.
_ = conn.SetDeadline(time.Time{})
_ = target.SetDeadline(time.Time{})
// Splice. We can't get already-buffered bytes back
// out of br trivially, but the client only sent the
// 7+len bytes for CONNECT and we read exactly that —
// so br has no leftover buffered bytes here.
done := make(chan struct{}, 2)
go func() { _, _ = io.Copy(target, conn); done <- struct{}{} }()
go func() { _, _ = io.Copy(conn, target); done <- struct{}{} }()
<-done
_ = target.Close()
return
}
// Default happy CONNECT.
_, _ = conn.Write([]byte{0x05, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 0})
// Keep conn open briefly so client doesn't see EOF before
// reading the 10-byte reply.
time.Sleep(50 * time.Millisecond)
return
case 0x03: // UDP ASSOCIATE
if fp.scenario == "udp_unsupported" {
_, _ = conn.Write([]byte{0x05, 0x07, 0x00, 0x01, 0, 0, 0, 0, 0, 0})
return
}
// Reply with our UDP relay endpoint.
ip4 := fp.udpRelayAddr.IP.To4()
if ip4 == nil {
_, _ = conn.Write([]byte{0x05, 0x01, 0x00, 0x01, 0, 0, 0, 0, 0, 0})
return
}
reply := []byte{0x05, 0x00, 0x00, 0x01,
ip4[0], ip4[1], ip4[2], ip4[3],
byte(fp.udpRelayAddr.Port >> 8), byte(fp.udpRelayAddr.Port)}
_, _ = conn.Write(reply)
// Keep TCP control channel open so the relay stays valid.
// The client will close conn when done. We just block on
// read until peer closes.
_ = conn.SetDeadline(time.Time{})
_, _ = io.Copy(io.Discard, conn)
return
default:
_, _ = conn.Write([]byte{0x05, 0x07, 0x00, 0x01, 0, 0, 0, 0, 0, 0})
return
}
}
// runRelay reads SOCKS5 UDP datagrams, parses the embedded STUN binding
// request, and replies with a synthetic Binding Success Response carrying
// XOR-MAPPED-ADDRESS = client's source.
func (fp *fakeProxy) runRelay(uconn *net.UDPConn) {
buf := make([]byte, 2048)
for {
n, src, err := uconn.ReadFromUDP(buf)
if err != nil {
return
}
if n < 10 {
continue
}
// Parse SOCKS5 UDP wrapper. Expect ATYP=01.
if buf[0] != 0x00 || buf[1] != 0x00 || buf[2] != 0x00 {
continue
}
var hdrLen int
switch buf[3] {
case 0x01:
hdrLen = 10
case 0x04:
hdrLen = 22
case 0x03:
if n < 5 {
continue
}
hdrLen = 4 + 1 + int(buf[4]) + 2
default:
continue
}
if n < hdrLen+20 {
continue
}
stunReq := buf[hdrLen:n]
// Expect a binding request.
if len(stunReq) < 20 {
continue
}
var txID [12]byte
copy(txID[:], stunReq[8:20])
// Build XOR-MAPPED-ADDRESS attribute value for src.
ip4 := src.IP.To4()
if ip4 == nil {
continue
}
xport := uint16(src.Port) ^ uint16(stunMagicCookie>>16)
xaddr := binary.BigEndian.Uint32(ip4) ^ stunMagicCookie
// Build STUN Binding Success Response.
stunResp := make([]byte, 20+12) // header + 4-byte attr header + 8-byte XMA
binary.BigEndian.PutUint16(stunResp[0:2], stunBindingSuccessResponse)
binary.BigEndian.PutUint16(stunResp[2:4], 12) // attr length
binary.BigEndian.PutUint32(stunResp[4:8], stunMagicCookie)
copy(stunResp[8:20], txID[:])
// Attribute header: type, length.
binary.BigEndian.PutUint16(stunResp[20:22], stunAttrXORMappedAddress)
binary.BigEndian.PutUint16(stunResp[22:24], 8)
// Value: 0, family=01, x-port, x-addr.
stunResp[24] = 0
stunResp[25] = 0x01
binary.BigEndian.PutUint16(stunResp[26:28], xport)
binary.BigEndian.PutUint32(stunResp[28:32], xaddr)
// Wrap in SOCKS5 UDP header.
out := make([]byte, 0, 10+len(stunResp))
out = append(out, 0x00, 0x00, 0x00, 0x01)
out = append(out, ip4...)
var portBuf [2]byte
binary.BigEndian.PutUint16(portBuf[:], uint16(src.Port))
out = append(out, portBuf[:]...)
out = append(out, stunResp...)
_, _ = uconn.WriteToUDP(out, src)
}
}
// peekReader wraps net.Conn so we can read variable-length SOCKS5 frames.
type peekReader struct {
r io.Reader
}
func newPeekReader(r io.Reader) *peekReader { return &peekReader{r: r} }
func (p *peekReader) ReadFull(n int) ([]byte, error) {
buf := make([]byte, n)
if _, err := io.ReadFull(p.r, buf); err != nil {
return nil, err
}
return buf, nil
}
type greetingMsg struct {
methods []byte
}
func readGreeting(r *peekReader) (*greetingMsg, error) {
hdr, err := r.ReadFull(2)
if err != nil {
return nil, err
}
if hdr[0] != 0x05 {
return nil, fmt.Errorf("bad ver")
}
nMethods := int(hdr[1])
methods, err := r.ReadFull(nMethods)
if err != nil {
return nil, err
}
return &greetingMsg{methods: methods}, nil
}
func readAuth(r *peekReader) error {
hdr, err := r.ReadFull(2)
if err != nil {
return err
}
if hdr[0] != 0x01 {
return fmt.Errorf("bad auth ver")
}
ulen := int(hdr[1])
if _, err := r.ReadFull(ulen); err != nil {
return err
}
plenBuf, err := r.ReadFull(1)
if err != nil {
return err
}
plen := int(plenBuf[0])
if _, err := r.ReadFull(plen); err != nil {
return err
}
return nil
}
type socks5Request struct {
cmd byte
atyp byte
host string
port uint16
}
func readSocks5Request(r *peekReader) (*socks5Request, error) {
hdr, err := r.ReadFull(4)
if err != nil {
return nil, err
}
if hdr[0] != 0x05 {
return nil, fmt.Errorf("bad ver")
}
out := &socks5Request{cmd: hdr[1], atyp: hdr[3]}
switch hdr[3] {
case 0x01:
ipBuf, err := r.ReadFull(4)
if err != nil {
return nil, err
}
out.host = net.IP(ipBuf).String()
case 0x03:
lenBuf, err := r.ReadFull(1)
if err != nil {
return nil, err
}
hostBuf, err := r.ReadFull(int(lenBuf[0]))
if err != nil {
return nil, err
}
out.host = string(hostBuf)
case 0x04:
ipBuf, err := r.ReadFull(16)
if err != nil {
return nil, err
}
out.host = net.IP(ipBuf).String()
default:
return nil, fmt.Errorf("bad atyp")
}
portBuf, err := r.ReadFull(2)
if err != nil {
return nil, err
}
out.port = binary.BigEndian.Uint16(portBuf)
return out, nil
}
func methodChosen(cur, _ byte) bool { return cur != 0xFF }
// drainResults pulls every Result off ch into a slice (with a hard timeout
// so a hung implementation doesn't hang the test).
func drainResults(t *testing.T, ch <-chan Result, timeout time.Duration) []Result {
t.Helper()
var out []Result
deadline := time.NewTimer(timeout)
defer deadline.Stop()
for {
select {
case r, ok := <-ch:
if !ok {
return out
}
out = append(out, r)
case <-deadline.C:
t.Fatalf("checker.Run did not finish within %s; got %d results so far: %+v", timeout, len(out), out)
}
}
}
// finalByID returns the LAST result emitted for the given test id, or zero.
func finalByID(results []Result, id string) (Result, bool) {
for i := len(results) - 1; i >= 0; i-- {
if results[i].ID == id && results[i].Status != StatusRunning {
return results[i], true
}
}
return Result{}, false
}
// hostPort splits an addr returned by net.Listener.Addr().String().
func hostPort(addr string) (string, int) {
host, p, err := net.SplitHostPort(addr)
if err != nil {
panic(err)
}
pn, err := strconv.Atoi(p)
if err != nil {
panic(err)
}
return host, pn
}
// proxyConfig builds a Config pointed at the given fakeProxy with sane
// short timeouts for tests.
func proxyConfig(fp *fakeProxy, useAuth bool) Config {
host, port := hostPort(fp.addr)
cfg := Config{
ProxyHost: host,
ProxyPort: port,
UseAuth: useAuth,
PerTestTimeout: 500 * time.Millisecond,
MaxRetries: 1,
RetryBackoff: 30 * time.Millisecond,
}
if useAuth {
cfg.ProxyLogin = "u"
cfg.ProxyPassword = "p"
}
if fp.udpRelayAddr != nil {
// no-op; relay is announced via UDP ASSOCIATE reply
_ = fp.udpRelayAddr
}
return cfg
}
// stubAPIServer starts an httptest server returning HTTP 200 with a tiny
// JSON body, plus arranges fakeProxy to splice CONNECTs targeting it.
func stubAPIServer(t *testing.T, fp *fakeProxy, status int) string {
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(status)
_, _ = io.WriteString(w, `{"url":"wss://gateway.discord.gg"}`)
}))
t.Cleanup(srv.Close)
// Parse the test server's host:port.
host, port := hostPort(strings.TrimPrefix(srv.URL, "http://"))
fp.apiTargetHost = host
fp.apiTargetPort = uint16(port)
fp.apiTargetAddr = srv.Listener.Addr().String()
return srv.URL + "/api/v9/gateway"
}
// stubGatewayServer stands in for gateway.discord.gg:443 so the connect
// test has a real target. We don't actually speak TLS — the client's
// CONNECT only reads the 10-byte SOCKS5 reply, so as long as we send
// REP=00 the test passes. proxyConfig points DiscordGateway at this addr.
//
// We piggy-back on a TCP listener that does nothing.
func stubGatewayAddr(t *testing.T) string {
t.Helper()
ln, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
t.Cleanup(func() { _ = ln.Close() })
go func() {
for {
conn, err := ln.Accept()
if err != nil {
return
}
// Just keep open; the splice will read/write nothing
// useful (the SOCKS5 reply is fake REP=00 from the
// proxy itself, not from us — see fakeProxy.handle).
go func(c net.Conn) {
defer c.Close()
_, _ = io.Copy(io.Discard, c)
}(conn)
}
}()
return ln.Addr().String()
}
func TestRun_HappyNoAuth(t *testing.T) {
fp := newFakeProxy(t, "happy_no_auth")
cfg := proxyConfig(fp, false)
cfg.DiscordGateway = stubGatewayAddr(t)
cfg.DiscordAPI = stubAPIServer(t, fp, 200)
cfg.StunServer = "127.0.0.1:1" // unused: we patch via direct relay; see below
// We don't actually need DNS — runStun does net.LookupIP("ip4", host).
// Use a literal IP so the resolver returns it.
cfg.StunServer = "127.0.0.1:65000"
ch := Run(context.Background(), cfg)
results := drainResults(t, ch, 10*time.Second)
expected := []string{"tcp", "greet", "connect", "udp", "stun", "api"}
finals := map[string]Result{}
for _, id := range expected {
r, ok := finalByID(results, id)
require.True(t, ok, "missing final result for %q in %+v", id, results)
finals[id] = r
}
for _, id := range expected {
assert.Equal(t, StatusPassed, finals[id].Status, "test %s should pass; got %+v", id, finals[id])
}
// auth must not appear (UseAuth=false).
for _, r := range results {
assert.NotEqual(t, "auth", r.ID, "auth must not be emitted when UseAuth=false")
}
// Metrics format spot-checks.
assert.Contains(t, finals["greet"].Metric, "no auth")
assert.Equal(t, "REP=00", finals["connect"].Metric)
assert.Equal(t, "HTTP 200", finals["api"].Metric)
}
func TestRun_HappyWithAuth(t *testing.T) {
fp := newFakeProxy(t, "happy_with_auth")
cfg := proxyConfig(fp, true)
cfg.DiscordGateway = stubGatewayAddr(t)
cfg.DiscordAPI = stubAPIServer(t, fp, 200)
cfg.StunServer = "127.0.0.1:65000"
ch := Run(context.Background(), cfg)
results := drainResults(t, ch, 10*time.Second)
expected := []string{"tcp", "greet", "auth", "connect", "udp", "stun", "api"}
for _, id := range expected {
r, ok := finalByID(results, id)
require.True(t, ok, "missing %s; results=%+v", id, results)
assert.Equal(t, StatusPassed, r.Status, "id=%s", id)
}
r, _ := finalByID(results, "auth")
assert.Equal(t, "ok", r.Metric)
}
func TestRun_AuthRejected(t *testing.T) {
fp := newFakeProxy(t, "auth_rejected")
cfg := proxyConfig(fp, true)
cfg.DiscordGateway = stubGatewayAddr(t)
cfg.DiscordAPI = "http://127.0.0.1:1/api/v9/gateway"
cfg.StunServer = "127.0.0.1:65000"
ch := Run(context.Background(), cfg)
results := drainResults(t, ch, 10*time.Second)
// tcp + greet pass, auth fails.
rTCP, _ := finalByID(results, "tcp")
assert.Equal(t, StatusPassed, rTCP.Status)
rG, _ := finalByID(results, "greet")
assert.Equal(t, StatusPassed, rG.Status)
rA, ok := finalByID(results, "auth")
require.True(t, ok)
assert.Equal(t, StatusFailed, rA.Status)
assert.NotEmpty(t, rA.Hint)
for _, id := range []string{"connect", "udp", "stun", "api"} {
r, ok := finalByID(results, id)
require.True(t, ok, "missing %s", id)
assert.Equal(t, StatusSkipped, r.Status, "id=%s", id)
}
}
func TestRun_AllMethodsRejected(t *testing.T) {
fp := newFakeProxy(t, "all_methods_rejected")
cfg := proxyConfig(fp, false)
cfg.DiscordGateway = stubGatewayAddr(t)
cfg.DiscordAPI = "http://127.0.0.1:1/api/v9/gateway"
cfg.StunServer = "127.0.0.1:65000"
ch := Run(context.Background(), cfg)
results := drainResults(t, ch, 10*time.Second)
rTCP, _ := finalByID(results, "tcp")
assert.Equal(t, StatusPassed, rTCP.Status)
rG, ok := finalByID(results, "greet")
require.True(t, ok)
assert.Equal(t, StatusFailed, rG.Status)
assert.NotEmpty(t, rG.Hint)
for _, id := range []string{"connect", "udp", "stun", "api"} {
r, ok := finalByID(results, id)
require.True(t, ok, "missing %s", id)
assert.Equal(t, StatusSkipped, r.Status, "id=%s", id)
}
}
func TestRun_ConnectRefused(t *testing.T) {
fp := newFakeProxy(t, "connect_refused")
cfg := proxyConfig(fp, false)
cfg.DiscordGateway = stubGatewayAddr(t)
cfg.DiscordAPI = "http://127.0.0.1:1/api/v9/gateway"
cfg.StunServer = "127.0.0.1:65000"
ch := Run(context.Background(), cfg)
results := drainResults(t, ch, 10*time.Second)
rT, _ := finalByID(results, "tcp")
assert.Equal(t, StatusPassed, rT.Status)
rG, _ := finalByID(results, "greet")
assert.Equal(t, StatusPassed, rG.Status)
rC, ok := finalByID(results, "connect")
require.True(t, ok)
assert.Equal(t, StatusFailed, rC.Status)
assert.NotEmpty(t, rC.Hint)
assert.NotEmpty(t, rC.RawHex)
// udp goes through a SECOND conn → unaffected; should pass.
rU, _ := finalByID(results, "udp")
assert.Equal(t, StatusPassed, rU.Status, "udp should pass independently of connect")
// stun depends on udp → passes too.
rS, _ := finalByID(results, "stun")
assert.Equal(t, StatusPassed, rS.Status)
// api depends on connect → skipped.
rA, _ := finalByID(results, "api")
assert.Equal(t, StatusSkipped, rA.Status)
}
func TestRun_UDPUnsupported(t *testing.T) {
fp := newFakeProxy(t, "udp_unsupported")
cfg := proxyConfig(fp, false)
cfg.DiscordGateway = stubGatewayAddr(t)
cfg.DiscordAPI = stubAPIServer(t, fp, 200)
cfg.StunServer = "127.0.0.1:65000"
ch := Run(context.Background(), cfg)
results := drainResults(t, ch, 10*time.Second)
for _, id := range []string{"tcp", "greet", "connect"} {
r, _ := finalByID(results, id)
assert.Equal(t, StatusPassed, r.Status, "id=%s", id)
}
rU, _ := finalByID(results, "udp")
require.Equal(t, StatusFailed, rU.Status)
assert.NotEmpty(t, rU.Hint)
rS, _ := finalByID(results, "stun")
assert.Equal(t, StatusSkipped, rS.Status)
rA, _ := finalByID(results, "api")
assert.Equal(t, StatusPassed, rA.Status)
}
func TestRun_TimeoutThenOK(t *testing.T) {
fp := newFakeProxy(t, "timeout_then_ok")
cfg := proxyConfig(fp, false)
cfg.DiscordGateway = stubGatewayAddr(t)
cfg.DiscordAPI = stubAPIServer(t, fp, 401)
cfg.StunServer = "127.0.0.1:65000"
cfg.PerTestTimeout = 200 * time.Millisecond
cfg.RetryBackoff = 20 * time.Millisecond
cfg.MaxRetries = 1
ch := Run(context.Background(), cfg)
results := drainResults(t, ch, 15*time.Second)
// Find the greet results.
var greetEvents []Result
for _, r := range results {
if r.ID == "greet" {
greetEvents = append(greetEvents, r)
}
}
// Expect: running(1), failed(1), running(2), passed(2). 4 events.
require.Len(t, greetEvents, 4, "events=%+v all=%+v", greetEvents, results)
assert.Equal(t, StatusRunning, greetEvents[0].Status)
assert.Equal(t, 1, greetEvents[0].Attempt)
assert.Equal(t, StatusFailed, greetEvents[1].Status)
assert.Equal(t, 1, greetEvents[1].Attempt)
assert.Equal(t, StatusRunning, greetEvents[2].Status)
assert.Equal(t, 2, greetEvents[2].Attempt)
assert.Equal(t, StatusPassed, greetEvents[3].Status)
assert.Equal(t, 2, greetEvents[3].Attempt)
// All seven non-auth tests should ultimately pass.
for _, id := range []string{"tcp", "greet", "connect", "udp", "stun", "api"} {
r, ok := finalByID(results, id)
require.True(t, ok, "missing %s", id)
assert.Equal(t, StatusPassed, r.Status, "id=%s, got %+v", id, r)
}
// API should report 401.
rA, _ := finalByID(results, "api")
assert.Equal(t, "HTTP 401", rA.Metric)
}
func TestRun_CancelledMidFlight(t *testing.T) {
fp := newFakeProxy(t, "happy_no_auth")
cfg := proxyConfig(fp, false)
cfg.DiscordGateway = stubGatewayAddr(t)
cfg.DiscordAPI = stubAPIServer(t, fp, 200)
cfg.StunServer = "127.0.0.1:65000"
ctx, cancel := context.WithCancel(context.Background())
ch := Run(ctx, cfg)
var (
results []Result
mu sync.Mutex
)
done := make(chan struct{})
go func() {
defer close(done)
for r := range ch {
mu.Lock()
results = append(results, r)
mu.Unlock()
// Cancel as soon as we see tcp pass.
if r.ID == "tcp" && r.Status == StatusPassed {
cancel()
}
}
}()
select {
case <-done:
case <-time.After(15 * time.Second):
t.Fatal("timed out waiting for cancelled run to finish")
}
// At least one Failed/Skipped after tcp Pass.
mu.Lock()
defer mu.Unlock()
var failed, skipped int
for _, r := range results {
switch r.Status {
case StatusFailed:
if r.Error == "cancelled" {
failed++
}
case StatusSkipped:
if r.Error == "cancelled" {
skipped++
}
}
}
// Either: one cancelled-failed + rest cancelled-skipped, OR all
// cancelled-skipped (if cancellation hit before next test even
// started). Both are acceptable.
// Without auth, 5 tests remain after tcp (greet/connect/udp/stun/api).
// Cancel may race with greet completing successfully, so accept ≥4.
assert.GreaterOrEqual(t, failed+skipped, 4, "expected at least 4 cancellation-marked results, got failed=%d skipped=%d all=%+v", failed, skipped, results)
}
func TestRun_AppliesDefaults(t *testing.T) {
// Use a Config{} with only ProxyHost/Port populated; everything
// else should fall back to spec defaults.
fp := newFakeProxy(t, "happy_no_auth")
host, port := hostPort(fp.addr)
cfg := Config{
ProxyHost: host,
ProxyPort: port,
}
// Verify applyDefaults produces expected values.
out := applyDefaults(cfg)
assert.Equal(t, 5*time.Second, out.PerTestTimeout)
assert.Equal(t, 1, out.MaxRetries)
assert.Equal(t, 500*time.Millisecond, out.RetryBackoff)
assert.Equal(t, "gateway.discord.gg:443", out.DiscordGateway)
assert.Equal(t, "https://discord.com/api/v9/gateway", out.DiscordAPI)
assert.Equal(t, "stun.l.google.com:19302", out.StunServer)
// Behavioral: passing a zero Config to Run should not panic and
// should at minimum emit a TCP result. We override defaults to
// shorter values so the test isn't slow when the public Discord
// targets are unreachable.
cfg.PerTestTimeout = 200 * time.Millisecond
cfg.RetryBackoff = 20 * time.Millisecond
cfg.DiscordGateway = stubGatewayAddr(t)
cfg.DiscordAPI = stubAPIServer(t, fp, 200)
cfg.StunServer = "127.0.0.1:65000"
ch := Run(context.Background(), cfg)
results := drainResults(t, ch, 10*time.Second)
rT, ok := finalByID(results, "tcp")
require.True(t, ok)
assert.Equal(t, StatusPassed, rT.Status)
}
func TestRun_NegativeRetryClamped(t *testing.T) {
cfg := Config{MaxRetries: -5, RetryBackoff: -1 * time.Second, PerTestTimeout: -1 * time.Second}
out := applyDefaults(cfg)
// Spec: MaxRetries < 0 → 0. But our default for "not set" is 1.
// We treat <0 as 0, then bump 0→1 (default for zero).
// Either 0 or 1 is acceptable per spec wording; we settled on 1.
assert.True(t, out.MaxRetries == 0 || out.MaxRetries == 1)
assert.Equal(t, 5*time.Second, out.PerTestTimeout)
assert.Equal(t, 500*time.Millisecond, out.RetryBackoff)
}
func TestExtractRawHex(t *testing.T) {
cases := []struct {
in, want string
}{
{"socks5: bad version (raw=05ff)", "05ff"},
{"socks5: bad version (raw=DEADBEEF)", "DEADBEEF"},
{"no raw here", ""},
{"", ""},
}
for _, c := range cases {
assert.Equal(t, c.want, extractRawHex(c.in), "input=%q", c.in)
}
}