Compare commits

25 commits: walterp-ap...buildjet

| Author | SHA1 | Date |
|---|---|---|
|  | 0f1273ea69 |  |
|  | fb4e23506f |  |
|  | 6d04184325 |  |
|  | 8c72aabbdf |  |
|  | f7cb535693 |  |
|  | 146f51ce76 |  |
|  | c66e15772f |  |
|  | e1bdbfe710 |  |
|  | acc7baac6d |  |
|  | 91794f6498 |  |
|  | 2c447de6cc |  |
|  | 021bedfb89 |  |
|  | d988c9f098 |  |
|  | 0607832397 |  |
|  | 565dbc599a |  |
|  | aadf63da1d |  |
|  | d5781f61a9 |  |
|  | a7a0baf6b9 |  |
|  | e9b98dd2e1 |  |
|  | b9b0bf65a0 |  |
|  | c6162c2a94 |  |
|  | aa5e494aba |  |
|  | ff13c66f55 |  |
|  | ed248b04a7 |  |
|  | 8158dd2edc |  |
.github/workflows/cifuzz.yml (vendored, 8 changes)

```diff
@@ -1,5 +1,7 @@
 name: CIFuzz
-on: [pull_request]
+on:
+  push:
+    branches: [ main, release-branch/* ]
 
 concurrency:
   group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
@@ -7,7 +9,7 @@ concurrency:
 
 jobs:
   Fuzzing:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
    steps:
    - name: Build Fuzzers
      id: build
@@ -20,7 +22,7 @@ jobs:
      uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
      with:
        oss-fuzz-project-name: 'tailscale'
-        fuzz-seconds: 300
+        fuzz-seconds: 900
        dry-run: false
        language: go
    - name: Upload Crash
```
.github/workflows/codeql-analysis.yml (vendored, 2 changes)

```diff
@@ -27,7 +27,7 @@ concurrency:
 jobs:
   analyze:
     name: Analyze
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
     permissions:
       actions: read
       contents: read
```
.github/workflows/cross-android.yml (vendored, 2 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
```
.github/workflows/cross-darwin.yml (vendored, 2 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
```
.github/workflows/cross-freebsd.yml (vendored, 2 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
```
.github/workflows/cross-openbsd.yml (vendored, 2 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
```
.github/workflows/cross-wasm.yml (vendored, 2 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
```
.github/workflows/cross-windows.yml (vendored, 2 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
```
.github/workflows/linux-race.yml (vendored, 2 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
```
.github/workflows/linux.yml (vendored, 6 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
@@ -38,10 +38,6 @@ jobs:
 
    - name: Get QEMU
      run: |
-        # The qemu in Ubuntu 20.04 (Focal) is too old; we need 5.x something
-        # to run Go binaries. 5.2.0 (Debian bullseye) empirically works, and
-        # use this PPA which brings in a modern qemu.
-        sudo add-apt-repository -y ppa:jacob/virtualisation
        sudo apt-get -y update
        sudo apt-get -y install qemu-user
 
```
.github/workflows/linux32.yml (vendored, 2 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
```
.github/workflows/static-analysis.yml (vendored, 4 changes)

```diff
@@ -40,7 +40,7 @@ jobs:
     if: failure() && github.event_name == 'push'
 
   vet:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
    steps:
    - name: Set up Go
      uses: actions/setup-go@v3
@@ -66,7 +66,7 @@ jobs:
     if: failure() && github.event_name == 'push'
 
   staticcheck:
-    runs-on: ubuntu-latest
+    runs-on: buildjet-4vcpu-ubuntu-2204
    strategy:
      matrix:
        goos: [linux, windows, darwin]
```
.github/workflows/vm.yml (vendored, 9 changes)

```diff
@@ -11,7 +11,7 @@ concurrency:
 
 jobs:
   ubuntu2004-LTS-cloud-base:
-    runs-on: [ self-hosted, linux, vm ]
+    runs-on: buildjet-4vcpu-ubuntu-2204
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
@@ -27,12 +27,17 @@ jobs:
      with:
        go-version-file: go.mod
 
+    - name: Get QEMU
+      run: |
+        sudo apt-get -y update
+        sudo apt-get -y install qemu-system-x86 qemu-user genisoimage qemu-utils
+
    - name: Run VM tests
      run: go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004
      env:
        HOME: "/tmp"
        TMPDIR: "/tmp"
-        XDG_CACHE_HOME: "/var/lib/ghrunner/cache"
+        XDG_CACHE_HOME: "/home/runner/cache"
 
    - uses: k0kubun/action-slack@v2.0.0
      with:
```
.github/workflows/windows.yml (vendored, 2 changes)

```diff
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   test:
-    runs-on: windows-latest
+    runs-on: windows-8vcpu
 
     if: "!contains(github.event.head_commit.message, '[ci skip]')"
 
```
```diff
@@ -51,7 +51,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
   tailscale.com/safesocket from tailscale.com/client/tailscale
   tailscale.com/syncs from tailscale.com/cmd/derper+
   tailscale.com/tailcfg from tailscale.com/client/tailscale+
-  tailscale.com/tka from tailscale.com/client/tailscale
+  tailscale.com/tka from tailscale.com/client/tailscale+
 W tailscale.com/tsconst from tailscale.com/net/interfaces
 💣 tailscale.com/tstime/mono from tailscale.com/tstime/rate
   tailscale.com/tstime/rate from tailscale.com/wgengine/filter
```
```diff
@@ -110,11 +110,12 @@ func runSpeedtest(ctx context.Context, args []string) error {
     w := tabwriter.NewWriter(os.Stdout, 12, 0, 0, ' ', tabwriter.TabIndent)
     fmt.Println("Results:")
     fmt.Fprintln(w, "Interval\t\tTransfer\t\tBandwidth\t\t")
+    startTime := results[0].IntervalStart
     for _, r := range results {
         if r.Total {
             fmt.Fprintln(w, "-------------------------------------------------------------------------")
         }
-        fmt.Fprintf(w, "%.2f-%.2f\tsec\t%.4f\tMBits\t%.4f\tMbits/sec\t\n", r.IntervalStart.Seconds(), r.IntervalEnd.Seconds(), r.MegaBits(), r.MBitsPerSecond())
+        fmt.Fprintf(w, "%.2f-%.2f\tsec\t%.4f\tMBits\t%.4f\tMbits/sec\t\n", r.IntervalStart.Sub(startTime).Seconds(), r.IntervalEnd.Sub(startTime).Seconds(), r.MegaBits(), r.MBitsPerSecond())
     }
     w.Flush()
     return nil
```
```diff
@@ -48,11 +48,11 @@ func runConfigureHost(ctx context.Context, args []string) error {
     if uid := os.Getuid(); uid != 0 {
         return fmt.Errorf("must be run as root, not %q (%v)", os.Getenv("USER"), uid)
     }
-    osVer := hostinfo.GetOSVersion()
-    isDSM6 := strings.HasPrefix(osVer, "Synology 6")
-    isDSM7 := strings.HasPrefix(osVer, "Synology 7")
+    hi := hostinfo.New()
+    isDSM6 := strings.HasPrefix(hi.DistroVersion, "6.")
+    isDSM7 := strings.HasPrefix(hi.DistroVersion, "7.")
     if !isDSM6 && !isDSM7 {
-        return fmt.Errorf("unsupported DSM version %q", osVer)
+        return fmt.Errorf("unsupported DSM version %q", hi.DistroVersion)
     }
     if _, err := os.Stat("/dev/net/tun"); os.IsNotExist(err) {
         if err := os.MkdirAll("/dev/net", 0755); err != nil {
```
```diff
@@ -133,6 +133,9 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error {
     printf("\t* MappingVariesByDestIP: %v\n", report.MappingVariesByDestIP)
     printf("\t* HairPinning: %v\n", report.HairPinning)
     printf("\t* PortMapping: %v\n", portMapping(report))
+    if report.CaptivePortal != "" {
+        printf("\t* CaptivePortal: %v\n", report.CaptivePortal)
+    }
 
     // When DERP latency checking failed,
     // magicsock will try to pick the DERP server that
```
```diff
@@ -380,7 +380,6 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus
     // Do this after validations to avoid the 5s delay if we're going to error
     // out anyway.
     wantSSH, haveSSH := env.upArgs.runSSH, curPrefs.RunSSH
-    fmt.Println("wantSSH", wantSSH, "haveSSH", haveSSH)
     if wantSSH != haveSSH && isSSHOverTailscale() {
         if wantSSH {
             err = presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action will reroute SSH traffic to Tailscale SSH and will result in your session disconnecting.`, env.upArgs.acceptedRisks)
```
```diff
@@ -100,6 +100,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
      tailscale.com/util/groupmember from tailscale.com/cmd/tailscale/cli
      tailscale.com/util/lineread from tailscale.com/net/interfaces+
      tailscale.com/util/mak from tailscale.com/net/netcheck
+     tailscale.com/util/multierr from tailscale.com/control/controlhttp
      tailscale.com/util/singleflight from tailscale.com/net/dnscache
 L    tailscale.com/util/strs from tailscale.com/hostinfo
 W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+
```
```diff
@@ -7,7 +7,7 @@ After=network-pre.target NetworkManager.service systemd-resolved.service
 [Service]
 EnvironmentFile=/etc/default/tailscaled
 ExecStartPre=/usr/sbin/tailscaled --cleanup
-ExecStart=/usr/sbin/tailscaled --state=/var/lib/tailscale/tailscaled.state --socket=/run/tailscale/tailscaled.sock --port $PORT $FLAGS
+ExecStart=/usr/sbin/tailscaled --state=/var/lib/tailscale/tailscaled.state --socket=/run/tailscale/tailscaled.sock --port=${PORT} $FLAGS
 ExecStopPost=/usr/sbin/tailscaled --cleanup
 
 Restart=on-failure
```
```diff
@@ -46,7 +46,7 @@ function SSHSession({
   const ref = useRef<HTMLDivElement>(null)
   useEffect(() => {
     if (ref.current) {
-      runSSHSession(ref.current, def, ipn, onDone)
+      runSSHSession(ref.current, def, ipn, onDone, (err) => console.error(err))
     }
   }, [ref])
 
```
```diff
@@ -5,6 +5,8 @@ import { WebLinksAddon } from "xterm-addon-web-links"
 export type SSHSessionDef = {
   username: string
   hostname: string
+  /** Defaults to 5 seconds */
+  timeoutSeconds?: number
 }
 
 export function runSSHSession(
@@ -12,6 +14,7 @@ export function runSSHSession(
   def: SSHSessionDef,
   ipn: IPN,
   onDone: () => void,
+  onError?: (err: string) => void,
   terminalOptions?: ITerminalOptions
 ) {
   const parentWindow = termContainerNode.ownerDocument.defaultView ?? window
@@ -46,7 +49,7 @@ export function runSSHSession(
       term.write(input)
     },
     writeErrorFn(err) {
-      console.error(err)
+      onError?.(err)
       term.write(err)
     },
     setReadFn(hook) {
@@ -62,6 +65,7 @@ export function runSSHSession(
       }
       onDone()
     },
+    timeoutSeconds: def.timeoutSeconds,
   })
 
   // Make terminal and SSH session track the size of the containing DOM node.
```
cmd/tsconnect/src/types/wasm_js.d.ts (vendored, 2 changes)

```diff
@@ -23,6 +23,8 @@ declare global {
       setReadFn: (readFn: (data: string) => void) => void
       rows: number
       cols: number
+      /** Defaults to 5 seconds */
+      timeoutSeconds?: number
       onDone: () => void
     }
   ): IPNSSHSession
```
```diff
@@ -360,6 +360,10 @@ func (s *jsSSHSession) Run() {
     setReadFn := s.termConfig.Get("setReadFn")
     rows := s.termConfig.Get("rows").Int()
     cols := s.termConfig.Get("cols").Int()
+    timeoutSeconds := 5.0
+    if jsTimeoutSeconds := s.termConfig.Get("timeoutSeconds"); jsTimeoutSeconds.Type() == js.TypeNumber {
+        timeoutSeconds = jsTimeoutSeconds.Float()
+    }
     onDone := s.termConfig.Get("onDone")
     defer onDone.Invoke()
 
@@ -367,7 +371,7 @@ func (s *jsSSHSession) Run() {
         writeErrorFn.Invoke(fmt.Sprintf("%s Error: %v\r\n", label, err))
     }
 
-    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutSeconds*float64(time.Second)))
     defer cancel()
     c, err := s.jsIPN.dialer.UserDial(ctx, "tcp", net.JoinHostPort(s.host, "22"))
     if err != nil {
```
```diff
@@ -114,19 +114,11 @@ func NewNoStart(opts Options) (*Auto, error) {
     }
     c.authCtx, c.authCancel = context.WithCancel(context.Background())
     c.mapCtx, c.mapCancel = context.WithCancel(context.Background())
-    c.unregisterHealthWatch = health.RegisterWatcher(c.onHealthChange)
+    c.unregisterHealthWatch = health.RegisterWatcher(direct.ReportHealthChange)
     return c, nil
 }
 
-func (c *Auto) onHealthChange(sys health.Subsystem, err error) {
-    if sys == health.SysOverall {
-        return
-    }
-    c.logf("controlclient: restarting map request for %q health change to new state: %v", sys, err)
-    c.cancelMapSafely()
-}
-
 // SetPaused controls whether HTTP activity should be paused.
 //
 // The client can be paused and unpaused repeatedly, unlike Start and Shutdown, which can only be used once.
```
```diff
@@ -76,6 +76,8 @@ type Direct struct {
     popBrowser func(url string) // or nil
     c2nHandler http.Handler     // or nil
 
+    dialPlan ControlDialPlanner // can be nil
+
     mu             sync.Mutex        // mutex guards the following fields
     serverKey      key.MachinePublic // original ("legacy") nacl crypto_box-based public key
     serverNoiseKey key.MachinePublic
@@ -106,6 +108,7 @@ type Options struct {
     KeepAlive       bool
     Logf            logger.Logf
     HTTPTestClient  *http.Client // optional HTTP client to use (for tests only)
+    NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only)
     DebugFlags      []string     // debug settings to send to control
     LinkMonitor     *monitor.Mon // optional link monitor
     PopBrowserURL   func(url string) // optional func to open browser
@@ -132,6 +135,34 @@ type Options struct {
     // MapResponse.PingRequest queries from the control plane.
     // If nil, PingRequest queries are not answered.
     Pinger Pinger
+
+    // DialPlan contains and stores a previous dial plan that we received
+    // from the control server; if nil, we fall back to using DNS.
+    //
+    // If we receive a new DialPlan from the server, this value will be
+    // updated.
+    DialPlan ControlDialPlanner
 }
 
+// ControlDialPlanner is the interface optionally supplied when creating a
+// control client to control exactly how TCP connections to the control plane
+// are dialed.
+//
+// It is usually implemented by an atomic.Pointer.
+type ControlDialPlanner interface {
+    // Load returns the current plan for how to connect to control.
+    //
+    // The returned plan can be nil. If so, connections should be made by
+    // resolving the control URL using DNS.
+    Load() *tailcfg.ControlDialPlan
+
+    // Store updates the dial plan with new directions from the control
+    // server.
+    //
+    // The dial plan can span multiple connections to the control server.
+    // That is, a dial plan received when connected over Wi-Fi is still
+    // valid for a subsequent connection over LTE after a network switch.
+    Store(*tailcfg.ControlDialPlan)
+}
+
 // Pinger is the LocalBackend.Ping method.
```
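As the interface comment above notes, `ControlDialPlanner` is usually implemented by an `atomic.Pointer`. A minimal, illustrative sketch (not part of the patch): Go 1.19's `atomic.Pointer[T]` already has `Load`/`Store` methods of exactly these shapes, so a pointer to one satisfies the interface with no wrapper type — which is how `LocalBackend` supplies its `dialPlan` field later in this diff.

```go
package main

import (
	"sync/atomic"

	"tailscale.com/control/controlclient"
	"tailscale.com/tailcfg"
)

func main() {
	// atomic.Pointer[tailcfg.ControlDialPlan].Load returns
	// *tailcfg.ControlDialPlan and Store accepts one, so
	// *atomic.Pointer satisfies ControlDialPlanner directly.
	var plan atomic.Pointer[tailcfg.ControlDialPlan]
	var _ controlclient.ControlDialPlanner = &plan

	// A new plan from the server would be stored like so; readers
	// (the noise client) call Load on each dial.
	plan.Store(&tailcfg.ControlDialPlan{})
}
```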
```diff
@@ -215,6 +246,7 @@ func NewDirect(opts Options) (*Direct, error) {
         popBrowser: opts.PopBrowserURL,
         c2nHandler: opts.C2NHandler,
         dialer:     opts.Dialer,
+        dialPlan:   opts.DialPlan,
     }
     if opts.Hostinfo == nil {
         c.SetHostinfo(hostinfo.New())
@@ -226,6 +258,12 @@ func NewDirect(opts Options) (*Direct, error) {
             c.SetNetInfo(ni)
         }
     }
+    if opts.NoiseTestClient != nil {
+        c.noiseClient = &noiseClient{
+            Client: opts.NoiseTestClient,
+        }
+        c.serverNoiseKey = key.NewMachine().Public() // prevent early error before hitting test client
+    }
     return c, nil
 }
```
```diff
@@ -908,6 +946,14 @@ func (c *Direct) sendMapRequest(ctx context.Context, maxPolls int, readOnly bool
     } else {
         vlogf("netmap: got new map")
     }
+    if resp.ControlDialPlan != nil {
+        if c.dialPlan != nil {
+            c.logf("netmap: got new dial plan from control")
+            c.dialPlan.Store(resp.ControlDialPlan)
+        } else {
+            c.logf("netmap: [unexpected] new dial plan; nowhere to store it")
+        }
+    }
 
     select {
     case timeoutReset <- struct{}{}:
```
```diff
@@ -1358,12 +1404,17 @@ func (c *Direct) getNoiseClient() (*noiseClient, error) {
     if nc != nil {
         return nc, nil
     }
+    var dp func() *tailcfg.ControlDialPlan
+    if c.dialPlan != nil {
+        dp = c.dialPlan.Load
+    }
     nc, err, _ := c.sfGroup.Do(struct{}{}, func() (*noiseClient, error) {
         k, err := c.getMachinePrivKey()
         if err != nil {
             return nil, err
         }
-        nc, err := newNoiseClient(k, serverNoiseKey, c.serverURL, c.dialer)
+        c.logf("creating new noise client")
+        nc, err := newNoiseClient(k, serverNoiseKey, c.serverURL, c.dialer, dp)
         if err != nil {
             return nil, err
         }
```
```diff
@@ -1383,15 +1434,11 @@ func (c *Direct) setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) error {
     newReq := *req
     newReq.Version = tailcfg.CurrentCapabilityVersion
-    np, err := c.getNoiseClient()
+    nc, err := c.getNoiseClient()
     if err != nil {
         return err
     }
-    bodyData, err := json.Marshal(newReq)
-    if err != nil {
-        return err
-    }
-    res, err := np.Post(fmt.Sprintf("https://%v/%v", np.host, "machine/set-dns"), "application/json", bytes.NewReader(bodyData))
+    res, err := nc.post(ctx, "/machine/set-dns", req)
     if err != nil {
         return err
     }
```
```diff
@@ -1539,6 +1586,38 @@ func postPingResult(start time.Time, logf logger.Logf, c *http.Client, pr *tailc
     return nil
 }
 
+// ReportHealthChange reports to the control plane a change to this node's
+// health.
+func (c *Direct) ReportHealthChange(sys health.Subsystem, sysErr error) {
+    if sys == health.SysOverall {
+        // We don't report these. These include things like the network is down
+        // (in which case we can't report anyway) or the user wanted things
+        // stopped, as opposed to the more unexpected failure types in the other
+        // subsystems.
+        return
+    }
+    np, err := c.getNoiseClient()
+    if err != nil {
+        // Don't report errors to control if the server doesn't support noise.
+        return
+    }
+    req := &tailcfg.HealthChangeRequest{
+        Subsys: string(sys),
+    }
+    if sysErr != nil {
+        req.Error = sysErr.Error()
+    }
+
+    // Best effort, no logging:
+    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+    defer cancel()
+    res, err := np.post(ctx, "/machine/update-health", req)
+    if err != nil {
+        return
+    }
+    res.Body.Close()
+}
+
 var (
     metricMapRequestsActive = clientmetric.NewGauge("controlclient_map_requests_active")
```
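As a sanity check on the wire format, here is a small illustrative program (not from the patch; the subsystem name and error text are made up) showing the JSON body that `ReportHealthChange` above would post to `/machine/update-health`:

```go
package main

import (
	"encoding/json"
	"fmt"

	"tailscale.com/tailcfg"
)

func main() {
	// Hypothetical failing subsystem; mirrors how ReportHealthChange
	// fills the request above.
	req := &tailcfg.HealthChangeRequest{
		Subsys: "dns",
		Error:  "resolver timeout",
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"Subsys":"dns","Error":"resolver timeout"}
}
```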
```diff
@@ -48,6 +48,7 @@ type mapSession struct {
     lastHealth        []string
     lastPopBrowserURL string
     stickyDebug       tailcfg.Debug // accumulated opt.Bool values
+    lastTKAInfo       *tailcfg.TKAInfo
 
     // netMapBuilding is non-nil during a netmapForResponse call,
     // containing the value to be returned, once fully populated.
@@ -115,6 +116,9 @@ func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.Netwo
     if resp.Health != nil {
         ms.lastHealth = resp.Health
     }
+    if resp.TKAInfo != nil {
+        ms.lastTKAInfo = resp.TKAInfo
+    }
 
     debug := resp.Debug
     if debug != nil {
@@ -152,9 +156,17 @@ func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.Netwo
         DERPMap:       ms.lastDERPMap,
         Debug:         debug,
         ControlHealth: ms.lastHealth,
+        TKAEnabled:    ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
     }
     ms.netMapBuilding = nm
 
+    if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {
+        if err := nm.TKAHead.UnmarshalText([]byte(ms.lastTKAInfo.Head)); err != nil {
+            ms.logf("error unmarshalling TKAHead: %v", err)
+            nm.TKAEnabled = false
+        }
+    }
+
     if resp.Node != nil {
         ms.lastNode = resp.Node
     }
```
```diff
@@ -5,8 +5,10 @@
 package controlclient
 
 import (
+    "bytes"
     "context"
     "crypto/tls"
+    "encoding/json"
     "math"
     "net"
     "net/http"
@@ -53,6 +55,11 @@ type noiseClient struct {
     httpPort  string // the default port to call
     httpsPort string // the fallback Noise-over-https port
 
+    // dialPlan optionally returns a ControlDialPlan previously received
+    // from the control server; either the function or the return value can
+    // be nil.
+    dialPlan func() *tailcfg.ControlDialPlan
+
     // mu only protects the following variables.
     mu     sync.Mutex
     nextID int
@@ -61,7 +68,9 @@ type noiseClient struct {
 
 // newNoiseClient returns a new noiseClient for the provided server and machine key.
 // serverURL is of the form https://<host>:<port> (no trailing slash).
-func newNoiseClient(priKey key.MachinePrivate, serverPubKey key.MachinePublic, serverURL string, dialer *tsdial.Dialer) (*noiseClient, error) {
+//
+// dialPlan may be nil
+func newNoiseClient(priKey key.MachinePrivate, serverPubKey key.MachinePublic, serverURL string, dialer *tsdial.Dialer, dialPlan func() *tailcfg.ControlDialPlan) (*noiseClient, error) {
     u, err := url.Parse(serverURL)
     if err != nil {
         return nil, err
@@ -89,6 +98,7 @@ func newNoiseClient(priKey key.MachinePrivate, serverPubKey key.MachinePublic, s
         httpPort:  httpPort,
         httpsPort: httpsPort,
         dialer:    dialer,
+        dialPlan:  dialPlan,
     }
 
     // Create the HTTP/2 Transport using a net/http.Transport
```
```diff
@@ -155,16 +165,51 @@ func (nc *noiseClient) dial(_, _ string, _ *tls.Config) (net.Conn, error) {
     nc.nextID++
     nc.mu.Unlock()
 
-    // Timeout is a little arbitrary, but plenty long enough for even the
-    // highest latency links.
-    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-    defer cancel()
-
     if tailcfg.CurrentCapabilityVersion > math.MaxUint16 {
         // Panic, because a test should have started failing several
         // thousand version numbers before getting to this point.
         panic("capability version is too high to fit in the wire protocol")
     }
 
+    var dialPlan *tailcfg.ControlDialPlan
+    if nc.dialPlan != nil {
+        dialPlan = nc.dialPlan()
+    }
+
+    // If we have a dial plan, then set our timeout as slightly longer than
+    // the maximum amount of time contained therein; we assume that
+    // explicit instructions on timeouts are more useful than a single
+    // hard-coded timeout.
+    //
+    // The default value of 5 is chosen so that, when there's no dial plan,
+    // we retain the previous behaviour of 10 seconds end-to-end timeout.
+    timeoutSec := 5.0
+    if dialPlan != nil {
+        for _, c := range dialPlan.Candidates {
+            if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec {
+                timeoutSec = v
+            }
+        }
+    }
+
+    // After we establish a connection, we need some time to actually
+    // upgrade it into a Noise connection. With a ballpark worst-case RTT
+    // of 1000ms, give ourselves an extra 5 seconds to complete the
+    // handshake.
+    timeoutSec += 5
+
+    // Be extremely defensive and ensure that the timeout is in the range
+    // [5, 60] seconds (e.g. if we accidentally get a negative number).
+    if timeoutSec > 60 {
+        timeoutSec = 60
+    } else if timeoutSec < 5 {
+        timeoutSec = 5
+    }
+
+    timeout := time.Duration(timeoutSec * float64(time.Second))
+    ctx, cancel := context.WithTimeout(context.Background(), timeout)
+    defer cancel()
+
     conn, err := (&controlhttp.Dialer{
         Hostname: nc.host,
         HTTPPort: nc.httpPort,
@@ -173,6 +218,7 @@ func (nc *noiseClient) dial(_, _ string, _ *tls.Config) (net.Conn, error) {
         ControlKey:      nc.serverPubKey,
         ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion),
         Dialer:          nc.dialer.SystemDial,
+        DialPlan:        dialPlan,
     }).Dial(ctx)
     if err != nil {
         return nil, err
@@ -184,3 +230,16 @@ func (nc *noiseClient) dial(_, _ string, _ *tls.Config) (net.Conn, error) {
     mak.Set(&nc.connPool, ncc.id, ncc)
     return ncc, nil
 }
+
+func (nc *noiseClient) post(ctx context.Context, path string, body any) (*http.Response, error) {
+    jbody, err := json.Marshal(body)
+    if err != nil {
+        return nil, err
+    }
+    req, err := http.NewRequestWithContext(ctx, "POST", "https://"+nc.host+path, bytes.NewReader(jbody))
+    if err != nil {
+        return nil, err
+    }
+    req.Header.Set("Content-Type", "application/json")
+    return nc.Do(req)
+}
```
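To make the timeout arithmetic above concrete, here is a small illustrative program (assumed candidate values, not from the patch) that applies the same rules to a two-candidate plan: the slowest candidate may start after 1s and has a 10s budget, so the end-to-end timeout becomes 11 + 5 = 16 seconds, comfortably inside the [5, 60] clamp.

```go
package main

import (
	"fmt"
	"time"

	"tailscale.com/tailcfg"
)

func main() {
	// Hypothetical plan; candidate IPs omitted because only the timing
	// fields matter for the computation mirrored from dial() above.
	plan := &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{
		{DialStartDelaySec: 0, DialTimeoutSec: 10},
		{DialStartDelaySec: 1, DialTimeoutSec: 10},
	}}

	timeoutSec := 5.0
	for _, c := range plan.Candidates {
		if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec {
			timeoutSec = v
		}
	}
	timeoutSec += 5 // handshake headroom
	if timeoutSec > 60 {
		timeoutSec = 60
	} else if timeoutSec < 5 {
		timeoutSec = 5
	}
	fmt.Println(time.Duration(timeoutSec * float64(time.Second))) // 16s
}
```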
```diff
@@ -28,18 +28,25 @@ import (
     "errors"
     "fmt"
     "io"
+    "math"
     "net"
     "net/http"
     "net/http/httptrace"
+    "net/netip"
     "net/url"
+    "sort"
+    "sync/atomic"
     "time"
 
     "tailscale.com/control/controlbase"
+    "tailscale.com/envknob"
     "tailscale.com/net/dnscache"
     "tailscale.com/net/dnsfallback"
     "tailscale.com/net/netutil"
     "tailscale.com/net/tlsdial"
     "tailscale.com/net/tshttpproxy"
+    "tailscale.com/tailcfg"
+    "tailscale.com/util/multierr"
 )
 
 var stdDialer net.Dialer
```
```diff
@@ -82,7 +89,170 @@ func (a *Dialer) httpsFallbackDelay() time.Duration {
     return 500 * time.Millisecond
 }
 
+var _ = envknob.RegisterBool("TS_USE_CONTROL_DIAL_PLAN") // to record at init time whether it's in use
+
 func (a *Dialer) dial(ctx context.Context) (*controlbase.Conn, error) {
+    // If we don't have a dial plan, just fall back to dialing the single
+    // host we know about.
+    useDialPlan := envknob.BoolDefaultTrue("TS_USE_CONTROL_DIAL_PLAN")
+    if !useDialPlan || a.DialPlan == nil || len(a.DialPlan.Candidates) == 0 {
+        return a.dialHost(ctx, netip.Addr{})
+    }
+    candidates := a.DialPlan.Candidates
+
+    // Otherwise, we try dialing per the plan. Store the highest priority
+    // in the list, so that if we get a connection to one of those
+    // candidates we can return quickly.
+    var highestPriority int = math.MinInt
+    for _, c := range candidates {
+        if c.Priority > highestPriority {
+            highestPriority = c.Priority
+        }
+    }
+
+    // This context allows us to cancel in-flight connections if we get a
+    // highest-priority connection before we're all done.
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+
+    // Now, for each candidate, kick off a dial in parallel.
+    type dialResult struct {
+        conn     *controlbase.Conn
+        err      error
+        addr     netip.Addr
+        priority int
+    }
+    resultsCh := make(chan dialResult, len(candidates))
+
+    var pending atomic.Int32
+    pending.Store(int32(len(candidates)))
+    for _, c := range candidates {
+        go func(ctx context.Context, c tailcfg.ControlIPCandidate) {
+            var (
+                conn *controlbase.Conn
+                err  error
+            )
+
+            // Always send results back to our channel.
+            defer func() {
+                resultsCh <- dialResult{conn, err, c.IP, c.Priority}
+                if pending.Add(-1) == 0 {
+                    close(resultsCh)
+                }
+            }()
+
+            // If non-zero, wait the configured start timeout
+            // before we do anything.
+            if c.DialStartDelaySec > 0 {
+                a.logf("[v2] controlhttp: waiting %.2f seconds before dialing %q @ %v", c.DialStartDelaySec, a.Hostname, c.IP)
+                tmr := time.NewTimer(time.Duration(c.DialStartDelaySec * float64(time.Second)))
+                defer tmr.Stop()
+                select {
+                case <-ctx.Done():
+                    err = ctx.Err()
+                    return
+                case <-tmr.C:
+                }
+            }
+
+            // Now, create a sub-context with the given timeout and
+            // try dialing the provided host.
+            ctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second)))
+            defer cancel()
+
+            // This will dial, and the defer above sends it back to our parent.
+            a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP)
+            conn, err = a.dialHost(ctx, c.IP)
+        }(ctx, c)
+    }
+
+    var results []dialResult
+    for res := range resultsCh {
+        // If we get a response that has the highest priority, we don't
+        // need to wait for any of the other connections to finish; we
+        // can just return this connection.
+        //
+        // TODO(andrew): we could make this better by keeping track of
+        // the highest remaining priority dynamically, instead of just
+        // checking for the highest total
+        if res.priority == highestPriority && res.conn != nil {
+            a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, res.addr)
+
+            // Drain the channel and any existing connections in
+            // the background.
+            go func() {
+                for _, res := range results {
+                    if res.conn != nil {
+                        res.conn.Close()
+                    }
+                }
+                for res := range resultsCh {
+                    if res.conn != nil {
+                        res.conn.Close()
+                    }
+                }
+                if a.drainFinished != nil {
+                    close(a.drainFinished)
+                }
+            }()
+            return res.conn, nil
+        }
+
+        // This isn't a highest-priority result, so just store it until
+        // we're done.
+        results = append(results, res)
+    }
+
+    // After we finish this function, close any remaining open connections.
+    defer func() {
+        for _, result := range results {
+            // Note: below, we nil out the returned connection (if
+            // any) in the slice so we don't close it.
+            if result.conn != nil {
+                result.conn.Close()
+            }
+        }
+
+        // We don't drain asynchronously after this point, so notify our
+        // channel when we return.
+        if a.drainFinished != nil {
+            close(a.drainFinished)
+        }
+    }()
+
+    // Sort by priority, then take the first non-error response.
+    sort.Slice(results, func(i, j int) bool {
+        // NOTE: intentionally inverted so that the highest priority
+        // item comes first
+        return results[i].priority > results[j].priority
+    })
+
+    var (
+        conn *controlbase.Conn
+        errs []error
+    )
+    for i, result := range results {
+        if result.err != nil {
+            errs = append(errs, result.err)
+            continue
+        }
+
+        a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, result.addr)
+        conn = result.conn
+        results[i].conn = nil // so we don't close it in the defer
+        return conn, nil
+    }
+    merr := multierr.New(errs...)
+
+    // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS.
+    a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", merr.Error())
+    return a.dialHost(ctx, netip.Addr{})
+}
+
+// dialHost connects to the configured Dialer.Hostname and upgrades the
+// connection into a controlbase.Conn. If addr is valid, then no DNS is used
+// and the connection will be made to the provided address.
+func (a *Dialer) dialHost(ctx context.Context, addr netip.Addr) (*controlbase.Conn, error) {
     // Create one shared context used by both port 80 and port 443 dials.
     // If port 80 is still in flight when 443 returns, this deferred cancel
     // will stop the port 80 dial.
```
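The core of the new `dial` above is a fan-out/fan-in race with a fast path for the highest-priority candidate. A condensed, self-contained sketch of that pattern (illustrative names and a toy dial function; the real code additionally handles per-candidate start delays, context cancellation, and draining of losing connections):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sort"
)

type candidate struct {
	addr     string
	priority int
}

type result struct {
	addr     string
	err      error
	priority int
}

// raceDial mirrors the fast-path/slow-path structure of Dialer.dial.
func raceDial(ctx context.Context, cands []candidate, dial func(context.Context, string) error) (string, error) {
	best := cands[0].priority
	for _, c := range cands {
		if c.priority > best {
			best = c.priority
		}
	}
	ch := make(chan result, len(cands))
	for _, c := range cands {
		go func(c candidate) {
			ch <- result{c.addr, dial(ctx, c.addr), c.priority}
		}(c)
	}
	var results []result
	for range cands {
		r := <-ch
		if r.err == nil && r.priority == best {
			return r.addr, nil // fast path: nothing better can arrive
		}
		results = append(results, r)
	}
	// Slow path: everything finished; take the best success.
	sort.Slice(results, func(i, j int) bool { return results[i].priority > results[j].priority })
	for _, r := range results {
		if r.err == nil {
			return r.addr, nil
		}
	}
	return "", errors.New("all candidates failed")
}

func main() {
	got, err := raceDial(context.Background(), []candidate{
		{"10.0.0.1", 1}, {"10.0.0.2", 2},
	}, func(ctx context.Context, addr string) error {
		if addr == "10.0.0.2" {
			return errors.New("unreachable") // toy failure
		}
		return nil
	})
	fmt.Println(got, err) // 10.0.0.1 <nil>
}
```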
```diff
@@ -110,7 +280,7 @@ func (a *Dialer) dial(ctx context.Context) (*controlbase.Conn, error) {
     }
     ch := make(chan tryURLRes) // must be unbuffered
     try := func(u *url.URL) {
-        cbConn, err := a.dialURL(ctx, u)
+        cbConn, err := a.dialURL(ctx, u, addr)
         select {
         case ch <- tryURLRes{u, cbConn, err}:
         case <-ctx.Done():
@@ -161,12 +331,12 @@ func (a *Dialer) dial(ctx context.Context) (*controlbase.Conn, error) {
 }
 
 // dialURL attempts to connect to the given URL.
-func (a *Dialer) dialURL(ctx context.Context, u *url.URL) (*controlbase.Conn, error) {
+func (a *Dialer) dialURL(ctx context.Context, u *url.URL, addr netip.Addr) (*controlbase.Conn, error) {
     init, cont, err := controlbase.ClientDeferred(a.MachineKey, a.ControlKey, a.ProtocolVersion)
     if err != nil {
         return nil, err
     }
-    netConn, err := a.tryURLUpgrade(ctx, u, init)
+    netConn, err := a.tryURLUpgrade(ctx, u, addr, init)
     if err != nil {
         return nil, err
     }
@@ -178,14 +348,27 @@ func (a *Dialer) dialURL(ctx context.Context, u *url.URL) (*controlbase.Conn, er
     return cbConn, nil
 }
 
-// tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn.
+// tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn. If addr
+// is valid, then no DNS is used and the connection will be made to the
+// provided address.
+//
 // Only the provided ctx is used, not a.ctx.
-func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, init []byte) (net.Conn, error) {
-    dns := &dnscache.Resolver{
-        Forward:          dnscache.Get().Forward,
-        LookupIPFallback: dnsfallback.Lookup,
-        UseLastGood:      true,
-    }
+func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, addr netip.Addr, init []byte) (net.Conn, error) {
+    var dns *dnscache.Resolver
+
+    // If we were provided an address to dial, then create a resolver that just
+    // returns that value; otherwise, fall back to DNS.
+    if addr.IsValid() {
+        dns = &dnscache.Resolver{
+            SingleHostStaticResult: []netip.Addr{addr},
+            SingleHost:             u.Hostname(),
+        }
+    } else {
+        dns = &dnscache.Resolver{
+            Forward:          dnscache.Get().Forward,
+            LookupIPFallback: dnsfallback.Lookup,
+            UseLastGood:      true,
+        }
+    }
 
     var dialer dnscache.DialContextFunc
```
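A minimal sketch of the pinned-resolver branch above (the hostname and IP here are made up; the `dnscache.Resolver` field names are the ones used in `tryURLUpgrade`): when the dial plan supplies a concrete address, the resolver returns only that address for the control host, so no DNS query ever happens.

```go
package main

import (
	"fmt"
	"net/netip"

	"tailscale.com/net/dnscache"
)

func main() {
	// Illustrative values: pin the control hostname to a single address,
	// exactly as tryURLUpgrade does when addr.IsValid().
	addr := netip.MustParseAddr("127.0.0.2")
	dns := &dnscache.Resolver{
		SingleHostStaticResult: []netip.Addr{addr},
		SingleHost:             "example.com",
	}
	fmt.Println(dns.SingleHost, dns.SingleHostStaticResult)
}
```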
```diff
@@ -10,6 +10,7 @@ import (
     "time"
 
     "tailscale.com/net/dnscache"
+    "tailscale.com/tailcfg"
     "tailscale.com/types/key"
     "tailscale.com/types/logger"
 )
@@ -70,9 +71,15 @@ type Dialer struct {
     // dropped.
     Logf logger.Logf
 
+    // DialPlan, if set, contains instructions from the control server on
+    // how to connect to it. If present, we will try the methods in this
+    // plan before falling back to DNS.
+    DialPlan *tailcfg.ControlDialPlan
+
     proxyFunc func(*http.Request) (*url.URL, error) // or nil
 
     // For tests only
+    drainFinished     chan struct{}
     insecureTLS       bool
     testFallbackDelay time.Duration
 }
```
```diff
@@ -13,16 +13,21 @@ import (
     "net"
     "net/http"
     "net/http/httputil"
+    "net/netip"
     "net/url"
+    "runtime"
     "strconv"
     "sync"
     "testing"
     "time"
 
     "tailscale.com/control/controlbase"
+    "tailscale.com/net/dnscache"
     "tailscale.com/net/socks5"
+    "tailscale.com/net/tsdial"
+    "tailscale.com/tailcfg"
     "tailscale.com/types/key"
     "tailscale.com/types/logger"
 )
 
 type httpTestParam struct {
```
```diff
@@ -444,3 +449,263 @@ func brokenMITMHandler(w http.ResponseWriter, r *http.Request) {
     w.(http.Flusher).Flush()
     <-r.Context().Done()
 }
+
+func TestDialPlan(t *testing.T) {
+    if runtime.GOOS != "linux" {
+        t.Skip("only works on Linux due to multiple localhost addresses")
+    }
+
+    client, server := key.NewMachine(), key.NewMachine()
+
+    const (
+        testProtocolVersion = 1
+
+        // We need consistent ports for each address; these are chosen
+        // randomly and we hope that they won't conflict during this test.
+        httpPort  = "40080"
+        httpsPort = "40443"
+    )
+
+    makeHandler := func(t *testing.T, name string, host netip.Addr, wrap func(http.Handler) http.Handler) {
+        done := make(chan struct{})
+        t.Cleanup(func() {
+            close(done)
+        })
+        var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            conn, err := AcceptHTTP(context.Background(), w, r, server)
+            if err != nil {
+                log.Print(err)
+            } else {
+                defer conn.Close()
+            }
+            w.Header().Set("X-Handler-Name", name)
+            <-done
+        })
+        if wrap != nil {
+            handler = wrap(handler)
+        }
+
+        httpLn, err := net.Listen("tcp", host.String()+":"+httpPort)
+        if err != nil {
+            t.Fatalf("HTTP listen: %v", err)
+        }
+        httpsLn, err := net.Listen("tcp", host.String()+":"+httpsPort)
+        if err != nil {
+            t.Fatalf("HTTPS listen: %v", err)
+        }
+
+        httpServer := &http.Server{Handler: handler}
+        go httpServer.Serve(httpLn)
+        t.Cleanup(func() {
+            httpServer.Close()
+        })
+
+        httpsServer := &http.Server{
+            Handler:   handler,
+            TLSConfig: tlsConfig(t),
+            ErrorLog:  logger.StdLogger(logger.WithPrefix(t.Logf, "http.Server.ErrorLog: ")),
+        }
+        go httpsServer.ServeTLS(httpsLn, "", "")
+        t.Cleanup(func() {
+            httpsServer.Close()
+        })
+        return
+    }
+
+    fallbackAddr := netip.MustParseAddr("127.0.0.1")
+    goodAddr := netip.MustParseAddr("127.0.0.2")
+    otherAddr := netip.MustParseAddr("127.0.0.3")
+    other2Addr := netip.MustParseAddr("127.0.0.4")
+    brokenAddr := netip.MustParseAddr("127.0.0.10")
+
+    testCases := []struct {
+        name string
+        plan *tailcfg.ControlDialPlan
+        wrap func(http.Handler) http.Handler
+        want netip.Addr
+
+        allowFallback bool
+    }{
+        {
+            name: "single",
+            plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{
+                {IP: goodAddr, Priority: 1, DialTimeoutSec: 10},
+            }},
+            want: goodAddr,
+        },
+        {
+            name: "broken-then-good",
+            plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{
+                // Dials the broken one, which fails, and then
+                // eventually dials the good one and succeeds
+                {IP: brokenAddr, Priority: 2, DialTimeoutSec: 10},
+                {IP: goodAddr, Priority: 1, DialTimeoutSec: 10, DialStartDelaySec: 1},
+            }},
+            want: goodAddr,
+        },
+        {
+            name: "multiple-priority-fast-path",
+            plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{
+                // Dials some good IPs and our bad one (which
+                // hangs forever), which then hits the fast
+                // path where we bail without waiting.
+                {IP: brokenAddr, Priority: 1, DialTimeoutSec: 10},
+                {IP: goodAddr, Priority: 1, DialTimeoutSec: 10},
+                {IP: other2Addr, Priority: 1, DialTimeoutSec: 10},
+                {IP: otherAddr, Priority: 2, DialTimeoutSec: 10},
+            }},
+            want: otherAddr,
+        },
+        {
+            name: "multiple-priority-slow-path",
+            plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{
+                // Our broken address is the highest priority,
+                // so we don't hit our fast path.
+                {IP: brokenAddr, Priority: 10, DialTimeoutSec: 10},
+                {IP: otherAddr, Priority: 2, DialTimeoutSec: 10},
+                {IP: goodAddr, Priority: 1, DialTimeoutSec: 10},
+            }},
+            want: otherAddr,
+        },
+        {
+            name: "fallback",
+            plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{
+                {IP: brokenAddr, Priority: 1, DialTimeoutSec: 1},
+            }},
+            want:          fallbackAddr,
+            allowFallback: true,
+        },
+    }
+    for _, tt := range testCases {
+        t.Run(tt.name, func(t *testing.T) {
+            makeHandler(t, "fallback", fallbackAddr, nil)
+            makeHandler(t, "good", goodAddr, nil)
+            makeHandler(t, "other", otherAddr, nil)
+            makeHandler(t, "other2", other2Addr, nil)
+            makeHandler(t, "broken", brokenAddr, func(h http.Handler) http.Handler {
+                return http.HandlerFunc(brokenMITMHandler)
+            })
+
+            dialer := closeTrackDialer{
+                t:     t,
+                inner: new(tsdial.Dialer).SystemDial,
+                conns: make(map[*closeTrackConn]bool),
+            }
+            defer dialer.Done()
+
+            ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+            defer cancel()
+
+            // By default, we intentionally point to something that
+            // we know won't connect, since we want a fallback to
+            // DNS to be an error.
+            host := "example.com"
+            if tt.allowFallback {
+                host = "localhost"
+            }
+
+            drained := make(chan struct{})
+            a := &Dialer{
+                Hostname:          host,
+                HTTPPort:          httpPort,
+                HTTPSPort:         httpsPort,
+                MachineKey:        client,
+                ControlKey:        server.Public(),
+                ProtocolVersion:   testProtocolVersion,
+                Dialer:            dialer.Dial,
+                Logf:              t.Logf,
+                DialPlan:          tt.plan,
+                proxyFunc:         func(*http.Request) (*url.URL, error) { return nil, nil },
+                drainFinished:     drained,
+                insecureTLS:       true,
+                testFallbackDelay: 50 * time.Millisecond,
+            }
+
+            conn, err := a.dial(ctx)
+            if err != nil {
+                t.Fatalf("dialing controlhttp: %v", err)
+            }
+            defer conn.Close()
+
+            raddr := conn.RemoteAddr().(*net.TCPAddr)
+
+            got, ok := netip.AddrFromSlice(raddr.IP)
+            if !ok {
+                t.Errorf("invalid remote IP: %v", raddr.IP)
+            } else if got != tt.want {
+                t.Errorf("got connection from %q; want %q", got, tt.want)
+            } else {
+                t.Logf("successfully connected to %q", raddr.String())
+            }
+
+            // Wait until our dialer drains so we can verify that
+            // all connections are closed.
+            <-drained
+        })
+    }
+}
+
+type closeTrackDialer struct {
+    t     testing.TB
+    inner dnscache.DialContextFunc
+    mu    sync.Mutex
+    conns map[*closeTrackConn]bool
+}
+
+func (d *closeTrackDialer) Dial(ctx context.Context, network, addr string) (net.Conn, error) {
+    c, err := d.inner(ctx, network, addr)
+    if err != nil {
+        return nil, err
+    }
+    ct := &closeTrackConn{Conn: c, d: d}
+
+    d.mu.Lock()
+    d.conns[ct] = true
+    d.mu.Unlock()
+    return ct, nil
+}
+
+func (d *closeTrackDialer) Done() {
+    // Unfortunately, tsdial.Dialer.SystemDial closes connections
+    // asynchronously in a goroutine, so we can't assume that everything is
+    // closed by the time we get here.
+    //
+    // Sleep/wait a few times on the assumption that things will close
+    // "eventually".
+    const iters = 100
+    for i := 0; i < iters; i++ {
+        d.mu.Lock()
+        if len(d.conns) == 0 {
+            d.mu.Unlock()
+            return
+        }
+
+        // Only error on last iteration
+        if i != iters-1 {
+            d.mu.Unlock()
+            time.Sleep(100 * time.Millisecond)
+            continue
+        }
+
+        for conn := range d.conns {
+            d.t.Errorf("expected close of conn %p; RemoteAddr=%q", conn, conn.RemoteAddr().String())
+        }
+        d.mu.Unlock()
+    }
+}
+
+func (d *closeTrackDialer) noteClose(c *closeTrackConn) {
+    d.mu.Lock()
+    delete(d.conns, c) // safe if already deleted
+    d.mu.Unlock()
+}
+
+type closeTrackConn struct {
+    net.Conn
+    d *closeTrackDialer
+}
+
+func (c *closeTrackConn) Close() error {
+    c.d.noteClose(c)
+    return c.Conn.Close()
+}
```
```diff
@@ -37,7 +37,7 @@ spec:
         valueFrom:
           secretKeyRef:
             name: tailscale-auth
-            key: AUTH_KEY
+            key: TS_AUTH_KEY
             optional: true
       - name: TS_DEST_IP
         value: "{{TS_DEST_IP}}"
```
```diff
@@ -17,10 +17,11 @@ TS_KUBE_SECRET="${TS_KUBE_SECRET:-tailscale}"
 TS_SOCKS5_SERVER="${TS_SOCKS5_SERVER:-}"
 TS_OUTBOUND_HTTP_PROXY_LISTEN="${TS_OUTBOUND_HTTP_PROXY_LISTEN:-}"
 TS_TAILSCALED_EXTRA_ARGS="${TS_TAILSCALED_EXTRA_ARGS:-}"
+TS_SOCKET="${TS_SOCKET:-/tmp/tailscaled.sock}"
 
 set -e
 
-TAILSCALED_ARGS="--socket=/tmp/tailscaled.sock"
+TAILSCALED_ARGS="--socket=${TS_SOCKET}"
 
 if [[ ! -z "${KUBERNETES_SERVICE_HOST}" ]]; then
   TAILSCALED_ARGS="${TAILSCALED_ARGS} --state=kube:${TS_KUBE_SECRET} --statedir=${TS_STATE_DIR:-/tmp}"
@@ -81,11 +82,11 @@ if [[ ! -z "${TS_EXTRA_ARGS}" ]]; then
 fi
 
 echo "Running tailscale up"
-tailscale --socket=/tmp/tailscaled.sock up ${UP_ARGS}
+tailscale --socket="${TS_SOCKET}" up ${UP_ARGS}
 
 if [[ ! -z "${TS_DEST_IP}" ]]; then
   echo "Adding iptables rule for DNAT"
-  iptables -t nat -I PREROUTING -d "$(tailscale --socket=/tmp/tailscaled.sock ip -4)" -j DNAT --to-destination "${TS_DEST_IP}"
+  iptables -t nat -I PREROUTING -d "$(tailscale --socket=${TS_SOCKET} ip -4)" -j DNAT --to-destination "${TS_DEST_IP}"
 fi
 
 echo "Waiting for tailscaled to exit"
```
```diff
@@ -23,7 +23,7 @@ spec:
         valueFrom:
           secretKeyRef:
             name: tailscale-auth
-            key: AUTH_KEY
+            key: TS_AUTH_KEY
             optional: true
       - name: TS_ROUTES
         value: "{{TS_ROUTES}}"
```
```diff
@@ -26,5 +26,5 @@ spec:
         valueFrom:
           secretKeyRef:
             name: tailscale-auth
-            key: AUTH_KEY
+            key: TS_AUTH_KEY
             optional: true
```
```diff
@@ -189,6 +189,10 @@ type LocalBackend struct {
     // statusChanged.Broadcast().
     statusLock    sync.Mutex
     statusChanged *sync.Cond
+
+    // dialPlan is any dial plan that we've received from the control
+    // server during a previous connection; it is cleared on logout.
+    dialPlan atomic.Pointer[tailcfg.ControlDialPlan]
 }
 
 // clientGen is a func that creates a control plane client.
@@ -684,6 +688,9 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
         }
     }
     if st.NetMap != nil {
+        if err := b.tkaSyncIfNeededLocked(st.NetMap); err != nil {
+            b.logf("[v1] TKA sync error: %v", err)
+        }
         if b.findExitNodeIDLocked(st.NetMap) {
             prefsChanged = true
         }
@@ -1084,6 +1091,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
         Dialer:     b.Dialer(),
         Status:     b.setClientStatus,
         C2NHandler: http.HandlerFunc(b.handleC2N),
+        DialPlan:   &b.dialPlan, // pointer because it can't be copied
 
         // Don't warn about broken Linux IP forwarding when
         // netstack is being used.
@@ -3109,6 +3117,9 @@ func (b *LocalBackend) logout(ctx context.Context, sync bool) error {
         Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true},
     })
 
+    // Clear any previous dial plan(s), if set.
+    b.dialPlan.Store(nil)
+
     if cc == nil {
         // Double Logout can happen via repeated IPN
         // connections to ipnserver making it repeatedly
@@ -3225,6 +3236,17 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
     }
 }
 
+// operatorUserName returns the current pref's OperatorUser's name, or the
+// empty string if none.
+func (b *LocalBackend) operatorUserName() string {
+    b.mu.Lock()
+    defer b.mu.Unlock()
+    if b.prefs == nil {
+        return ""
+    }
+    return b.prefs.OperatorUser
+}
+
 // OperatorUserID returns the current pref's OperatorUser's ID (in
 // os/user.User.Uid string form), or the empty string if none.
 func (b *LocalBackend) OperatorUserID() string {
@@ -3593,6 +3615,17 @@ func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error)
     return cc.DoNoiseRequest(req)
 }
 
+// tailscaleSSHEnabled reports whether Tailscale SSH is currently enabled based
+// on prefs. It returns false if there are no prefs set.
+func (b *LocalBackend) tailscaleSSHEnabled() bool {
+    b.mu.Lock()
+    defer b.mu.Unlock()
+    if b.prefs == nil {
+        return false
+    }
+    return b.prefs.RunSSH
+}
+
 func (b *LocalBackend) sshServerOrInit() (_ SSHServer, err error) {
     b.mu.Lock()
     defer b.mu.Unlock()
```
```diff
@@ -1,4 +1,4 @@
-// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
+// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -12,6 +12,8 @@ import (
     "fmt"
     "io"
     "net/http"
+    "os"
+    "path/filepath"
     "time"
 
     "tailscale.com/envknob"
@@ -31,6 +33,118 @@ type tkaState struct {
     storage   *tka.FS
 }
 
+// tkaSyncIfNeededLocked examines TKA info reported from the control plane,
+// performing the steps necessary to synchronize local tka state.
+//
+// There are 4 scenarios handled here:
+//   - Enablement: nm.TKAEnabled but b.tka == nil
+//     ∴ reach out to /machine/tka/boostrap to get the genesis AUM, then
+//     initialize TKA.
+//   - Disablement: !nm.TKAEnabled but b.tka != nil
+//     ∴ reach out to /machine/tka/boostrap to read the disablement secret,
+//     then verify and clear tka local state.
+//   - Sync needed: b.tka.Head != nm.TKAHead
+//     ∴ complete multi-step synchronization flow.
+//   - Everything up to date: All other cases.
+//     ∴ no action necessary.
+//
+// b.mu must be held. b.mu will be stepped out of (and back in) during network
+// RPCs.
+func (b *LocalBackend) tkaSyncIfNeededLocked(nm *netmap.NetworkMap) error {
+    if !networkLockAvailable() {
+        // If the feature flag is not enabled, pretend we don't exist.
+        return nil
+    }
+    if nm.SelfNode == nil {
+        return errors.New("SelfNode missing")
+    }
+
+    isEnabled := b.tka != nil
+    wantEnabled := nm.TKAEnabled
+    if isEnabled != wantEnabled {
+        var ourHead tka.AUMHash
+        if b.tka != nil {
+            ourHead = b.tka.authority.Head()
+        }
+
+        // Regardless of whether we are moving to disabled or enabled, we
+        // need information from the tka bootstrap endpoint.
+        b.mu.Unlock()
+        bs, err := b.tkaFetchBootstrap(nm.SelfNode.ID, ourHead)
+        b.mu.Lock()
+        if err != nil {
+            return fmt.Errorf("fetching bootstrap: %v", err)
+        }
+
+        if wantEnabled && !isEnabled {
+            if err := b.tkaBootstrapFromGenesisLocked(bs.GenesisAUM); err != nil {
+                return fmt.Errorf("bootstrap: %v", err)
+            }
+            isEnabled = true
+        } else if !wantEnabled && isEnabled {
+            if b.tka.authority.ValidDisablement(bs.DisablementSecret) {
+                b.tka = nil
+                isEnabled = false
+
+                if err := os.RemoveAll(b.chonkPath()); err != nil {
+                    return fmt.Errorf("os.RemoveAll: %v", err)
+                }
+            } else {
+                b.logf("Disablement secret did not verify, leaving TKA enabled.")
+            }
+        } else {
+            return fmt.Errorf("[bug] unreachable invariant of wantEnabled /w isEnabled")
+        }
+    }
+
+    if isEnabled && b.tka.authority.Head() != nm.TKAHead {
+        // TODO(tom): Implement sync
+    }
+
+    return nil
+}
+
+// chonkPath returns the absolute path to the directory in which TKA
+// state (the 'tailchonk') is stored.
+func (b *LocalBackend) chonkPath() string {
+    return filepath.Join(b.TailscaleVarRoot(), "tka")
+}
+
+// tkaBootstrapFromGenesisLocked initializes the local (on-disk) state of the
+// tailnet key authority, based on the given genesis AUM.
+//
+// b.mu must be held.
+func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM) error {
+    if !b.CanSupportNetworkLock() {
+        return errors.New("network lock not supported in this configuration")
+    }
+
+    var genesis tka.AUM
+    if err := genesis.Unserialize(g); err != nil {
+        return fmt.Errorf("reading genesis: %v", err)
+    }
+
+    chonkDir := b.chonkPath()
+    if err := os.Mkdir(chonkDir, 0755); err != nil && !os.IsExist(err) {
+        return fmt.Errorf("mkdir: %v", err)
+    }
+
+    chonk, err := tka.ChonkDir(chonkDir)
+    if err != nil {
+        return fmt.Errorf("chonk: %v", err)
+    }
+    authority, err := tka.Bootstrap(chonk, genesis)
+    if err != nil {
+        return fmt.Errorf("tka bootstrap: %v", err)
+    }
+
+    b.tka = &tkaState{
+        authority: authority,
+        storage:   chonk,
+    }
+    return nil
+}
+
 // CanSupportNetworkLock returns true if tailscaled is able to operate
 // a local tailnet key authority (and hence enforce network lock).
 func (b *LocalBackend) CanSupportNetworkLock() bool {
@@ -237,3 +351,50 @@ func (b *LocalBackend) tkaInitFinish(nm *netmap.NetworkMap, nks map[tailcfg.Node
         return a, nil
     }
 }
+
+// tkaFetchBootstrap sends a /machine/tka/bootstrap RPC to the control plane
+// over noise. This is used to get values necessary to enable or disable TKA.
+func (b *LocalBackend) tkaFetchBootstrap(nodeID tailcfg.NodeID, head tka.AUMHash) (*tailcfg.TKABootstrapResponse, error) {
+    bootstrapReq := tailcfg.TKABootstrapRequest{
+        NodeID: nodeID,
+    }
+    if !head.IsZero() {
+        head, err := head.MarshalText()
+        if err != nil {
+            return nil, fmt.Errorf("head.MarshalText failed: %v", err)
+        }
+        bootstrapReq.Head = string(head)
+    }
+
+    var req bytes.Buffer
+    if err := json.NewEncoder(&req).Encode(bootstrapReq); err != nil {
+        return nil, fmt.Errorf("encoding request: %v", err)
+    }
+
+    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+    defer cancel()
+    if err := ctx.Err(); err != nil {
+        return nil, fmt.Errorf("ctx: %w", err)
+    }
+    req2, err := http.NewRequestWithContext(ctx, "GET", "https://unused/machine/tka/bootstrap", &req)
+    if err != nil {
+        return nil, fmt.Errorf("req: %w", err)
+    }
+    res, err := b.DoNoiseRequest(req2)
+    if err != nil {
+        return nil, fmt.Errorf("resp: %w", err)
+    }
+    if res.StatusCode != 200 {
+        body, _ := io.ReadAll(res.Body)
+        res.Body.Close()
+        return nil, fmt.Errorf("request returned (%d): %s", res.StatusCode, string(body))
+    }
+    a := new(tailcfg.TKABootstrapResponse)
+    err = json.NewDecoder(res.Body).Decode(a)
+    res.Body.Close()
+    if err != nil {
+        return nil, fmt.Errorf("decoding JSON: %w", err)
+    }
+
+    return a, nil
+}
```
243
ipn/ipnlocal/network-lock_test.go
Normal file
@@ -0,0 +1,243 @@
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ipnlocal

import (
	"bytes"
	"context"
	"encoding/json"
	"net"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"testing"

	"tailscale.com/control/controlclient"
	"tailscale.com/hostinfo"
	"tailscale.com/tailcfg"
	"tailscale.com/tka"
	"tailscale.com/types/key"
	"tailscale.com/types/netmap"
)

func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto {
	hi := hostinfo.New()
	ni := tailcfg.NetInfo{LinkType: "wired"}
	hi.NetInfo = &ni

	k := key.NewMachine()
	opts := controlclient.Options{
		ServerURL: "https://example.com",
		Hostinfo:  hi,
		GetMachinePrivateKey: func() (key.MachinePrivate, error) {
			return k, nil
		},
		HTTPTestClient:  c,
		NoiseTestClient: c,
		Status:          func(controlclient.Status) {},
	}

	cc, err := controlclient.NewNoStart(opts)
	if err != nil {
		t.Fatal(err)
	}
	return cc
}

// NOTE: URLs must have a https scheme and example.com domain to work with the underlying
// httptest plumbing, despite the domain being unused in the actual noise request transport.
func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, *http.Client) {
	ts := httptest.NewUnstartedServer(handler)
	ts.StartTLS()
	client := ts.Client()
	client.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true
	client.Transport.(*http.Transport).DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		return (&net.Dialer{}).DialContext(ctx, network, ts.Listener.Addr().String())
	}
	return ts, client
}

func TestTKAEnablementFlow(t *testing.T) {
	networkLockAvailable = func() bool { return true } // Enable the feature flag

	// Make a fake TKA authority, getting a usable genesis AUM which
	// our mock server can communicate.
	nlPriv := key.NewNLPrivate()
	key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2}
	a1, genesisAUM, err := tka.Create(&tka.Mem{}, tka.State{
		Keys:               []tka.Key{key},
		DisablementSecrets: [][]byte{bytes.Repeat([]byte{0xa5}, 32)},
	}, nlPriv)
	if err != nil {
		t.Fatalf("tka.Create() failed: %v", err)
	}

	ts, client := fakeNoiseServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		switch r.URL.Path {
		case "/machine/tka/bootstrap":
			body := new(tailcfg.TKABootstrapRequest)
			if err := json.NewDecoder(r.Body).Decode(body); err != nil {
				t.Fatal(err)
			}
			if body.NodeID != 420 {
				t.Errorf("bootstrap nodeID=%v, want 420", body.NodeID)
			}
			if body.Head != "" {
				t.Errorf("bootstrap head=%s, want empty hash", body.Head)
			}

			w.WriteHeader(200)
			out := tailcfg.TKABootstrapResponse{
				GenesisAUM: genesisAUM.Serialize(),
			}
			if err := json.NewEncoder(w).Encode(out); err != nil {
				t.Fatal(err)
			}

		default:
			t.Errorf("unhandled endpoint path: %v", r.URL.Path)
			w.WriteHeader(404)
		}
	}))
	defer ts.Close()
	temp := t.TempDir()

	cc := fakeControlClient(t, client)
	b := LocalBackend{
		varRoot: temp,
		cc:      cc,
		ccAuto:  cc,
		logf:    t.Logf,
	}

	b.mu.Lock()
	err = b.tkaSyncIfNeededLocked(&netmap.NetworkMap{
		SelfNode:   &tailcfg.Node{ID: 420},
		TKAEnabled: true,
		TKAHead:    tka.AUMHash{},
	})
	b.mu.Unlock()
	if err != nil {
		t.Errorf("tkaSyncIfNeededLocked() failed: %v", err)
	}
	if b.tka == nil {
		t.Fatal("tka was not initialized")
	}
	if b.tka.authority.Head() != a1.Head() {
		t.Errorf("authority.Head() = %x, want %x", b.tka.authority.Head(), a1.Head())
	}
}

func TestTKADisablementFlow(t *testing.T) {
	networkLockAvailable = func() bool { return true } // Enable the feature flag
	temp := t.TempDir()
	os.Mkdir(filepath.Join(temp, "tka"), 0755)

	// Make a fake TKA authority, to seed local state.
	disablementSecret := bytes.Repeat([]byte{0xa5}, 32)
	nlPriv := key.NewNLPrivate()
	key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2}
	chonk, err := tka.ChonkDir(filepath.Join(temp, "tka"))
	if err != nil {
		t.Fatal(err)
	}
	authority, _, err := tka.Create(chonk, tka.State{
		Keys:               []tka.Key{key},
		DisablementSecrets: [][]byte{tka.DisablementKDF(disablementSecret)},
	}, nlPriv)
	if err != nil {
		t.Fatalf("tka.Create() failed: %v", err)
	}

	ts, client := fakeNoiseServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		switch r.URL.Path {
		case "/machine/tka/bootstrap":
			body := new(tailcfg.TKABootstrapRequest)
			if err := json.NewDecoder(r.Body).Decode(body); err != nil {
				t.Fatal(err)
			}
			var disablement []byte
			switch body.NodeID {
			case 42:
				disablement = bytes.Repeat([]byte{0x42}, 32) // wrong secret
			case 420:
				disablement = disablementSecret
			default:
				t.Errorf("bootstrap nodeID=%v, wanted 42 or 420", body.NodeID)
			}
			var head tka.AUMHash
			if err := head.UnmarshalText([]byte(body.Head)); err != nil {
				t.Fatalf("failed unmarshal of body.Head: %v", err)
			}
			if head != authority.Head() {
				t.Errorf("reported head = %x, want %x", head, authority.Head())
			}

			w.WriteHeader(200)
			out := tailcfg.TKABootstrapResponse{
				DisablementSecret: disablement,
			}
			if err := json.NewEncoder(w).Encode(out); err != nil {
				t.Fatal(err)
			}

		default:
			t.Errorf("unhandled endpoint path: %v", r.URL.Path)
			w.WriteHeader(404)
		}
	}))
	defer ts.Close()

	cc := fakeControlClient(t, client)
	b := LocalBackend{
		varRoot: temp,
		cc:      cc,
		ccAuto:  cc,
		logf:    t.Logf,
		tka: &tkaState{
			authority: authority,
			storage:   chonk,
		},
	}

	// Test that the wrong disablement secret does not shut down the authority.
	// NodeID == 42 indicates this scenario to our mock server.
	b.mu.Lock()
	err = b.tkaSyncIfNeededLocked(&netmap.NetworkMap{
		SelfNode:   &tailcfg.Node{ID: 42},
		TKAEnabled: false,
		TKAHead:    authority.Head(),
	})
	b.mu.Unlock()
	if err != nil {
		t.Errorf("tkaSyncIfNeededLocked() failed: %v", err)
	}
	if b.tka == nil {
		t.Error("TKA was disabled despite incorrect disablement secret")
	}

	// Test the correct disablement secret shuts down the authority.
	// NodeID == 420 indicates this scenario to our mock server.
	b.mu.Lock()
	err = b.tkaSyncIfNeededLocked(&netmap.NetworkMap{
		SelfNode:   &tailcfg.Node{ID: 420},
		TKAEnabled: false,
		TKAHead:    authority.Head(),
	})
	b.mu.Unlock()
	if err != nil {
		t.Errorf("tkaSyncIfNeededLocked() failed: %v", err)
	}

	if b.tka != nil {
		t.Fatal("tka was not shut down")
	}
	if _, err := os.Stat(b.chonkPath()); err == nil || !os.IsNotExist(err) {
		t.Errorf("os.Stat(chonkDir) = %v, want ErrNotExist", err)
	}
}
@@ -38,15 +38,16 @@ import (
// running as root.
var keyTypes = []string{"rsa", "ecdsa", "ed25519"}

// getSSHUsernames discovers and returns the list of usernames that are
// potential Tailscale SSH user targets.
//
// Invariant: must not be called with b.mu held.
func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) {
	res := new(tailcfg.C2NSSHUsernamesResponse)

	b.mu.Lock()
	defer b.mu.Unlock()

	if b.sshServer == nil {
	if !b.tailscaleSSHEnabled() {
		return res, nil
	}

	max := 10
	if req != nil && req.Max != 0 {
		max = req.Max
@@ -70,8 +71,8 @@ func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*ta
		res.Usernames = append(res.Usernames, u)
	}

	if b.prefs != nil && b.prefs.OperatorUser != "" {
		add(b.prefs.OperatorUser)
	if opUser := b.operatorUserName(); opUser != "" {
		add(opUser)
	}

	// Check popular usernames and see if they exist with a real shell.

@@ -772,7 +772,7 @@ func New(logf logger.Logf, logid string, store ipn.StateStore, eng wgengine.Engi
	})

	if root := b.TailscaleVarRoot(); root != "" {
		chonkDir := filepath.Join(root, "chonk")
		chonkDir := filepath.Join(root, "tka")
		if _, err := os.Stat(chonkDir); err == nil {
			// The directory exists, which means network-lock has been initialized.
			storage, err := tka.ChonkDir(chonkDir)

@@ -1,47 +0,0 @@
# Tailscale for Windows dependencies

The following open source dependencies are used to build the [Tailscale client
for Windows][]. See also the dependencies in the [Tailscale CLI][].

[Tailscale client for Windows]: https://tailscale.com/kb/1022/install-windows/
[Tailscale CLI]: ./tailscale.md

## Go Packages

- [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.0.0-rc.1/LICENSE))
- [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/909beea2cc74/LICENSE))
- [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE))
- [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE))
- [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.4.0/LICENSE))
- [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE))
- [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/c00d1f31bab3/LICENSE))
- [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/v1.0.0/license))
- [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/d380b505068b/LICENSE.md))
- [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.15.5/LICENSE))
- [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.15.5/internal/snapref/LICENSE))
- [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.15.5/zstd/internal/xxhash/LICENSE.txt))
- [github.com/lxn/walk](https://pkg.go.dev/github.com/lxn/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/ed127cfb919a/LICENSE))
- [github.com/lxn/win](https://pkg.go.dev/github.com/lxn/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/c3f813abca9f/LICENSE))
- [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/v1.6.0/LICENSE.md))
- [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.2.3/LICENSE.md))
- [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md))
- [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE))
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE))
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/4f986261bf13/LICENSE))
- [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/7e7bdc8411bf/LICENSE))
- [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/6f7dac96:LICENSE))
- [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/c690dde0:LICENSE))
- [golang.org/x/sync/errgroup](https://pkg.go.dev/golang.org/x/sync/errgroup) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/0de741cf:LICENSE))
- [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/c0bba94a:LICENSE))
- [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/03fcf44c:LICENSE))
- [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=415007cec224))
- [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.4.10))
- [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE))
- [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/f81723ceac3f/LICENSE))

## Additional Dependencies

- [Nullsoft Scriptable Install System](https://nsis.sourceforge.io/) ([zlib/libpng](https://nsis.sourceforge.io/License))
- [Wintun](https://www.wintun.net/) ([Prebuilt Binaries License](https://git.zx2c4.com/wintun/tree/prebuilt-binaries-license.txt))
- [wireguard-windows](https://git.zx2c4.com/wireguard-windows/) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING))

@@ -13,6 +13,7 @@ import (
	"fmt"
	"io"
	"log"
	"math/rand"
	"net"
	"net/http"
	"net/netip"
@@ -112,6 +113,10 @@ type Report struct {
	GlobalV4 string // ip:port of global IPv4
	GlobalV6 string // [ip]:port of global IPv6

	// CaptivePortal is set when we think there's a captive portal that is
	// intercepting HTTP traffic.
	CaptivePortal opt.Bool

	// TODO: update Clone when adding new fields
}
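
// The CaptivePortal field is an opt.Bool rather than a plain bool so that
// "not checked" is distinguishable from an explicit true/false result; the
// Set and EqualBool calls used elsewhere in this diff rely on that. A
// minimal, self-contained sketch of the tri-state semantics (illustrative
// only, assuming the tailscale.com/types/opt API used above):
package main

import (
	"fmt"

	"tailscale.com/types/opt"
)

func main() {
	var cp opt.Bool                  // zero value: unset, the check never ran
	fmt.Println(cp.EqualBool(false)) // false: unset is not an explicit false
	cp.Set(true)                     // an HTTP probe saw something other than a 204
	fmt.Println(cp.EqualBool(true))  // true
}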

@@ -175,6 +180,10 @@ type Client struct {
	// If nil, portmap discovery is not done.
	PortMapper *portmapper.Client // lazily initialized on first use

	// For tests
	testEnoughRegions      int
	testCaptivePortalDelay time.Duration

	mu       sync.Mutex            // guards following
	nextFull bool                  // do a full region scan, even if last != nil
	prev     map[time.Time]*Report // some previous reports
@@ -192,6 +201,9 @@ type STUNConn interface {
}

func (c *Client) enoughRegions() int {
	if c.testEnoughRegions > 0 {
		return c.testEnoughRegions
	}
	if c.Verbose {
		// Abuse verbose a bit here so netcheck can show all region latencies
		// in verbose mode.
@@ -200,6 +212,14 @@ func (c *Client) enoughRegions() int {
	return 3
}

func (c *Client) captivePortalDelay() time.Duration {
	if c.testCaptivePortalDelay > 0 {
		return c.testCaptivePortalDelay
	}
	// Chosen semi-arbitrarily
	return 200 * time.Millisecond
}

func (c *Client) logf(format string, a ...any) {
	if c.Logf != nil {
		c.Logf(format, a...)
@@ -783,13 +803,35 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap) (_ *Report,
	}
	c.curState = rs
	last := c.last

	// Even if we're doing a non-incremental update, we may want to try our
	// preferred DERP region for captive portal detection. Save that, if we
	// have it.
	var preferredDERP int
	if last != nil {
		preferredDERP = last.PreferredDERP
	}

	now := c.timeNow()

	doFull := false
	if c.nextFull || now.Sub(c.lastFull) > 5*time.Minute {
		doFull = true
	}
	// If the last report had a captive portal and reported no UDP access,
	// it's possible that we didn't get a useful netcheck due to the
	// captive portal blocking us. If so, make this report a full
	// (non-incremental) one.
	if !doFull && last != nil {
		doFull = !last.UDP && last.CaptivePortal.EqualBool(true)
	}
	if doFull {
		last = nil // causes makeProbePlan below to do a full (initial) plan
		c.nextFull = false
		c.lastFull = now
		metricNumGetReportFull.Add(1)
	}

	rs.incremental = last != nil
	c.mu.Unlock()

@@ -874,6 +916,48 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap) (_ *Report,

	plan := makeProbePlan(dm, ifState, last)

	// If we're doing a full probe, also check for a captive portal. We
	// delay by a bit to wait for UDP STUN to finish, to avoid the probe if
	// it's unnecessary.
	captivePortalDone := syncs.ClosedChan()
	captivePortalStop := func() {}
	if !rs.incremental {
		// NOTE(andrew): we can't simply add this goroutine to the
		// `NewWaitGroupChan` below, since we don't wait for that
		// waitgroup to finish when exiting this function and thus get
		// a data race.
		ch := make(chan struct{})
		captivePortalDone = ch

		tmr := time.AfterFunc(c.captivePortalDelay(), func() {
			defer close(ch)
			found, err := c.checkCaptivePortal(ctx, dm, preferredDERP)
			if err != nil {
				c.logf("[v1] checkCaptivePortal: %v", err)
				return
			}
			rs.report.CaptivePortal.Set(found)
		})

		captivePortalStop = func() {
			// Don't cancel our captive portal check if we're
			// explicitly doing a verbose netcheck.
			if c.Verbose {
				return
			}

			if tmr.Stop() {
				// Stopped successfully; need to close the
				// signal channel ourselves.
				close(ch)
				return
			}

			// Did not stop; do nothing and it'll finish by itself
			// and close the signal channel.
		}
	}
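
// The Stop/close dance above is a standard Go pattern: exactly one party may
// close the done channel. If Stop reports true, the callback will never run,
// so the stopper must close it; otherwise the callback owns the close. A
// self-contained sketch of the same pattern (illustrative, not tailscale API):
package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})
	tmr := time.AfterFunc(50*time.Millisecond, func() {
		defer close(done) // the callback ran: it closes done
		fmt.Println("timer fired")
	})

	if tmr.Stop() {
		close(done) // stopped before it ran: we close done instead
	}
	<-done // safe either way: done is closed exactly once
}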

	wg := syncs.NewWaitGroupChan()
	wg.Add(len(plan))
	for _, probeSet := range plan {
@@ -894,9 +978,17 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap) (_ *Report,
	case <-stunTimer.C:
	case <-ctx.Done():
	case <-wg.DoneChan():
		// All of our probes finished, so if we have >0 responses, we
		// stop our captive portal check.
		if rs.anyUDP() {
			captivePortalStop()
		}
	case <-rs.stopProbeCh:
		// Saw enough regions.
		c.vlogf("saw enough regions; not waiting for rest")
		// We can stop the captive portal check since we know that we
		// got a bunch of STUN responses.
		captivePortalStop()
	}

	rs.waitHairCheck(ctx)
@@ -965,6 +1057,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap) (_ *Report,
		wg.Wait()
	}

	// Wait for captive portal check before finishing the report.
	<-captivePortalDone

	return c.finishAndStoreReport(rs, dm), nil
}

@@ -979,6 +1074,54 @@ func (c *Client) finishAndStoreReport(rs *reportState, dm *tailcfg.DERPMap) *Rep
	return report
}

var noRedirectClient = &http.Client{
	// No redirects allowed
	CheckRedirect: func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse
	},

	// Remaining fields are the same as the default client.
	Transport: http.DefaultClient.Transport,
	Jar:       http.DefaultClient.Jar,
	Timeout:   http.DefaultClient.Timeout,
}

// checkCaptivePortal reports whether or not we think the system is behind a
// captive portal, detected by making a request to a URL that we know should
// return a "204 No Content" response and checking if that's what we get.
//
// The boolean return is whether we think we have a captive portal.
func (c *Client) checkCaptivePortal(ctx context.Context, dm *tailcfg.DERPMap, preferredDERP int) (bool, error) {
	defer noRedirectClient.CloseIdleConnections()

	// If we have a preferred DERP region with at least one node, try
	// that; otherwise, pick a random one not marked as "Avoid".
	if preferredDERP == 0 || dm.Regions[preferredDERP] == nil ||
		(preferredDERP != 0 && len(dm.Regions[preferredDERP].Nodes) == 0) {
		rids := make([]int, 0, len(dm.Regions))
		for id, reg := range dm.Regions {
			if reg == nil || reg.Avoid || len(reg.Nodes) == 0 {
				continue
			}
			rids = append(rids, id)
		}
		preferredDERP = rids[rand.Intn(len(rids))]
	}

	node := dm.Regions[preferredDERP].Nodes[0]
	req, err := http.NewRequestWithContext(ctx, "GET", "http://"+node.HostName+"/generate_204", nil)
	if err != nil {
		return false, err
	}
	r, err := noRedirectClient.Do(req)
	if err != nil {
		return false, err
	}
	c.logf("[v2] checkCaptivePortal url=%q status_code=%d", req.URL.String(), r.StatusCode)

	return r.StatusCode != 204, nil
}
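
// A minimal standalone version of the same detection idea: fetch a URL that
// is known to return 204 No Content, refuse to follow redirects (a portal
// typically answers with a redirect to its login page), and treat any other
// status as a sign of interception. The probe URL below is a placeholder for
// illustration; the netcheck code above probes DERP nodes' /generate_204.
package main

import (
	"fmt"
	"net/http"
)

func behindCaptivePortal(url string) (bool, error) {
	client := &http.Client{
		// Observe the raw status code instead of following redirects.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return resp.StatusCode != http.StatusNoContent, nil
}

func main() {
	found, err := behindCaptivePortal("http://example.com/generate_204")
	if err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	fmt.Println("captive portal:", found)
}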

// runHTTPOnlyChecks is the netcheck done by environments that can
// only do HTTP requests, such as ws/wasm.
func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *reportState, dm *tailcfg.DERPMap) error {
@@ -1200,6 +1343,9 @@ func (c *Client) logConciseReport(r *Report, dm *tailcfg.DERPMap) {
	if r.GlobalV6 != "" {
		fmt.Fprintf(w, " v6a=%v", r.GlobalV6)
	}
	if r.CaptivePortal != "" {
		fmt.Fprintf(w, " captiveportal=%v", r.CaptivePortal)
	}
	fmt.Fprintf(w, " derp=%v", r.PreferredDERP)
	if r.PreferredDERP != 0 {
		fmt.Fprintf(w, " derpdist=")

@@ -9,11 +9,13 @@ import (
	"context"
	"fmt"
	"net"
	"net/http"
	"net/netip"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync/atomic"
	"testing"
	"time"

@@ -115,6 +117,9 @@ func TestWorksWhenUDPBlocked(t *testing.T) {
	// OS IPv6 test is irrelevant here, accept whatever the current
	// machine has.
	want.OSHasIPv6 = r.OSHasIPv6
	// Captive portal test is irrelevant; accept what the current report
	// has.
	want.CaptivePortal = r.CaptivePortal

	if !reflect.DeepEqual(r, want) {
		t.Errorf("mismatch\n got: %+v\nwant: %+v\n", r, want)
@@ -661,3 +666,57 @@ func TestSortRegions(t *testing.T) {
		t.Errorf("got %v; want %v", got, want)
	}
}

func TestNoCaptivePortalWhenUDP(t *testing.T) {
	// Override noRedirectClient to handle the /generate_204 endpoint
	var generate204Called atomic.Bool
	tr := RoundTripFunc(func(req *http.Request) *http.Response {
		if !strings.HasSuffix(req.URL.String(), "/generate_204") {
			panic("bad URL: " + req.URL.String())
		}
		generate204Called.Store(true)
		return &http.Response{
			StatusCode: http.StatusNoContent,
			Header:     make(http.Header),
		}
	})

	oldTransport := noRedirectClient.Transport
	t.Cleanup(func() { noRedirectClient.Transport = oldTransport })
	noRedirectClient.Transport = tr

	stunAddr, cleanup := stuntest.Serve(t)
	defer cleanup()

	c := &Client{
		Logf:              t.Logf,
		UDPBindAddr:       "127.0.0.1:0",
		testEnoughRegions: 1,

		// Set the delay long enough that we have time to cancel it
		// when our STUN probe succeeds.
		testCaptivePortalDelay: 10 * time.Second,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	r, err := c.GetReport(ctx, stuntest.DERPMapOf(stunAddr.String()))
	if err != nil {
		t.Fatal(err)
	}

	// Should not have called our captive portal function.
	if generate204Called.Load() {
		t.Errorf("captive portal check called; expected no call")
	}
	if r.CaptivePortal != "" {
		t.Errorf("got CaptivePortal=%q, want empty", r.CaptivePortal)
	}
}

type RoundTripFunc func(req *http.Request) *http.Response

func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return f(req), nil
}

@@ -18,7 +18,7 @@ import (
const unknown = ipproto.Unknown

// RFC1858: prevent overlapping fragment attacks.
const minFrag = 60 + 20 // max IPv4 header + basic TCP header
const minFragBlks = (60 + 20) / 8 // max IPv4 header + basic TCP header in fragment blocks (8 bytes each)

type TCPFlag uint8

@@ -152,11 +152,12 @@ func (q *Parsed) decode4(b []byte) {
	// it as Unknown. We can also treat any subsequent fragment that starts
	// at such a low offset as Unknown.
	fragFlags := binary.BigEndian.Uint16(b[6:8])
	moreFrags := (fragFlags & 0x20) != 0
	moreFrags := (fragFlags & 0x2000) != 0
	fragOfs := fragFlags & 0x1FFF

	if fragOfs == 0 {
		// This is the first fragment
		if moreFrags && len(sub) < minFrag {
		if moreFrags && len(sub) < minFragBlks {
			// Suspiciously short first fragment, dump it.
			q.IPProto = unknown
			return
@@ -216,7 +217,7 @@ func (q *Parsed) decode4(b []byte) {
		}
	} else {
		// This is a fragment other than the first one.
		if fragOfs < minFrag {
		if fragOfs < minFragBlks {
			// First frag was suspiciously short, so we can't
			// trust the followup either.
			q.IPProto = unknown
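
// Worked example of the flags/offset split corrected above: the 16-bit field
// at bytes 6-7 of the IPv4 header carries three flag bits (0x2000 is "More
// Fragments"; 0x20 tested the wrong bit) and a 13-bit offset counted in
// 8-byte blocks, so 0x2005 means MF set with the fragment starting at byte
// 40. Standalone sketch, independent of the packet package:
package main

import "fmt"

func main() {
	fragFlags := uint16(0x2005)           // MF bit + offset of 5 blocks
	moreFrags := fragFlags&0x2000 != 0    // true
	fragOfsBlks := fragFlags & 0x1FFF     // 5, in 8-byte blocks
	fmt.Println(moreFrags, fragOfsBlks*8) // true 40
}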

@@ -37,9 +37,9 @@ func mustIPPort(s string) netip.AddrPort {
var icmp4RequestBuffer = []byte{
	// IP header up to checksum
	0x45, 0x00, 0x00, 0x27, 0xde, 0xad, 0x00, 0x00, 0x40, 0x01, 0x8c, 0x15,
	// source ip
	// source IP
	0x01, 0x02, 0x03, 0x04,
	// destination ip
	// destination IP
	0x05, 0x06, 0x07, 0x08,
	// ICMP header
	0x08, 0x00, 0x7d, 0x22,
@@ -61,9 +61,9 @@ var icmp4RequestDecode = Parsed{

var icmp4ReplyBuffer = []byte{
	0x45, 0x00, 0x00, 0x25, 0x21, 0x52, 0x00, 0x00, 0x40, 0x01, 0x49, 0x73,
	// source ip
	// source IP
	0x05, 0x06, 0x07, 0x08,
	// destination ip
	// destination IP
	0x01, 0x02, 0x03, 0x04,
	// ICMP header
	0x00, 0x00, 0xe6, 0x9e,
@@ -119,9 +119,9 @@ var unknownPacketDecode = Parsed{
var tcp4PacketBuffer = []byte{
	// IP header up to checksum
	0x45, 0x00, 0x00, 0x37, 0xde, 0xad, 0x00, 0x00, 0x40, 0x06, 0x49, 0x5f,
	// source ip
	// source IP
	0x01, 0x02, 0x03, 0x04,
	// destination ip
	// destination IP
	0x05, 0x06, 0x07, 0x08,
	// TCP header with SYN, ACK set
	0x00, 0x7b, 0x02, 0x37, 0x00, 0x00, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00,
@@ -172,9 +172,9 @@ var tcp6RequestDecode = Parsed{
var udp4RequestBuffer = []byte{
	// IP header up to checksum
	0x45, 0x00, 0x00, 0x2b, 0xde, 0xad, 0x00, 0x00, 0x40, 0x11, 0x8c, 0x01,
	// source ip
	// source IP
	0x01, 0x02, 0x03, 0x04,
	// destination ip
	// destination IP
	0x05, 0x06, 0x07, 0x08,
	// UDP header
	0x00, 0x7b, 0x02, 0x37, 0x00, 0x17, 0x72, 0x1d,
@@ -197,9 +197,9 @@ var udp4RequestDecode = Parsed{
var invalid4RequestBuffer = []byte{
	// IP header up to checksum. IHL field points beyond end of packet.
	0x4a, 0x00, 0x00, 0x14, 0xde, 0xad, 0x00, 0x00, 0x40, 0x11, 0x8c, 0x01,
	// source ip
	// source IP
	0x01, 0x02, 0x03, 0x04,
	// destination ip
	// destination IP
	0x05, 0x06, 0x07, 0x08,
}

@@ -244,9 +244,9 @@ var udp6RequestDecode = Parsed{
var udp4ReplyBuffer = []byte{
	// IP header up to checksum
	0x45, 0x00, 0x00, 0x29, 0x21, 0x52, 0x00, 0x00, 0x40, 0x11, 0x49, 0x5f,
	// source ip
	// source IP
	0x05, 0x06, 0x07, 0x08,
	// destination ip
	// destination IP
	0x01, 0x02, 0x03, 0x04,
	// UDP header
	0x02, 0x37, 0x00, 0x7b, 0x00, 0x15, 0xd3, 0x9d,
@@ -265,6 +265,59 @@ var udp4ReplyDecode = Parsed{
	Dst: mustIPPort("5.6.7.8:123"),
}

// First TCP fragment of a packet with leading 24 bytes of 'a's
var tcp4MediumFragmentBuffer = []byte{
	// IP header up to checksum
	0x45, 0x20, 0x00, 0x4c, 0x2c, 0x62, 0x20, 0x00, 0x22, 0x06, 0x3a, 0x0f,
	// source IP
	0x01, 0x02, 0x03, 0x04,
	// destination IP
	0x05, 0x06, 0x07, 0x08,
	// TCP header
	0x00, 0x50, 0xf3, 0x8c, 0x58, 0xad, 0x60, 0x94, 0x25, 0xe4, 0x23, 0xa8, 0x80,
	0x10, 0x01, 0xfd, 0xc6, 0x6e, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0xff, 0x60,
	0xfb, 0xfe, 0xba, 0x31, 0x78, 0x6a,
	// data
	0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
	0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
}

var tcp4MediumFragmentDecode = Parsed{
	b:       tcp4MediumFragmentBuffer,
	subofs:  20,
	dataofs: 52,
	length:  len(tcp4MediumFragmentBuffer),

	IPVersion: 4,
	IPProto:   TCP,
	Src:       mustIPPort("1.2.3.4:80"),
	Dst:       mustIPPort("5.6.7.8:62348"),
	TCPFlags:  0x10,
}

var tcp4ShortFragmentBuffer = []byte{
	// IP header up to checksum
	0x45, 0x20, 0x00, 0x1e, 0x2c, 0x62, 0x20, 0x00, 0x22, 0x06, 0x3c, 0x4f,
	// source IP
	0x01, 0x02, 0x03, 0x04,
	// destination IP
	0x05, 0x06, 0x07, 0x08,
	// partial TCP header
	0x00, 0x50, 0xf3, 0x8c, 0x58, 0xad, 0x60, 0x94, 0x00, 0x00,
}

var tcp4ShortFragmentDecode = Parsed{
	b:       tcp4ShortFragmentBuffer,
	subofs:  20,
	dataofs: 0,
	length:  len(tcp4ShortFragmentBuffer),
	// short fragments are rejected (marked unknown) to avoid header attacks as described in RFC 1858
	IPProto:   ipproto.Unknown,
	IPVersion: 4,
	Src:       mustIPPort("1.2.3.4:0"),
	Dst:       mustIPPort("5.6.7.8:0"),
}

var igmpPacketBuffer = []byte{
	// IP header up to checksum
	0x46, 0xc0, 0x00, 0x20, 0x00, 0x00, 0x40, 0x00, 0x01, 0x02, 0x41, 0x22,
@@ -404,6 +457,8 @@ func TestDecode(t *testing.T) {
		{"invalid4", invalid4RequestBuffer, invalid4RequestDecode},
		{"ipv4_tsmp", ipv4TSMPBuffer, ipv4TSMPDecode},
		{"ipv4_sctp", sctpBuffer, sctpDecode},
		{"ipv4_frag", tcp4MediumFragmentBuffer, tcp4MediumFragmentDecode},
		{"ipv4_fragtooshort", tcp4ShortFragmentBuffer, tcp4ShortFragmentDecode},
	}

	for _, tt := range tests {

@@ -11,11 +11,13 @@ import (
)

const (
	blockSize = 32000 // size of the block of data to send
	blockSize       = 2 * 1024 * 1024  // size of the block of data to send
	MinDuration     = 5 * time.Second  // minimum duration for a test
	DefaultDuration = MinDuration      // default duration for a test
	MaxDuration     = 30 * time.Second // maximum duration for a test
	version = 1 // value used when comparing client and server versions
	version     = 2                      // value used when comparing client and server versions
	increment   = time.Second            // increment to display results for, in seconds
	minInterval = 10 * time.Millisecond  // minimum interval length for a result to be included
	DefaultPort = 20333
@@ -37,14 +37,14 @@ type configResponse struct {

// This represents the Result of a speedtest within a specific interval
type Result struct {
	Bytes int // number of bytes sent/received during the interval
	IntervalStart time.Duration // duration between the start of the interval and the start of the test
	IntervalEnd time.Duration // duration between the end of the interval and the start of the test
	Total bool // if true, this result struct represents the entire test, rather than a segment of the test
	Bytes         int       // number of bytes sent/received during the interval
	IntervalStart time.Time // start of the interval
	IntervalEnd   time.Time // end of the interval
	Total         bool      // if true, this result struct represents the entire test, rather than a segment of the test
}

func (r Result) MBitsPerSecond() float64 {
	return r.MegaBits() / (r.IntervalEnd - r.IntervalStart).Seconds()
	return r.MegaBits() / r.IntervalEnd.Sub(r.IntervalStart).Seconds()
}

func (r Result) MegaBytes() float64 {
@@ -56,7 +56,7 @@ func (r Result) MegaBits() float64 {
}

func (r Result) Interval() time.Duration {
	return r.IntervalEnd - r.IntervalStart
	return r.IntervalEnd.Sub(r.IntervalStart)
}
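
// A quick sanity check of the throughput math above: megabits derive from
// bytes (1 byte = 8 bits, 1 megabit = 1e6 bits), and the rate divides by the
// wall-clock interval, now computed as IntervalEnd.Sub(IntervalStart).
// Standalone arithmetic sketch, not using the speedtest package:
package main

import (
	"fmt"
	"time"
)

func main() {
	bytes := 2_500_000                    // transferred during the interval
	interval := 1 * time.Second           // IntervalEnd.Sub(IntervalStart)
	megabits := float64(bytes) * 8 / 1e6  // 20 megabits
	fmt.Printf("%.1f Mbit/s\n", megabits/interval.Seconds()) // 20.0 Mbit/s
}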

type Direction int

@@ -81,9 +81,6 @@ func doTest(conn net.Conn, conf config) ([]Result, error) {
	var currentTime time.Time
	var results []Result

	startTime := time.Now()
	lastCalculated := startTime

	if conf.Direction == Download {
		conn.SetReadDeadline(time.Now().Add(conf.TestDuration).Add(5 * time.Second))
	} else {
@@ -94,6 +91,9 @@ func doTest(conn net.Conn, conf config) ([]Result, error) {

	}

	startTime := time.Now()
	lastCalculated := startTime

SpeedTestLoop:
	for {
		var n int
@@ -110,48 +110,37 @@ SpeedTestLoop:
			return nil, fmt.Errorf("unexpected error has occurred: %w", err)
		}
		} else {
			// Need to change the data a little bit, to avoid any compression.
			for i := range bufferData {
				bufferData[i]++
			}
			n, err = conn.Write(bufferData)
			if err != nil {
				// If the write failed, there is most likely something wrong with the connection.
				return nil, fmt.Errorf("upload failed: %w", err)
			}
		}
		currentTime = time.Now()
		intervalBytes += n

		currentTime = time.Now()
		// checks if the current time is more or equal to the lastCalculated time plus the increment
		if currentTime.After(lastCalculated.Add(increment)) {
			intervalStart := lastCalculated.Sub(startTime)
			intervalEnd := currentTime.Sub(startTime)
			if (intervalEnd - intervalStart) > minInterval {
				results = append(results, Result{Bytes: intervalBytes, IntervalStart: intervalStart, IntervalEnd: intervalEnd, Total: false})
			}
		if currentTime.Sub(lastCalculated) >= increment {
			results = append(results, Result{Bytes: intervalBytes, IntervalStart: lastCalculated, IntervalEnd: currentTime, Total: false})
			lastCalculated = currentTime
			totalBytes += intervalBytes
			intervalBytes = 0
		}

		if conf.Direction == Upload && time.Since(startTime) > conf.TestDuration {
		if conf.Direction == Upload && currentTime.Sub(startTime) > conf.TestDuration {
			break SpeedTestLoop
		}
	}

	// get last segment
	intervalStart := lastCalculated.Sub(startTime)
	intervalEnd := currentTime.Sub(startTime)
	if (intervalEnd - intervalStart) > minInterval {
		results = append(results, Result{Bytes: intervalBytes, IntervalStart: intervalStart, IntervalEnd: intervalEnd, Total: false})
	if currentTime.Sub(lastCalculated) > minInterval {
		results = append(results, Result{Bytes: intervalBytes, IntervalStart: lastCalculated, IntervalEnd: currentTime, Total: false})
	}

	// get total
	totalBytes += intervalBytes
	intervalEnd = currentTime.Sub(startTime)
	if intervalEnd > minInterval {
		results = append(results, Result{Bytes: totalBytes, IntervalStart: 0, IntervalEnd: intervalEnd, Total: true})
	if currentTime.Sub(startTime) > minInterval {
		results = append(results, Result{Bytes: totalBytes, IntervalStart: startTime, IntervalEnd: currentTime, Total: true})
	}

	return results, nil

@@ -7,6 +7,7 @@ package speedtest
import (
	"net"
	"testing"
	"time"
)

func TestDownload(t *testing.T) {
@@ -23,9 +24,9 @@ func TestDownload(t *testing.T) {
	type state struct {
		err error
	}
	displayResult := func(t *testing.T, r Result) {
	displayResult := func(t *testing.T, r Result, start time.Time) {
		t.Helper()
		t.Logf("{ Megabytes: %.2f, Start: %.1f, End: %.1f, Total: %t }", r.MegaBytes(), r.IntervalStart.Seconds(), r.IntervalEnd.Seconds(), r.Total)
		t.Logf("{ Megabytes: %.2f, Start: %.1f, End: %.1f, Total: %t }", r.MegaBytes(), r.IntervalStart.Sub(start).Seconds(), r.IntervalEnd.Sub(start).Seconds(), r.Total)
	}
	stateChan := make(chan state, 1)

@@ -49,8 +50,9 @@ func TestDownload(t *testing.T) {
			t.Fatalf("download results: expected length: %d, actual length: %d", expectedLen, len(results))
		}

		start := results[0].IntervalStart
		for _, result := range results {
			displayResult(t, result)
			displayResult(t, result, start)
		}
	})

@@ -66,8 +68,9 @@ func TestDownload(t *testing.T) {
			t.Fatalf("upload results: expected length: %d, actual length: %d", expectedLen, len(results))
		}

		start := results[0].IntervalStart
		for _, result := range results {
			displayResult(t, result)
			displayResult(t, result, start)
		}
	})

@@ -79,7 +79,9 @@ type CapabilityVersion int
// - 40: 2022-08-22: added Node.KeySignature, PeersChangedPatch.KeySignature
// - 41: 2022-08-30: uses 100.100.100.100 for route-less ExtraRecords if global nameservers is set
// - 42: 2022-09-06: NextDNS DoH support; see https://github.com/tailscale/tailscale/pull/5556
const CurrentCapabilityVersion CapabilityVersion = 42
// - 43: 2022-09-21: clients can return usernames for SSH
// - 44: 2022-09-22: MapResponse.ControlDialPlan
const CurrentCapabilityVersion CapabilityVersion = 44

type StableID string

@@ -234,6 +236,9 @@ type Node struct {
	ComputedName            string `json:",omitempty"` // MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS)
	computedHostIfDifferent string // hostname, if different than ComputedName, otherwise empty
	ComputedNameWithHost    string `json:",omitempty"` // either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set

	// DataPlaneAuditLogID is the per-node logtail ID used for data plane audit logging.
	DataPlaneAuditLogID string `json:",omitempty"`
}

// DisplayName returns the user-facing name for a node which should
@@ -1372,9 +1377,47 @@ type MapResponse struct {
	// indicates no change from the value sent earlier.
	TKAInfo *TKAInfo `json:",omitempty"`

	// DomainDataPlaneAuditLogID, if non-empty, is the per-tailnet log ID to be
	// used when writing data plane audit logs.
	DomainDataPlaneAuditLogID string `json:",omitempty"`

	// Debug is normally nil, except for when the control server
	// is setting debug settings on a node.
	Debug *Debug `json:",omitempty"`

	// ControlDialPlan tells the client how to connect to the control
	// server. An initial nil is equivalent to new(ControlDialPlan).
	// A subsequent streamed nil means no change.
	ControlDialPlan *ControlDialPlan `json:",omitempty"`
}

// ControlDialPlan is instructions from the control server to the client on how
// to connect to the control server; this is useful for maintaining a connection
// if the client's network state changes after the initial connection, or due
// to the configuration that the control server pushes.
type ControlDialPlan struct {
	// An empty list means the default: use DNS (unspecified which DNS).
	Candidates []ControlIPCandidate
}

// ControlIPCandidate represents a single candidate address to use when
// connecting to the control server.
type ControlIPCandidate struct {
	// IP is the address to attempt connecting to.
	IP netip.Addr

	// DialStartDelaySec is the number of seconds after the beginning of the
	// connection process to wait before trying this candidate.
	DialStartDelaySec float64 `json:",omitempty"`

	// DialTimeoutSec is the timeout for a connection to this candidate,
	// starting after DialStartDelaySec.
	DialTimeoutSec float64 `json:",omitempty"`

	// Priority is the relative priority of this candidate; candidates with
	// a higher priority are preferred over candidates with a lower
	// priority.
	Priority int `json:",omitempty"`
}
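
// One way a client could act on such a dial plan: start each candidate after
// its delay, bound it by its timeout, and prefer higher-priority successes.
// This scheduling loop is a hypothetical sketch, not the actual client
// dialer; the plain TCP dial to port 443 is an assumption for illustration.
package main

import (
	"context"
	"fmt"
	"net"
	"net/netip"
	"time"
)

type candidate struct {
	IP                netip.Addr
	DialStartDelaySec float64
	DialTimeoutSec    float64
	Priority          int
}

func dialCandidate(ctx context.Context, c candidate) (net.Conn, error) {
	// Wait out the per-candidate start delay, unless the overall attempt
	// is cancelled first.
	select {
	case <-time.After(time.Duration(c.DialStartDelaySec * float64(time.Second))):
	case <-ctx.Done():
		return nil, ctx.Err()
	}
	dctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second)))
	defer cancel()
	var d net.Dialer
	return d.DialContext(dctx, "tcp", net.JoinHostPort(c.IP.String(), "443"))
}

func main() {
	c := candidate{IP: netip.MustParseAddr("192.0.2.1"), DialTimeoutSec: 5, Priority: 1}
	conn, err := dialCandidate(context.Background(), c)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	conn.Close()
}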

// Debug are instructions from the control server to the client
@@ -1641,6 +1684,13 @@ type SetDNSRequest struct {
// SetDNSResponse is the response to a SetDNSRequest.
type SetDNSResponse struct{}

// HealthChangeRequest is the JSON request body type used to report
// node health changes to https://<control>/machine/<mkey hex>/update-health.
type HealthChangeRequest struct {
	Subsys string // a health.Subsystem value in string form
	Error  string // or empty if cleared
}

// SSHPolicy is the policy for how to handle incoming SSH connections
// over Tailscale.
type SSHPolicy struct {

@@ -95,6 +95,7 @@ var _NodeCloneNeedsRegeneration = Node(struct {
	ComputedName            string
	computedHostIfDifferent string
	ComputedNameWithHost    string
	DataPlaneAuditLogID     string
}{})

// Clone makes a deep copy of Hostinfo.

@@ -332,6 +332,7 @@ func TestNodeEqual(t *testing.T) {
		"LastSeen", "Online", "KeepAlive", "MachineAuthorized",
		"Capabilities",
		"ComputedName", "computedHostIfDifferent", "ComputedNameWithHost",
		"DataPlaneAuditLogID",
	}
	if have := fieldsOf(reflect.TypeOf(Node{})); !reflect.DeepEqual(have, nodeHandles) {
		t.Errorf("Node.Equal check might be out of sync\nfields: %q\nhandled: %q\n",

@@ -173,6 +173,7 @@ func (v NodeView) MachineAuthorized() bool { return v.ж.MachineAuthor
func (v NodeView) Capabilities() views.Slice[string] { return views.SliceOf(v.ж.Capabilities) }
func (v NodeView) ComputedName() string { return v.ж.ComputedName }
func (v NodeView) ComputedNameWithHost() string { return v.ж.ComputedNameWithHost }
func (v NodeView) DataPlaneAuditLogID() string { return v.ж.DataPlaneAuditLogID }
func (v NodeView) Equal(v2 NodeView) bool { return v.ж.Equal(v2.ж) }

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
@@ -203,6 +204,7 @@ var _NodeViewNeedsRegeneration = Node(struct {
	ComputedName            string
	computedHostIfDifferent string
	ComputedNameWithHost    string
	DataPlaneAuditLogID     string
}{})

// View returns a readonly view of Hostinfo.

@@ -101,6 +101,8 @@ type TKAInfo struct {
// TKABootstrapRequest is sent by a node to get information necessary for
// enabling or disabling the tailnet key authority.
type TKABootstrapRequest struct {
	// NodeID is the node ID of the initiating client.
	NodeID NodeID
	// Head represents the node's head AUMHash (tka.Authority.Head), if
	// network lock is enabled.
	Head string
@@ -120,6 +122,8 @@ type TKABootstrapResponse struct {
// state (TKA). Values of type tka.AUMHash are encoded as strings in their
// MarshalText form.
type TKASyncOfferRequest struct {
	// NodeID is the node ID of the initiating client.
	NodeID NodeID
	// Head represents the node's head AUMHash (tka.Authority.Head). This
	// corresponds to tka.SyncOffer.Head.
	Head string
@@ -147,6 +151,8 @@ type TKASyncOfferResponse struct {
// TKASyncSendRequest encodes AUMs that a node believes the control plane
// is missing.
type TKASyncSendRequest struct {
	// NodeID is the node ID of the initiating client.
	NodeID NodeID
	// MissingAUMs encodes AUMs that the node believes the control plane
	// is missing.
	MissingAUMs []tkatype.MarshaledAUM

@@ -45,6 +45,12 @@ func (h AUMHash) MarshalText() ([]byte, error) {
	return b, nil
}

// IsZero returns true if the hash is the empty value.
func (h AUMHash) IsZero() bool {
	return h == (AUMHash{})
}

// AUMKind describes valid AUM types.
type AUMKind uint8

@@ -31,7 +31,7 @@ func TestAuthorityBuilderAddKey(t *testing.T) {
	storage := &Mem{}
	a, _, err := Create(storage, State{
		Keys: []Key{key},
		DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
		DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
	}, signer25519(priv))
	if err != nil {
		t.Fatalf("Create() failed: %v", err)
@@ -68,7 +68,7 @@ func TestAuthorityBuilderRemoveKey(t *testing.T) {
	storage := &Mem{}
	a, _, err := Create(storage, State{
		Keys: []Key{key, key2},
		DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
		DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
	}, signer25519(priv))
	if err != nil {
		t.Fatalf("Create() failed: %v", err)
@@ -100,7 +100,7 @@ func TestAuthorityBuilderSetKeyVote(t *testing.T) {
	storage := &Mem{}
	a, _, err := Create(storage, State{
		Keys: []Key{key},
		DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
		DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
	}, signer25519(priv))
	if err != nil {
		t.Fatalf("Create() failed: %v", err)
@@ -136,7 +136,7 @@ func TestAuthorityBuilderSetKeyMeta(t *testing.T) {
	storage := &Mem{}
	a, _, err := Create(storage, State{
		Keys: []Key{key},
		DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
		DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
	}, signer25519(priv))
	if err != nil {
		t.Fatalf("Create() failed: %v", err)
@@ -172,7 +172,7 @@ func TestAuthorityBuilderMultiple(t *testing.T) {
	storage := &Mem{}
	a, _, err := Create(storage, State{
		Keys: []Key{key},
		DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
		DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
	}, signer25519(priv))
	if err != nil {
		t.Fatalf("Create() failed: %v", err)

@@ -169,7 +169,7 @@ func testScenario(t *testing.T, sharedChain string, sharedOptions ...testchainOp
	sharedOptions = append(sharedOptions,
		optTemplate("genesis", AUM{MessageKind: AUMCheckpoint, State: &State{
			Keys: []Key{key},
			DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
			DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
		}}),
		optKey("key", key, priv),
		optSignAllUsing("key"))

@@ -226,7 +226,7 @@ func TestSigCredential(t *testing.T) {
	a, _ := Open(newTestchain(t, "G1\nG1.template = genesis",
		optTemplate("genesis", AUM{MessageKind: AUMCheckpoint, State: &State{
			Keys: []Key{k},
			DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
			DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
		}})).Chonk())
	if err := a.NodeKeyAuthorized(node.Public(), nestedSig.Serialize()); err == nil {
		t.Error("NodeKeyAuthorized(SigCredential, node) did not fail")

10
tka/state.go
@@ -93,7 +93,13 @@ const disablementLength = 32

var disablementSalt = []byte("tailscale network-lock disablement salt")

func disablementKDF(secret []byte) []byte {
// DisablementKDF computes a public value which can be stored in a
// key authority, but cannot be reversed to find the input secret.
//
// When the output of this function is stored in tka state (i.e. in
// tka.State.DisablementSecrets) a call to Authority.ValidDisablement()
// with the input of this function as the argument will return true.
func DisablementKDF(secret []byte) []byte {
	// time = 4 (3 recommended, booped to 4 to compensate for less memory)
	// memory = 16 (32 recommended)
	// threads = 4
@@ -103,7 +109,7 @@ func disablementKDF(secret []byte) []byte {

// checkDisablement returns true for a valid disablement secret.
func (s State) checkDisablement(secret []byte) bool {
	derived := disablementKDF(secret)
	derived := DisablementKDF(secret)
	for _, candidate := range s.DisablementSecrets {
		if bytes.Equal(derived, candidate) {
			return true
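
// The KDF parameters noted in the comments above (time=4, memory=16,
// threads=4) suggest an Argon2 derivation over the fixed salt. A hedged
// sketch of what such a derivation could look like using
// golang.org/x/crypto/argon2; the exact argon2 variant and memory units
// used by the tka package are assumptions here, not confirmed by this diff:
package main

import (
	"fmt"

	"golang.org/x/crypto/argon2"
)

var disablementSalt = []byte("tailscale network-lock disablement salt")

func disablementKDFSketch(secret []byte) []byte {
	// Assumed: time=4, memory=16 MiB (argon2 takes KiB), threads=4,
	// 32-byte output to match disablementLength.
	return argon2.Key(secret, disablementSalt, 4, 16*1024, 4, 32)
}

func main() {
	fmt.Printf("%x\n", disablementKDFSketch([]byte{1, 2, 3}))
}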
|
||||
|
||||
@@ -342,7 +342,7 @@ func TestSyncSimpleE2E(t *testing.T) {
|
||||
`,
|
||||
optTemplate("genesis", AUM{MessageKind: AUMCheckpoint, State: &State{
|
||||
Keys: []Key{key},
|
||||
DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
|
||||
DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
|
||||
}}),
|
||||
optKey("key", key, priv),
|
||||
optSignAllUsing("key"))
|
||||
|
||||
@@ -305,7 +305,7 @@ func TestAuthorityValidDisablement(t *testing.T) {
|
||||
`,
|
||||
optTemplate("genesis", AUM{MessageKind: AUMCheckpoint, State: &State{
|
||||
Keys: []Key{key},
|
||||
DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
|
||||
DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
|
||||
}}),
|
||||
)
|
||||
|
||||
@@ -321,7 +321,7 @@ func TestCreateBootstrapAuthority(t *testing.T) {
|
||||
|
||||
a1, genesisAUM, err := Create(&Mem{}, State{
|
||||
Keys: []Key{key},
|
||||
DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
|
||||
DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
|
||||
}, signer25519(priv))
|
||||
if err != nil {
|
||||
t.Fatalf("Create() failed: %v", err)
|
||||
@@ -361,7 +361,7 @@ func TestAuthorityInformNonLinear(t *testing.T) {
|
||||
`,
|
||||
optTemplate("genesis", AUM{MessageKind: AUMCheckpoint, State: &State{
|
||||
Keys: []Key{key},
|
||||
DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
|
||||
DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
|
||||
}}),
|
||||
optKey("key", key, priv),
|
||||
optSignAllUsing("key"))
|
||||
@@ -406,7 +406,7 @@ func TestAuthorityInformLinear(t *testing.T) {
|
||||
`,
|
||||
optTemplate("genesis", AUM{MessageKind: AUMCheckpoint, State: &State{
|
||||
Keys: []Key{key},
|
||||
DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
|
||||
DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})},
|
||||
}}),
|
||||
optKey("key", key, priv),
|
||||
optSignAllUsing("key"))
|
||||
|
||||
@@ -810,6 +810,7 @@ func (n *testNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon {
|
||||
"HTTPS_PROXY="+n.env.TrafficTrapServer.URL,
|
||||
"TS_DEBUG_TAILSCALED_IPN_GOOS="+ipnGOOS,
|
||||
"TS_LOGS_DIR="+t.TempDir(),
|
||||
"TS_NETCHECK_GENERATE_204_URL="+n.env.ControlServer.URL+"/generate_204",
|
||||
)
|
||||
cmd.Stderr = &nodeOutputParser{n: n}
|
||||
if *verboseTailscaled {
|
||||
|
||||
@@ -200,6 +200,9 @@ func (s *Server) logf(format string, a ...any) {
|
||||
func (s *Server) initMux() {
|
||||
s.mux = http.NewServeMux()
|
||||
s.mux.HandleFunc("/", s.serveUnhandled)
|
||||
s.mux.HandleFunc("/generate_204", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
})
|
||||
s.mux.HandleFunc("/key", s.serveKey)
|
||||
s.mux.HandleFunc("/machine/", s.serveMachine)
|
||||
}
|
||||
|
||||
@@ -37,6 +37,7 @@ import (
|
||||
func init() {
|
||||
expvar.Publish("process_start_unix_time", expvar.Func(func() any { return timeStart.Unix() }))
|
||||
expvar.Publish("version", expvar.Func(func() any { return version.Long }))
|
||||
expvar.Publish("go_version", expvar.Func(func() any { return runtime.Version() }))
|
||||
expvar.Publish("counter_uptime_sec", expvar.Func(func() any { return int64(Uptime().Seconds()) }))
|
||||
expvar.Publish("gauge_goroutines", expvar.Func(func() any { return runtime.NumGoroutine() }))
|
||||
}
|
||||
@@ -184,9 +185,9 @@ type ReturnHandler interface {
|
||||
}
|
||||
|
||||
type HandlerOptions struct {
|
||||
Quiet200s bool // if set, do not log successfully handled HTTP requests
|
||||
Logf logger.Logf
|
||||
Now func() time.Time // if nil, defaults to time.Now
|
||||
QuietLoggingIfSuccessful bool // if set, do not log successfully handled HTTP requests (200 and 304 status codes)
|
||||
Logf logger.Logf
|
||||
Now func() time.Time // if nil, defaults to time.Now
|
||||
|
||||
// If non-nil, StatusCodeCounters maintains counters
|
||||
// of status codes for handled responses.
|
||||
@@ -316,7 +317,7 @@ func (h retHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
if msg.Code != 200 || !h.opts.Quiet200s {
|
||||
if !h.opts.QuietLoggingIfSuccessful || (msg.Code != http.StatusOK && msg.Code != http.StatusNotModified) {
|
||||
h.opts.Logf("%s", msg)
|
||||
}
|
||||
|
||||
|
||||
@@ -303,7 +303,7 @@ func BenchmarkLogNot200(b *testing.B) {
        // Implicit 200 OK.
        return nil
    })
    h := StdHandler(rh, HandlerOptions{Quiet200s: true})
    h := StdHandler(rh, HandlerOptions{QuietLoggingIfSuccessful: true})
    req := httptest.NewRequest("GET", "/", nil)
    rw := new(httptest.ResponseRecorder)
    for i := 0; i < b.N; i++ {
@@ -14,6 +14,7 @@ import (
    "time"

    "tailscale.com/tailcfg"
    "tailscale.com/tka"
    "tailscale.com/types/key"
    "tailscale.com/wgengine/filter"
)
@@ -61,6 +62,13 @@ type NetworkMap struct {
    // check problems.
    ControlHealth []string

    // TKAEnabled indicates whether the tailnet key authority should be
    // enabled, from the perspective of the control plane.
    TKAEnabled bool
    // TKAHead indicates the control plane's understanding of 'head' (the
    // hash of the latest update message to tick through TKA).
    TKAHead tka.AUMHash

    // ACLs

    User tailcfg.UserID
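These fields give a client enough to tell when its local TKA state is stale: control says TKA is on, and control's head hash differs from ours. A hedged sketch of that comparison (needsTKASync and the head values are illustrative names, not part of the diff; only the field semantics come from the hunk above):

package main

import (
    "fmt"

    "tailscale.com/tka"
)

// needsTKASync reports whether the node should fetch newer AUMs, by
// comparing its own head against the control plane's TKAHead.
func needsTKASync(tkaEnabled bool, localHead, controlHead tka.AUMHash) bool {
    return tkaEnabled && localHead != controlHead
}

func main() {
    var local, control tka.AUMHash // zero hashes, for illustration
    fmt.Println(needsTKASync(true, local, control)) // false: heads match
}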
@@ -575,7 +575,7 @@ func TestGetTypeHasher(t *testing.T) {
{
name: "tailcfg.Node",
val: &tailcfg.Node{},
out: "\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\tn\x88\xf1\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\tn\x88\xf1\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
out: "\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\tn\x88\xf1\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\tn\x88\xf1\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
},
}
for _, tt := range tests {
@@ -1318,6 +1318,9 @@ func (c *Conn) derpWriteChanOfAddr(addr netip.AddrPort, peer key.NodePublic) cha
    if !c.wantDerpLocked() || c.closed {
        return nil
    }
    if c.derpMap == nil || c.derpMap.Regions[regionID] == nil {
        return nil
    }
    if c.privateKey.IsZero() {
        c.logf("magicsock: DERP lookup of %v with no private key; ignoring", addr)
        return nil

@@ -1362,9 +1365,6 @@ func (c *Conn) derpWriteChanOfAddr(addr netip.AddrPort, peer key.NodePublic) cha
        c.activeDerp = make(map[int]activeDerp)
        c.prevDerp = make(map[int]*syncs.WaitGroupChan)
    }
    if c.derpMap == nil || c.derpMap.Regions[regionID] == nil {
        return nil
    }

    // Note that derphttp.NewRegionClient does not dial the server
    // (it doesn't block) so it is safe to do under the c.mu lock.
@@ -664,7 +664,11 @@ func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper) filter.Respons
    }

    destIP := p.Dst.Addr()
    if p.IsEchoRequest() && ns.ProcessSubnets && !tsaddr.IsTailscaleIP(destIP) {
    // If this is an echo request and we're a subnet router, handle pings
    // ourselves instead of forwarding the packet on.
    pingIP, handlePing := ns.shouldHandlePing(p)
    if handlePing {
        var pong []byte // the reply to the ping, if our relayed ping works
        if destIP.Is4() {
            h := p.ICMP4Header()

@@ -675,7 +679,7 @@
            h.ToResponse()
            pong = packet.Generate(&h, p.Payload())
        }
        go ns.userPing(destIP, pong)
        go ns.userPing(pingIP, pong)
        return filter.DropSilently
    }
@@ -704,6 +708,59 @@ func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper) filter.Respons
    return filter.DropSilently
}

// shouldHandlePing returns whether or not netstack should handle an incoming
// ICMP echo request packet, and the IP address that should be pinged from this
// process. The IP address can be different from the destination in the packet
// if the destination is a 4via6 address.
func (ns *Impl) shouldHandlePing(p *packet.Parsed) (_ netip.Addr, ok bool) {
    if !p.IsEchoRequest() {
        return netip.Addr{}, false
    }

    destIP := p.Dst.Addr()

    // We need to handle pings for all 4via6 addresses, even if this
    // netstack instance normally isn't responsible for processing subnets.
    //
    // For example, on Linux, subnet router traffic could be handled via
    // tun+iptables rules for most packets, but we still need to handle
    // ICMP echo requests over 4via6 since the host networking stack
    // doesn't know what to do with a 4via6 address.
    //
    // shouldProcessInbound returns 'true' to say that we should process
    // all IPv6 packets with a destination address in the 'via' range, so
    // check before we check the "ProcessSubnets" boolean below.
    if viaRange.Contains(destIP) {
        // The input echo request was to a 4via6 address, which we cannot
        // simply ping as-is from this process. Translate the destination to an
        // IPv4 address, so that our relayed ping (in userPing) is pinging the
        // underlying destination IP.
        //
        // ICMPv4 and ICMPv6 are different protocols with different on-the-wire
        // representations, so normally you can't send an ICMPv6 message over
        // IPv4 and expect to get a useful result. However, in this specific
        // case things are safe because the 'userPing' function doesn't make
        // use of the input packet.
        return tsaddr.UnmapVia(destIP), true
    }

    // If we get here, we don't do anything unless this netstack instance
    // is responsible for processing subnet traffic.
    if !ns.ProcessSubnets {
        return netip.Addr{}, false
    }

    // For non-4via6 addresses, we don't handle pings if they're destined
    // for a Tailscale IP.
    if tsaddr.IsTailscaleIP(destIP) {
        return netip.Addr{}, false
    }

    // This netstack instance is processing subnet traffic, so handle the
    // ping ourselves.
    return destIP, true
}

func netaddrIPFromNetstackIP(s tcpip.Address) netip.Addr {
    switch len(s) {
    case 4:
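As a concrete illustration of the 4via6 translation above: the tests below use the route 10.1.1.0/24 with site ID 7, whose addresses live under the fd7a:115c:a1e0:b1a::/64 'via' range. A minimal sketch (the address literal is taken from the test; tsaddr.UnmapVia is the same function the hunk above calls):

package main

import (
    "fmt"
    "net/netip"

    "tailscale.com/net/tsaddr"
)

func main() {
    // Site ID 7 and IPv4 host 10.1.1.9 (hex 0a01:0109) are packed into
    // the low bits of the 4via6 address; UnmapVia recovers the IPv4.
    via := netip.MustParseAddr("fd7a:115c:a1e0:b1a:0:7:a01:109")
    fmt.Println(tsaddr.UnmapVia(via)) // 10.1.1.9
}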
@@ -5,6 +5,7 @@
package netstack

import (
    "fmt"
    "net/netip"
    "runtime"
    "testing"

@@ -13,6 +14,7 @@ import (
    "tailscale.com/net/packet"
    "tailscale.com/net/tsdial"
    "tailscale.com/net/tstun"
    "tailscale.com/types/ipproto"
    "tailscale.com/wgengine"
    "tailscale.com/wgengine/filter"
)
@@ -84,3 +86,169 @@ func TestNetstackLeakMode(t *testing.T) {
        t.Fatalf("refs.leakMode is 0, want a non-zero value")
    }
}

func makeNetstack(t *testing.T, config func(*Impl)) *Impl {
    tunDev := tstun.NewFake()
    dialer := new(tsdial.Dialer)
    logf := func(format string, args ...any) {
        if !t.Failed() {
            t.Helper()
            t.Logf(format, args...)
        }
    }
    eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{
        Tun: tunDev,
        Dialer: dialer,
    })
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() { eng.Close() })
    ig, ok := eng.(wgengine.InternalsGetter)
    if !ok {
        t.Fatal("not an InternalsGetter")
    }
    tunWrap, magicSock, dns, ok := ig.GetInternals()
    if !ok {
        t.Fatal("failed to get internals")
    }

    ns, err := Create(logf, tunWrap, eng, magicSock, dialer, dns)
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() { ns.Close() })

    ns.atomicIsLocalIPFunc.Store(func(netip.Addr) bool { return true })
    config(ns)

    if err := ns.Start(); err != nil {
        t.Fatalf("Start: %v", err)
    }
    return ns
}

func TestShouldHandlePing(t *testing.T) {
    srcIP := netip.AddrFrom4([4]byte{1, 2, 3, 4})

    t.Run("ICMP4", func(t *testing.T) {
        dst := netip.MustParseAddr("5.6.7.8")
        icmph := packet.ICMP4Header{
            IP4Header: packet.IP4Header{
                IPProto: ipproto.ICMPv4,
                Src: srcIP,
                Dst: dst,
            },
            Type: packet.ICMP4EchoRequest,
            Code: packet.ICMP4NoCode,
        }
        _, payload := packet.ICMPEchoPayload(nil)
        icmpPing := packet.Generate(icmph, payload)
        pkt := &packet.Parsed{}
        pkt.Decode(icmpPing)

        impl := makeNetstack(t, func(impl *Impl) {
            impl.ProcessSubnets = true
        })
        pingDst, ok := impl.shouldHandlePing(pkt)
        if !ok {
            t.Errorf("expected shouldHandlePing==true")
        }
        if pingDst != dst {
            t.Errorf("got dst %s; want %s", pingDst, dst)
        }
    })

    t.Run("ICMP6-no-via", func(t *testing.T) {
        dst := netip.MustParseAddr("2a09:8280:1::4169")
        icmph := packet.ICMP6Header{
            IP6Header: packet.IP6Header{
                IPProto: ipproto.ICMPv6,
                Src: srcIP,
                Dst: dst,
            },
            Type: packet.ICMP6EchoRequest,
            Code: packet.ICMP6NoCode,
        }
        _, payload := packet.ICMPEchoPayload(nil)
        icmpPing := packet.Generate(icmph, payload)
        pkt := &packet.Parsed{}
        pkt.Decode(icmpPing)

        impl := makeNetstack(t, func(impl *Impl) {
            impl.ProcessSubnets = true
        })
        pingDst, ok := impl.shouldHandlePing(pkt)

        // Expect that we handle this since it's going out onto the
        // network.
        if !ok {
            t.Errorf("expected shouldHandlePing==true")
        }
        if pingDst != dst {
            t.Errorf("got dst %s; want %s", pingDst, dst)
        }
    })

    t.Run("ICMP6-tailscale-addr", func(t *testing.T) {
        dst := netip.MustParseAddr("fd7a:115c:a1e0:ab12::1")
        icmph := packet.ICMP6Header{
            IP6Header: packet.IP6Header{
                IPProto: ipproto.ICMPv6,
                Src: srcIP,
                Dst: dst,
            },
            Type: packet.ICMP6EchoRequest,
            Code: packet.ICMP6NoCode,
        }
        _, payload := packet.ICMPEchoPayload(nil)
        icmpPing := packet.Generate(icmph, payload)
        pkt := &packet.Parsed{}
        pkt.Decode(icmpPing)

        impl := makeNetstack(t, func(impl *Impl) {
            impl.ProcessSubnets = true
        })
        _, ok := impl.shouldHandlePing(pkt)

        // We don't handle this because it's a Tailscale IP and not 4via6
        if ok {
            t.Errorf("expected shouldHandlePing==false")
        }
    })

    // Handle pings for 4via6 addresses regardless of ProcessSubnets
    for _, subnets := range []bool{true, false} {
        t.Run("ICMP6-4via6-ProcessSubnets-"+fmt.Sprint(subnets), func(t *testing.T) {
            // The 4via6 route 10.1.1.0/24 siteid 7, and then the IP
            // 10.1.1.9 within that route.
            dst := netip.MustParseAddr("fd7a:115c:a1e0:b1a:0:7:a01:109")
            expectedPingDst := netip.MustParseAddr("10.1.1.9")
            icmph := packet.ICMP6Header{
                IP6Header: packet.IP6Header{
                    IPProto: ipproto.ICMPv6,
                    Src: srcIP,
                    Dst: dst,
                },
                Type: packet.ICMP6EchoRequest,
                Code: packet.ICMP6NoCode,
            }
            _, payload := packet.ICMPEchoPayload(nil)
            icmpPing := packet.Generate(icmph, payload)
            pkt := &packet.Parsed{}
            pkt.Decode(icmpPing)

            impl := makeNetstack(t, func(impl *Impl) {
                impl.ProcessSubnets = subnets
            })
            pingDst, ok := impl.shouldHandlePing(pkt)

            // Handled due to being 4via6
            if !ok {
                t.Errorf("expected shouldHandlePing==true")
            } else if pingDst != expectedPingDst {
                t.Errorf("got dst %s; want %s", pingDst, expectedPingDst)
            }
        })
    }
}