Compare commits: andrew/exe...percy/move
62 commits
| Author | SHA1 | Date |
|---|---|---|
| | 0b6a337039 | |
| | e6a5c1ee16 | |
| | 58a312edca | |
| | 0b7087c401 | |
| | 00fe8845b1 | |
| | 5ef934b62d | |
| | cfe578870d | |
| | 80a100b3cb | |
| | 97c4c0ecf0 | |
| | 600f25dac9 | |
| | 95e2353294 | |
| | 10fe10ea10 | |
| | 17ca2b7721 | |
| | 496347c724 | |
| | d832467461 | |
| | 2c02f712d1 | |
| | a0537dc027 | |
| | 2e95313b8b | |
| | 0a51bbc765 | |
| | 02ad21717f | |
| | 535a3dbebd | |
| | 081595de63 | |
| | 4e7f4086b2 | |
| | 7d5fe13d27 | |
| | 8ee72cd33c | |
| | 08dd4994d0 | |
| | 138a83efe1 | |
| | c2af1cd9e3 | |
| | a49af98b31 | |
| | 0ed4aa028f | |
| | ed8bb3b564 | |
| | 3f39211f98 | |
| | 8bd04bdd3a | |
| | b60f6b849a | |
| | 52f88f782a | |
| | b406f209c3 | |
| | eb299302ba | |
| | 0aa54151f2 | |
| | f1514a944a | |
| | 46fd4e58a2 | |
| | 3abfbf50ae | |
| | 6f10fe8ab1 | |
| | 079973de82 | |
| | ba1f9a3918 | |
| | 2691b9f6be | |
| | bd9725c5f8 | |
| | bfde8079a0 | |
| | 76dc028b38 | |
| | 3fec806523 | |
| | bce05ec6c3 | |
| | 8c925899e1 | |
| | 04029b857f | |
| | e701fde6b3 | |
| | 66b2e9fd07 | |
| | 68a66ee81b | |
| | 2c98c44d9a | |
| | 82e41ddc42 | |
| | 2089f4b603 | |
| | ca39c4e150 | |
| | 1a7274fccb | |
| | cbf1a9abe1 | |
| | 716e4fcc97 | |
.github/workflows/checklocks.yml (vendored): 2 changes

@@ -18,7 +18,7 @@ jobs:
     runs-on: [ ubuntu-latest ]
     steps:
       - name: Check out code
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Build checklocks
         run: ./tool/go build -o /tmp/checklocks gvisor.dev/gvisor/tools/checklocks/cmd/checklocks
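Every workflow change in this comparison follows the same convention: actions are pinned to a full 40-character commit SHA, with the human-readable release tag kept in a trailing comment (here v4.1.7 to v4.2.2). As an illustration only, not part of this comparison, a few lines of Go can enforce that convention across a repo:

```go
// pincheck flags GitHub Actions `uses:` lines that are not pinned to a
// full commit SHA. Illustrative sketch; not part of this comparison.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"
)

// A pinned reference looks like owner/repo@<40 hex chars>.
var pinned = regexp.MustCompile(`uses:\s*"?[^@\s"]+@[0-9a-f]{40}\b`)

func main() {
	for _, pattern := range []string{".github/workflows/*.yml", ".github/workflows/*.yaml"} {
		files, _ := filepath.Glob(pattern)
		for _, f := range files {
			data, err := os.ReadFile(f)
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				continue
			}
			for i, line := range strings.Split(string(data), "\n") {
				if strings.Contains(line, "uses:") && !pinned.MatchString(line) {
					fmt.Printf("%s:%d: not pinned to a commit SHA: %s\n", f, i+1, strings.TrimSpace(line))
				}
			}
		}
	}
}
```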
.github/workflows/codeql-analysis.yml (vendored): 10 changes

@@ -45,17 +45,17 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       # Install a more recent Go that understands modern go.mod content.
       - name: Install Go
-        uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+        uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
         with:
           go-version-file: go.mod

       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1
+        uses: github/codeql-action/init@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5
         with:
           languages: ${{ matrix.language }}
           # If you wish to specify custom queries, you can do so here or in a config file.

@@ -66,7 +66,7 @@ jobs:
       # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
       # If this step fails, then you should remove it and run the build manually (see below)
       - name: Autobuild
-        uses: github/codeql-action/autobuild@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1
+        uses: github/codeql-action/autobuild@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5

       # ℹ️ Command-line programs to run using the OS shell.
       # 📚 https://git.io/JvXDl

@@ -80,4 +80,4 @@ jobs:
       #   make release

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1
+        uses: github/codeql-action/analyze@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5
.github/workflows/docker-file-build.yml (vendored): 2 changes

@@ -10,6 +10,6 @@ jobs:
   deploy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: "Build Docker image"
         run: docker build .

@@ -17,7 +17,7 @@ jobs:
       id-token: "write"
       contents: "read"
     steps:
-      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}"
       - uses: "DeterminateSystems/nix-installer-action@main"
.github/workflows/golangci-lint.yml (vendored): 4 changes

@@ -23,9 +23,9 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

-      - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+      - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
        with:
          go-version-file: go.mod
          cache: false
.github/workflows/govulncheck.yml (vendored): 2 changes

@@ -14,7 +14,7 @@ jobs:

     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Install govulncheck
         run: ./tool/go install golang.org/x/vuln/cmd/govulncheck@latest
.github/workflows/installer.yml (vendored): 6 changes

@@ -36,7 +36,6 @@ jobs:
           - "ubuntu:24.04"
           - "elementary/docker:stable"
           - "elementary/docker:unstable"
           - "parrotsec/core:lts-amd64"
           - "parrotsec/core:latest"
           - "kalilinux/kali-rolling"
           - "kalilinux/kali-dev"

@@ -92,10 +91,7 @@ jobs:
           || contains(matrix.image, 'parrotsec')
           || contains(matrix.image, 'kalilinux')
       - name: checkout
-        # We cannot use v4, as it requires a newer glibc version than some of the
-        # tested images provide. See
-        # https://github.com/actions/checkout/issues/1487
-        uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: run installer
         run: scripts/installer.sh
         # Package installation can fail in docker because systemd is not running
.github/workflows/kubemanifests.yaml (vendored): 2 changes

@@ -17,7 +17,7 @@ jobs:
     runs-on: [ ubuntu-latest ]
     steps:
       - name: Check out code
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Build and lint Helm chart
         run: |
           eval `./tool/go run ./cmd/mkversion`
.github/workflows/ssh-integrationtest.yml (vendored): 2 changes

@@ -17,7 +17,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check out code
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Run SSH integration tests
         run: |
           make sshintegrationtest
.github/workflows/test.yml (vendored): 38 changes

@@ -50,7 +50,7 @@ jobs:
           - shard: '4/4'
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: build test wrapper
         run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper
       - name: integration tests as root

@@ -78,7 +78,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Restore Cache
         uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
         with:

@@ -150,10 +150,10 @@ jobs:
     runs-on: windows-2022
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Install Go
-        uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+        uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
         with:
           go-version-file: go.mod
           cache: false

@@ -190,7 +190,7 @@ jobs:
       options: --privileged
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: chown
         run: chown -R $(id -u):$(id -g) $PWD
       - name: privileged tests

@@ -202,7 +202,7 @@ jobs:
     if: github.repository == 'tailscale/tailscale'
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Run VM tests
         run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004
         env:

@@ -214,7 +214,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: build all
         run: ./tool/go install -race ./cmd/...
       - name: build tests

@@ -258,7 +258,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Restore Cache
         uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
         with:

@@ -295,7 +295,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
      - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: build some
         run: ./tool/go build ./ipn/... ./wgengine/ ./types/... ./control/controlclient
         env:

@@ -323,7 +323,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Restore Cache
         uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
         with:

@@ -356,7 +356,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed
       # and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch
       # some Android breakages early.

@@ -371,7 +371,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Restore Cache
         uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
         with:

@@ -405,7 +405,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: test tailscale_go
         run: ./tool/go test -tags=tailscale_go,ts_enable_sockstats ./net/sockstats/...

@@ -477,17 +477,17 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: check depaware
         run: |
           export PATH=$(./tool/go env GOROOT)/bin:$PATH
-          find . -name 'depaware.txt' | xargs -n1 dirname | xargs ./tool/go run github.com/tailscale/depaware --check
+          find . -name 'depaware.txt' | xargs -n1 dirname | xargs ./tool/go run github.com/tailscale/depaware --check --internal

   go_generate:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: check that 'go generate' is clean
         run: |
           pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp')

@@ -500,7 +500,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: check that 'go mod tidy' is clean
         run: |
           ./tool/go mod tidy

@@ -512,7 +512,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: check licenses
         run: ./scripts/check_license_headers.sh .

@@ -528,7 +528,7 @@ jobs:
           goarch: "386"
     steps:
       - name: checkout
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: install staticcheck
         run: GOBIN=~/.local/bin ./tool/go install honnef.co/go/tools/cmd/staticcheck
       - name: run staticcheck
.github/workflows/update-flake.yml (vendored): 2 changes

@@ -21,7 +21,7 @@ jobs:

     steps:
       - name: Check out code
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Run update-flakes
         run: ./update-flake.sh

@@ -14,7 +14,7 @@ jobs:

     steps:
       - name: Check out code
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Run go get
         run: |
.github/workflows/webclient.yml (vendored): 2 changes

@@ -24,7 +24,7 @@ jobs:

     steps:
       - name: Check out code
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install deps
         run: ./tool/yarn --cwd client/web
       - name: Run lint
Makefile: 4 changes

@@ -17,7 +17,7 @@ lint: ## Run golangci-lint
 updatedeps: ## Update depaware deps
 	# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
 	# it finds in its $$PATH is the right one.
-	PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update \
+	PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --internal \
 		tailscale.com/cmd/tailscaled \
 		tailscale.com/cmd/tailscale \
 		tailscale.com/cmd/derper \

@@ -27,7 +27,7 @@ updatedeps: ## Update depaware deps
 depaware: ## Run depaware checks
 	# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
 	# it finds in its $$PATH is the right one.
-	PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check \
+	PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --internal \
 		tailscale.com/cmd/tailscaled \
 		tailscale.com/cmd/tailscale \
 		tailscale.com/cmd/derper \
@@ -1 +1 @@
-1.79.0
+1.81.0
@@ -37,7 +37,7 @@ while [ "$#" -gt 1 ]; do
     --extra-small)
         shift
         ldflags="$ldflags -w -s"
-        tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan"
+        tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture"
         ;;
     --box)
         shift
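The --extra-small build stacks ts_omit_* build tags to compile features out of the binary; this change adds ts_omit_capture to that list. A minimal sketch of how such a tag typically gates code (hypothetical file, package, and function names, not the tree's actual layout):

```go
// capture_enabled.go: compiled only when ts_omit_capture is NOT set.
//go:build !ts_omit_capture

package capture

// Enabled reports whether packet-capture support was compiled in.
func Enabled() bool { return true }
```

A sibling file guarded by `//go:build ts_omit_capture` would return false and stub out the real implementation, so building with `-tags ts_omit_capture` drops the feature and its dependencies from the binary.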
@@ -26,9 +26,9 @@ import (
 	"github.com/atotto/clipboard"
 	dbus "github.com/godbus/dbus/v5"
 	"github.com/toqueteos/webbrowser"
-	"tailscale.com/client/tailscale"
 	"tailscale.com/ipn"
 	"tailscale.com/ipn/ipnstate"
+	"tailscale.com/localclient/tailscale"
 	"tailscale.com/tailcfg"
 	"tailscale.com/util/slicesx"
 	"tailscale.com/util/stringsx"
@@ -12,7 +12,7 @@ import (
 	"fmt"
 	"net/http"

-	"tailscale.com/client/tailscale/apitype"
+	"tailscale.com/localclient/tailscale/apitype"
 )

 // DNSNameServers is returned when retrieving the list of nameservers.
@@ -11,7 +11,7 @@ import (
 	"log"
 	"net/http"

-	"tailscale.com/client/tailscale"
+	"tailscale.com/localclient/tailscale"
 )

 func main() {
client/tailscale/localclient_stub.go (new file): 7 changes

@@ -0,0 +1,7 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package tailscale
+
+// LocalClient has moved to tailscale.com/localclient/tailscale.
+type LocalClient struct{}
@@ -3,11 +3,10 @@

 //go:build go1.19

-// Package tailscale contains Go clients for the Tailscale LocalAPI and
-// Tailscale control plane API.
+// Package tailscale contains a Go client for the Tailscale control plane API.
 //
-// Warning: this package is in development and makes no API compatibility
-// promises as of 2022-04-29. It is subject to change at any time.
+// Deprecated: This package is no longer maintained. Use
+// tailscale.com/client/tailscale/v2 instead.
 package tailscale

 import (

@@ -36,6 +35,8 @@ const maxReadSize = 10 << 20
 //
 // Use NewClient to instantiate one. Exported fields should be set before
 // the client is used and not changed thereafter.
+//
+// Deprecated: use tailscale.com/client/tailscale/v2 instead.
 type Client struct {
 	// tailnet is the globally unique identifier for a Tailscale network, such
 	// as "example.com" or "user@gmail.com".
@@ -15,8 +15,8 @@ import (
 	"strings"
 	"time"

-	"tailscale.com/client/tailscale/apitype"
 	"tailscale.com/ipn/ipnstate"
+	"tailscale.com/localclient/tailscale/apitype"
 	"tailscale.com/tailcfg"
 )
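The tailscale.com/client/tailscale to tailscale.com/localclient/tailscale import moves in these hunks are purely mechanical, which makes them a good fit for tooling. A sketch of scripting the rewrite with golang.org/x/tools (not part of this comparison; the target filename is hypothetical):

```go
package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"log"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const file = "dns.go" // hypothetical file to rewrite
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, file, nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	// RewriteImport reports whether the old import path was present and rewritten.
	if astutil.RewriteImport(fset, f, "tailscale.com/client/tailscale", "tailscale.com/localclient/tailscale") {
		out, err := os.Create(file)
		if err != nil {
			log.Fatal(err)
		}
		defer out.Close()
		if err := printer.Fprint(out, fset, f); err != nil {
			log.Fatal(err)
		}
	}
}
```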
@@ -1,13 +1,11 @@
 // Copyright (c) Tailscale Inc & AUTHORS
 // SPDX-License-Identifier: BSD-3-Clause

-import React, { useState } from "react"
+import React from "react"
 import { useAPI } from "src/api"
 import TailscaleIcon from "src/assets/icons/tailscale-icon.svg?react"
 import { NodeData } from "src/types"
 import Button from "src/ui/button"
-import Collapsible from "src/ui/collapsible"
-import Input from "src/ui/input"

 /**
  * LoginView is rendered when the client is not authenticated

@@ -15,8 +13,6 @@ import Input from "src/ui/input"
  */
 export default function LoginView({ data }: { data: NodeData }) {
   const api = useAPI()
-  const [controlURL, setControlURL] = useState<string>("")
-  const [authKey, setAuthKey] = useState<string>("")

   return (
     <div className="mb-8 py-6 px-8 bg-white rounded-md shadow-2xl">

@@ -88,8 +84,6 @@ export default function LoginView({ data }: { data: NodeData }) {
                     action: "up",
                     data: {
                       Reauthenticate: true,
-                      ControlURL: controlURL,
-                      AuthKey: authKey,
                     },
                   })
                 }

@@ -98,34 +92,6 @@ export default function LoginView({ data }: { data: NodeData }) {
               >
                 Log In
               </Button>
-              <Collapsible trigger="Advanced options">
-                <h4 className="font-medium mb-1 mt-2">Auth Key</h4>
-                <p className="text-sm text-gray-500">
-                  Connect with a pre-authenticated key.{" "}
-                  <a
-                    href="https://tailscale.com/kb/1085/auth-keys/"
-                    className="link"
-                    target="_blank"
-                    rel="noreferrer"
-                  >
-                    Learn more →
-                  </a>
-                </p>
-                <Input
-                  className="mt-2"
-                  value={authKey}
-                  onChange={(e) => setAuthKey(e.target.value)}
-                  placeholder="tskey-auth-XXX"
-                />
-                <h4 className="font-medium mt-3 mb-1">Server URL</h4>
-                <p className="text-sm text-gray-500">Base URL of control server.</p>
-                <Input
-                  className="mt-2"
-                  value={controlURL}
-                  onChange={(e) => setControlURL(e.target.value)}
-                  placeholder="https://login.tailscale.com/"
-                />
-              </Collapsible>
             </>
           )}
         </div>
@@ -22,8 +22,6 @@ import (
 	"time"

 	"github.com/gorilla/csrf"
-	"tailscale.com/client/tailscale"
-	"tailscale.com/client/tailscale/apitype"
 	"tailscale.com/clientupdate"
 	"tailscale.com/envknob"
 	"tailscale.com/envknob/featureknob"

@@ -31,6 +29,8 @@ import (
 	"tailscale.com/ipn"
 	"tailscale.com/ipn/ipnstate"
 	"tailscale.com/licenses"
+	"tailscale.com/localclient/tailscale"
+	"tailscale.com/localclient/tailscale/apitype"
 	"tailscale.com/net/netutil"
 	"tailscale.com/net/tsaddr"
 	"tailscale.com/tailcfg"

@@ -211,15 +211,25 @@ func NewServer(opts ServerOpts) (s *Server, err error) {
 	// The client is secured by limiting the interface it listens on,
 	// or by authenticating requests before they reach the web client.
 	csrfProtect := csrf.Protect(s.csrfKey(), csrf.Secure(false))

+	// signal to the CSRF middleware that the request is being served over
+	// plaintext HTTP to skip TLS-only header checks.
+	withSetPlaintext := func(h http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			r = csrf.PlaintextHTTPRequest(r)
+			h.ServeHTTP(w, r)
+		})
+	}
+
 	switch s.mode {
 	case LoginServerMode:
-		s.apiHandler = csrfProtect(http.HandlerFunc(s.serveLoginAPI))
+		s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveLoginAPI)))
 		metric = "web_login_client_initialization"
 	case ReadOnlyServerMode:
-		s.apiHandler = csrfProtect(http.HandlerFunc(s.serveLoginAPI))
+		s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveLoginAPI)))
 		metric = "web_readonly_client_initialization"
 	case ManageServerMode:
-		s.apiHandler = csrfProtect(http.HandlerFunc(s.serveAPI))
+		s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveAPI)))
 		metric = "web_client_initialization"
 	}
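Recent gorilla/csrf releases apply TLS-oriented Origin/Referer checks that fail for handlers served over plain HTTP unless each request is explicitly marked as plaintext, which is what withSetPlaintext does above. A self-contained sketch of the same wrapper pattern (hypothetical key and address, not this repo's wiring):

```go
package main

import (
	"net/http"

	"github.com/gorilla/csrf"
)

// withPlaintext marks each request as served over plain HTTP so the CSRF
// middleware skips its TLS-only header checks.
func withPlaintext(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		h.ServeHTTP(w, csrf.PlaintextHTTPRequest(r))
	})
}

func main() {
	protect := csrf.Protect([]byte("32-byte-long-auth-key-goes-here!"), csrf.Secure(false))
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.ListenAndServe("127.0.0.1:8080", protect(withPlaintext(api)))
}
```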
@@ -20,10 +20,10 @@ import (
 	"time"

 	"github.com/google/go-cmp/cmp"
-	"tailscale.com/client/tailscale"
-	"tailscale.com/client/tailscale/apitype"
 	"tailscale.com/ipn"
 	"tailscale.com/ipn/ipnstate"
+	"tailscale.com/localclient/tailscale"
+	"tailscale.com/localclient/tailscale/apitype"
 	"tailscale.com/net/memnet"
 	"tailscale.com/tailcfg"
 	"tailscale.com/types/views"
@@ -6,9 +6,12 @@
 package main

 import (
+	"fmt"
 	"log"
 	"net/http"
 	"sync"
+
+	"tailscale.com/kube/kubetypes"
 )

 // healthz is a simple health check server, if enabled it returns 200 OK if

@@ -17,6 +20,7 @@ import (
 type healthz struct {
 	sync.Mutex
 	hasAddrs bool
+	podIPv4  string
 }

 func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) {

@@ -24,7 +28,10 @@ func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	defer h.Unlock()

 	if h.hasAddrs {
-		w.Write([]byte("ok"))
+		w.Header().Add(kubetypes.PodIPv4Header, h.podIPv4)
+		if _, err := w.Write([]byte("ok")); err != nil {
+			http.Error(w, fmt.Sprintf("error writing status: %v", err), http.StatusInternalServerError)
+		}
 	} else {
 		http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable)
 	}

@@ -43,8 +50,8 @@ func (h *healthz) update(healthy bool) {
 // healthHandlers registers a simple health handler at /healthz.
 // A containerized tailscale instance is considered healthy if
 // it has at least one tailnet IP address.
-func healthHandlers(mux *http.ServeMux) *healthz {
-	h := &healthz{}
+func healthHandlers(mux *http.ServeMux, podIPv4 string) *healthz {
+	h := &healthz{podIPv4: podIPv4}
 	mux.Handle("GET /healthz", h)
 	return h
 }
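The handler's new behavior is easy to exercise with httptest: before update(true) it returns 503, afterwards 200 plus the pod IP header. A sketch (not a test from this comparison) that would live alongside the code above in the containerboot package, using net/http, net/http/httptest, testing, and tailscale.com/kube/kubetypes:

```go
func TestHealthzSketch(t *testing.T) {
	mux := http.NewServeMux()
	h := healthHandlers(mux, "10.2.3.4") // podIPv4 is the newly added argument

	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "/healthz", nil))
	if rec.Code != http.StatusServiceUnavailable {
		t.Fatalf("want 503 before any tailnet IPs, got %d", rec.Code)
	}

	h.update(true) // simulate the node acquiring tailnet IPs
	rec = httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "/healthz", nil))
	if rec.Code != http.StatusOK {
		t.Fatalf("want 200, got %d", rec.Code)
	}
	if got := rec.Header().Get(kubetypes.PodIPv4Header); got != "10.2.3.4" {
		t.Fatalf("want pod IPv4 header %q, got %q", "10.2.3.4", got)
	}
}
```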
@@ -8,15 +8,22 @@ package main

 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"log"
 	"net/http"
 	"net/netip"
 	"os"
+	"strings"
+	"time"

+	"tailscale.com/ipn"
 	"tailscale.com/kube/kubeapi"
 	"tailscale.com/kube/kubeclient"
 	"tailscale.com/kube/kubetypes"
+	"tailscale.com/logtail/backoff"
 	"tailscale.com/tailcfg"
+	"tailscale.com/types/logger"
 )

 // kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use

@@ -126,3 +133,62 @@ func (kc *kubeClient) storeCapVerUID(ctx context.Context, podUID string) error {
 	}
 	return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
 }
+
+// waitForConsistentState waits for tailscaled to finish writing state if it
+// looks like it's started. It is designed to reduce the likelihood that
+// tailscaled gets shut down in the window between authenticating to control
+// and finishing writing state. However, it's not bullet proof because we can't
+// atomically authenticate and write state.
+func (kc *kubeClient) waitForConsistentState(ctx context.Context) error {
+	var logged bool
+
+	bo := backoff.NewBackoff("", logger.Discard, 2*time.Second)
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+		secret, err := kc.GetSecret(ctx, kc.stateSecret)
+		if ctx.Err() != nil || kubeclient.IsNotFoundErr(err) {
+			return nil
+		}
+		if err != nil {
+			return fmt.Errorf("getting Secret %q: %v", kc.stateSecret, err)
+		}
+
+		if hasConsistentState(secret.Data) {
+			return nil
+		}
+
+		if !logged {
+			log.Printf("Waiting for tailscaled to finish writing state to Secret %q", kc.stateSecret)
+			logged = true
+		}
+		bo.BackOff(ctx, errors.New("")) // Fake error to trigger actual sleep.
+	}
+}
+
+// hasConsistentState returns true if there is either no state or the full set
+// of expected keys are present.
+func hasConsistentState(d map[string][]byte) bool {
+	var (
+		_, hasCurrent = d[string(ipn.CurrentProfileStateKey)]
+		_, hasKnown   = d[string(ipn.KnownProfilesStateKey)]
+		_, hasMachine = d[string(ipn.MachineKeyStateKey)]
+		hasProfile    bool
+	)
+
+	for k := range d {
+		if strings.HasPrefix(k, "profile-") {
+			if hasProfile {
+				return false // We only expect one profile.
+			}
+			hasProfile = true
+		}
+	}
+
+	// Approximate check, we don't want to reimplement all of profileManager.
+	return (hasCurrent && hasKnown && hasMachine && hasProfile) ||
+		(!hasCurrent && !hasKnown && !hasMachine && !hasProfile)
+}
@@ -9,8 +9,10 @@ import (
 	"context"
 	"errors"
 	"testing"
+	"time"

 	"github.com/google/go-cmp/cmp"
+	"tailscale.com/ipn"
 	"tailscale.com/kube/kubeapi"
 	"tailscale.com/kube/kubeclient"
 )

@@ -205,3 +207,34 @@ func TestSetupKube(t *testing.T) {
 		})
 	}
 }
+
+func TestWaitForConsistentState(t *testing.T) {
+	data := map[string][]byte{
+		// Missing _current-profile.
+		string(ipn.KnownProfilesStateKey): []byte(""),
+		string(ipn.MachineKeyStateKey):    []byte(""),
+		"profile-foo":                     []byte(""),
+	}
+	kc := &kubeClient{
+		Client: &kubeclient.FakeClient{
+			GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
+				return &kubeapi.Secret{
+					Data: data,
+				}, nil
+			},
+		},
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+	if err := kc.waitForConsistentState(ctx); err != context.DeadlineExceeded {
+		t.Fatalf("expected DeadlineExceeded, got %v", err)
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+	data[string(ipn.CurrentProfileStateKey)] = []byte("")
+	if err := kc.waitForConsistentState(ctx); err != nil {
+		t.Fatalf("expected nil, got %v", err)
+	}
+}
@@ -137,53 +137,83 @@ func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) {
 }

 func main() {
+	if err := run(); err != nil && !errors.Is(err, context.Canceled) {
+		log.Fatal(err)
+	}
+}
+
+func run() error {
 	log.SetPrefix("boot: ")
 	tailscale.I_Acknowledge_This_API_Is_Unstable = true

 	cfg, err := configFromEnv()
 	if err != nil {
-		log.Fatalf("invalid configuration: %v", err)
+		return fmt.Errorf("invalid configuration: %w", err)
 	}

 	if !cfg.UserspaceMode {
 		if err := ensureTunFile(cfg.Root); err != nil {
-			log.Fatalf("Unable to create tuntap device file: %v", err)
+			return fmt.Errorf("unable to create tuntap device file: %w", err)
 		}
 		if cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.Routes != nil || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" {
 			if err := ensureIPForwarding(cfg.Root, cfg.ProxyTargetIP, cfg.TailnetTargetIP, cfg.TailnetTargetFQDN, cfg.Routes); err != nil {
 				log.Printf("Failed to enable IP forwarding: %v", err)
 				log.Printf("To run tailscale as a proxy or router container, IP forwarding must be enabled.")
 				if cfg.InKubernetes {
-					log.Fatalf("You can either set the sysctls as a privileged initContainer, or run the tailscale container with privileged=true.")
+					return fmt.Errorf("you can either set the sysctls as a privileged initContainer, or run the tailscale container with privileged=true.")
 				} else {
-					log.Fatalf("You can fix this by running the container with privileged=true, or the equivalent in your container runtime that permits access to sysctls.")
+					return fmt.Errorf("you can fix this by running the container with privileged=true, or the equivalent in your container runtime that permits access to sysctls.")
 				}
 			}
 		}
 	}

-	// Context is used for all setup stuff until we're in steady
+	// Root context for the whole containerboot process, used to make sure
+	// shutdown signals are promptly and cleanly handled.
+	ctx, cancel := contextWithExitSignalWatch()
+	defer cancel()
+
+	// bootCtx is used for all setup stuff until we're in steady
 	// state, so that if something is hanging we eventually time out
 	// and crashloop the container.
-	bootCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	bootCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
 	defer cancel()
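Note the format-verb change riding along with the log.Fatalf-to-return conversions: %v flattens the cause into text, while %w keeps it on the error chain. The new main() depends on that, since errors.Is(err, context.Canceled) can only suppress a cancellation that was wrapped with %w. A self-contained demonstration:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	errV := fmt.Errorf("watch failed: %v", context.Canceled) // cause flattened to text
	errW := fmt.Errorf("watch failed: %w", context.Canceled) // cause kept on the chain

	fmt.Println(errors.Is(errV, context.Canceled)) // false
	fmt.Println(errors.Is(errW, context.Canceled)) // true
}
```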
 	var kc *kubeClient
 	if cfg.InKubernetes {
 		kc, err = newKubeClient(cfg.Root, cfg.KubeSecret)
 		if err != nil {
-			log.Fatalf("error initializing kube client: %v", err)
+			return fmt.Errorf("error initializing kube client: %w", err)
 		}
 		if err := cfg.setupKube(bootCtx, kc); err != nil {
-			log.Fatalf("error setting up for running on Kubernetes: %v", err)
+			return fmt.Errorf("error setting up for running on Kubernetes: %w", err)
 		}
 	}

 	client, daemonProcess, err := startTailscaled(bootCtx, cfg)
 	if err != nil {
-		log.Fatalf("failed to bring up tailscale: %v", err)
+		return fmt.Errorf("failed to bring up tailscale: %w", err)
 	}
 	killTailscaled := func() {
+		if hasKubeStateStore(cfg) {
+			// Check we're not shutting tailscaled down while it's still writing
+			// state. If we authenticate and fail to write all the state, we'll
+			// never recover automatically.
+			//
+			// The default termination grace period for a Pod is 30s. We wait 25s at
+			// most so that we still reserve some of that budget for tailscaled
+			// to receive and react to a SIGTERM before the SIGKILL that k8s
+			// will send at the end of the grace period.
+			ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second)
+			defer cancel()
+
+			log.Printf("Checking for consistent state")
+			err := kc.waitForConsistentState(ctx)
+			if err != nil {
+				log.Printf("Error waiting for consistent state on shutdown: %v", err)
+			}
+		}
 		log.Printf("Sending SIGTERM to tailscaled")
 		if err := daemonProcess.Signal(unix.SIGTERM); err != nil {
 			log.Fatalf("error shutting tailscaled down: %v", err)
 		}
@@ -191,17 +221,18 @@ func main() {
 	defer killTailscaled()

 	var healthCheck *healthz
+	ep := &egressProxy{}
 	if cfg.HealthCheckAddrPort != "" {
 		mux := http.NewServeMux()

 		log.Printf("Running healthcheck endpoint at %s/healthz", cfg.HealthCheckAddrPort)
-		healthCheck = healthHandlers(mux)
+		healthCheck = healthHandlers(mux, cfg.PodIPv4)

 		close := runHTTPServer(mux, cfg.HealthCheckAddrPort)
 		defer close()
 	}

-	if cfg.localMetricsEnabled() || cfg.localHealthEnabled() {
+	if cfg.localMetricsEnabled() || cfg.localHealthEnabled() || cfg.egressSvcsTerminateEPEnabled() {
 		mux := http.NewServeMux()

 		if cfg.localMetricsEnabled() {

@@ -211,7 +242,11 @@ func main() {

 		if cfg.localHealthEnabled() {
 			log.Printf("Running healthcheck endpoint at %s/healthz", cfg.LocalAddrPort)
-			healthCheck = healthHandlers(mux)
+			healthCheck = healthHandlers(mux, cfg.PodIPv4)
 		}
+		if cfg.EgressProxiesCfgPath != "" {
+			log.Printf("Running preshutdown hook at %s%s", cfg.LocalAddrPort, kubetypes.EgessServicesPreshutdownEP)
+			ep.registerHandlers(mux)
+		}

 		close := runHTTPServer(mux, cfg.LocalAddrPort)

@@ -226,7 +261,7 @@ func main() {

 	w, err := client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState)
 	if err != nil {
-		log.Fatalf("failed to watch tailscaled for updates: %v", err)
+		return fmt.Errorf("failed to watch tailscaled for updates: %w", err)
 	}

 	// Now that we've started tailscaled, we can symlink the socket to the

@@ -262,18 +297,18 @@ func main() {
 		didLogin = true
 		w.Close()
 		if err := tailscaleUp(bootCtx, cfg); err != nil {
-			return fmt.Errorf("failed to auth tailscale: %v", err)
+			return fmt.Errorf("failed to auth tailscale: %w", err)
 		}
 		w, err = client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
 		if err != nil {
-			return fmt.Errorf("rewatching tailscaled for updates after auth: %v", err)
+			return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err)
 		}
 		return nil
 	}

 	if isTwoStepConfigAlwaysAuth(cfg) {
 		if err := authTailscale(); err != nil {
-			log.Fatalf("failed to auth tailscale: %v", err)
+			return fmt.Errorf("failed to auth tailscale: %w", err)
 		}
 	}

@@ -281,7 +316,7 @@ authLoop:
 	for {
 		n, err := w.Next()
 		if err != nil {
-			log.Fatalf("failed to read from tailscaled: %v", err)
+			return fmt.Errorf("failed to read from tailscaled: %w", err)
 		}

 		if n.State != nil {

@@ -290,10 +325,10 @@ authLoop:
 			if isOneStepConfig(cfg) {
 				// This could happen if this is the first time tailscaled was run for this
 				// device and the auth key was not passed via the configfile.
-				log.Fatalf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.")
+				return fmt.Errorf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.")
 			}
 			if err := authTailscale(); err != nil {
-				log.Fatalf("failed to auth tailscale: %v", err)
+				return fmt.Errorf("failed to auth tailscale: %w", err)
 			}
 		case ipn.NeedsMachineAuth:
 			log.Printf("machine authorization required, please visit the admin panel")

@@ -313,14 +348,11 @@ authLoop:

 	w.Close()

-	ctx, cancel := contextWithExitSignalWatch()
-	defer cancel()
-
 	if isTwoStepConfigAuthOnce(cfg) {
 		// Now that we are authenticated, we can set/reset any of the
 		// settings that we need to.
 		if err := tailscaleSet(ctx, cfg); err != nil {
-			log.Fatalf("failed to auth tailscale: %v", err)
+			return fmt.Errorf("failed to auth tailscale: %w", err)
 		}
 	}

@@ -329,11 +361,11 @@ authLoop:
 	if cfg.ServeConfigPath != "" {
 		log.Printf("serve proxy: unsetting previous config")
 		if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil {
-			log.Fatalf("failed to unset serve config: %v", err)
+			return fmt.Errorf("failed to unset serve config: %w", err)
 		}
 		if hasKubeStateStore(cfg) {
 			if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil {
-				log.Fatalf("failed to update HTTPS endpoint in tailscale state: %v", err)
+				return fmt.Errorf("failed to update HTTPS endpoint in tailscale state: %w", err)
 			}
 		}
 	}

@@ -344,19 +376,19 @@ authLoop:
 		// wipe it, but it's good hygiene.
 		log.Printf("Deleting authkey from kube secret")
 		if err := kc.deleteAuthKey(ctx); err != nil {
-			log.Fatalf("deleting authkey from kube secret: %v", err)
+			return fmt.Errorf("deleting authkey from kube secret: %w", err)
 		}
 	}

 	if hasKubeStateStore(cfg) {
 		if err := kc.storeCapVerUID(ctx, cfg.PodUID); err != nil {
-			log.Fatalf("storing capability version and UID: %v", err)
+			return fmt.Errorf("storing capability version and UID: %w", err)
 		}
 	}

 	w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
 	if err != nil {
-		log.Fatalf("rewatching tailscaled for updates after auth: %v", err)
+		return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err)
 	}

 	// If tailscaled config was read from a mounted file, watch the file for updates and reload.

@@ -386,7 +418,7 @@ authLoop:
 	if isL3Proxy(cfg) {
 		nfr, err = newNetfilterRunner(log.Printf)
 		if err != nil {
-			log.Fatalf("error creating new netfilter runner: %v", err)
+			return fmt.Errorf("error creating new netfilter runner: %w", err)
 		}
 	}

@@ -457,9 +489,9 @@ runLoop:
 			killTailscaled()
 			break runLoop
 		case err := <-errChan:
-			log.Fatalf("failed to read from tailscaled: %v", err)
+			return fmt.Errorf("failed to read from tailscaled: %w", err)
 		case err := <-cfgWatchErrChan:
-			log.Fatalf("failed to watch tailscaled config: %v", err)
+			return fmt.Errorf("failed to watch tailscaled config: %w", err)
 		case n := <-notifyChan:
 			if n.State != nil && *n.State != ipn.Running {
 				// Something's gone wrong and we've left the authenticated state.

@@ -467,7 +499,7 @@ runLoop:
 				// control flow required to make it work now is hard. So, just crash
 				// the container and rely on the container runtime to restart us,
 				// whereupon we'll go through initial auth again.
-				log.Fatalf("tailscaled left running state (now in state %q), exiting", *n.State)
+				return fmt.Errorf("tailscaled left running state (now in state %q), exiting", *n.State)
 			}
 			if n.NetMap != nil {
 				addrs = n.NetMap.SelfNode.Addresses().AsSlice()

@@ -485,7 +517,7 @@ runLoop:
 				deviceID := n.NetMap.SelfNode.StableID()
 				if hasKubeStateStore(cfg) && deephash.Update(&currentDeviceID, &deviceID) {
 					if err := kc.storeDeviceID(ctx, n.NetMap.SelfNode.StableID()); err != nil {
-						log.Fatalf("storing device ID in Kubernetes Secret: %v", err)
+						return fmt.Errorf("storing device ID in Kubernetes Secret: %w", err)
 					}
 				}
 				if cfg.TailnetTargetFQDN != "" {

@@ -522,12 +554,12 @@ runLoop:
 							rulesInstalled = true
 							log.Printf("Installing forwarding rules for destination %v", ea.String())
 							if err := installEgressForwardingRule(ctx, ea.String(), addrs, nfr); err != nil {
-								log.Fatalf("installing egress proxy rules for destination %s: %v", ea.String(), err)
+								return fmt.Errorf("installing egress proxy rules for destination %s: %v", ea.String(), err)
 							}
 						}
 					}
 					if !rulesInstalled {
-						log.Fatalf("no forwarding rules for egress addresses %v, host supports IPv6: %v", egressAddrs, nfr.HasIPV6NAT())
+						return fmt.Errorf("no forwarding rules for egress addresses %v, host supports IPv6: %v", egressAddrs, nfr.HasIPV6NAT())
 					}
 				}
 				currentEgressIPs = newCurentEgressIPs

@@ -535,7 +567,7 @@ runLoop:
 				if cfg.ProxyTargetIP != "" && len(addrs) != 0 && ipsHaveChanged {
 					log.Printf("Installing proxy rules")
 					if err := installIngressForwardingRule(ctx, cfg.ProxyTargetIP, addrs, nfr); err != nil {
-						log.Fatalf("installing ingress proxy rules: %v", err)
+						return fmt.Errorf("installing ingress proxy rules: %w", err)
 					}
 				}
 				if cfg.ProxyTargetDNSName != "" && len(addrs) != 0 && ipsHaveChanged {

@@ -551,7 +583,7 @@ runLoop:
 					if backendsHaveChanged {
 						log.Printf("installing ingress proxy rules for backends %v", newBackendAddrs)
 						if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
-							log.Fatalf("error installing ingress proxy rules: %v", err)
+							return fmt.Errorf("error installing ingress proxy rules: %w", err)
 						}
 					}
 					resetTimer(false)

@@ -573,7 +605,7 @@ runLoop:
 				if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) != 0 {
 					log.Printf("Installing forwarding rules for destination %v", cfg.TailnetTargetIP)
 					if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs, nfr); err != nil {
-						log.Fatalf("installing egress proxy rules: %v", err)
+						return fmt.Errorf("installing egress proxy rules: %w", err)
 					}
 				}
 				// If this is a L7 cluster ingress proxy (set up

@@ -585,7 +617,7 @@ runLoop:
 				if cfg.AllowProxyingClusterTrafficViaIngress && cfg.ServeConfigPath != "" && ipsHaveChanged && len(addrs) != 0 {
 					log.Printf("installing rules to forward traffic for %s to node's tailnet IP", cfg.PodIP)
 					if err := installTSForwardingRuleForDestination(ctx, cfg.PodIP, addrs, nfr); err != nil {
-						log.Fatalf("installing rules to forward traffic to node's tailnet IP: %v", err)
+						return fmt.Errorf("installing rules to forward traffic to node's tailnet IP: %w", err)
 					}
 				}
 				currentIPs = newCurrentIPs

@@ -604,7 +636,7 @@ runLoop:
 				deviceEndpoints := []any{n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses()}
 				if hasKubeStateStore(cfg) && deephash.Update(&currentDeviceEndpoints, &deviceEndpoints) {
 					if err := kc.storeDeviceEndpoints(ctx, n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
-						log.Fatalf("storing device IPs and FQDN in Kubernetes Secret: %v", err)
+						return fmt.Errorf("storing device IPs and FQDN in Kubernetes Secret: %w", err)
 					}
 				}

@@ -639,20 +671,21 @@ runLoop:
 	// will then continuously monitor the config file and netmap updates and
 	// reconfigure the firewall rules as needed. If any of its operations fail, it
 	// will crash this node.
-	if cfg.EgressSvcsCfgPath != "" {
-		log.Printf("configuring egress proxy using configuration file at %s", cfg.EgressSvcsCfgPath)
+	if cfg.EgressProxiesCfgPath != "" {
+		log.Printf("configuring egress proxy using configuration file at %s", cfg.EgressProxiesCfgPath)
 		egressSvcsNotify = make(chan ipn.Notify)
-		ep := egressProxy{
-			cfgPath:      cfg.EgressSvcsCfgPath,
+		opts := egressProxyRunOpts{
+			cfgPath:      cfg.EgressProxiesCfgPath,
 			nfr:          nfr,
 			kc:           kc,
 			tsClient:     client,
 			stateSecret:  cfg.KubeSecret,
 			netmapChan:   egressSvcsNotify,
 			podIPv4:      cfg.PodIPv4,
 			tailnetAddrs: addrs,
 		}
 		go func() {
-			if err := ep.run(ctx, n); err != nil {
+			if err := ep.run(ctx, n, opts); err != nil {
 				egressSvcsErrorChan <- err
 			}
 		}()

@@ -694,16 +727,18 @@ runLoop:
 			if backendsHaveChanged && len(addrs) != 0 {
 				log.Printf("Backend address change detected, installing proxy rules for backends %v", newBackendAddrs)
 				if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
-					log.Fatalf("installing ingress proxy rules for DNS target %s: %v", cfg.ProxyTargetDNSName, err)
+					return fmt.Errorf("installing ingress proxy rules for DNS target %s: %v", cfg.ProxyTargetDNSName, err)
 				}
 			}
 			backendAddrs = newBackendAddrs
 			resetTimer(false)
 		case e := <-egressSvcsErrorChan:
-			log.Fatalf("egress proxy failed: %v", e)
+			return fmt.Errorf("egress proxy failed: %v", e)
 		}
 	}
 	wg.Wait()
+
+	return nil
 }
 // ensureTunFile checks that /dev/net/tun exists, creating it if

@@ -732,13 +767,13 @@ func resolveDNS(ctx context.Context, name string) ([]net.IP, error) {
 	ip4s, err := net.DefaultResolver.LookupIP(ctx, "ip4", name)
 	if err != nil {
 		if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) {
-			return nil, fmt.Errorf("error looking up IPv4 addresses: %v", err)
+			return nil, fmt.Errorf("error looking up IPv4 addresses: %w", err)
 		}
 	}
 	ip6s, err := net.DefaultResolver.LookupIP(ctx, "ip6", name)
 	if err != nil {
 		if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) {
-			return nil, fmt.Errorf("error looking up IPv6 addresses: %v", err)
+			return nil, fmt.Errorf("error looking up IPv6 addresses: %w", err)
 		}
 	}
 	if len(ip4s) == 0 && len(ip6s) == 0 {
@@ -751,7 +786,7 @@ func resolveDNS(ctx context.Context, name string) ([]net.IP, error) {
 // context that gets cancelled when a signal is received and a cancel function
 // that can be called to free the resources when the watch should be stopped.
 func contextWithExitSignalWatch() (context.Context, func()) {
-	closeChan := make(chan string)
+	closeChan := make(chan struct{})
 	ctx, cancel := context.WithCancel(context.Background())
 	signalChan := make(chan os.Signal, 1)
 	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)

@@ -763,8 +798,11 @@ func contextWithExitSignalWatch() (context.Context, func()) {
 			return
 		}
 	}()
+	closeOnce := sync.Once{}
 	f := func() {
-		closeChan <- "goodbye"
+		closeOnce.Do(func() {
+			close(closeChan)
+		})
 	}
 	return ctx, f
 }
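The old cancel function sent a value into closeChan, which blocks forever if the watching goroutine has already exited, and a second call would hang the caller. Closing the channel through sync.Once makes cancellation idempotent and wakes every waiter at once. A small self-contained demonstration of the pattern:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	done := make(chan struct{})
	var once sync.Once
	cancel := func() { once.Do(func() { close(done) }) }

	cancel()
	cancel() // safe: close happens exactly once

	// Any number of receivers observe the closed channel without blocking.
	<-done
	<-done
	fmt.Println("both waiters released")
}
```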
@@ -817,7 +855,11 @@ func runHTTPServer(mux *http.ServeMux, addr string) (close func() error) {

 	go func() {
 		if err := srv.Serve(ln); err != nil {
-			log.Fatalf("failed running server: %v", err)
+			if err != http.ErrServerClosed {
+				log.Fatalf("failed running server: %v", err)
+			} else {
+				log.Printf("HTTP server at %s closed", addr)
+			}
 		}
 	}()
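http.Server.Serve always returns a non-nil error; after a clean Shutdown that error is http.ErrServerClosed, which the change above stops treating as fatal. A runnable sketch of the distinction:

```go
package main

import (
	"context"
	"errors"
	"log"
	"net"
	"net/http"
	"time"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	srv := &http.Server{}
	go func() {
		time.Sleep(100 * time.Millisecond)
		srv.Shutdown(context.Background()) // triggers a clean stop
	}()
	if err := srv.Serve(ln); !errors.Is(err, http.ErrServerClosed) {
		log.Fatalf("failed running server: %v", err) // a real failure
	}
	log.Printf("HTTP server at %s closed", ln.Addr()) // expected on shutdown
}
```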
@@ -25,6 +25,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"syscall"
 	"testing"
 	"time"

@@ -32,6 +33,8 @@ import (
 	"golang.org/x/sys/unix"
 	"tailscale.com/ipn"
 	"tailscale.com/kube/egressservices"
+	"tailscale.com/kube/kubeclient"
+	"tailscale.com/kube/kubetypes"
 	"tailscale.com/tailcfg"
 	"tailscale.com/tstest"
 	"tailscale.com/types/netmap"

@@ -48,26 +51,13 @@ func TestContainerBoot(t *testing.T) {
 	defer lapi.Close()

 	kube := kubeServer{FSRoot: d}
-	if err := kube.Start(); err != nil {
-		t.Fatal(err)
-	}
+	kube.Start(t)
 	defer kube.Close()

 	tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"}
-	tailscaledConfBytes, err := json.Marshal(tailscaledConf)
-	if err != nil {
-		t.Fatalf("error unmarshaling tailscaled config: %v", err)
-	}
 	serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}}
-	serveConfBytes, err := json.Marshal(serveConf)
-	if err != nil {
-		t.Fatalf("error unmarshaling serve config: %v", err)
-	}
-	egressSvcsCfg := egressservices.Configs{"foo": {TailnetTarget: egressservices.TailnetTarget{FQDN: "foo.tailnetxyx.ts.net"}}}
-	egressSvcsCfgBytes, err := json.Marshal(egressSvcsCfg)
-	if err != nil {
-		t.Fatalf("error unmarshaling egress services config: %v", err)
-	}
+	egressCfg := egressSvcConfig("foo", "foo.tailnetxyz.ts.net")
+	egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net")

 	dirs := []string{
 		"var/lib",

@@ -84,16 +74,17 @@ func TestContainerBoot(t *testing.T) {
 		}
 	}
 	files := map[string][]byte{
-		"usr/bin/tailscaled":                    fakeTailscaled,
-		"usr/bin/tailscale":                     fakeTailscale,
-		"usr/bin/iptables":                      fakeTailscale,
-		"usr/bin/ip6tables":                     fakeTailscale,
-		"dev/net/tun":                           []byte(""),
-		"proc/sys/net/ipv4/ip_forward":          []byte("0"),
-		"proc/sys/net/ipv6/conf/all/forwarding": []byte("0"),
-		"etc/tailscaled/cap-95.hujson":          tailscaledConfBytes,
-		"etc/tailscaled/serve-config.json":      serveConfBytes,
-		"etc/tailscaled/egress-services-config.json": egressSvcsCfgBytes,
+		"usr/bin/tailscaled":                    fakeTailscaled,
+		"usr/bin/tailscale":                     fakeTailscale,
+		"usr/bin/iptables":                      fakeTailscale,
+		"usr/bin/ip6tables":                     fakeTailscale,
+		"dev/net/tun":                           []byte(""),
+		"proc/sys/net/ipv4/ip_forward":          []byte("0"),
+		"proc/sys/net/ipv6/conf/all/forwarding": []byte("0"),
+		"etc/tailscaled/cap-95.hujson":          mustJSON(t, tailscaledConf),
+		"etc/tailscaled/serve-config.json":      mustJSON(t, serveConf),
+		filepath.Join("etc/tailscaled/", egressservices.KeyEgressServices): mustJSON(t, egressCfg),
+		filepath.Join("etc/tailscaled/", egressservices.KeyHEPPings):       []byte("4"),
 	}
 	resetFiles := func() {
 		for path, content := range files {
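The rewritten files map calls a mustJSON helper whose definition is not visible in this comparison; a helper of roughly this shape would satisfy those call sites (hypothetical reconstruction, not the actual code):

```go
// Hypothetical: the real mustJSON in this change may differ.
func mustJSON(t *testing.T, v any) []byte {
	t.Helper()
	b, err := json.Marshal(v)
	if err != nil {
		t.Fatalf("marshaling %T: %v", v, err)
	}
	return b
}
```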
@@ -132,6 +123,9 @@ func TestContainerBoot(t *testing.T) {
 	healthURL := func(port int) string {
 		return fmt.Sprintf("http://127.0.0.1:%d/healthz", port)
 	}
+	egressSvcTerminateURL := func(port int) string {
+		return fmt.Sprintf("http://127.0.0.1:%d%s", port, kubetypes.EgessServicesPreshutdownEP)
+	}

 	capver := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion)

@@ -143,15 +137,29 @@ func TestContainerBoot(t *testing.T) {

 		// WantCmds is the commands that containerboot should run in this phase.
 		WantCmds []string

 		// WantKubeSecret is the secret keys/values that should exist in the
 		// kube secret.
 		WantKubeSecret map[string]string

+		// Update the kube secret with these keys/values at the beginning of the
+		// phase (simulates our fake tailscaled doing it).
+		UpdateKubeSecret map[string]string
+
 		// WantFiles files that should exist in the container and their
 		// contents.
 		WantFiles map[string]string
-		// WantFatalLog is the fatal log message we expect from containerboot.
-		// If set for a phase, the test will finish on that phase.
-		WantFatalLog string
+
+		// WantLog is a log message we expect from containerboot.
+		WantLog string
+
+		// If set for a phase, the test will expect containerboot to exit with
+		// this error code, and the test will finish on that phase without
+		// waiting for the successful startup log message.
+		WantExitCode *int
+
+		// The signal to send to containerboot at the start of the phase.
+		Signal *syscall.Signal
+
+		EndpointStatuses map[string]int
 	}
@@ -896,9 +905,10 @@ func TestContainerBoot(t *testing.T) {
|
||||
{
|
||||
Name: "egress_svcs_config_kube",
|
||||
Env: map[string]string{
|
||||
"KUBERNETES_SERVICE_HOST": kube.Host,
|
||||
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
|
||||
"TS_EGRESS_SERVICES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled/egress-services-config.json"),
|
||||
"KUBERNETES_SERVICE_HOST": kube.Host,
|
||||
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
|
||||
"TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled"),
|
||||
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
|
||||
},
|
||||
KubeSecret: map[string]string{
|
||||
"authkey": "tskey-key",
|
||||
@@ -912,28 +922,92 @@ func TestContainerBoot(t *testing.T) {
|
||||
WantKubeSecret: map[string]string{
|
||||
"authkey": "tskey-key",
|
||||
},
|
||||
EndpointStatuses: map[string]int{
|
||||
egressSvcTerminateURL(localAddrPort): 200,
|
||||
},
|
||||
},
|
||||
{
|
||||
Notify: runningNotify,
|
||||
WantKubeSecret: map[string]string{
|
||||
"egress-services": mustBase64(t, egressStatus),
|
||||
"authkey": "tskey-key",
|
||||
"device_fqdn": "test-node.test.ts.net",
|
||||
"device_id": "myID",
|
||||
"device_ips": `["100.64.0.1"]`,
|
||||
"tailscale_capver": capver,
|
||||
},
|
||||
EndpointStatuses: map[string]int{
|
||||
egressSvcTerminateURL(localAddrPort): 200,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "egress_svcs_config_no_kube",
|
||||
Env: map[string]string{
|
||||
"TS_EGRESS_SERVICES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled/egress-services-config.json"),
|
||||
"TS_AUTHKEY": "tskey-key",
|
||||
"TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled"),
|
||||
"TS_AUTHKEY": "tskey-key",
|
||||
},
|
||||
Phases: []phase{
|
||||
{
|
||||
WantFatalLog: "TS_EGRESS_SERVICES_CONFIG_PATH is only supported for Tailscale running on Kubernetes",
|
||||
WantLog: "TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes",
|
||||
WantExitCode: ptr.To(1),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "kube_shutdown_during_state_write",
|
||||
Env: map[string]string{
|
||||
"KUBERNETES_SERVICE_HOST": kube.Host,
|
||||
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
|
||||
"TS_ENABLE_HEALTH_CHECK": "true",
|
||||
},
|
||||
KubeSecret: map[string]string{
|
||||
"authkey": "tskey-key",
|
||||
},
|
||||
Phases: []phase{
|
||||
{
|
||||
// Normal startup.
|
||||
WantCmds: []string{
|
||||
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking",
|
||||
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
|
||||
},
|
||||
WantKubeSecret: map[string]string{
|
||||
"authkey": "tskey-key",
|
||||
},
|
||||
},
|
||||
{
|
||||
// SIGTERM before state is finished writing, should wait for
|
||||
// consistent state before propagating SIGTERM to tailscaled.
|
||||
Signal: ptr.To(unix.SIGTERM),
|
||||
UpdateKubeSecret: map[string]string{
|
||||
"_machinekey": "foo",
|
||||
"_profiles": "foo",
|
||||
"profile-baff": "foo",
|
||||
// Missing "_current-profile" key.
|
||||
},
|
||||
WantKubeSecret: map[string]string{
|
||||
"authkey": "tskey-key",
|
||||
"_machinekey": "foo",
|
||||
"_profiles": "foo",
|
||||
"profile-baff": "foo",
|
||||
},
|
||||
WantLog: "Waiting for tailscaled to finish writing state to Secret \"tailscale\"",
|
||||
},
|
||||
{
|
||||
// tailscaled has finished writing state, should propagate SIGTERM.
|
||||
UpdateKubeSecret: map[string]string{
|
||||
"_current-profile": "foo",
|
||||
},
|
||||
WantKubeSecret: map[string]string{
|
||||
"authkey": "tskey-key",
|
||||
"_machinekey": "foo",
|
||||
"_profiles": "foo",
|
||||
"profile-baff": "foo",
|
||||
"_current-profile": "foo",
|
||||
},
|
||||
WantLog: "HTTP server at [::]:9002 closed",
|
||||
WantExitCode: ptr.To(0),
|
||||
},
|
||||
},
|
||||
},
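The phase sequence above encodes the invariant containerboot relies on: SIGTERM must not be forwarded to tailscaled until the state Secret holds a complete snapshot. Below is a minimal sketch of the consistency predicate this test exercises, inferred from the test fixtures alone — the real check lives in containerboot and is not part of this diff, and the key list here is an assumption drawn from the phases above:

package main

import "fmt"

// hasConsistentState is a sketch of the predicate the test exercises:
// tailscaled appears to write "_current-profile" last, so a Secret missing
// that key is mid-write and SIGTERM must not yet be forwarded.
func hasConsistentState(secret map[string]string) bool {
	for _, k := range []string{"_machinekey", "_profiles", "_current-profile"} {
		if _, ok := secret[k]; !ok {
			return false
		}
	}
	return true
}

func main() {
	s := map[string]string{"_machinekey": "foo", "_profiles": "foo", "profile-baff": "foo"}
	fmt.Println(hasConsistentState(s)) // false: keep waiting
	s["_current-profile"] = "foo"
	fmt.Println(hasConsistentState(s)) // true: safe to propagate SIGTERM
}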
@@ -981,26 +1055,36 @@ func TestContainerBoot(t *testing.T) {

var wantCmds []string
for i, p := range test.Phases {
for k, v := range p.UpdateKubeSecret {
kube.SetSecret(k, v)
}
lapi.Notify(p.Notify)
if p.WantFatalLog != "" {
if p.Signal != nil {
cmd.Process.Signal(*p.Signal)
}
if p.WantLog != "" {
err := tstest.WaitFor(2*time.Second, func() error {
state, err := cmd.Process.Wait()
if err != nil {
return err
}
if state.ExitCode() != 1 {
return fmt.Errorf("process exited with code %d but wanted %d", state.ExitCode(), 1)
}
waitLogLine(t, time.Second, cbOut, p.WantFatalLog)
waitLogLine(t, time.Second, cbOut, p.WantLog)
return nil
})
if err != nil {
t.Fatal(err)
}
}

if p.WantExitCode != nil {
state, err := cmd.Process.Wait()
if err != nil {
t.Fatal(err)
}
if state.ExitCode() != *p.WantExitCode {
t.Fatalf("phase %d: want exit code %d, got %d", i, *p.WantExitCode, state.ExitCode())
}

// Early test return; we don't expect the successful startup log message.
return
}

wantCmds = append(wantCmds, p.WantCmds...)
waitArgs(t, 2*time.Second, d, argFile, strings.Join(wantCmds, "\n"))
err := tstest.WaitFor(2*time.Second, func() error {
@@ -1056,6 +1140,9 @@ func TestContainerBoot(t *testing.T) {
}
}
waitLogLine(t, 2*time.Second, cbOut, "Startup complete, waiting for shutdown signal")
if cmd.ProcessState != nil {
t.Fatalf("containerboot should be running but exited with exit code %d", cmd.ProcessState.ExitCode())
}
})
}
}
@@ -1287,18 +1374,18 @@ func (k *kubeServer) Reset() {
k.secret = map[string]string{}
}

func (k *kubeServer) Start() error {
func (k *kubeServer) Start(t *testing.T) {
root := filepath.Join(k.FSRoot, "var/run/secrets/kubernetes.io/serviceaccount")

if err := os.MkdirAll(root, 0700); err != nil {
return err
t.Fatal(err)
}

if err := os.WriteFile(filepath.Join(root, "namespace"), []byte("default"), 0600); err != nil {
return err
t.Fatal(err)
}
if err := os.WriteFile(filepath.Join(root, "token"), []byte("bearer_token"), 0600); err != nil {
return err
t.Fatal(err)
}

k.srv = httptest.NewTLSServer(k)
@@ -1307,13 +1394,11 @@ func (k *kubeServer) Start() error {

var cert bytes.Buffer
if err := pem.Encode(&cert, &pem.Block{Type: "CERTIFICATE", Bytes: k.srv.Certificate().Raw}); err != nil {
return err
t.Fatal(err)
}
if err := os.WriteFile(filepath.Join(root, "ca.crt"), cert.Bytes(), 0600); err != nil {
return err
t.Fatal(err)
}

return nil
}

func (k *kubeServer) Close() {
@@ -1362,6 +1447,7 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) {
http.Error(w, fmt.Sprintf("reading request body: %v", err), http.StatusInternalServerError)
return
}
defer r.Body.Close()

switch r.Method {
case "GET":
@@ -1394,13 +1480,32 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) {
panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs)))
}
for _, op := range req {
if op.Op != "remove" {
switch op.Op {
case "remove":
if !strings.HasPrefix(op.Path, "/data/") {
panic(fmt.Sprintf("unsupported json-patch path %q", op.Path))
}
delete(k.secret, strings.TrimPrefix(op.Path, "/data/"))
case "replace":
path, ok := strings.CutPrefix(op.Path, "/data/")
if !ok {
panic(fmt.Sprintf("unsupported json-patch path %q", op.Path))
}
req := make([]kubeclient.JSONPatch, 0)
if err := json.Unmarshal(bs, &req); err != nil {
panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs)))
}

for _, patch := range req {
val, ok := patch.Value.(string)
if !ok {
panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", patch.Value))
}
k.secret[path] = val
}
default:
panic(fmt.Sprintf("unsupported json-patch op %q", op.Op))
}
if !strings.HasPrefix(op.Path, "/data/") {
panic(fmt.Sprintf("unsupported json-patch path %q", op.Path))
}
delete(k.secret, strings.TrimPrefix(op.Path, "/data/"))
}
case "application/strategic-merge-patch+json":
req := struct {
@@ -1416,6 +1521,44 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) {
panic(fmt.Sprintf("unknown content type %q", r.Header.Get("Content-Type")))
}
default:
panic(fmt.Sprintf("unhandled HTTP method %q", r.Method))
panic(fmt.Sprintf("unhandled HTTP request %s %s", r.Method, r.URL))
}
}
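For reference, a small self-contained sketch of the kind of application/json-patch+json body the fake kube server now accepts, covering both the pre-existing "remove" op and the newly supported "replace" op. The jsonPatch type below only mirrors the shape of kubeclient.JSONPatch for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// jsonPatch mirrors the shape of kubeclient.JSONPatch (illustration only).
type jsonPatch struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value any    `json:"value,omitempty"`
}

func main() {
	// A body combining a "replace" on a /data/ key with a "remove", as the
	// fake server's switch statement above now handles.
	body, err := json.Marshal([]jsonPatch{
		{Op: "replace", Path: "/data/_machinekey", Value: "foo"},
		{Op: "remove", Path: "/data/authkey"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}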

func mustBase64(t *testing.T, v any) string {
b := mustJSON(t, v)
s := base64.StdEncoding.WithPadding('=').EncodeToString(b)
return s
}

func mustJSON(t *testing.T, v any) []byte {
b, err := json.Marshal(v)
if err != nil {
t.Fatalf("error converting %v to json: %v", v, err)
}
return b
}

// egressSvcStatus returns an egress services status for one named tailnet target specified by FQDN, as written by the proxy to its state Secret.
func egressSvcStatus(name, fqdn string) egressservices.Status {
return egressservices.Status{
Services: map[string]*egressservices.ServiceStatus{
name: {
TailnetTarget: egressservices.TailnetTarget{
FQDN: fqdn,
},
},
},
}
}

// egressSvcConfig returns an egress config for one named tailnet target specified by FQDN.
func egressSvcConfig(name, fqdn string) egressservices.Configs {
return egressservices.Configs{
name: egressservices.Config{
TailnetTarget: egressservices.TailnetTarget{
FQDN: fqdn,
},
},
}
}

@@ -10,8 +10,8 @@ import (
"io"
"net/http"

"tailscale.com/client/tailscale"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/localclient/tailscale"
"tailscale.com/localclient/tailscale/apitype"
)

// metrics is a simple metrics HTTP server; if enabled, it forwards requests to

@@ -17,9 +17,9 @@ import (
"time"

"github.com/fsnotify/fsnotify"
"tailscale.com/client/tailscale"
"tailscale.com/ipn"
"tailscale.com/kube/kubetypes"
"tailscale.com/localclient/tailscale"
"tailscale.com/types/netmap"
)

@@ -12,9 +12,9 @@ import (
"testing"

"github.com/google/go-cmp/cmp"
"tailscale.com/client/tailscale"
"tailscale.com/ipn"
"tailscale.com/kube/kubetypes"
"tailscale.com/localclient/tailscale"
)

func TestUpdateServeConfig(t *testing.T) {

@@ -11,10 +11,12 @@ import (
"errors"
"fmt"
"log"
"net/http"
"net/netip"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"

@@ -22,7 +24,11 @@ import (
"tailscale.com/ipn"
"tailscale.com/kube/egressservices"
"tailscale.com/kube/kubeclient"
"tailscale.com/kube/kubetypes"
"tailscale.com/localclient/tailscale"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/util/httpm"
"tailscale.com/util/linuxfw"
"tailscale.com/util/mak"
)
@@ -37,13 +43,15 @@ const tailscaleTunInterface = "tailscale0"
// egressProxy knows how to configure firewall rules to route cluster traffic to
// one or more tailnet services.
type egressProxy struct {
cfgPath string // path to egress service config file
cfgPath string // path to a directory with egress services config files

nfr linuxfw.NetfilterRunner // never nil

kc kubeclient.Client // never nil
stateSecret string // name of the kube state Secret

tsClient *tailscale.LocalClient // never nil

netmapChan chan ipn.Notify // chan to receive netmap updates on

podIPv4 string // never empty string, currently only IPv4 is supported
@@ -55,15 +63,29 @@ type egressProxy struct {
// memory at all.
targetFQDNs map[string][]netip.Prefix

// used to configure firewall rules.
tailnetAddrs []netip.Prefix
tailnetAddrs []netip.Prefix // tailnet IPs of this tailnet device

// shortSleep is the backoff sleep between healthcheck endpoint calls - can be overridden in tests.
shortSleep time.Duration
// longSleep is the time to sleep after the routing rules are updated to increase the chance that kube
// proxies on all nodes have updated their routing configuration. It can be configured to 0 in
// tests.
longSleep time.Duration
// client is a client that can send HTTP requests.
client httpClient
}

// httpClient is a client that can send HTTP requests and can be mocked in tests.
type httpClient interface {
Do(*http.Request) (*http.Response, error)
}
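Narrowing the dependency to a single Do method is what lets the tests swap in the mockHTTPClient defined later in this diff. A self-contained sketch of the usual compile-time assertion (the interface is redeclared here only so the snippet stands alone):

package main

import "net/http"

// httpClient is redeclared so the assertion is self-contained; in the real
// code the interface lives in egressservices.go above.
type httpClient interface {
	Do(*http.Request) (*http.Response, error)
}

// Compile-time proof that the production client satisfies the interface; the
// test double satisfies it the same way.
var _ httpClient = (*http.Client)(nil)

func main() {}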

// run configures egress proxy firewall rules and ensures that the firewall rules are reconfigured when:
// - the mounted egress config has changed
// - the proxy's tailnet IP addresses have changed
// - tailnet IPs have changed for any backend targets specified by tailnet FQDN
func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error {
func (ep *egressProxy) run(ctx context.Context, n ipn.Notify, opts egressProxyRunOpts) error {
ep.configure(opts)
var tickChan <-chan time.Time
var eventChan <-chan fsnotify.Event
// TODO (irbekrm): take a look if this can be pulled into a single func
@@ -75,7 +97,7 @@ func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error {
tickChan = ticker.C
} else {
defer w.Close()
if err := w.Add(filepath.Dir(ep.cfgPath)); err != nil {
if err := w.Add(ep.cfgPath); err != nil {
return fmt.Errorf("failed to add fsnotify watch: %w", err)
}
eventChan = w.Events
@@ -85,28 +107,52 @@ func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error {
return err
}
for {
var err error
select {
case <-ctx.Done():
return nil
case <-tickChan:
err = ep.sync(ctx, n)
log.Printf("periodic sync, ensuring firewall config is up to date...")
case <-eventChan:
log.Printf("config file change detected, ensuring firewall config is up to date...")
err = ep.sync(ctx, n)
case n = <-ep.netmapChan:
shouldResync := ep.shouldResync(n)
if shouldResync {
log.Printf("netmap change detected, ensuring firewall config is up to date...")
err = ep.sync(ctx, n)
if !shouldResync {
continue
}
log.Printf("netmap change detected, ensuring firewall config is up to date...")
}
if err != nil {
if err := ep.sync(ctx, n); err != nil {
return fmt.Errorf("error syncing egress service config: %w", err)
}
}
}
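The reshuffled loop above funnels every wake-up source into one sync call at the bottom of the loop, with continue used by sources that decide nothing relevant changed. A self-contained sketch of the same pattern, with illustrative names:

package main

import (
	"context"
	"fmt"
	"time"
)

// loop sketches the consolidated event loop: each case only logs why it woke,
// then control falls through to a single shared sync call.
func loop(ctx context.Context, tick <-chan time.Time, events <-chan string, shouldResync func(string) bool, sync func() error) error {
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-tick:
			fmt.Println("periodic sync")
		case ev := <-events:
			if !shouldResync(ev) {
				continue // skip the sync entirely
			}
			fmt.Println("change detected:", ev)
		}
		if err := sync(); err != nil {
			return fmt.Errorf("error syncing: %w", err)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	events := make(chan string, 1)
	events <- "netmap update"
	if err := loop(ctx, time.Tick(10*time.Millisecond), events, func(string) bool { return true }, func() error { return nil }); err != nil {
		panic(err)
	}
}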

type egressProxyRunOpts struct {
cfgPath string
nfr linuxfw.NetfilterRunner
kc kubeclient.Client
tsClient *tailscale.LocalClient
stateSecret string
netmapChan chan ipn.Notify
podIPv4 string
tailnetAddrs []netip.Prefix
}

// configure configures the egress proxy using the provided options.
func (ep *egressProxy) configure(opts egressProxyRunOpts) {
ep.cfgPath = opts.cfgPath
ep.nfr = opts.nfr
ep.kc = opts.kc
ep.tsClient = opts.tsClient
ep.stateSecret = opts.stateSecret
ep.netmapChan = opts.netmapChan
ep.podIPv4 = opts.podIPv4
ep.tailnetAddrs = opts.tailnetAddrs
ep.client = &http.Client{} // default HTTP client
ep.shortSleep = time.Second
ep.longSleep = time.Second * 10
}

// sync triggers an egress proxy config resync. The resync calculates the diff between config and status to determine if
// any firewall rules need to be updated. Currently, using the status in the state Secret as a reference for the current
// firewall configuration is good enough because - the status is keyed by the Pod IP - we crash the Pod on errors such
@@ -327,7 +373,8 @@ func (ep *egressProxy) deleteUnnecessaryServices(cfgs *egressservices.Configs, s

// getConfigs gets the mounted egress service configuration.
func (ep *egressProxy) getConfigs() (*egressservices.Configs, error) {
j, err := os.ReadFile(ep.cfgPath)
svcsCfg := filepath.Join(ep.cfgPath, egressservices.KeyEgressServices)
j, err := os.ReadFile(svcsCfg)
if os.IsNotExist(err) {
return nil, nil
}
@@ -569,3 +616,142 @@ func servicesStatusIsEqual(st, st1 *egressservices.Status) bool {
st1.PodIPv4 = ""
return reflect.DeepEqual(*st, *st1)
}

// registerHandlers adds a new handler to the provided ServeMux that can be called as a Kubernetes prestop hook to
// delay shutdown till it's safe to do so.
func (ep *egressProxy) registerHandlers(mux *http.ServeMux) {
mux.Handle(fmt.Sprintf("GET %s", kubetypes.EgessServicesPreshutdownEP), ep)
}

// ServeHTTP serves the /internal-egress-services-preshutdown endpoint. When it receives a request, it periodically polls
// the configured health check endpoint for each egress service till the health check endpoint no longer hits this
// proxy Pod. It uses the Pod-IPv4 header to verify whether a health check response is received from this Pod.
func (ep *egressProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
cfgs, err := ep.getConfigs()
if err != nil {
http.Error(w, fmt.Sprintf("error retrieving egress services configs: %v", err), http.StatusInternalServerError)
return
}
if cfgs == nil {
if _, err := w.Write([]byte("safe to terminate")); err != nil {
http.Error(w, fmt.Sprintf("error writing termination status: %v", err), http.StatusInternalServerError)
return
}
}
hp, err := ep.getHEPPings()
if err != nil {
http.Error(w, fmt.Sprintf("error determining the number of times health check endpoint should be pinged: %v", err), http.StatusInternalServerError)
return
}
ep.waitTillSafeToShutdown(r.Context(), cfgs, hp)
}
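From the kubelet's point of view, the endpoint above behaves like a blocking preStop hook: the GET does not return until the proxy considers termination safe. A sketch of that flow — the address and client timeout are illustrative; the real path is the kubetypes.EgessServicesPreshutdownEP constant served on TS_LOCAL_ADDR_PORT ([::]:9002 in the tests earlier in this diff):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// The kubelet enforces its own request timeout; this client-side timeout
	// is just for the standalone sketch.
	c := &http.Client{Timeout: 30 * time.Second}
	resp, err := c.Get("http://[::1]:9002/internal-egress-services-preshutdown")
	if err != nil {
		fmt.Println("preshutdown request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("safe to terminate:", resp.Status)
}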

// waitTillSafeToShutdown looks up all egress targets configured to be proxied via this instance and, for each target
// whose configuration includes a healthcheck endpoint, pings the endpoint till none of the responses
// are returned by this instance or till the HTTP request times out. In practice, the endpoint will be a Kubernetes Service for which one of the backends
// would normally be this Pod. When this Pod is being deleted, the operator should have removed it from the Service
// backends and eventually kube proxy routing rules should be updated to no longer route traffic for the Service to this
// Pod.
func (ep *egressProxy) waitTillSafeToShutdown(ctx context.Context, cfgs *egressservices.Configs, hp int) {
if cfgs == nil || len(*cfgs) == 0 { // avoid sleeping if no services are configured
return
}
log.Printf("Ensuring that cluster traffic for egress targets is no longer routed via this Pod...")
wg := syncs.WaitGroup{}

for s, cfg := range *cfgs {
hep := cfg.HealthCheckEndpoint
if hep == "" {
log.Printf("Tailnet target %q does not have a cluster healthcheck specified, unable to verify if cluster traffic for the target is still routed via this Pod", s)
continue
}
svc := s
wg.Go(func() {
log.Printf("Ensuring that cluster traffic is no longer routed to %q via this Pod...", svc)
for {
if ctx.Err() != nil { // kubelet's HTTP request timeout
log.Printf("Cluster traffic for %s did not stop being routed to this Pod.", svc)
return
}
found, err := lookupPodRoute(ctx, hep, ep.podIPv4, hp, ep.client)
if err != nil {
log.Printf("unable to reach endpoint %q, assuming the routing rules for this Pod have been deleted: %v", hep, err)
break
}
if !found {
log.Printf("service %q is no longer routed through this Pod", svc)
break
}
log.Printf("service %q is still routed through this Pod, waiting...", svc)
time.Sleep(ep.shortSleep)
}
})
}
wg.Wait()
// The check above really only checks that the routing rules are updated on this node. Sleep for a bit to
// ensure that the routing rules are updated on other nodes. TODO(irbekrm): this may or may not be good enough.
// If it's not good enough, we'd probably want to do something more complex, where the proxies check each other.
log.Printf("Sleeping for %s before shutdown to ensure that kube proxies on all nodes have updated routing configuration", ep.longSleep)
time.Sleep(ep.longSleep)
}

// lookupPodRoute calls the healthcheck endpoint repeat times and returns true if the endpoint returns with the podIP
// header at least once.
func lookupPodRoute(ctx context.Context, hep, podIP string, repeat int, client httpClient) (bool, error) {
for range repeat {
f, err := lookup(ctx, hep, podIP, client)
if err != nil {
return false, err
}
if f {
return true, nil
}
}
return false, nil
}

// lookup calls the healthcheck endpoint and returns true if the response contains the podIP header.
func lookup(ctx context.Context, hep, podIP string, client httpClient) (bool, error) {
req, err := http.NewRequestWithContext(ctx, httpm.GET, hep, nil)
if err != nil {
return false, fmt.Errorf("error creating new HTTP request: %v", err)
}

// Close the TCP connection to ensure that the next request is routed to a different backend.
req.Close = true

resp, err := client.Do(req)
if err != nil {
log.Printf("Endpoint %q can not be reached: %v, likely because there are no (more) healthy backends", hep, err)
return true, nil
}
defer resp.Body.Close()
gotIP := resp.Header.Get(kubetypes.PodIPv4Header)
return strings.EqualFold(podIP, gotIP), nil
}

// getHEPPings gets the number of pings that should be sent to a health check endpoint to ensure that each configured
// backend is hit. This assumes that a health check endpoint is a Kubernetes Service and traffic to backend Pods is
// round robin load balanced.
func (ep *egressProxy) getHEPPings() (int, error) {
hepPingsPath := filepath.Join(ep.cfgPath, egressservices.KeyHEPPings)
j, err := os.ReadFile(hepPingsPath)
if os.IsNotExist(err) {
return 0, nil
}
if err != nil {
return -1, err
}
if len(j) == 0 || string(j) == "" {
return 0, nil
}
hp, err := strconv.Atoi(string(j))
if err != nil {
return -1, fmt.Errorf("error parsing hep pings as int: %v", err)
}
if hp < 0 {
log.Printf("[unexpected] hep pings is negative: %d", hp)
return 0, nil
}
return hp, nil
}
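A toy model of the round-robin assumption getHEPPings documents: if the health check Service balances across N backends, N probes suffice to observe each backend once, so N is the ping count lookupPodRoute needs. The backend IPs below are made up:

package main

import "fmt"

func main() {
	backends := []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"}
	next := 0
	// probe simulates one health check call under perfect round robin.
	probe := func() string {
		ip := backends[next%len(backends)]
		next++
		return ip
	}

	seen := map[string]bool{}
	for range len(backends) { // N probes for N backends
		seen[probe()] = true
	}
	fmt.Println(len(seen) == len(backends)) // true: every backend observed once
}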

@@ -6,11 +6,18 @@
package main

import (
"context"
"fmt"
"io"
"net/http"
"net/netip"
"reflect"
"strings"
"sync"
"testing"

"tailscale.com/kube/egressservices"
"tailscale.com/kube/kubetypes"
)

func Test_updatesForSvc(t *testing.T) {
@@ -173,3 +180,145 @@ func Test_updatesForSvc(t *testing.T) {
})
}
}

// A failure of this test will most likely look like a timeout.
func TestWaitTillSafeToShutdown(t *testing.T) {
podIP := "10.0.0.1"
anotherIP := "10.0.0.2"

tests := []struct {
name string
// services is a map of service name to the number of calls to make to the healthcheck endpoint before
// returning a response that does NOT contain this Pod's IP in headers.
services map[string]int
replicas int
healthCheckSet bool
}{
{
name: "no_configs",
},
{
name: "one_service_immediately_safe_to_shutdown",
services: map[string]int{
"svc1": 0,
},
replicas: 2,
healthCheckSet: true,
},
{
name: "multiple_services_immediately_safe_to_shutdown",
services: map[string]int{
"svc1": 0,
"svc2": 0,
"svc3": 0,
},
replicas: 2,
healthCheckSet: true,
},
{
name: "multiple_services_no_healthcheck_endpoints",
services: map[string]int{
"svc1": 0,
"svc2": 0,
"svc3": 0,
},
replicas: 2,
},
{
name: "one_service_eventually_safe_to_shutdown",
services: map[string]int{
"svc1": 3, // After 3 calls to health check endpoint, no longer returns this Pod's IP
},
replicas: 2,
healthCheckSet: true,
},
{
name: "multiple_services_eventually_safe_to_shutdown",
services: map[string]int{
"svc1": 1, // After 1 call to health check endpoint, no longer returns this Pod's IP
"svc2": 3, // After 3 calls to health check endpoint, no longer returns this Pod's IP
"svc3": 5, // After 5 calls to the health check endpoint, no longer returns this Pod's IP
},
replicas: 2,
healthCheckSet: true,
},
{
name: "multiple_services_eventually_safe_to_shutdown_with_higher_replica_count",
services: map[string]int{
"svc1": 7,
"svc2": 10,
},
replicas: 5,
healthCheckSet: true,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfgs := &egressservices.Configs{}
switches := make(map[string]int)

for svc, callsToSwitch := range tt.services {
endpoint := fmt.Sprintf("http://%s.local", svc)
if tt.healthCheckSet {
(*cfgs)[svc] = egressservices.Config{
HealthCheckEndpoint: endpoint,
}
}
switches[endpoint] = callsToSwitch
}

ep := &egressProxy{
podIPv4: podIP,
client: &mockHTTPClient{
podIP: podIP,
anotherIP: anotherIP,
switches: switches,
},
}

ep.waitTillSafeToShutdown(context.Background(), cfgs, tt.replicas)
})
}
}

// mockHTTPClient is a client that receives an HTTP call for an egress service endpoint and returns a response with an
// IP address in a 'Pod-IPv4' header. It can be configured to return one IP address for N calls, then switch to another
// IP address to simulate a scenario where an IP is eventually no longer a backend for an endpoint.
// TODO(irbekrm): to test this more thoroughly, we should have the client take into account the number of replicas and
// return as if traffic was round robin load balanced across different Pods.
type mockHTTPClient struct {
// podIP - initial IP address to return, that matches the current proxy's IP address.
podIP string
anotherIP string
// after how many calls to an endpoint the client should start returning 'anotherIP' instead of 'podIP'.
switches map[string]int
mu sync.Mutex // protects the following
// calls tracks the number of calls received.
calls map[string]int
}

func (m *mockHTTPClient) Do(req *http.Request) (*http.Response, error) {
m.mu.Lock()
if m.calls == nil {
m.calls = make(map[string]int)
}

endpoint := req.URL.String()
m.calls[endpoint]++
calls := m.calls[endpoint]
m.mu.Unlock()

resp := &http.Response{
StatusCode: http.StatusOK,
Header: make(http.Header),
Body: io.NopCloser(strings.NewReader("")),
}

if calls <= m.switches[endpoint] {
resp.Header.Set(kubetypes.PodIPv4Header, m.podIP) // Pod is still routable
} else {
resp.Header.Set(kubetypes.PodIPv4Header, m.anotherIP) // Pod is no longer routable
}
return resp, nil
}

@@ -64,16 +64,16 @@ type settings struct {
// when setting up rules to proxy cluster traffic to cluster ingress
// target.
// Deprecated: use PodIPv4, PodIPv6 instead to support dual stack clusters
PodIP string
PodIPv4 string
PodIPv6 string
PodUID string
HealthCheckAddrPort string
LocalAddrPort string
MetricsEnabled bool
HealthCheckEnabled bool
DebugAddrPort string
EgressSvcsCfgPath string
PodIP string
PodIPv4 string
PodIPv6 string
PodUID string
HealthCheckAddrPort string
LocalAddrPort string
MetricsEnabled bool
HealthCheckEnabled bool
DebugAddrPort string
EgressProxiesCfgPath string
}

func configFromEnv() (*settings, error) {
@@ -107,7 +107,7 @@ func configFromEnv() (*settings, error) {
MetricsEnabled: defaultBool("TS_ENABLE_METRICS", false),
HealthCheckEnabled: defaultBool("TS_ENABLE_HEALTH_CHECK", false),
DebugAddrPort: defaultEnv("TS_DEBUG_ADDR_PORT", ""),
EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""),
EgressProxiesCfgPath: defaultEnv("TS_EGRESS_PROXIES_CONFIG_PATH", ""),
PodUID: defaultEnv("POD_UID", ""),
}
podIPs, ok := os.LookupEnv("POD_IPS")
@@ -186,7 +186,7 @@ func (s *settings) validate() error {
return fmt.Errorf("error parsing TS_HEALTHCHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err)
}
}
if s.localMetricsEnabled() || s.localHealthEnabled() {
if s.localMetricsEnabled() || s.localHealthEnabled() || s.EgressProxiesCfgPath != "" {
if _, err := netip.ParseAddrPort(s.LocalAddrPort); err != nil {
return fmt.Errorf("error parsing TS_LOCAL_ADDR_PORT value %q: %w", s.LocalAddrPort, err)
}
@@ -199,8 +199,8 @@ func (s *settings) validate() error {
if s.HealthCheckEnabled && s.HealthCheckAddrPort != "" {
return errors.New("TS_HEALTHCHECK_ADDR_PORT is deprecated and will be removed in 1.82.0, use TS_ENABLE_HEALTH_CHECK and optionally TS_LOCAL_ADDR_PORT")
}
if s.EgressSvcsCfgPath != "" && !(s.InKubernetes && s.KubeSecret != "") {
return errors.New("TS_EGRESS_SERVICES_CONFIG_PATH is only supported for Tailscale running on Kubernetes")
if s.EgressProxiesCfgPath != "" && !(s.InKubernetes && s.KubeSecret != "") {
return errors.New("TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes")
}
return nil
}
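Taken together with the validate change above, the renamed mode needs both TS_EGRESS_PROXIES_CONFIG_PATH and a parseable TS_LOCAL_ADDR_PORT. A sketch of a minimal environment, mirroring the egress_svcs_config_kube test case earlier in this diff; values are illustrative, and TS_KUBE_SECRET is assumed to default to "tailscale":

package main

import (
	"fmt"
	"os"
)

func main() {
	env := map[string]string{
		"TS_EGRESS_PROXIES_CONFIG_PATH": "/etc/tailscaled", // directory, not a single file
		"TS_LOCAL_ADDR_PORT":            "[::]:9002",
		"KUBERNETES_SERVICE_HOST":       "10.96.0.1", // placeholder
		"KUBERNETES_SERVICE_PORT_HTTPS": "443",
		"TS_KUBE_SECRET":                "tailscale", // assumed default
	}
	for k, v := range env {
		os.Setenv(k, v)
		fmt.Printf("%s=%s\n", k, v)
	}
}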
@@ -291,7 +291,7 @@ func isOneStepConfig(cfg *settings) bool {
// as an L3 proxy, proxying to an endpoint provided via one of the config env
// vars.
func isL3Proxy(cfg *settings) bool {
return cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress || cfg.EgressSvcsCfgPath != ""
return cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress || cfg.EgressProxiesCfgPath != ""
}

// hasKubeStateStore returns true if the state must be stored in a Kubernetes
@@ -308,6 +308,10 @@ func (cfg *settings) localHealthEnabled() bool {
return cfg.LocalAddrPort != "" && cfg.HealthCheckEnabled
}

func (cfg *settings) egressSvcsTerminateEPEnabled() bool {
return cfg.LocalAddrPort != "" && cfg.EgressProxiesCfgPath != ""
}

// defaultEnv returns the value of the given envvar name, or defVal if
// unset.
func defaultEnv(name, defVal string) string {

@@ -20,7 +20,7 @@ import (
"time"

"github.com/fsnotify/fsnotify"
"tailscale.com/client/tailscale"
"tailscale.com/localclient/tailscale"
)

func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient, *os.Process, error) {
@@ -42,14 +42,14 @@ func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient
log.Printf("Waiting for tailscaled socket")
for {
if ctx.Err() != nil {
log.Fatalf("Timed out waiting for tailscaled socket")
return nil, nil, errors.New("timed out waiting for tailscaled socket")
}
_, err := os.Stat(cfg.Socket)
if errors.Is(err, fs.ErrNotExist) {
time.Sleep(100 * time.Millisecond)
continue
} else if err != nil {
log.Fatalf("Waiting for tailscaled socket: %v", err)
return nil, nil, fmt.Errorf("error waiting for tailscaled socket: %w", err)
}
break
}

@@ -53,7 +53,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink
L github.com/vishvananda/netns from github.com/tailscale/netlink+
github.com/x448/float16 from github.com/fxamacker/cbor/v2
💣 go4.org/mem from tailscale.com/client/tailscale+
💣 go4.org/mem from tailscale.com/derp+
go4.org/netipx from tailscale.com/net/tsaddr
W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+
google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt
@@ -86,18 +86,18 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+
tailscale.com from tailscale.com/version
💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+
tailscale.com/client/tailscale from tailscale.com/derp
tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale
tailscale.com/derp from tailscale.com/cmd/derper+
tailscale.com/derp/derphttp from tailscale.com/cmd/derper
tailscale.com/disco from tailscale.com/derp
tailscale.com/drive from tailscale.com/client/tailscale+
tailscale.com/envknob from tailscale.com/client/tailscale+
tailscale.com/drive from tailscale.com/ipn+
tailscale.com/envknob from tailscale.com/derp+
tailscale.com/health from tailscale.com/net/tlsdial+
tailscale.com/hostinfo from tailscale.com/net/netmon+
tailscale.com/ipn from tailscale.com/client/tailscale
tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+
tailscale.com/ipn from tailscale.com/localclient/tailscale
tailscale.com/ipn/ipnstate from tailscale.com/ipn+
tailscale.com/kube/kubetypes from tailscale.com/envknob
tailscale.com/localclient/tailscale from tailscale.com/derp
tailscale.com/localclient/tailscale/apitype from tailscale.com/localclient/tailscale
tailscale.com/metrics from tailscale.com/cmd/derper+
tailscale.com/net/bakedroots from tailscale.com/net/tlsdial
tailscale.com/net/dnscache from tailscale.com/derp/derphttp
@@ -106,7 +106,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/net/netknob from tailscale.com/net/netns
💣 tailscale.com/net/netmon from tailscale.com/derp/derphttp+
💣 tailscale.com/net/netns from tailscale.com/derp/derphttp
tailscale.com/net/netutil from tailscale.com/client/tailscale
tailscale.com/net/netutil from tailscale.com/localclient/tailscale
tailscale.com/net/sockstats from tailscale.com/derp/derphttp
tailscale.com/net/stun from tailscale.com/net/stunserver
tailscale.com/net/stunserver from tailscale.com/cmd/derper
@@ -116,11 +116,11 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/net/tsaddr from tailscale.com/ipn+
💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+
tailscale.com/net/wsconn from tailscale.com/cmd/derper
tailscale.com/paths from tailscale.com/client/tailscale
💣 tailscale.com/safesocket from tailscale.com/client/tailscale
tailscale.com/paths from tailscale.com/localclient/tailscale
💣 tailscale.com/safesocket from tailscale.com/localclient/tailscale
tailscale.com/syncs from tailscale.com/cmd/derper+
tailscale.com/tailcfg from tailscale.com/client/tailscale+
tailscale.com/tka from tailscale.com/client/tailscale+
tailscale.com/tailcfg from tailscale.com/derp+
tailscale.com/tka from tailscale.com/ipn/ipnstate+
W tailscale.com/tsconst from tailscale.com/net/netmon+
tailscale.com/tstime from tailscale.com/derp+
tailscale.com/tstime/mono from tailscale.com/tstime/rate
@@ -131,17 +131,17 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/types/dnstype from tailscale.com/tailcfg+
tailscale.com/types/empty from tailscale.com/ipn
tailscale.com/types/ipproto from tailscale.com/tailcfg+
tailscale.com/types/key from tailscale.com/client/tailscale+
tailscale.com/types/key from tailscale.com/cmd/derper+
tailscale.com/types/lazy from tailscale.com/version+
tailscale.com/types/logger from tailscale.com/cmd/derper+
tailscale.com/types/netmap from tailscale.com/ipn
tailscale.com/types/opt from tailscale.com/client/tailscale+
tailscale.com/types/opt from tailscale.com/envknob+
tailscale.com/types/persist from tailscale.com/ipn
tailscale.com/types/preftype from tailscale.com/ipn
tailscale.com/types/ptr from tailscale.com/hostinfo+
tailscale.com/types/result from tailscale.com/util/lineiter
tailscale.com/types/structs from tailscale.com/ipn+
tailscale.com/types/tkatype from tailscale.com/client/tailscale+
tailscale.com/types/tkatype from tailscale.com/localclient/tailscale+
tailscale.com/types/views from tailscale.com/ipn+
tailscale.com/util/cibuild from tailscale.com/health
tailscale.com/util/clientmetric from tailscale.com/net/netmon+
@@ -152,7 +152,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics
tailscale.com/util/dnsname from tailscale.com/hostinfo+
💣 tailscale.com/util/hashx from tailscale.com/util/deephash
tailscale.com/util/httpm from tailscale.com/client/tailscale
tailscale.com/util/lineiter from tailscale.com/hostinfo+
L tailscale.com/util/linuxfw from tailscale.com/net/netns
tailscale.com/util/mak from tailscale.com/health+
@@ -189,6 +188,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+
golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+
golang.org/x/crypto/hkdf from crypto/tls+
golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+
golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+
golang.org/x/crypto/nacl/box from tailscale.com/types/key
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
@@ -201,6 +202,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
golang.org/x/net/http/httpproxy from net/http+
golang.org/x/net/http2/hpack from net/http
golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+
golang.org/x/net/internal/socks from golang.org/x/net/proxy
golang.org/x/net/proxy from tailscale.com/net/netns
D golang.org/x/net/route from net+
golang.org/x/sync/errgroup from github.com/mdlayher/socket+
@@ -232,6 +234,18 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
crypto/ed25519 from crypto/tls+
crypto/elliptic from crypto/ecdsa+
crypto/hmac from crypto/tls+
crypto/internal/alias from crypto/aes+
crypto/internal/bigmod from crypto/ecdsa+
crypto/internal/boring from crypto/aes+
crypto/internal/boring/bbig from crypto/ecdsa+
crypto/internal/boring/sig from crypto/internal/boring
crypto/internal/edwards25519 from crypto/ed25519
crypto/internal/edwards25519/field from crypto/ecdh+
crypto/internal/hpke from crypto/tls
crypto/internal/mlkem768 from crypto/tls
crypto/internal/nistec from crypto/ecdh+
crypto/internal/nistec/fiat from crypto/internal/nistec
crypto/internal/randutil from crypto/dsa+
crypto/md5 from crypto/tls+
crypto/rand from crypto/ed25519+
crypto/rc4 from crypto/tls
@@ -242,6 +256,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
crypto/subtle from crypto/aes+
crypto/tls from golang.org/x/crypto/acme+
crypto/x509 from crypto/tls+
D crypto/x509/internal/macos from crypto/x509
crypto/x509/pkix from crypto/x509+
embed from crypto/internal/nistec+
encoding from encoding/json+
@@ -263,6 +278,44 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
hash/maphash from go4.org/mem
html from net/http/pprof+
html/template from tailscale.com/cmd/derper
internal/abi from crypto/x509/internal/macos+
internal/asan from syscall
internal/bisect from internal/godebug
internal/bytealg from bytes+
internal/byteorder from crypto/aes+
internal/chacha8rand from math/rand/v2+
internal/concurrent from unique
internal/coverage/rtcov from runtime
internal/cpu from crypto/aes+
internal/filepathlite from os+
internal/fmtsort from fmt+
internal/goarch from crypto/aes+
internal/godebug from crypto/tls+
internal/godebugs from internal/godebug+
internal/goexperiment from runtime
internal/goos from crypto/x509+
internal/itoa from internal/poll+
internal/msan from syscall
internal/nettrace from net+
internal/oserror from io/fs+
internal/poll from net+
internal/profile from net/http/pprof
internal/profilerecord from runtime+
internal/race from internal/poll+
internal/reflectlite from context+
internal/runtime/atomic from internal/runtime/exithook+
internal/runtime/exithook from runtime
L internal/runtime/syscall from runtime+
internal/singleflight from net
internal/stringslite from embed+
internal/syscall/execenv from os+
LD internal/syscall/unix from crypto/rand+
W internal/syscall/windows from crypto/rand+
W internal/syscall/windows/registry from mime+
W internal/syscall/windows/sysdll from internal/syscall/windows+
internal/testlog from os
internal/unsafeheader from internal/reflectlite+
internal/weak from unique
io from bufio+
io/fs from crypto/x509+
L io/ioutil from github.com/mitchellh/go-ps+
@@ -282,6 +335,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
net/http from expvar+
net/http/httptrace from net/http+
net/http/internal from net/http
net/http/internal/ascii from net/http
net/http/pprof from tailscale.com/tsweb
net/netip from go4.org/netipx+
net/textproto from golang.org/x/net/http/httpguts+
@@ -295,7 +349,10 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
reflect from crypto/x509+
regexp from github.com/coreos/go-iptables/iptables+
regexp/syntax from regexp
runtime from crypto/internal/nistec+
runtime/debug from github.com/prometheus/client_golang/prometheus+
runtime/internal/math from runtime
runtime/internal/sys from runtime
runtime/metrics from github.com/prometheus/client_golang/prometheus+
runtime/pprof from net/http/pprof
runtime/trace from net/http/pprof
@@ -314,3 +371,4 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
unicode/utf16 from crypto/x509+
unicode/utf8 from bufio+
unique from net/netip
unsafe from bytes+

@@ -18,8 +18,8 @@ import (
"strings"
"time"

"tailscale.com/client/tailscale"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/localclient/tailscale"
"tailscale.com/localclient/tailscale/apitype"
)

var (

@@ -197,10 +197,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio
W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs
W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+
github.com/tailscale/golang-x-crypto/acme from tailscale.com/ipn/ipnlocal
LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh
LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal
LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh
github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+
github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper
github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+
@@ -236,7 +232,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
go.uber.org/zap/internal/pool from go.uber.org/zap+
go.uber.org/zap/internal/stacktrace from go.uber.org/zap
go.uber.org/zap/zapcore from github.com/go-logr/zapr+
💣 go4.org/mem from tailscale.com/client/tailscale+
💣 go4.org/mem from tailscale.com/control/controlbase+
go4.org/netipx from tailscale.com/ipn/ipnlocal+
W 💣 golang.zx2c4.com/wintun from github.com/tailscale/wireguard-go/tun
W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/dns+
@@ -781,8 +777,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com from tailscale.com/version
tailscale.com/appc from tailscale.com/ipn/ipnlocal
💣 tailscale.com/atomicfile from tailscale.com/ipn+
tailscale.com/client/tailscale from tailscale.com/client/web+
tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+
tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+
tailscale.com/client/web from tailscale.com/ipn/ipnlocal
tailscale.com/clientupdate from tailscale.com/client/web+
LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate
@@ -798,10 +793,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal
💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal
tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal
tailscale.com/drive from tailscale.com/client/tailscale+
tailscale.com/envknob from tailscale.com/client/tailscale+
tailscale.com/drive from tailscale.com/ipn+
tailscale.com/envknob from tailscale.com/client/web+
tailscale.com/envknob/featureknob from tailscale.com/client/web+
tailscale.com/feature from tailscale.com/feature/wakeonlan+
tailscale.com/feature/capture from tailscale.com/feature/condregister
tailscale.com/feature/condregister from tailscale.com/tsnet
L tailscale.com/feature/tap from tailscale.com/feature/condregister
tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister
@@ -809,12 +805,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal
tailscale.com/hostinfo from tailscale.com/client/web+
tailscale.com/internal/noiseconn from tailscale.com/control/controlclient
tailscale.com/ipn from tailscale.com/client/tailscale+
tailscale.com/ipn from tailscale.com/client/web+
tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+
💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+
tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+
tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+
tailscale.com/ipn/localapi from tailscale.com/tsnet
tailscale.com/ipn/ipnstate from tailscale.com/client/web+
tailscale.com/ipn/localapi from tailscale.com/tsnet+
tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+
L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store
@@ -832,6 +828,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore
tailscale.com/kube/kubetypes from tailscale.com/cmd/k8s-operator+
tailscale.com/licenses from tailscale.com/client/web
tailscale.com/localclient/tailscale from tailscale.com/client/web+
tailscale.com/localclient/tailscale/apitype from tailscale.com/client/tailscale+
tailscale.com/log/filelogger from tailscale.com/logpolicy
tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal
tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+
@@ -860,7 +858,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+
💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+
W 💣 tailscale.com/net/netstat from tailscale.com/portlist
tailscale.com/net/netutil from tailscale.com/client/tailscale+
tailscale.com/net/netutil from tailscale.com/client/web+
tailscale.com/net/packet from tailscale.com/net/connstats+
tailscale.com/net/packet/checksum from tailscale.com/net/tstun
tailscale.com/net/ping from tailscale.com/net/netcheck+
@@ -878,17 +876,19 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
tailscale.com/net/tstun from tailscale.com/tsd+
tailscale.com/omit from tailscale.com/ipn/conffile
tailscale.com/paths from tailscale.com/client/tailscale+
tailscale.com/paths from tailscale.com/ipn/ipnlocal+
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal
tailscale.com/posture from tailscale.com/ipn/ipnlocal
tailscale.com/proxymap from tailscale.com/tsd+
💣 tailscale.com/safesocket from tailscale.com/client/tailscale+
💣 tailscale.com/safesocket from tailscale.com/ipn/ipnauth+
tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+
tailscale.com/syncs from tailscale.com/control/controlknobs+
tailscale.com/tailcfg from tailscale.com/client/tailscale+
tailscale.com/tailcfg from tailscale.com/client/web+
tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+
tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal
tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock
tailscale.com/tka from tailscale.com/client/tailscale+
tailscale.com/tempfork/httprec from tailscale.com/control/controlclient
tailscale.com/tka from tailscale.com/control/controlclient+
tailscale.com/tsconst from tailscale.com/net/netmon+
tailscale.com/tsd from tailscale.com/ipn/ipnlocal+
tailscale.com/tsnet from tailscale.com/cmd/k8s-operator+
@@ -900,7 +900,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+
tailscale.com/types/empty from tailscale.com/ipn+
tailscale.com/types/ipproto from tailscale.com/net/flowtrack+
tailscale.com/types/key from tailscale.com/client/tailscale+
tailscale.com/types/key from tailscale.com/control/controlbase+
tailscale.com/types/lazy from tailscale.com/ipn/ipnlocal+
tailscale.com/types/logger from tailscale.com/appc+
tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+
@@ -913,7 +913,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/types/ptr from tailscale.com/cmd/k8s-operator+
tailscale.com/types/result from tailscale.com/util/lineiter
tailscale.com/types/structs from tailscale.com/control/controlclient+
tailscale.com/types/tkatype from tailscale.com/client/tailscale+
tailscale.com/types/tkatype from tailscale.com/control/controlclient+
tailscale.com/types/views from tailscale.com/appc+
tailscale.com/util/cibuild from tailscale.com/health
tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+
@@ -969,7 +969,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/version from tailscale.com/client/web+
tailscale.com/version/distro from tailscale.com/client/web+
tailscale.com/wgengine from tailscale.com/ipn/ipnlocal+
tailscale.com/wgengine/capture from tailscale.com/ipn/ipnlocal+
tailscale.com/wgengine/filter from tailscale.com/control/controlclient+
tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+
💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+
@@ -985,18 +984,22 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
golang.org/x/crypto/argon2 from tailscale.com/tka
golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+
golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+
LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/chacha20 from github.com/tailscale/golang-x-crypto/ssh+
LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/chacha20 from golang.org/x/crypto/ssh+
golang.org/x/crypto/chacha20poly1305 from crypto/tls+
golang.org/x/crypto/cryptobyte from crypto/ecdsa+
golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+
golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+
golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+
golang.org/x/crypto/hkdf from crypto/tls+
golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+
golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+
golang.org/x/crypto/nacl/box from tailscale.com/types/key
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
golang.org/x/crypto/sha3 from crypto/internal/mlkem768+
LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal
LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh
golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+
golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+
golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+
@@ -1009,6 +1012,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
golang.org/x/net/http2/hpack from golang.org/x/net/http2+
golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+
golang.org/x/net/idna from golang.org/x/net/http/httpguts+
golang.org/x/net/internal/iana from golang.org/x/net/icmp+
golang.org/x/net/internal/socket from golang.org/x/net/icmp+
golang.org/x/net/internal/socks from golang.org/x/net/proxy
golang.org/x/net/ipv4 from github.com/miekg/dns+
golang.org/x/net/ipv6 from github.com/miekg/dns+
golang.org/x/net/proxy from tailscale.com/net/netns
@@ -1050,6 +1056,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
crypto/ed25519 from crypto/tls+
crypto/elliptic from crypto/ecdsa+
crypto/hmac from crypto/tls+
crypto/internal/alias from crypto/aes+
crypto/internal/bigmod from crypto/ecdsa+
crypto/internal/boring from crypto/aes+
crypto/internal/boring/bbig from crypto/ecdsa+
crypto/internal/boring/sig from crypto/internal/boring
crypto/internal/edwards25519 from crypto/ed25519
crypto/internal/edwards25519/field from crypto/ecdh+
crypto/internal/hpke from crypto/tls
crypto/internal/mlkem768 from crypto/tls
crypto/internal/nistec from crypto/ecdh+
crypto/internal/nistec/fiat from crypto/internal/nistec
crypto/internal/randutil from crypto/dsa+
crypto/md5 from crypto/tls+
crypto/rand from crypto/ed25519+
crypto/rc4 from crypto/tls+
@@ -1060,6 +1078,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
crypto/subtle from crypto/aes+
crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+
crypto/x509 from crypto/tls+
D crypto/x509/internal/macos from crypto/x509
crypto/x509/pkix from crypto/x509+
database/sql from github.com/prometheus/client_golang/prometheus/collectors
database/sql/driver from database/sql+
@@ -1085,6 +1104,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
go/build/constraint from go/parser
go/doc from k8s.io/apimachinery/pkg/runtime
go/doc/comment from go/doc
go/internal/typeparams from go/parser
go/parser from k8s.io/apimachinery/pkg/runtime
go/scanner from go/ast+
go/token from go/ast+
@@ -1095,6 +1115,46 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
hash/maphash from go4.org/mem
html from html/template+
html/template from github.com/gorilla/csrf
internal/abi from crypto/x509/internal/macos+
internal/asan from syscall
internal/bisect from internal/godebug
internal/bytealg from bytes+
internal/byteorder from crypto/aes+
internal/chacha8rand from math/rand/v2+
internal/concurrent from unique
internal/coverage/rtcov from runtime
internal/cpu from crypto/aes+
internal/filepathlite from os+
internal/fmtsort from fmt+
internal/goarch from crypto/aes+
internal/godebug from archive/tar+
internal/godebugs from internal/godebug+
internal/goexperiment from runtime
internal/goos from crypto/x509+
internal/itoa from internal/poll+
internal/lazyregexp from go/doc
internal/msan from syscall
internal/nettrace from net+
internal/oserror from io/fs+
internal/poll from net+
internal/profile from net/http/pprof
internal/profilerecord from runtime+
internal/race from internal/poll+
internal/reflectlite from context+
internal/runtime/atomic from internal/runtime/exithook+
internal/runtime/exithook from runtime
L internal/runtime/syscall from runtime+
internal/saferio from debug/pe+
internal/singleflight from net
internal/stringslite from embed+
internal/syscall/execenv from os+
LD internal/syscall/unix from crypto/rand+
W internal/syscall/windows from crypto/rand+
W internal/syscall/windows/registry from mime+
W internal/syscall/windows/sysdll from internal/syscall/windows+
internal/testlog from os
internal/unsafeheader from internal/reflectlite+
internal/weak from unique
io from archive/tar+
io/fs from archive/tar+
io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+
@@ -1103,6 +1163,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
log/internal from log+
log/slog from github.com/go-logr/logr+
log/slog/internal from log/slog
log/slog/internal/buffer from log/slog
maps from sigs.k8s.io/controller-runtime/pkg/predicate+
math from archive/tar+
math/big from crypto/dsa+
|
||||
@@ -1114,10 +1175,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
mime/quotedprintable from mime/multipart
|
||||
net from crypto/tls+
|
||||
net/http from expvar+
|
||||
net/http/httptest from tailscale.com/control/controlclient
|
||||
net/http/httptrace from github.com/prometheus-community/pro-bing+
|
||||
net/http/httputil from github.com/aws/smithy-go/transport/http+
|
||||
net/http/internal from net/http+
|
||||
net/http/internal/ascii from net/http+
|
||||
net/http/pprof from sigs.k8s.io/controller-runtime/pkg/manager+
|
||||
net/netip from github.com/gaissmai/bart+
|
||||
net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+
|
||||
@@ -1131,7 +1192,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
reflect from archive/tar+
|
||||
regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+
|
||||
regexp/syntax from regexp
|
||||
runtime from archive/tar+
|
||||
runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+
|
||||
runtime/internal/math from runtime
|
||||
runtime/internal/sys from runtime
|
||||
runtime/metrics from github.com/prometheus/client_golang/prometheus+
|
||||
runtime/pprof from net/http/pprof+
|
||||
runtime/trace from net/http/pprof
|
||||
@@ -1150,3 +1214,4 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
unicode/utf16 from crypto/x509+
|
||||
unicode/utf8 from bufio+
|
||||
unique from net/netip
|
||||
unsafe from bytes+
|
||||
|
||||
@@ -63,7 +63,10 @@ rules:
verbs: ["create","delete","deletecollection","get","list","patch","update","watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get","list","watch"]
verbs: ["get","list","watch", "update"]
- apiGroups: [""]
resources: ["pods/status"]
verbs: ["update"]
- apiGroups: ["apps"]
resources: ["statefulsets", "deployments"]
verbs: ["create","delete","deletecollection","get","list","patch","update","watch"]

@@ -4854,6 +4854,13 @@ rules:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- pods/status
verbs:
- update
- apiGroups:
- apps
resources:

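The new RBAC verbs line up with the Pod readiness logic added further down: the operator now writes a condition onto ProxyGroup Pods, and Pod conditions go through the status subresource, hence "update" on both pods and pods/status. A one-line sketch of the call that needs these permissions (taken from setPodReady below):

return er.Status().Update(ctx, pod) // writes pod.Status.Conditions; requires the pods/status update verb
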
@@ -20,7 +20,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
tsoperator "tailscale.com/k8s-operator"
"tailscale.com/kube/egressservices"
"tailscale.com/types/ptr"
)
@@ -71,25 +70,27 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
if err != nil {
return res, fmt.Errorf("error retrieving ExternalName Service: %w", err)
}
if !tsoperator.EgressServiceIsValidAndConfigured(svc) {
l.Infof("Cluster resources for ExternalName Service %s/%s are not yet configured", svc.Namespace, svc.Name)
return res, nil
}

// TODO(irbekrm): currently this reconcile loop runs all the checks every time it's triggered, which is
// wasteful. Once we have a Ready condition for ExternalName Services for ProxyGroup, use the condition to
// determine if a reconcile is needed.

oldEps := eps.DeepCopy()
proxyGroupName := eps.Labels[labelProxyGroup]
tailnetSvc := tailnetSvcName(svc)
l = l.With("tailnet-service-name", tailnetSvc)

// Retrieve the desired tailnet service configuration from the ConfigMap.
proxyGroupName := eps.Labels[labelProxyGroup]
_, cfgs, err := egressSvcsConfigs(ctx, er.Client, proxyGroupName, er.tsNamespace)
if err != nil {
return res, fmt.Errorf("error retrieving tailnet services configuration: %w", err)
}
if cfgs == nil {
// TODO(irbekrm): this path would be hit if egress service was once exposed on a ProxyGroup that later
// got deleted. Probably the EndpointSlices then need to be deleted too- need to rethink this flow.
l.Debugf("No egress config found, likely because ProxyGroup has not been created")
return res, nil
}
cfg, ok := (*cfgs)[tailnetSvc]
if !ok {
l.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc)

274 cmd/k8s-operator/egress-pod-readiness.go Normal file
@@ -0,0 +1,274 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
"context"
"errors"
"fmt"
"net/http"
"slices"
"strings"
"sync/atomic"
"time"

"go.uber.org/zap"
xslices "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/kubetypes"
"tailscale.com/logtail/backoff"
"tailscale.com/tstime"
"tailscale.com/util/httpm"
)

const tsEgressReadinessGate = "tailscale.com/egress-services"

// egressPodsReconciler is responsible for setting the tailscale.com/egress-services condition on egress ProxyGroup Pods.
// The condition is used as a readiness gate for the Pod, meaning that kubelet will not mark the Pod as ready before the
// condition is set. The ProxyGroup StatefulSet updates are rolled out in such a way that no Pod is restarted before
// the previous Pod is marked as ready, so ensuring that a Pod does not get marked as ready when it is not yet able to
// route traffic for an egress service prevents downtime during restarts that would otherwise occur when every Pod
// has been recreated but none has yet been added back to endpoints.
// https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate
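// For illustration (a sketch, not part of the generated manifests): a ProxyGroup Pod that
// participates in this flow carries the gate in its spec, and the reconciler below appends the
// matching condition to its status once routing has been verified:
//
//	spec:
//	  readinessGates:
//	    - conditionType: tailscale.com/egress-services
//	status:
//	  conditions:
//	    - type: tailscale.com/egress-services
//	      status: "True"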
type egressPodsReconciler struct {
client.Client
logger *zap.SugaredLogger
tsNamespace string
clock tstime.Clock
httpClient doer // http client that can be set to a mock client in tests
maxBackoff time.Duration // max backoff period between health check calls
}

// Reconcile reconciles egress ProxyGroup Pods on changes to those Pods and ProxyGroup EndpointSlices. It ensures
// that each Pod that is ready to route traffic to all egress services for the ProxyGroup has a
// tailscale.com/egress-services condition set, so that kubelet will mark the Pod as ready.
//
// For the Pod to be ready
// to route traffic to the egress service, the kube proxy needs to have set up the Pod's IP as an endpoint for the
// ClusterIP Service corresponding to the egress service.
//
// Note that the endpoints for the ClusterIP Service are configured by the operator itself using custom
// EndpointSlices (egress-eps-reconciler), so the routing is not blocked on the Pod's readiness.
//
// Each egress service has a corresponding ClusterIP Service that exposes all user-configured
// tailnet ports, as well as a health check port for the proxy.
//
// The reconciler calls the health check endpoint of each Service up to N times, where N is the number of
// replicas for the ProxyGroup x 3, and checks if the received response is a healthy response from the Pod being reconciled.
//
// The health check response contains a header with the
// Pod's IP address; this is used to determine whether the response was received from this Pod.
//
// If the Pod does not appear to be serving the health check endpoint (pre-v1.80 proxies), the reconciler just sets the
// readiness condition for backwards compatibility reasons.
func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
l := er.logger.With("Pod", req.NamespacedName)
l.Debugf("starting reconcile")
defer l.Debugf("reconcile finished")

pod := new(corev1.Pod)
err = er.Get(ctx, req.NamespacedName, pod)
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err)
}
if !pod.DeletionTimestamp.IsZero() {
l.Debugf("Pod is being deleted, do nothing")
return res, nil
}
if pod.Labels[LabelParentType] != proxyTypeProxyGroup {
l.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod")
return res, nil
}

// If the Pod does not have the readiness gate set, there is no need to add the readiness condition. In practice
// this will happen if the user has configured custom TS_LOCAL_ADDR_PORT, thus disabling the graceful failover.
if !slices.ContainsFunc(pod.Spec.ReadinessGates, func(r corev1.PodReadinessGate) bool {
return r.ConditionType == tsEgressReadinessGate
}) {
l.Debug("Pod does not have egress readiness gate set, skipping")
return res, nil
}

proxyGroupName := pod.Labels[LabelParentName]
pg := new(tsapi.ProxyGroup)
if err := er.Get(ctx, types.NamespacedName{Name: proxyGroupName}, pg); err != nil {
return res, fmt.Errorf("error getting ProxyGroup %q: %w", proxyGroupName, err)
}
if pg.Spec.Type != typeEgress {
l.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type)
return res, nil
}
// Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup.
lbls := map[string]string{
LabelManaged: "true",
labelProxyGroup: proxyGroupName,
labelSvcType: typeEgress,
}
svcs := &corev1.ServiceList{}
if err := er.List(ctx, svcs, client.InNamespace(er.tsNamespace), client.MatchingLabels(lbls)); err != nil {
return res, fmt.Errorf("error listing ClusterIP Services")
}

idx := xslices.IndexFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool {
return c.Type == tsEgressReadinessGate
})
if idx != -1 {
l.Debugf("Pod is already ready, do nothing")
return res, nil
}

var routesMissing atomic.Bool
errChan := make(chan error, len(svcs.Items))
for _, svc := range svcs.Items {
s := svc
go func() {
ll := l.With("service_name", s.Name)
d := retrieveClusterDomain(er.tsNamespace, ll)
healthCheckAddr := healthCheckForSvc(&s, d)
if healthCheckAddr == "" {
ll.Debugf("ClusterIP Service does not expose a health check endpoint, unable to verify if routing is set up")
errChan <- nil
return
}

var routesSetup bool
bo := backoff.NewBackoff(s.Name, ll.Infof, er.maxBackoff)
for range numCalls(pgReplicas(pg)) {
if ctx.Err() != nil {
errChan <- nil
return
}
state, err := er.lookupPodRouteViaSvc(ctx, pod, healthCheckAddr, ll)
if err != nil {
errChan <- fmt.Errorf("error validating if routing has been set up for Pod: %w", err)
return
}
if state == healthy || state == cannotVerify {
routesSetup = true
break
}
if state == unreachable || state == unhealthy || state == podNotReady {
bo.BackOff(ctx, errors.New("backoff"))
}
}
if !routesSetup {
ll.Debugf("Pod is not yet configured as Service endpoint")
routesMissing.Store(true)
}
errChan <- nil
}()
}
for range len(svcs.Items) {
e := <-errChan
err = errors.Join(err, e)
}
if err != nil {
return res, fmt.Errorf("error verifying conectivity: %w", err)
}
if rm := routesMissing.Load(); rm {
l.Info("Pod is not yet added as an endpoint for all egress targets, waiting...")
return reconcile.Result{RequeueAfter: shortRequeue}, nil
}
if err := er.setPodReady(ctx, pod, l); err != nil {
return res, fmt.Errorf("error setting Pod as ready: %w", err)
}
return res, nil
}

func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, l *zap.SugaredLogger) error {
if slices.ContainsFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool {
return c.Type == tsEgressReadinessGate
}) {
return nil
}
l.Infof("Pod is ready to route traffic to all egress targets")
pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{
Type: tsEgressReadinessGate,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: er.clock.Now()},
})
return er.Status().Update(ctx, pod)
}

// healthCheckState is the result of a single request to an egress Service health check endpoint with a goal to hit a
// specific backend Pod.
type healthCheckState int8

const (
cannotVerify healthCheckState = iota // not verifiable for this setup (i.e. earlier proxy version)
unreachable // no backends or another network error
notFound // hit another backend
unhealthy // not 200
podNotReady // Pod is not ready, i.e. does not have an IP address yet
healthy // 200
)

// lookupPodRouteViaSvc attempts to reach a Pod using a health check endpoint served by a Service and returns the state of the health check.
func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, l *zap.SugaredLogger) (healthCheckState, error) {
if !slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool {
return e.Name == "TS_ENABLE_HEALTH_CHECK" && e.Value == "true"
}) {
l.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service")
return cannotVerify, nil
}
wantsIP, err := podIPv4(pod)
if err != nil {
return -1, fmt.Errorf("error determining Pod's IP address: %w", err)
}
if wantsIP == "" {
return podNotReady, nil
}

ctx, cancel := context.WithTimeout(ctx, time.Second*3)
defer cancel()
req, err := http.NewRequestWithContext(ctx, httpm.GET, healthCheckAddr, nil)
if err != nil {
return -1, fmt.Errorf("error creating new HTTP request: %w", err)
}
// Do not re-use the same connection for the next request, so as to maximize the chance of hitting all backends equally.
req.Close = true
resp, err := er.httpClient.Do(req)
if err != nil {
// This is most likely because this is the first Pod and is not yet added to Service endpoints. Other
// error types are possible, but checking for those would likely make the system too fragile.
return unreachable, nil
}
defer resp.Body.Close()
gotIP := resp.Header.Get(kubetypes.PodIPv4Header)
if gotIP == "" {
l.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service")
return cannotVerify, nil
}
if !strings.EqualFold(wantsIP, gotIP) {
return notFound, nil
}
if resp.StatusCode != http.StatusOK {
return unhealthy, nil
}
return healthy, nil
}

// numCalls returns the number of times an endpoint on a ProxyGroup Service should be called until it can safely
// be assumed that, if none of the responses came back from a specific Pod, traffic for the Service is currently not
// being routed to that Pod. This assumes that traffic for the Service is routed via round robin, so
// InternalTrafficPolicy must be 'Cluster' and session affinity must be None.
func numCalls(replicas int32) int32 {
return replicas * 3
}
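// Note: kube-proxy's default iptables mode picks a backend per connection at random rather
// than in strict rotation, so 3 x replicas is best read as a probabilistic bound: for N ready
// backends, the chance that none of 3N independent calls lands on a given Pod is
// ((N-1)/N)^(3N), roughly e^-3 (about 5%). For the 3-replica ProxyGroup used in the tests
// below, numCalls(3) == 9 and (2/3)^9 is about 2.6%.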

// doer is an interface for an HTTP client that can be set to a mock client in tests.
type doer interface {
Do(*http.Request) (*http.Response, error)
}
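Outside of tests the doer indirection is satisfied by *http.Client; only the tests swap in a mock. A minimal compile-time check (a sketch; the operator's actual wiring is not shown in this diff):

var _ doer = (*http.Client)(nil) // *http.Client implements Do(*http.Request) (*http.Response, error)
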
525 cmd/k8s-operator/egress-pod-readiness_test.go Normal file
@@ -0,0 +1,525 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
"bytes"
"errors"
"fmt"
"io"
"log"
"net/http"
"sync"
"testing"
"time"

"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/kubetypes"
"tailscale.com/tstest"
"tailscale.com/types/ptr"
)

func TestEgressPodReadiness(t *testing.T) {
// We need to pass a Pod object to WithStatusSubresource because of some quirks in how the fake client
// works. Without this code we would not be able to update Pod's status further down.
fc := fake.NewClientBuilder().
WithScheme(tsapi.GlobalScheme).
WithStatusSubresource(&corev1.Pod{}).
Build()
zl, _ := zap.NewDevelopment()
cl := tstest.NewClock(tstest.ClockOpts{})
rec := &egressPodsReconciler{
tsNamespace: "operator-ns",
Client: fc,
logger: zl.Sugar(),
clock: cl,
}
pg := &tsapi.ProxyGroup{
ObjectMeta: metav1.ObjectMeta{
Name: "dev",
},
Spec: tsapi.ProxyGroupSpec{
Type: "egress",
Replicas: ptr.To(int32(3)),
},
}
mustCreate(t, fc, pg)
podIP := "10.0.0.2"
podTemplate := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "operator-ns",
Name: "pod",
Labels: map[string]string{
LabelParentType: "proxygroup",
LabelParentName: "dev",
},
},
Spec: corev1.PodSpec{
ReadinessGates: []corev1.PodReadinessGate{{
ConditionType: tsEgressReadinessGate,
}},
Containers: []corev1.Container{{
Name: "tailscale",
Env: []corev1.EnvVar{{
Name: "TS_ENABLE_HEALTH_CHECK",
Value: "true",
}},
}},
},
Status: corev1.PodStatus{
PodIPs: []corev1.PodIP{{IP: podIP}},
},
}

t.Run("no_egress_services", func(t *testing.T) {
pod := podTemplate.DeepCopy()
mustCreate(t, fc, pod)
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod)
})
t.Run("one_svc_already_routed_to", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9002)
mustCreateAll(t, fc, svc, pod)
resp := readyResps(podIP, 1)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{hep: resp},
}
rec.httpClient = &httpCl
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)

// A subsequent reconcile should not change the Pod.
expectReconciled(t, rec, "operator-ns", pod.Name)
expectEqual(t, fc, pod)

mustDeleteAll(t, fc, pod, svc)
})
t.Run("one_svc_many_backends_eventually_routed_to", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9002)
mustCreateAll(t, fc, svc, pod)
// For a 3 replica ProxyGroup the healthcheck endpoint should be called 9 times, make the 9th time only
// return with the right Pod IP.
resps := append(readyResps("10.0.0.3", 4), append(readyResps("10.0.0.4", 4), readyResps(podIP, 1)...)...)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{hep: resps},
}
rec.httpClient = &httpCl
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc)
})
t.Run("one_svc_one_backend_eventually_healthy", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9002)
mustCreateAll(t, fc, svc, pod)
// For a 3 replica ProxyGroup the healthcheck endpoint should be called 9 times, make the 9th time only
// return with 200 status code.
resps := append(unreadyResps(podIP, 8), readyResps(podIP, 1)...)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{hep: resps},
}
rec.httpClient = &httpCl
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc)
})
t.Run("one_svc_one_backend_never_routable", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9002)
mustCreateAll(t, fc, svc, pod)
// For a 3 replica ProxyGroup the healthcheck endpoint should be called 9 times and Pod should be
// requeued if none of those succeeds.
resps := readyResps("10.0.0.3", 9)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{hep: resps},
}
rec.httpClient = &httpCl
expectRequeue(t, rec, "operator-ns", pod.Name)

// Pod should not have readiness gate condition set.
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc)
})
t.Run("one_svc_many_backends_already_routable", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9002)
svc2, hep2 := newSvc("svc-2", 9002)
svc3, hep3 := newSvc("svc-3", 9002)
mustCreateAll(t, fc, svc, svc2, svc3, pod)
resps := readyResps(podIP, 1)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{
hep: resps,
hep2: resps,
hep3: resps,
},
}
rec.httpClient = &httpCl
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc, svc2, svc3)
})
t.Run("one_svc_many_backends_eventually_routable_and_healthy", func(t *testing.T) {
pod := podTemplate.DeepCopy()
svc, hep := newSvc("svc", 9002)
svc2, hep2 := newSvc("svc-2", 9002)
svc3, hep3 := newSvc("svc-3", 9002)
mustCreateAll(t, fc, svc, svc2, svc3, pod)
resps := append(readyResps("10.0.0.3", 7), readyResps(podIP, 1)...)
resps2 := append(readyResps("10.0.0.3", 5), readyResps(podIP, 1)...)
resps3 := append(unreadyResps(podIP, 4), readyResps(podIP, 1)...)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{
hep: resps,
hep2: resps2,
hep3: resps3,
},
}
rec.httpClient = &httpCl
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc, svc2, svc3)
})
t.Run("one_svc_many_backends_never_routable_and_healthy", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9002)
svc2, hep2 := newSvc("svc-2", 9002)
svc3, hep3 := newSvc("svc-3", 9002)
mustCreateAll(t, fc, svc, svc2, svc3, pod)
// For a ProxyGroup with 3 replicas, each Service's health endpoint will be tried 9 times and the Pod
// will be requeued if none succeeds.
resps := readyResps("10.0.0.3", 9)
resps2 := append(readyResps("10.0.0.3", 5), readyResps("10.0.0.4", 4)...)
resps3 := unreadyResps(podIP, 9)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{
hep: resps,
hep2: resps2,
hep3: resps3,
},
}
rec.httpClient = &httpCl
expectRequeue(t, rec, "operator-ns", pod.Name)

// Pod should not have readiness gate condition set.
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc, svc2, svc3)
})
t.Run("one_svc_many_backends_one_never_routable", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9002)
svc2, hep2 := newSvc("svc-2", 9002)
svc3, hep3 := newSvc("svc-3", 9002)
mustCreateAll(t, fc, svc, svc2, svc3, pod)
// For a ProxyGroup with 3 replicas, each Service's health endpoint will be tried 9 times and the Pod
// will be requeued if any one never succeeds.
resps := readyResps(podIP, 9)
resps2 := readyResps(podIP, 9)
resps3 := append(readyResps("10.0.0.3", 5), readyResps("10.0.0.4", 4)...)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{
hep: resps,
hep2: resps2,
hep3: resps3,
},
}
rec.httpClient = &httpCl
expectRequeue(t, rec, "operator-ns", pod.Name)

// Pod should not have readiness gate condition set.
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc, svc2, svc3)
})
t.Run("one_svc_many_backends_one_never_healthy", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9002)
svc2, hep2 := newSvc("svc-2", 9002)
svc3, hep3 := newSvc("svc-3", 9002)
mustCreateAll(t, fc, svc, svc2, svc3, pod)
// For a ProxyGroup with 3 replicas, each Service's health endpoint will be tried 9 times and the Pod
// will be requeued if any one never succeeds.
resps := readyResps(podIP, 9)
resps2 := unreadyResps(podIP, 9)
resps3 := readyResps(podIP, 9)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{
hep: resps,
hep2: resps2,
hep3: resps3,
},
}
rec.httpClient = &httpCl
expectRequeue(t, rec, "operator-ns", pod.Name)

// Pod should not have readiness gate condition set.
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc, svc2, svc3)
})
t.Run("one_svc_many_backends_different_ports_eventually_healthy_and_routable", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9003)
svc2, hep2 := newSvc("svc-2", 9004)
svc3, hep3 := newSvc("svc-3", 9010)
mustCreateAll(t, fc, svc, svc2, svc3, pod)
// For a ProxyGroup with 3 replicas, each Service's health endpoint will be tried up to 9 times and
// marked as success as soon as one try succeeds.
resps := append(readyResps("10.0.0.3", 7), readyResps(podIP, 1)...)
resps2 := append(readyResps("10.0.0.3", 5), readyResps(podIP, 1)...)
resps3 := append(unreadyResps(podIP, 4), readyResps(podIP, 1)...)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{
hep: resps,
hep2: resps2,
hep3: resps3,
},
}
rec.httpClient = &httpCl
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc, svc2, svc3)
})
// Proxies of 1.78 and earlier did not set the Pod IP header.
t.Run("pod_does_not_return_ip_header", func(t *testing.T) {
pod := podTemplate.DeepCopy()
pod.Name = "foo-bar"

svc, hep := newSvc("foo-bar", 9002)
mustCreateAll(t, fc, svc, pod)
// If a response does not contain Pod IP header, we assume that this is an earlier proxy version,
// readiness cannot be verified so the readiness gate is just set to true.
resps := unreadyResps("", 1)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{
hep: resps,
},
}
rec.httpClient = &httpCl
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc)
})
t.Run("one_svc_one_backend_eventually_healthy_and_routable", func(t *testing.T) {
pod := podTemplate.DeepCopy()

svc, hep := newSvc("svc", 9002)
mustCreateAll(t, fc, svc, pod)
// If a response errors, it is probably because the Pod is not yet properly running, so retry.
resps := append(erroredResps(8), readyResps(podIP, 1)...)
httpCl := fakeHTTPClient{
t: t,
state: map[string][]fakeResponse{
hep: resps,
},
}
rec.httpClient = &httpCl
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc)
})
t.Run("one_svc_one_backend_svc_does_not_have_health_port", func(t *testing.T) {
pod := podTemplate.DeepCopy()

// If a Service does not have health port set, we assume that it is not possible to determine Pod's
// readiness and set it to ready.
svc, _ := newSvc("svc", -1)
mustCreateAll(t, fc, svc, pod)
rec.httpClient = nil
expectReconciled(t, rec, "operator-ns", pod.Name)

// Pod should have readiness gate condition set.
podSetReady(pod, cl)
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc)
})
t.Run("error_setting_up_healthcheck", func(t *testing.T) {
pod := podTemplate.DeepCopy()
// This is not a realistic reason for error, but we are just testing the behaviour of a healthcheck
// lookup failing.
pod.Status.PodIPs = []corev1.PodIP{{IP: "not-an-ip"}}

svc, _ := newSvc("svc", 9002)
svc2, _ := newSvc("svc-2", 9002)
svc3, _ := newSvc("svc-3", 9002)
mustCreateAll(t, fc, svc, svc2, svc3, pod)
rec.httpClient = nil
expectError(t, rec, "operator-ns", pod.Name)

// Pod should not have readiness gate condition set.
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc, svc2, svc3)
})
t.Run("pod_does_not_have_an_ip_address", func(t *testing.T) {
pod := podTemplate.DeepCopy()
pod.Status.PodIPs = nil

svc, _ := newSvc("svc", 9002)
svc2, _ := newSvc("svc-2", 9002)
svc3, _ := newSvc("svc-3", 9002)
mustCreateAll(t, fc, svc, svc2, svc3, pod)
rec.httpClient = nil
expectRequeue(t, rec, "operator-ns", pod.Name)

// Pod should not have readiness gate condition set.
expectEqual(t, fc, pod)
mustDeleteAll(t, fc, pod, svc, svc2, svc3)
})
}

func readyResps(ip string, num int) (resps []fakeResponse) {
for range num {
resps = append(resps, fakeResponse{statusCode: 200, podIP: ip})
}
return resps
}

func unreadyResps(ip string, num int) (resps []fakeResponse) {
for range num {
resps = append(resps, fakeResponse{statusCode: 503, podIP: ip})
}
return resps
}

func erroredResps(num int) (resps []fakeResponse) {
for range num {
resps = append(resps, fakeResponse{err: errors.New("timeout")})
}
return resps
}

func newSvc(name string, port int32) (*corev1.Service, string) {
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Namespace: "operator-ns",
Name: name,
Labels: map[string]string{
LabelManaged: "true",
labelProxyGroup: "dev",
labelSvcType: typeEgress,
},
},
Spec: corev1.ServiceSpec{},
}
if port != -1 {
svc.Spec.Ports = []corev1.ServicePort{
{
Name: tsHealthCheckPortName,
Port: port,
TargetPort: intstr.FromInt(9002),
Protocol: "TCP",
},
}
}
return svc, fmt.Sprintf("http://%s.operator-ns.svc.cluster.local:%d/healthz", name, port)
}

func podSetReady(pod *corev1.Pod, cl *tstest.Clock) {
pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{
Type: tsEgressReadinessGate,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)},
})
}

// fakeHTTPClient is a mock HTTP client with a preset map of request URLs to list of responses. When it receives a
// request for a specific URL, it returns the preset response for that URL. It errors if an unexpected request is
// received.
type fakeHTTPClient struct {
t *testing.T
mu sync.Mutex // protects following
state map[string][]fakeResponse
}

func (f *fakeHTTPClient) Do(req *http.Request) (*http.Response, error) {
f.mu.Lock()
resps := f.state[req.URL.String()]
if len(resps) == 0 {
f.mu.Unlock()
log.Printf("\n\n\nURL %q\n\n\n", req.URL)
f.t.Fatalf("fakeHTTPClient received an unexpected request for %q", req.URL)
}
defer func() {
if len(resps) == 1 {
delete(f.state, req.URL.String())
f.mu.Unlock()
return
}
f.state[req.URL.String()] = f.state[req.URL.String()][1:]
f.mu.Unlock()
}()

resp := resps[0]
if resp.err != nil {
return nil, resp.err
}
r := http.Response{
StatusCode: resp.statusCode,
Header: make(http.Header),
Body: io.NopCloser(bytes.NewReader([]byte{})),
}
r.Header.Add(kubetypes.PodIPv4Header, resp.podIP)
return &r, nil
}

type fakeResponse struct {
err error
statusCode int
podIP string // for the Pod IP header
}
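Each subtest above is effectively a script of canned health-check responses keyed by endpoint URL. A condensed sketch using only helpers defined in this file (the URL follows the format newSvc returns):

httpCl := fakeHTTPClient{
	t: t,
	state: map[string][]fakeResponse{
		"http://svc.operator-ns.svc.cluster.local:9002/healthz": append(erroredResps(2), readyResps("10.0.0.2", 1)...),
	},
}
rec.httpClient = &httpCl // two transport errors, then a 200 carrying the target Pod's IP header
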
@@ -48,11 +48,12 @@ type egressSvcsReadinessReconciler struct {
// service to determine how many replicas are currently able to route traffic.
func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
l := esrr.logger.With("Service", req.NamespacedName)
defer l.Info("reconcile finished")
l.Debugf("starting reconcile")
defer l.Debugf("reconcile finished")

svc := new(corev1.Service)
if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) {
l.Info("Service not found")
l.Debugf("Service not found")
return res, nil
} else if err != nil {
return res, fmt.Errorf("failed to get Service: %w", err)
@@ -127,16 +128,16 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
return res, err
}
if pod == nil {
l.Infof("[unexpected] ProxyGroup is ready, but replica %d was not found", i)
l.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i)
reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
return res, nil
}
l.Infof("looking at Pod with IPs %v", pod.Status.PodIPs)
l.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs)
ready := false
for _, ep := range eps.Endpoints {
l.Infof("looking at endpoint with addresses %v", ep.Addresses)
l.Debugf("looking at endpoint with addresses %v", ep.Addresses)
if endpointReadyForPod(&ep, pod, l) {
l.Infof("endpoint is ready for Pod")
l.Debugf("endpoint is ready for Pod")
ready = true
break
}
@@ -165,7 +166,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool {
podIP, err := podIPv4(pod)
if err != nil {
l.Infof("[unexpected] error retrieving Pod's IPv4 address: %v", err)
l.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err)
return false
}
// Currently we only ever set a single address on an Endpoint and nothing else is meant to modify this.

@@ -59,6 +59,8 @@ const (
maxPorts = 1000

indexEgressProxyGroup = ".metadata.annotations.egress-proxy-group"

tsHealthCheckPortName = "tailscale-health-check"
)

var gaugeEgressServices = clientmetric.NewGauge(kubetypes.MetricEgressServiceCount)
@@ -229,15 +231,16 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
found := false
for _, wantsPM := range svc.Spec.Ports {
if wantsPM.Port == pm.Port && strings.EqualFold(string(wantsPM.Protocol), string(pm.Protocol)) {
// We don't use the port name to distinguish this port internally, but Kubernetes
// requires that, for Services with more than one port, each port is uniquely named.
// So we can always pick the port name from the ExternalName Service, as at this point we
// know that those are valid names because Kubernetes already validated them once. Note
// that users could have changed an unnamed port to a named port and might have changed
// port names; this should still work.
// We want to preserve the user-set port names for ease of debugging, but also
// ensure that we name all unnamed ports, as the ClusterIP Service that we create will
// always have at least two ports.
// https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
// See also https://github.com/tailscale/tailscale/issues/13406#issuecomment-2507230388
clusterIPSvc.Spec.Ports[i].Name = wantsPM.Name
if wantsPM.Name != "" {
clusterIPSvc.Spec.Ports[i].Name = wantsPM.Name
} else {
clusterIPSvc.Spec.Ports[i].Name = "tailscale-unnamed"
}
found = true
break
}
@@ -252,6 +255,12 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
// ClusterIP Service produce new target port and add a portmapping to
// the ClusterIP Service.
for _, wantsPM := range svc.Spec.Ports {
// Because we add a healthcheck port of our own, we will always have at least two ports. That
// means that we cannot have ports without a name set.
// https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
if wantsPM.Name == "" {
wantsPM.Name = "tailscale-unnamed"
}
found := false
for _, gotPM := range clusterIPSvc.Spec.Ports {
if wantsPM.Port == gotPM.Port && strings.EqualFold(string(wantsPM.Protocol), string(gotPM.Protocol)) {
@@ -278,6 +287,25 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
})
}
}
var healthCheckPort int32 = defaultLocalAddrPort

for {
if !slices.ContainsFunc(svc.Spec.Ports, func(p corev1.ServicePort) bool {
return p.Port == healthCheckPort
}) {
break
}
healthCheckPort++
if healthCheckPort > 10002 {
return nil, false, fmt.Errorf("unable to find a free port for internal health check in range [9002, 10002]")
}
}
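// Worked example (assuming defaultLocalAddrPort is 9002, matching the range in the error
// above): if the ExternalName Service already exposes ports 9002 and 9003, the loop settles
// on 9004 for the health check; provisioning only fails once every port up to 10002 is taken.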
clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{
Name: tsHealthCheckPortName,
Port: healthCheckPort,
TargetPort: intstr.FromInt(defaultLocalAddrPort),
Protocol: "TCP",
})
if !reflect.DeepEqual(clusterIPSvc, oldClusterIPSvc) {
if clusterIPSvc, err = createOrUpdate(ctx, esr.Client, esr.tsNamespace, clusterIPSvc, func(svc *corev1.Service) {
svc.Labels = clusterIPSvc.Labels
@@ -320,7 +348,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
}
tailnetSvc := tailnetSvcName(svc)
gotCfg := (*cfgs)[tailnetSvc]
wantsCfg := egressSvcCfg(svc, clusterIPSvc)
wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, l)
if !reflect.DeepEqual(gotCfg, wantsCfg) {
l.Debugf("updating egress services ConfigMap %s", cm.Name)
mak.Set(cfgs, tailnetSvc, wantsCfg)
@@ -504,10 +532,8 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s
return false, nil
}
if !tsoperator.ProxyGroupIsReady(pg) {
l.Infof("ProxyGroup %s is not ready, waiting...", proxyGroupName)
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l)
tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
return false, nil
}

l.Debugf("egress service is valid")
@@ -515,6 +541,24 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s
return true, nil
}

func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, l *zap.SugaredLogger) egressservices.Config {
d := retrieveClusterDomain(ns, l)
tt := tailnetTargetFromSvc(externalNameSvc)
hep := healthCheckForSvc(clusterIPSvc, d)
cfg := egressservices.Config{
TailnetTarget: tt,
HealthCheckEndpoint: hep,
}
for _, svcPort := range clusterIPSvc.Spec.Ports {
if svcPort.Name == tsHealthCheckPortName {
continue // exclude healthcheck from egress svcs configs
}
pm := portMap(svcPort)
mak.Set(&cfg.Ports, pm, struct{}{})
}
return cfg
}

func validateEgressService(svc *corev1.Service, pg *tsapi.ProxyGroup) []string {
violations := validateService(svc)

@@ -584,16 +628,6 @@ func tailnetTargetFromSvc(svc *corev1.Service) egressservices.TailnetTarget {
}
}

func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service) egressservices.Config {
tt := tailnetTargetFromSvc(externalNameSvc)
cfg := egressservices.Config{TailnetTarget: tt}
for _, svcPort := range clusterIPSvc.Spec.Ports {
pm := portMap(svcPort)
mak.Set(&cfg.Ports, pm, struct{}{})
}
return cfg
}

func portMap(p corev1.ServicePort) egressservices.PortMap {
// TODO (irbekrm): out of bounds check?
return egressservices.PortMap{Protocol: string(p.Protocol), MatchPort: uint16(p.TargetPort.IntVal), TargetPort: uint16(p.Port)}
@@ -618,7 +652,11 @@ func egressSvcsConfigs(ctx context.Context, cl client.Client, proxyGroupName, ts
Namespace: tsNamespace,
},
}
if err := cl.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil {
err = cl.Get(ctx, client.ObjectKeyFromObject(cm), cm)
if apierrors.IsNotFound(err) { // ProxyGroup resources have not been created (yet)
return nil, nil, nil
}
if err != nil {
return nil, nil, fmt.Errorf("error retrieving egress services ConfigMap %s: %v", name, err)
}
cfgs = &egressservices.Configs{}
@@ -740,3 +778,17 @@ func (esr *egressSvcsReconciler) updateSvcSpec(ctx context.Context, svc *corev1.
svc.Status = *st
return err
}

// healthCheckForSvc returns the URL of containerboot's health check endpoint served by this Service, or an empty string.
func healthCheckForSvc(svc *corev1.Service, clusterDomain string) string {
// This version of the operator always sets the health check port on the egress Services. However, it is possible
// that this reconcile loop runs during a proxy upgrade from a version that did not set the health check port
// and parses a Service that does not have the port set yet.
i := slices.IndexFunc(svc.Spec.Ports, func(port corev1.ServicePort) bool {
return port.Name == tsHealthCheckPortName
})
if i == -1 {
return ""
}
return fmt.Sprintf("http://%s.%s.svc.%s:%d/healthz", svc.Name, svc.Namespace, clusterDomain, svc.Spec.Ports[i].Port)
}

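For a ClusterIP Service named "svc" in namespace "operator-ns", a default "cluster.local" cluster domain, and health check port 9002, healthCheckForSvc yields the same endpoint the unit tests above preload into the fake HTTP client:

http://svc.operator-ns.svc.cluster.local:9002/healthz
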
@@ -18,6 +18,7 @@ import (
discoveryv1 "k8s.io/api/discovery/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
@@ -78,42 +79,16 @@ func TestTailscaleEgressServices(t *testing.T) {
Selector: nil,
Ports: []corev1.ServicePort{
{
Name: "http",
Protocol: "TCP",
Port: 80,
},
{
Name: "https",
Protocol: "TCP",
Port: 443,
},
},
},
}

t.Run("proxy_group_not_ready", func(t *testing.T) {
t.Run("service_one_unnamed_port", func(t *testing.T) {
mustCreate(t, fc, svc)
expectReconciled(t, esr, "default", "test")
// Service should have EgressSvcValid condition set to Unknown.
svc.Status.Conditions = []metav1.Condition{condition(tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, clock)}
expectEqual(t, fc, svc)
})

t.Run("proxy_group_ready", func(t *testing.T) {
mustUpdateStatus(t, fc, "", "foo", func(pg *tsapi.ProxyGroup) {
pg.Status.Conditions = []metav1.Condition{
condition(tsapi.ProxyGroupReady, metav1.ConditionTrue, "", "", clock),
}
})
expectReconciled(t, esr, "default", "test")
validateReadyService(t, fc, esr, svc, clock, zl, cm)
})
t.Run("service_retain_one_unnamed_port", func(t *testing.T) {
svc.Spec.Ports = []corev1.ServicePort{{Protocol: "TCP", Port: 80}}
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
s.Spec.Ports = svc.Spec.Ports
})
expectReconciled(t, esr, "default", "test")
validateReadyService(t, fc, esr, svc, clock, zl, cm)
})
t.Run("service_add_two_named_ports", func(t *testing.T) {
@@ -164,7 +139,7 @@ func validateReadyService(t *testing.T, fc client.WithWatch, esr *egressSvcsReco
// Verify that an EndpointSlice has been created.
expectEqual(t, fc, endpointSlice(name, svc, clusterSvc))
// Verify that ConfigMap contains configuration for the new egress service.
mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm)
mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm, zl)
r := svcConfiguredReason(svc, true, zl.Sugar())
// Verify that the user-created ExternalName Service has Configured set to true and ExternalName pointing to the
// ClusterIP Service.
@@ -203,6 +178,23 @@ func findGenNameForEgressSvcResources(t *testing.T, client client.Client, svc *c

func clusterIPSvc(name string, extNSvc *corev1.Service) *corev1.Service {
labels := egressSvcChildResourceLabels(extNSvc)
ports := make([]corev1.ServicePort, len(extNSvc.Spec.Ports))
for i, port := range extNSvc.Spec.Ports {
ports[i] = corev1.ServicePort{ // Copy the port to avoid modifying the original.
Name: port.Name,
Port: port.Port,
Protocol: port.Protocol,
}
if port.Name == "" {
ports[i].Name = "tailscale-unnamed"
}
}
ports = append(ports, corev1.ServicePort{
Name: "tailscale-health-check",
Port: 9002,
TargetPort: intstr.FromInt(9002),
Protocol: "TCP",
})
return &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -212,7 +204,7 @@ func clusterIPSvc(name string, extNSvc *corev1.Service) *corev1.Service {
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: extNSvc.Spec.Ports,
Ports: ports,
},
}
}
@@ -257,9 +249,9 @@ func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort {
return ports
}

func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap) {
func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, l *zap.Logger) {
t.Helper()
wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc)
wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, l.Sugar())
if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil {
t.Fatalf("Error retrieving ConfigMap: %v", err)
}

@@ -44,6 +44,8 @@ const (
|
||||
VIPSvcOwnerRef = "tailscale.com/k8s-operator:owned-by:%s"
|
||||
// FinalizerNamePG is the finalizer used by the IngressPGReconciler
|
||||
FinalizerNamePG = "tailscale.com/ingress-pg-finalizer"
|
||||
|
||||
indexIngressProxyGroup = ".metadata.annotations.ingress-proxy-group"
|
||||
)
|
||||
|
||||
var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount)
|
||||
@@ -180,7 +182,8 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin
|
||||
return fmt.Errorf("error determining DNS name base: %w", err)
|
||||
}
|
||||
dnsName := hostname + "." + tcd
|
||||
existingVIPSvc, err := a.tsClient.getVIPServiceByName(ctx, hostname)
|
||||
serviceName := tailcfg.ServiceName("svc:" + hostname)
|
||||
existingVIPSvc, err := a.tsClient.getVIPService(ctx, serviceName)
|
||||
// TODO(irbekrm): here and when creating the VIPService, verify if the error is not terminal (and therefore
|
||||
// should not be reconciled). For example, if the hostname is already a hostname of a Tailscale node, the GET
|
||||
// here will fail.
|
||||
@@ -222,7 +225,6 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin
},
},
}
serviceName := tailcfg.ServiceName("svc:" + hostname)
var gotCfg *ipn.ServiceConfig
if cfg != nil && cfg.Services != nil {
gotCfg = cfg.Services[serviceName]
@@ -247,7 +249,7 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin
}

vipSvc := &VIPService{
Name: hostname,
Name: serviceName,
Tags: tags,
Ports: []string{"443"}, // always 443 for Ingress
Comment: fmt.Sprintf(VIPSvcOwnerRef, ing.UID),
@@ -257,7 +259,7 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin
}
if existingVIPSvc == nil || !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) {
logger.Infof("Ensuring VIPService %q exists and is up to date", hostname)
if err := a.tsClient.createOrUpdateVIPServiceByName(ctx, vipSvc); err != nil {
if err := a.tsClient.createOrUpdateVIPService(ctx, vipSvc); err != nil {
logger.Infof("error creating VIPService: %v", err)
return fmt.Errorf("error creating VIPService: %w", err)
}
@@ -305,39 +307,39 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG
}
serveConfigChanged := false
// For each VIPService in serve config...
for vipHostname := range cfg.Services {
for vipServiceName := range cfg.Services {
// ...check if there is currently an Ingress with this hostname
found := false
for _, i := range ingList.Items {
ingressHostname := hostnameForIngress(&i)
if ingressHostname == vipHostname.WithoutPrefix() {
if ingressHostname == vipServiceName.WithoutPrefix() {
found = true
break
}
}

if !found {
logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipHostname)
svc, err := a.getVIPService(ctx, vipHostname.WithoutPrefix(), logger)
logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipServiceName)
svc, err := a.getVIPService(ctx, vipServiceName, logger)
if err != nil {
errResp := &tailscale.ErrResponse{}
if errors.As(err, &errResp) && errResp.Status == http.StatusNotFound {
delete(cfg.Services, vipHostname)
delete(cfg.Services, vipServiceName)
serveConfigChanged = true
continue
}
return err
}
if isVIPServiceForAnyIngress(svc) {
logger.Infof("cleaning up orphaned VIPService %q", vipHostname)
if err := a.tsClient.deleteVIPServiceByName(ctx, vipHostname.WithoutPrefix()); err != nil {
logger.Infof("cleaning up orphaned VIPService %q", vipServiceName)
if err := a.tsClient.deleteVIPService(ctx, vipServiceName); err != nil {
errResp := &tailscale.ErrResponse{}
if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound {
return fmt.Errorf("deleting VIPService %q: %w", vipHostname, err)
return fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err)
}
}
}
delete(cfg.Services, vipHostname)
delete(cfg.Services, vipServiceName)
serveConfigChanged = true
}
}
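The cleanup loop above treats a 404 from the VIPService API as "already gone" and only propagates other errors. A self-contained sketch of that pattern; the `ErrResponse` shape here mimics `tailscale.ErrResponse` from the hunks above, and the `isNotFound` helper name is hypothetical:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// ErrResponse mimics the shape of tailscale.ErrResponse used in the diff
// above (an assumption; only Status matters for this check).
type ErrResponse struct {
	Status  int
	Message string
}

func (e ErrResponse) Error() string {
	return fmt.Sprintf("%d: %s", e.Status, e.Message)
}

// isNotFound reports whether err wraps an ErrResponse with a 404 status,
// mirroring the "already deleted is fine" checks in maybeCleanupProxyGroup.
func isNotFound(err error) bool {
	var errResp ErrResponse
	return errors.As(err, &errResp) && errResp.Status == http.StatusNotFound
}

func main() {
	err := fmt.Errorf("deleting VIPService: %w", ErrResponse{Status: http.StatusNotFound, Message: "no such service"})
	fmt.Println(isNotFound(err)) // true: safe to treat the service as gone
}
```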
@@ -386,7 +388,7 @@ func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string,
logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname)

// 2. Delete the VIPService.
if err := a.deleteVIPServiceIfExists(ctx, hostname, ing, logger); err != nil {
if err := a.deleteVIPServiceIfExists(ctx, serviceName, ing, logger); err != nil {
return fmt.Errorf("error deleting VIPService: %w", err)
}

@@ -478,13 +480,13 @@ func (a *IngressPGReconciler) shouldExpose(ing *networkingv1.Ingress) bool {
return isTSIngress && pgAnnot != ""
}

func (a *IngressPGReconciler) getVIPService(ctx context.Context, hostname string, logger *zap.SugaredLogger) (*VIPService, error) {
svc, err := a.tsClient.getVIPServiceByName(ctx, hostname)
func (a *IngressPGReconciler) getVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (*VIPService, error) {
svc, err := a.tsClient.getVIPService(ctx, name)
if err != nil {
errResp := &tailscale.ErrResponse{}
if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound {
logger.Infof("error getting VIPService %q: %v", hostname, err)
return nil, fmt.Errorf("error getting VIPService %q: %w", hostname, err)
logger.Infof("error getting VIPService %q: %v", name, err)
return nil, fmt.Errorf("error getting VIPService %q: %w", name, err)
}
}
return svc, nil
@@ -550,7 +552,7 @@ func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsa
}

// deleteVIPServiceIfExists attempts to delete the VIPService if it exists and is owned by the given Ingress.
func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error {
func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name tailcfg.ServiceName, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error {
svc, err := a.getVIPService(ctx, name, logger)
if err != nil {
return fmt.Errorf("error getting VIPService: %w", err)
@@ -562,7 +564,7 @@ func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name
}

logger.Infof("Deleting VIPService %q", name)
if err = a.tsClient.deleteVIPServiceByName(ctx, name); err != nil {
if err = a.tsClient.deleteVIPService(ctx, name); err != nil {
return fmt.Errorf("error deleting VIPService: %w", err)
}
return nil

@@ -142,7 +142,7 @@ func TestIngressPGReconciler(t *testing.T) {
}

// Verify VIPService uses default tags
vipSvc, err := ft.getVIPServiceByName(context.Background(), "my-svc")
vipSvc, err := ft.getVIPService(context.Background(), "svc:my-svc")
if err != nil {
t.Fatalf("getting VIPService: %v", err)
}
@@ -161,7 +161,7 @@ func TestIngressPGReconciler(t *testing.T) {
expectReconciled(t, ingPGR, "default", "test-ingress")

// Verify VIPService uses custom tags
vipSvc, err = ft.getVIPServiceByName(context.Background(), "my-svc")
vipSvc, err = ft.getVIPService(context.Background(), "svc:my-svc")
if err != nil {
t.Fatalf("getting VIPService: %v", err)
}

@@ -9,6 +9,7 @@ package main

import (
"context"
"net/http"
"os"
"regexp"
"strconv"
@@ -38,12 +39,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/client/tailscale"
remoteclient "tailscale.com/client/tailscale"
"tailscale.com/hostinfo"
"tailscale.com/ipn"
"tailscale.com/ipn/store/kubestore"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/kubetypes"
"tailscale.com/localclient/tailscale"
"tailscale.com/tsnet"
"tailscale.com/tstime"
"tailscale.com/types/logger"
@@ -188,9 +190,9 @@ waitOnline:
if loginDone {
break
}
caps := tailscale.KeyCapabilities{
Devices: tailscale.KeyDeviceCapabilities{
Create: tailscale.KeyDeviceCreateCapabilities{
caps := remoteclient.KeyCapabilities{
Devices: remoteclient.KeyDeviceCapabilities{
Create: remoteclient.KeyDeviceCreateCapabilities{
Reusable: false,
Preauthorized: true,
Tags: strings.Split(operatorTags, ","),
@@ -334,11 +336,13 @@ func runReconcilers(opts reconcilerOpts) {
if err != nil {
startlog.Fatalf("could not get local client: %v", err)
}
ingressProxyGroupFilter := handler.EnqueueRequestsFromMapFunc(ingressesFromIngressProxyGroup(mgr.GetClient(), opts.log))
err = builder.
ControllerManagedBy(mgr).
For(&networkingv1.Ingress{}).
Named("ingress-pg-reconciler").
Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))).
Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter).
Complete(&IngressPGReconciler{
recorder: eventRecorder,
tsClient: opts.tsClient,
@@ -352,6 +356,9 @@ func runReconcilers(opts reconcilerOpts) {
if err != nil {
startlog.Fatalf("could not create ingress-pg-reconciler: %v", err)
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(networkingv1.Ingress), indexIngressProxyGroup, indexPGIngresses); err != nil {
startlog.Fatalf("failed setting up indexer for HA Ingresses: %v", err)
}

connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector"))
// If a ProxyClass changes, enqueue all Connectors that have
@@ -453,6 +460,24 @@ func runReconcilers(opts reconcilerOpts) {
startlog.Fatalf("could not create egress EndpointSlices reconciler: %v", err)
}

podsForEps := handler.EnqueueRequestsFromMapFunc(podsFromEgressEps(mgr.GetClient(), opts.log, opts.tailscaleNamespace))
podsER := handler.EnqueueRequestsFromMapFunc(egressPodsHandler)
err = builder.
ControllerManagedBy(mgr).
Named("egress-pods-readiness-reconciler").
Watches(&discoveryv1.EndpointSlice{}, podsForEps).
Watches(&corev1.Pod{}, podsER).
Complete(&egressPodsReconciler{
Client: mgr.GetClient(),
tsNamespace: opts.tailscaleNamespace,
clock: tstime.DefaultClock{},
logger: opts.log.Named("egress-pods-readiness-reconciler"),
httpClient: http.DefaultClient,
})
if err != nil {
startlog.Fatalf("could not create egress Pods readiness reconciler: %v", err)
}

// ProxyClass reconciler gets triggered on ServiceMonitor CRD changes to ensure that any ProxyClasses that
// define that a ServiceMonitor should be created, but were set to invalid because the CRD did not exist, get
// reconciled if the CRD is applied at a later point.
@@ -777,7 +802,7 @@ func proxyClassHandlerForConnector(cl client.Client, logger *zap.SugaredLogger)
}
}

// proxyClassHandlerForConnector returns a handler that, for a given ProxyClass,
// proxyClassHandlerForProxyGroup returns a handler that, for a given ProxyClass,
// returns a list of reconcile requests for all Connectors that have
// .spec.proxyClass set.
func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
@@ -906,6 +931,20 @@ func egressEpsHandler(_ context.Context, o client.Object) []reconcile.Request {
}
}

func egressPodsHandler(_ context.Context, o client.Object) []reconcile.Request {
if typ := o.GetLabels()[LabelParentType]; typ != proxyTypeProxyGroup {
return nil
}
return []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Namespace: o.GetNamespace(),
Name: o.GetName(),
},
},
}
}

// egressEpsFromPGPods returns a Pod event handler that checks if a Pod is a replica for a ProxyGroup and, if it is,
// returns reconciler requests for all egress EndpointSlices for that ProxyGroup.
func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc {
@@ -998,7 +1037,7 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile.
// egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all
// user-created ExternalName Services that should be exposed on this ProxyGroup.
func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
return func(_ context.Context, o client.Object) []reconcile.Request {
return func(ctx context.Context, o client.Object) []reconcile.Request {
pg, ok := o.(*tsapi.ProxyGroup)
if !ok {
logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup")
@@ -1008,7 +1047,7 @@ func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger)
return nil
}
svcList := &corev1.ServiceList{}
if err := cl.List(context.Background(), svcList, client.MatchingFields{indexEgressProxyGroup: pg.Name}); err != nil {
if err := cl.List(ctx, svcList, client.MatchingFields{indexEgressProxyGroup: pg.Name}); err != nil {
logger.Infof("error listing Services: %v, skipping a reconcile for event on ProxyGroup %s", err, pg.Name)
return nil
}
@@ -1025,10 +1064,40 @@ func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger)
}
}

// ingressesFromIngressProxyGroup is an event handler for ingress ProxyGroups. It returns reconcile requests for all
// user-created Ingresses that should be exposed on this ProxyGroup.
func ingressesFromIngressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
return func(ctx context.Context, o client.Object) []reconcile.Request {
pg, ok := o.(*tsapi.ProxyGroup)
if !ok {
logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup")
return nil
}
if pg.Spec.Type != tsapi.ProxyGroupTypeIngress {
return nil
}
ingList := &networkingv1.IngressList{}
if err := cl.List(ctx, ingList, client.MatchingFields{indexIngressProxyGroup: pg.Name}); err != nil {
logger.Infof("error listing Ingresses: %v, skipping a reconcile for event on ProxyGroup %s", err, pg.Name)
return nil
}
reqs := make([]reconcile.Request, 0)
for _, svc := range ingList.Items {
reqs = append(reqs, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: svc.Namespace,
Name: svc.Name,
},
})
}
return reqs
}
}

// epsFromExternalNameService is an event handler for ExternalName Services that define a Tailscale egress service that
// should be exposed on a ProxyGroup. It returns reconcile requests for EndpointSlices created for this Service.
func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc {
return func(_ context.Context, o client.Object) []reconcile.Request {
return func(ctx context.Context, o client.Object) []reconcile.Request {
svc, ok := o.(*corev1.Service)
if !ok {
logger.Infof("[unexpected] Service handler triggered for an object that is not a Service")
@@ -1038,7 +1107,7 @@ func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns
return nil
}
epsList := &discoveryv1.EndpointSliceList{}
if err := cl.List(context.Background(), epsList, client.InNamespace(ns),
if err := cl.List(ctx, epsList, client.InNamespace(ns),
client.MatchingLabels(egressSvcChildResourceLabels(svc))); err != nil {
logger.Infof("error listing EndpointSlices: %v, skipping a reconcile for event on Service %s", err, svc.Name)
return nil
@@ -1056,6 +1125,43 @@ func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns
}
}

func podsFromEgressEps(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc {
return func(ctx context.Context, o client.Object) []reconcile.Request {
eps, ok := o.(*discoveryv1.EndpointSlice)
if !ok {
logger.Infof("[unexpected] EndpointSlice handler triggered for an object that is not a EndpointSlice")
|
||||
return nil
}
if eps.Labels[labelProxyGroup] == "" {
return nil
}
if eps.Labels[labelSvcType] != "egress" {
return nil
}
podLabels := map[string]string{
LabelManaged: "true",
LabelParentType: "proxygroup",
LabelParentName: eps.Labels[labelProxyGroup],
}
podList := &corev1.PodList{}
if err := cl.List(ctx, podList, client.InNamespace(ns),
client.MatchingLabels(podLabels)); err != nil {
logger.Infof("error listing EndpointSlices: %v, skipping a reconcile for event on EndpointSlice %s", err, eps.Name)
|
||||
return nil
}
reqs := make([]reconcile.Request, 0)
for _, pod := range podList.Items {
reqs = append(reqs, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: pod.Namespace,
Name: pod.Name,
},
})
}
return reqs
}
}

// proxyClassesWithServiceMonitor returns an event handler that, given that the event is for the Prometheus
// ServiceMonitor CRD, returns all ProxyClasses that define that a ServiceMonitor should be created.
func proxyClassesWithServiceMonitor(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
@@ -1108,6 +1214,15 @@ func indexEgressServices(o client.Object) []string {
return []string{o.GetAnnotations()[AnnotationProxyGroup]}
}

// indexPGIngresses adds a local index to cached Tailscale Ingresses meant to be exposed on a ProxyGroup. The index is
// used as a list filter.
func indexPGIngresses(o client.Object) []string {
if !hasProxyGroupAnnotation(o) {
return nil
}
return []string{o.GetAnnotations()[AnnotationProxyGroup]}
}
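indexPGIngresses only returns a value for annotated Ingresses, so the index acts as a pre-filter for lookups by ProxyGroup name. A sketch of how the pieces shown in this diff fit together, reusing the IndexField registration and the MatchingFields list from the hunks above; the two wrapper function names here are hypothetical:

```go
import (
	"context"

	networkingv1 "k8s.io/api/networking/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// setupIngressIndex registers the annotation-backed index, as done in
// runReconcilers above. indexIngressProxyGroup and indexPGIngresses are
// the in-package names from this diff.
func setupIngressIndex(ctx context.Context, mgr manager.Manager) error {
	return mgr.GetFieldIndexer().IndexField(ctx, new(networkingv1.Ingress), indexIngressProxyGroup, indexPGIngresses)
}

// ingressesForProxyGroup lists only the Ingresses annotated with the given
// ProxyGroup name, the same query ingressesFromIngressProxyGroup issues.
func ingressesForProxyGroup(ctx context.Context, cl client.Client, pgName string) ([]networkingv1.Ingress, error) {
	ingList := &networkingv1.IngressList{}
	if err := cl.List(ctx, ingList, client.MatchingFields{indexIngressProxyGroup: pgName}); err != nil {
		return nil, err
	}
	return ingList.Items, nil
}
```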

// serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service
// associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation,
// the associated Ingress gets reconciled.

@@ -1339,71 +1339,6 @@ func TestProxyFirewallMode(t *testing.T) {
expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs)
}

func TestTailscaledConfigfileHash(t *testing.T) {
fc := fake.NewFakeClient()
ft := &fakeTSClient{}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
clock := tstest.NewClock(tstest.ClockOpts{})
sr := &ServiceReconciler{
Client: fc,
ssr: &tailscaleSTSReconciler{
Client: fc,
tsClient: ft,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
},
logger: zl.Sugar(),
clock: clock,
isDefaultLoadBalancer: true,
}

// Create a service that we should manage, and check that the initial round
// of objects looks right.
mustCreate(t, fc, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
// The apiserver is supposed to set the UID, but the fake client
// doesn't. So, set it explicitly because other code later depends
// on it being set.
UID: types.UID("1234-UID"),
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeLoadBalancer,
},
})

expectReconciled(t, sr, "default", "test")
expectReconciled(t, sr, "default", "test")

fullName, shortName := findGenName(t, fc, "default", "test", "svc")
o := configOpts{
stsName: shortName,
secretName: fullName,
namespace: "default",
parentType: "svc",
hostname: "default-test",
clusterTargetIP: "10.20.30.40",
confFileHash: "848bff4b5ba83ac999e6984c8464e597156daba961ae045e7dbaef606d54ab5e",
app: kubetypes.AppIngressProxy,
}
expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs)

// 2. Hostname gets changed, configfile is updated and a new hash value
// is produced.
mustUpdate(t, fc, "default", "test", func(svc *corev1.Service) {
mak.Set(&svc.Annotations, AnnotationHostname, "another-test")
})
o.hostname = "another-test"
o.confFileHash = "d4cc13f09f55f4f6775689004f9a466723325b84d2b590692796bfe22aeaa389"
expectReconciled(t, sr, "default", "test")
expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs)
}
func Test_isMagicDNSName(t *testing.T) {
tests := []struct {
in string

@@ -20,10 +20,10 @@ import (
"go.uber.org/zap"
"k8s.io/client-go/rest"
"k8s.io/client-go/transport"
"tailscale.com/client/tailscale"
"tailscale.com/client/tailscale/apitype"
ksr "tailscale.com/k8s-operator/sessionrecording"
"tailscale.com/kube/kubetypes"
"tailscale.com/localclient/tailscale"
"tailscale.com/localclient/tailscale/apitype"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
"tailscale.com/util/clientmetric"

@@ -13,7 +13,7 @@ import (

"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/localclient/tailscale/apitype"
"tailscale.com/tailcfg"
"tailscale.com/util/must"
)

@@ -32,6 +32,7 @@ import (
"tailscale.com/ipn"
tsoperator "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/egressservices"
"tailscale.com/kube/kubetypes"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
@@ -166,6 +167,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ
r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error())
return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error())
}
validateProxyClassForPG(logger, pg, proxyClass)
if !tsoperator.ProxyClassIsReady(proxyClass) {
message := fmt.Sprintf("the ProxyGroup's ProxyClass %s is not yet in a ready state, waiting...", proxyClassName)
logger.Info(message)
@@ -204,6 +206,31 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ
return setStatusReady(pg, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady)
}

// validateProxyClassForPG applies custom validation logic for ProxyClass applied to ProxyGroup.
func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) {
if pg.Spec.Type == tsapi.ProxyGroupTypeIngress {
return
}
// Our custom logic for ensuring minimum downtime ProxyGroup update rollouts relies on the local health check
// being accessible on the replica Pod IP:9002. This address can also be modified by users, via
// TS_LOCAL_ADDR_PORT env var.
//
// Currently TS_LOCAL_ADDR_PORT controls Pod's health check and metrics address. _Probably_ there is no need for
// users to set this to a custom value. Users who want to consume metrics should integrate with the metrics
// Service and/or ServiceMonitor, rather than Pods directly. The health check is likely not useful to integrate
// with directly for operator proxies (and we should aim for unified lifecycle logic in the operator; users
// shouldn't need to set their own).
//
// TODO(irbekrm): maybe disallow configuring this env var in future (in Tailscale 1.84 or later).
if hasLocalAddrPortSet(pc) {
msg := fmt.Sprintf("ProxyClass %s applied to an egress ProxyGroup has TS_LOCAL_ADDR_PORT env var set to a custom value. "+
"This will disable the ProxyGroup graceful failover mechanism, so you might experience downtime when ProxyGroup pods are restarted. "+
"In future we will remove the ability to set custom TS_LOCAL_ADDR_PORT for egress ProxyGroups. "+
"Please raise an issue if you expect that this will cause issues for your workflow.", pc.Name)
logger.Warn(msg)
}
}

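For reference, a ProxyClass that would trigger the warning above looks like the one constructed in the `egress_type_no_lifecycle_hook_when_local_addr_port_set` test later in this diff; a sketch only, with a hypothetical helper name:

```go
// exampleProxyClassWithLocalAddrPort (hypothetical helper) builds a
// ProxyClass that hasLocalAddrPortSet reports true for, so applying it to
// an egress ProxyGroup logs the warning above and skips the graceful
// failover pre-stop hook.
func exampleProxyClassWithLocalAddrPort() *tsapi.ProxyClass {
	return &tsapi.ProxyClass{
		ObjectMeta: metav1.ObjectMeta{Name: "test"},
		Spec: tsapi.ProxyClassSpec{
			StatefulSet: &tsapi.StatefulSet{
				Pod: &tsapi.Pod{
					TailscaleContainer: &tsapi.Container{
						Env: []tsapi.Env{{
							Name:  "TS_LOCAL_ADDR_PORT",
							Value: "127.0.0.1:8080",
						}},
					},
				},
			},
		},
	}
}
```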
func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) error {
logger := r.logger(pg.Name)
r.mu.Lock()
@@ -253,10 +280,11 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
return fmt.Errorf("error provisioning RoleBinding: %w", err)
}
if pg.Spec.Type == tsapi.ProxyGroupTypeEgress {
cm := pgEgressCM(pg, r.tsNamespace)
cm, hp := pgEgressCM(pg, r.tsNamespace)
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) {
existing.ObjectMeta.Labels = cm.ObjectMeta.Labels
existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences
mak.Set(&existing.BinaryData, egressservices.KeyHEPPings, hp)
}); err != nil {
return fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err)
}
@@ -270,7 +298,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
return fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err)
}
}
ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode)
ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, proxyClass)
if err != nil {
return fmt.Errorf("error generating StatefulSet spec: %w", err)
}

@@ -7,11 +7,14 @@ package main

import (
"fmt"
"slices"
"strconv"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/yaml"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/egressservices"
@@ -19,9 +22,12 @@ import (
"tailscale.com/types/ptr"
)

// deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies has enough time to terminate gracefully.
const deletionGracePeriodSeconds int64 = 360

// Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be
// applied over the top after.
func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string) (*appsv1.StatefulSet, error) {
func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) {
ss := new(appsv1.StatefulSet)
if err := yaml.Unmarshal(proxyYaml, &ss); err != nil {
return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err)
@@ -145,15 +151,25 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string
}

if pg.Spec.Type == tsapi.ProxyGroupTypeEgress {
envs = append(envs, corev1.EnvVar{
Name: "TS_EGRESS_SERVICES_CONFIG_PATH",
Value: fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices),
},
envs = append(envs,
// TODO(irbekrm): in 1.80 we deprecated TS_EGRESS_SERVICES_CONFIG_PATH in favour of
// TS_EGRESS_PROXIES_CONFIG_PATH. Remove it in 1.84.
corev1.EnvVar{
Name: "TS_EGRESS_SERVICES_CONFIG_PATH",
Value: fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices),
},
corev1.EnvVar{
Name: "TS_EGRESS_PROXIES_CONFIG_PATH",
Value: "/etc/proxies",
},
corev1.EnvVar{
Name: "TS_INTERNAL_APP",
Value: kubetypes.AppProxyGroupEgress,
},
)
corev1.EnvVar{
Name: "TS_ENABLE_HEALTH_CHECK",
Value: "true",
})
} else { // ingress
envs = append(envs, corev1.EnvVar{
Name: "TS_INTERNAL_APP",
@@ -167,6 +183,25 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string
return append(c.Env, envs...)
}()

// The pre-stop hook is used to ensure that a replica does not get terminated while cluster traffic for egress
// services is still being routed to it.
//
// This mechanism currently (2025-01-26) relies on the local health check being accessible on the Pod's
// IP, so it is not supported for ProxyGroups where users have configured TS_LOCAL_ADDR_PORT to a custom
// value.
if pg.Spec.Type == tsapi.ProxyGroupTypeEgress && !hasLocalAddrPortSet(proxyClass) {
c.Lifecycle = &corev1.Lifecycle{
PreStop: &corev1.LifecycleHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: kubetypes.EgessServicesPreshutdownEP,
Port: intstr.FromInt(defaultLocalAddrPort),
},
},
}
// Set the deletion grace period to 6 minutes to ensure that the pre-stop hook has enough time to terminate
// gracefully.
ss.Spec.Template.DeletionGracePeriodSeconds = ptr.To(deletionGracePeriodSeconds)
}
return ss, nil
}

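To see what the branch above actually renders into the StatefulSet template, here is a standalone sketch that marshals the same Lifecycle shape to YAML. The path string is an assumed stand-in for the value of kubetypes.EgessServicesPreshutdownEP (not shown in this diff), and 9002 is defaultLocalAddrPort from the constants hunk below:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"sigs.k8s.io/yaml"
)

// Prints the container lifecycle fragment that pgStatefulSet attaches to
// egress replicas, so the kubelet issues an HTTP GET to the proxy before
// termination and waits for it to succeed.
func main() {
	lc := corev1.Lifecycle{
		PreStop: &corev1.LifecycleHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/internal-egress-services-preshutdown", // assumed stand-in for kubetypes.EgessServicesPreshutdownEP
				Port: intstr.FromInt(9002),                    // defaultLocalAddrPort
			},
		},
	}
	out, err := yaml.Marshal(lc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```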
@@ -258,7 +293,9 @@ func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.S
return secrets
}

func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap {
func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) (*corev1.ConfigMap, []byte) {
hp := hepPings(pg)
hpBs := []byte(strconv.Itoa(hp))
return &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: pgEgressCMName(pg.Name),
@@ -266,8 +303,10 @@ func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap {
Labels: pgLabels(pg.Name, nil),
OwnerReferences: pgOwnerReference(pg),
},
}
BinaryData: map[string][]byte{egressservices.KeyHEPPings: hpBs},
}, hpBs
}

func pgIngressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap {
return &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -313,3 +352,23 @@ func pgReplicas(pg *tsapi.ProxyGroup) int32 {
func pgEgressCMName(pg string) string {
return fmt.Sprintf("%s-egress-config", pg)
}

// hasLocalAddrPortSet returns true if the proxyclass has the TS_LOCAL_ADDR_PORT env var set. For egress ProxyGroups,
// currently (2025-01-26) this means that the ProxyGroup does not support graceful failover.
func hasLocalAddrPortSet(proxyClass *tsapi.ProxyClass) bool {
if proxyClass == nil || proxyClass.Spec.StatefulSet == nil || proxyClass.Spec.StatefulSet.Pod == nil || proxyClass.Spec.StatefulSet.Pod.TailscaleContainer == nil {
return false
}
return slices.ContainsFunc(proxyClass.Spec.StatefulSet.Pod.TailscaleContainer.Env, func(env tsapi.Env) bool {
return env.Name == envVarTSLocalAddrPort
})
}

// hepPings returns the number of times a health check endpoint exposed by a Service fronting ProxyGroup replicas should
// be pinged to ensure that all currently configured backend replicas are hit.
func hepPings(pg *tsapi.ProxyGroup) int {
rc := pgReplicas(pg)
// Assuming a Service implemented using round robin load balancing, pinging the endpoint once per replica should be
// enough, but in practice we cannot assume that the requests will be load balanced perfectly.
return int(rc) * 3
}
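A quick worked example of the 3x heuristic, as a self-contained sketch (replica counts are illustrative; the tests above exercise a two-replica default):

```go
package main

import "fmt"

// hepPings mirrors the heuristic above: ping the health check endpoint
// three times per replica, since round-robin load balancing across
// replicas is not guaranteed to be perfect.
func hepPings(replicas int32) int {
	return int(replicas) * 3
}

func main() {
	for _, rc := range []int32{1, 2, 3} {
		fmt.Printf("%d replica(s) -> %d pings\n", rc, hepPings(rc)) // 3, 6, 9
	}
}
```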

@@ -19,13 +19,13 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"tailscale.com/client/tailscale"
tsoperator "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/egressservices"
"tailscale.com/kube/kubetypes"
"tailscale.com/tstest"
"tailscale.com/types/ptr"
@@ -97,7 +97,7 @@ func TestProxyGroup(t *testing.T) {

tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar())
expectEqual(t, fc, pg)
expectProxyGroupResources(t, fc, pg, false, "")
expectProxyGroupResources(t, fc, pg, false, "", pc)
})

t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) {
@@ -118,11 +118,11 @@ func TestProxyGroup(t *testing.T) {

tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar())
expectEqual(t, fc, pg)
expectProxyGroupResources(t, fc, pg, true, "")
expectProxyGroupResources(t, fc, pg, true, "", pc)
if expected := 1; reconciler.egressProxyGroups.Len() != expected {
t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len())
}
expectProxyGroupResources(t, fc, pg, true, "")
expectProxyGroupResources(t, fc, pg, true, "", pc)
keyReq := tailscale.KeyCapabilities{
Devices: tailscale.KeyDeviceCapabilities{
Create: tailscale.KeyDeviceCreateCapabilities{
@@ -154,7 +154,7 @@ func TestProxyGroup(t *testing.T) {
}
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar())
expectEqual(t, fc, pg)
expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc)
})

t.Run("scale_up_to_3", func(t *testing.T) {
@@ -165,7 +165,7 @@ func TestProxyGroup(t *testing.T) {
expectReconciled(t, reconciler, "", pg.Name)
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar())
expectEqual(t, fc, pg)
expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc)

addNodeIDToStateSecrets(t, fc, pg)
expectReconciled(t, reconciler, "", pg.Name)
@@ -175,7 +175,7 @@ func TestProxyGroup(t *testing.T) {
TailnetIPs: []string{"1.2.3.4", "::1"},
})
expectEqual(t, fc, pg)
expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc)
})

t.Run("scale_down_to_1", func(t *testing.T) {
@@ -188,7 +188,7 @@ func TestProxyGroup(t *testing.T) {

pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device.
expectEqual(t, fc, pg)
expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc)
})

t.Run("trigger_config_change_and_observe_new_config_hash", func(t *testing.T) {
@@ -202,7 +202,7 @@ func TestProxyGroup(t *testing.T) {
expectReconciled(t, reconciler, "", pg.Name)

expectEqual(t, fc, pg)
expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74")
expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74", pc)
})

t.Run("enable_metrics", func(t *testing.T) {
@@ -246,12 +246,29 @@ func TestProxyGroup(t *testing.T) {
// The fake client does not clean up objects whose owner has been
// deleted, so we can't test for the owned resources getting deleted.
})

}

func TestProxyGroupTypes(t *testing.T) {
pc := &tsapi.ProxyClass{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Generation: 1,
},
Spec: tsapi.ProxyClassSpec{},
}
fc := fake.NewClientBuilder().
WithScheme(tsapi.GlobalScheme).
WithObjects(pc).
WithStatusSubresource(pc).
Build()
mustUpdateStatus(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) {
p.Status.Conditions = []metav1.Condition{{
Type: string(tsapi.ProxyClassReady),
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
}}
})

zl, _ := zap.NewDevelopment()
reconciler := &ProxyGroupReconciler{
@@ -274,9 +291,7 @@ func TestProxyGroupTypes(t *testing.T) {
Replicas: ptr.To[int32](0),
},
}
if err := fc.Create(context.Background(), pg); err != nil {
t.Fatal(err)
}
mustCreate(t, fc, pg)

expectReconciled(t, reconciler, "", pg.Name)
verifyProxyGroupCounts(t, reconciler, 0, 1)
@@ -286,7 +301,8 @@ func TestProxyGroupTypes(t *testing.T) {
t.Fatalf("failed to get StatefulSet: %v", err)
}
verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupEgress)
verifyEnvVar(t, sts, "TS_EGRESS_SERVICES_CONFIG_PATH", fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices))
verifyEnvVar(t, sts, "TS_EGRESS_PROXIES_CONFIG_PATH", "/etc/proxies")
verifyEnvVar(t, sts, "TS_ENABLE_HEALTH_CHECK", "true")

// Verify that egress configuration has been set up.
cm := &corev1.ConfigMap{}
@@ -323,6 +339,57 @@ func TestProxyGroupTypes(t *testing.T) {
if diff := cmp.Diff(expectedVolumeMounts, sts.Spec.Template.Spec.Containers[0].VolumeMounts); diff != "" {
t.Errorf("unexpected volume mounts (-want +got):\n%s", diff)
}

expectedLifecycle := corev1.Lifecycle{
PreStop: &corev1.LifecycleHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: kubetypes.EgessServicesPreshutdownEP,
Port: intstr.FromInt(defaultLocalAddrPort),
},
},
}
if diff := cmp.Diff(expectedLifecycle, *sts.Spec.Template.Spec.Containers[0].Lifecycle); diff != "" {
t.Errorf("unexpected lifecycle (-want +got):\n%s", diff)
}
if *sts.Spec.Template.DeletionGracePeriodSeconds != deletionGracePeriodSeconds {
t.Errorf("unexpected deletion grace period seconds %d, want %d", *sts.Spec.Template.DeletionGracePeriodSeconds, deletionGracePeriodSeconds)
}
})
t.Run("egress_type_no_lifecycle_hook_when_local_addr_port_set", func(t *testing.T) {
pg := &tsapi.ProxyGroup{
ObjectMeta: metav1.ObjectMeta{
Name: "test-egress-no-lifecycle",
UID: "test-egress-no-lifecycle-uid",
},
Spec: tsapi.ProxyGroupSpec{
Type: tsapi.ProxyGroupTypeEgress,
Replicas: ptr.To[int32](0),
ProxyClass: "test",
},
}
mustCreate(t, fc, pg)
mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) {
p.Spec.StatefulSet = &tsapi.StatefulSet{
Pod: &tsapi.Pod{
TailscaleContainer: &tsapi.Container{
Env: []tsapi.Env{{
Name: "TS_LOCAL_ADDR_PORT",
Value: "127.0.0.1:8080",
}},
},
},
}
})
expectReconciled(t, reconciler, "", pg.Name)

sts := &appsv1.StatefulSet{}
if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil {
t.Fatalf("failed to get StatefulSet: %v", err)
}

if sts.Spec.Template.Spec.Containers[0].Lifecycle != nil {
t.Error("lifecycle hook was set when TS_LOCAL_ADDR_PORT was configured via ProxyClass")
}
})

t.Run("ingress_type", func(t *testing.T) {
@@ -341,7 +408,7 @@ func TestProxyGroupTypes(t *testing.T) {
}

expectReconciled(t, reconciler, "", pg.Name)
verifyProxyGroupCounts(t, reconciler, 1, 1)
verifyProxyGroupCounts(t, reconciler, 1, 2)

sts := &appsv1.StatefulSet{}
if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil {
@@ -402,13 +469,13 @@ func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, expectedValue str
t.Errorf("%s environment variable not found", name)
}

func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string) {
func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string, proxyClass *tsapi.ProxyClass) {
t.Helper()

role := pgRole(pg, tsNamespace)
roleBinding := pgRoleBinding(pg, tsNamespace)
serviceAccount := pgServiceAccount(pg, tsNamespace)
statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto")
statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", proxyClass)
if err != nil {
t.Fatal(err)
}

@@ -101,6 +101,9 @@ const (
proxyTypeIngressResource = "ingress_resource"
proxyTypeConnector = "connector"
proxyTypeProxyGroup = "proxygroup"

envVarTSLocalAddrPort = "TS_LOCAL_ADDR_PORT"
defaultLocalAddrPort = 9002 // metrics and health check port
)

var (
@@ -694,7 +697,7 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
// being created, there is no need for a restart.
// TODO(irbekrm): remove this in 1.84.
hash := tsConfigHash
if dev != nil && dev.capver >= 110 {
if dev == nil || dev.capver >= 110 {
hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash]
}
s.Spec = ss.Spec

@@ -32,6 +32,7 @@ import (
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
"tailscale.com/util/mak"
)
@@ -583,6 +584,21 @@ func mustCreate(t *testing.T, client client.Client, obj client.Object) {
t.Fatalf("creating %q: %v", obj.GetName(), err)
}
}
func mustCreateAll(t *testing.T, client client.Client, objs ...client.Object) {
t.Helper()
for _, obj := range objs {
mustCreate(t, client, obj)
}
}

func mustDeleteAll(t *testing.T, client client.Client, objs ...client.Object) {
t.Helper()
for _, obj := range objs {
if err := client.Delete(context.Background(), obj); err != nil {
t.Fatalf("deleting %q: %v", obj.GetName(), err)
}
}
}

func mustUpdate[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string, update func(O)) {
t.Helper()
@@ -706,6 +722,19 @@ func expectRequeue(t *testing.T, sr reconcile.Reconciler, ns, name string) {
t.Fatalf("expected timed requeue, got success")
}
}
func expectError(t *testing.T, sr reconcile.Reconciler, ns, name string) {
t.Helper()
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: ns,
},
}
_, err := sr.Reconcile(context.Background(), req)
if err == nil {
t.Error("Reconcile: expected error but did not get one")
}
}

// expectEvents accepts a test recorder and a list of events, tests that expected
// events are sent down the recorder's channel. Waits for 5s for each event.
@@ -739,7 +768,7 @@ type fakeTSClient struct {
sync.Mutex
keyRequests []tailscale.KeyCapabilities
deleted []string
vipServices map[string]*VIPService
vipServices map[tailcfg.ServiceName]*VIPService
}
type fakeTSNetServer struct {
certDomains []string
@@ -846,7 +875,7 @@ func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) {
}
}

func (c *fakeTSClient) getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) {
func (c *fakeTSClient) getVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) {
c.Lock()
defer c.Unlock()
if c.vipServices == nil {
@@ -859,17 +888,17 @@ func (c *fakeTSClient) getVIPServiceByName(ctx context.Context, name string) (*V
return svc, nil
}

func (c *fakeTSClient) createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error {
func (c *fakeTSClient) createOrUpdateVIPService(ctx context.Context, svc *VIPService) error {
c.Lock()
defer c.Unlock()
if c.vipServices == nil {
c.vipServices = make(map[string]*VIPService)
c.vipServices = make(map[tailcfg.ServiceName]*VIPService)
}
c.vipServices[svc.Name] = svc
return nil
}

func (c *fakeTSClient) deleteVIPServiceByName(ctx context.Context, name string) error {
func (c *fakeTSClient) deleteVIPService(ctx context.Context, name tailcfg.ServiceName) error {
c.Lock()
defer c.Unlock()
if c.vipServices != nil {

@@ -17,6 +17,7 @@ import (

"golang.org/x/oauth2/clientcredentials"
"tailscale.com/client/tailscale"
"tailscale.com/tailcfg"
"tailscale.com/util/httpm"
)

@@ -56,9 +57,9 @@ type tsClient interface {
CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error)
Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error)
DeleteDevice(ctx context.Context, nodeStableID string) error
getVIPServiceByName(ctx context.Context, name string) (*VIPService, error)
createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error
deleteVIPServiceByName(ctx context.Context, name string) error
getVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error)
createOrUpdateVIPService(ctx context.Context, svc *VIPService) error
deleteVIPService(ctx context.Context, name tailcfg.ServiceName) error
}

type tsClientImpl struct {
@@ -69,9 +70,8 @@ type tsClientImpl struct {

// VIPService is a Tailscale VIPService with Tailscale API JSON representation.
type VIPService struct {
// Name is the leftmost label of the DNS name of the VIP service.
// Name is required.
Name string `json:"name,omitempty"`
// Name is a VIPService name in the form "svc:<leftmost-label-of-service-DNS-name>".
Name tailcfg.ServiceName `json:"name,omitempty"`
// Addrs are the IP addresses of the VIP Service. There are two addresses:
// the first is IPv4 and the second is IPv6.
// When creating a new VIP Service, the IP addresses are optional: if no
@@ -89,8 +89,8 @@ type VIPService struct {
}

// GetVIPServiceByName retrieves a VIPService by its name. It returns 404 if the VIPService is not found.
func (c *tsClientImpl) getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(name))
func (c *tsClientImpl) getVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", c.baseURL, c.tailnet, url.PathEscape(name.String()))
req, err := http.NewRequestWithContext(ctx, httpm.GET, path, nil)
if err != nil {
return nil, fmt.Errorf("error creating new HTTP request: %w", err)
@@ -111,16 +111,16 @@ func (c *tsClientImpl) getVIPServiceByName(ctx context.Context, name string) (*V
return svc, nil
}

// CreateOrUpdateVIPServiceByName creates or updates a VIPService by its name. Caller must ensure that, if the
// createOrUpdateVIPService creates or updates a VIPService by its name. Caller must ensure that, if the
// VIPService already exists, the VIPService is fetched first to ensure that any auto-allocated IP addresses are not
// lost during the update. If the VIPService was created without any IP addresses explicitly set (so that they were
// auto-allocated by Tailscale) any subsequent request to this function that does not set any IP addresses will error.
func (c *tsClientImpl) createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error {
func (c *tsClientImpl) createOrUpdateVIPService(ctx context.Context, svc *VIPService) error {
data, err := json.Marshal(svc)
if err != nil {
return err
}
path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(svc.Name))
path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", c.baseURL, c.tailnet, url.PathEscape(svc.Name.String()))
req, err := http.NewRequestWithContext(ctx, httpm.PUT, path, bytes.NewBuffer(data))
if err != nil {
return fmt.Errorf("error creating new HTTP request: %w", err)
@@ -139,8 +139,8 @@ func (c *tsClientImpl) createOrUpdateVIPServiceByName(ctx context.Context, svc *

// DeleteVIPServiceByName deletes a VIPService by its name. It returns an error if the VIPService
// does not exist or if the deletion fails.
func (c *tsClientImpl) deleteVIPServiceByName(ctx context.Context, name string) error {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(name))
func (c *tsClientImpl) deleteVIPService(ctx context.Context, name tailcfg.ServiceName) error {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", c.baseURL, c.tailnet, url.PathEscape(name.String()))
req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil)
if err != nil {
return fmt.Errorf("error creating new HTTP request: %w", err)

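All three client methods now share one URL shape built from the typed name, with the `svc:` prefix included in the escaped path segment. A standalone sketch of that construction (the base URL and tailnet are placeholders; the real methods are shown above):

```go
package main

import (
	"fmt"
	"net/url"
)

// vipServicePath mirrors the URL construction used by getVIPService,
// createOrUpdateVIPService and deleteVIPService above.
func vipServicePath(baseURL, tailnet, name string) string {
	return fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/%s", baseURL, tailnet, url.PathEscape(name))
}

func main() {
	// ':' is a valid path-segment character, so the prefix survives escaping:
	// https://api.tailscale.com/api/v2/tailnet/example.com/vip-services/svc:my-svc
	fmt.Println(vipServicePath("https://api.tailscale.com", "example.com", "svc:my-svc"))
}
```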
@@ -10,6 +10,7 @@ import (
"context"
"encoding/binary"
"errors"
"expvar"
"flag"
"fmt"
"log"
@@ -26,10 +27,12 @@ import (
"github.com/inetaf/tcpproxy"
"github.com/peterbourgon/ff/v3"
"golang.org/x/net/dns/dnsmessage"
"tailscale.com/client/tailscale"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"tailscale.com/envknob"
"tailscale.com/hostinfo"
"tailscale.com/ipn"
"tailscale.com/localclient/tailscale"
"tailscale.com/net/netutil"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
@@ -37,6 +40,7 @@ import (
"tailscale.com/tsweb"
"tailscale.com/util/dnsname"
"tailscale.com/util/mak"
"tailscale.com/wgengine/netstack"
)

func main() {
@@ -112,6 +116,7 @@ func main() {
ts.Port = uint16(*wgPort)
}
defer ts.Close()

if *verboseTSNet {
ts.Logf = log.Printf
}
@@ -129,6 +134,36 @@ func main() {
log.Fatalf("debug serve: %v", http.Serve(dln, mux))
}()
}

if err := ts.Start(); err != nil {
log.Fatalf("ts.Start: %v", err)
}
// TODO(raggi): this is not a public interface or guarantee.
ns := ts.Sys().Netstack.Get().(*netstack.Impl)
tcpRXBufOpt := tcpip.TCPReceiveBufferSizeRangeOption{
Min: tcp.MinBufferSize,
Default: tcp.DefaultReceiveBufferSize,
Max: tcp.MaxBufferSize,
}
if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRXBufOpt); err != nil {
log.Fatalf("could not set TCP RX buf size: %v", err)
}
tcpTXBufOpt := tcpip.TCPSendBufferSizeRangeOption{
Min: tcp.MinBufferSize,
Default: tcp.DefaultSendBufferSize,
Max: tcp.MaxBufferSize,
}
if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpTXBufOpt); err != nil {
log.Fatalf("could not set TCP TX buf size: %v", err)
}
mslOpt := tcpip.TCPTimeWaitTimeoutOption(5 * time.Second)
if err := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &mslOpt); err != nil {
log.Fatalf("could not set TCP MSL: %v", err)
}
if *debugPort != 0 {
expvar.Publish("netstack", ns.ExpVar())
}

lc, err := ts.LocalClient()
if err != nil {
log.Fatalf("LocalClient() failed: %v", err)

@@ -21,7 +21,7 @@ import (
"strings"

"github.com/coreos/go-systemd/activation"
"tailscale.com/client/tailscale"
"tailscale.com/localclient/tailscale"
)

var (

@@ -24,7 +24,7 @@ import (
"strings"
"time"

"tailscale.com/client/tailscale"
"tailscale.com/localclient/tailscale"
"tailscale.com/metrics"
"tailscale.com/tsnet"
"tailscale.com/tsweb"

@@ -36,7 +36,7 @@ import (
"strings"
"time"

"tailscale.com/client/tailscale"
"tailscale.com/localclient/tailscale"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
)

@@ -22,9 +22,9 @@ import (
"strings"

"github.com/peterbourgon/ff/v3"
"tailscale.com/client/tailscale"
"tailscale.com/hostinfo"
"tailscale.com/ipn"
"tailscale.com/localclient/tailscale"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
"tailscale.com/tsweb"
@@ -157,10 +157,8 @@ func run(ctx context.Context, ts *tsnet.Server, wgPort int, hostname string, pro

// NetMap contains app-connector configuration
if nm := msg.NetMap; nm != nil && nm.SelfNode.Valid() {
sn := nm.SelfNode.AsStruct()

var c appctype.AppConnectorConfig
nmConf, err := tailcfg.UnmarshalNodeCapJSON[appctype.AppConnectorConfig](sn.CapMap, configCapKey)
nmConf, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.AppConnectorConfig](nm.SelfNode.CapMap(), configCapKey)
if err != nil {
log.Printf("failed to read app connector configuration from coordination server: %v", err)
} else if len(nmConf) > 0 {

@@ -6,6 +6,9 @@
// highlight the unique parts of the Tailscale SSH server so SSH
// client authors can hit it easily and fix their SSH clients without
// needing to set up Tailscale and Tailscale SSH.
//
// Connections are allowed using any username except for "denyme". Connecting as
// "denyme" will result in an authentication failure with error message.
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -16,6 +19,7 @@ import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -24,7 +28,7 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
gossh "github.com/tailscale/golang-x-crypto/ssh"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
"tailscale.com/tempfork/gliderlabs/ssh"
|
||||
)
|
||||
|
||||
@@ -62,13 +66,21 @@ func main() {
|
||||
Handler: handleSessionPostSSHAuth,
|
||||
ServerConfigCallback: func(ctx ssh.Context) *gossh.ServerConfig {
|
||||
start := time.Now()
|
||||
var spac gossh.ServerPreAuthConn
|
||||
return &gossh.ServerConfig{
|
||||
NextAuthMethodCallback: func(conn gossh.ConnMetadata, prevErrors []error) []string {
|
||||
return []string{"tailscale"}
|
||||
PreAuthConnCallback: func(conn gossh.ServerPreAuthConn) {
|
||||
spac = conn
|
||||
},
|
||||
NoClientAuth: true, // required for the NoClientAuthCallback to run
|
||||
NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) {
|
||||
cm.SendAuthBanner(fmt.Sprintf("# Banner: doing none auth at %v\r\n", time.Since(start)))
|
||||
spac.SendAuthBanner(fmt.Sprintf("# Banner: doing none auth at %v\r\n", time.Since(start)))
|
||||
|
||||
if cm.User() == "denyme" {
|
||||
return nil, &gossh.BannerError{
|
||||
Err: errors.New("denying access"),
|
||||
Message: "denyme is not allowed to access this machine\n",
|
||||
}
|
||||
}
|
||||
|
||||
totalBanners := 2
|
||||
if cm.User() == "banners" {
|
||||
@@ -77,9 +89,9 @@ func main() {
|
||||
for banner := 2; banner <= totalBanners; banner++ {
|
||||
time.Sleep(time.Second)
|
||||
if banner == totalBanners {
|
||||
cm.SendAuthBanner(fmt.Sprintf("# Banner%d: access granted at %v\r\n", banner, time.Since(start)))
|
||||
spac.SendAuthBanner(fmt.Sprintf("# Banner%d: access granted at %v\r\n", banner, time.Since(start)))
|
||||
} else {
|
||||
cm.SendAuthBanner(fmt.Sprintf("# Banner%d at %v\r\n", banner, time.Since(start)))
|
||||
spac.SendAuthBanner(fmt.Sprintf("# Banner%d at %v\r\n", banner, time.Since(start)))
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
|
||||
@@ -89,6 +89,8 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+
golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+
golang.org/x/crypto/hkdf from crypto/tls+
golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+
golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+
golang.org/x/crypto/nacl/box from tailscale.com/types/key
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
@@ -123,6 +125,18 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
crypto/ed25519 from crypto/tls+
crypto/elliptic from crypto/ecdsa+
crypto/hmac from crypto/tls+
crypto/internal/alias from crypto/aes+
crypto/internal/bigmod from crypto/ecdsa+
crypto/internal/boring from crypto/aes+
crypto/internal/boring/bbig from crypto/ecdsa+
crypto/internal/boring/sig from crypto/internal/boring
crypto/internal/edwards25519 from crypto/ed25519
crypto/internal/edwards25519/field from crypto/ecdh+
crypto/internal/hpke from crypto/tls
crypto/internal/mlkem768 from crypto/tls
crypto/internal/nistec from crypto/ecdh+
crypto/internal/nistec/fiat from crypto/internal/nistec
crypto/internal/randutil from crypto/dsa+
crypto/md5 from crypto/tls+
crypto/rand from crypto/ed25519+
crypto/rc4 from crypto/tls
@@ -133,6 +147,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
crypto/subtle from crypto/aes+
crypto/tls from net/http+
crypto/x509 from crypto/tls
D crypto/x509/internal/macos from crypto/x509
crypto/x509/pkix from crypto/x509
embed from crypto/internal/nistec+
encoding from encoding/json+
@@ -153,6 +168,44 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
hash/fnv from google.golang.org/protobuf/internal/detrand
hash/maphash from go4.org/mem
html from net/http/pprof+
internal/abi from crypto/x509/internal/macos+
internal/asan from syscall
internal/bisect from internal/godebug
internal/bytealg from bytes+
internal/byteorder from crypto/aes+
internal/chacha8rand from math/rand/v2+
internal/concurrent from unique
internal/coverage/rtcov from runtime
internal/cpu from crypto/aes+
internal/filepathlite from os+
internal/fmtsort from fmt
internal/goarch from crypto/aes+
internal/godebug from crypto/tls+
internal/godebugs from internal/godebug+
internal/goexperiment from runtime
internal/goos from crypto/x509+
internal/itoa from internal/poll+
internal/msan from syscall
internal/nettrace from net+
internal/oserror from io/fs+
internal/poll from net+
internal/profile from net/http/pprof
internal/profilerecord from runtime+
internal/race from internal/poll+
internal/reflectlite from context+
internal/runtime/atomic from internal/runtime/exithook+
internal/runtime/exithook from runtime
L internal/runtime/syscall from runtime+
internal/singleflight from net
internal/stringslite from embed+
internal/syscall/execenv from os
LD internal/syscall/unix from crypto/rand+
W internal/syscall/windows from crypto/rand+
W internal/syscall/windows/registry from mime+
W internal/syscall/windows/sysdll from internal/syscall/windows+
internal/testlog from os
internal/unsafeheader from internal/reflectlite+
internal/weak from unique
io from bufio+
io/fs from crypto/x509+
iter from maps+
@@ -171,6 +224,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
net/http from expvar+
net/http/httptrace from net/http
net/http/internal from net/http
net/http/internal/ascii from net/http
net/http/pprof from tailscale.com/tsweb
net/netip from go4.org/netipx+
net/textproto from golang.org/x/net/http/httpguts+
@@ -182,7 +236,10 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
reflect from crypto/x509+
regexp from github.com/prometheus/client_golang/prometheus/internal+
regexp/syntax from regexp
runtime from crypto/internal/nistec+
runtime/debug from github.com/prometheus/client_golang/prometheus+
runtime/internal/math from runtime
runtime/internal/sys from runtime
runtime/metrics from github.com/prometheus/client_golang/prometheus+
runtime/pprof from net/http/pprof
runtime/trace from net/http/pprof
@@ -199,3 +256,4 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
unicode/utf16 from crypto/x509+
unicode/utf8 from bufio+
unique from net/netip
unsafe from bytes+

@@ -10,7 +10,7 @@ import (
	"fmt"

	"github.com/peterbourgon/ff/v3/ffcli"
	"tailscale.com/client/tailscale"
	"tailscale.com/localclient/tailscale"
)

var bugReportCmd = &ffcli.Command{

@@ -21,9 +21,9 @@ import (
	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
	"github.com/peterbourgon/ff/v3/ffcli"
	"tailscale.com/client/tailscale"
	"tailscale.com/cmd/tailscale/cli/ffcomplete"
	"tailscale.com/envknob"
	"tailscale.com/localclient/tailscale"
	"tailscale.com/paths"
	"tailscale.com/util/slicesx"
	"tailscale.com/version/distro"
@@ -212,7 +212,7 @@ change in the future.
		exitNodeCmd(),
		updateCmd,
		whoisCmd,
		debugCmd,
		debugCmd(),
		driveCmd,
		idTokenCmd,
		advertiseCmd(),

@@ -25,10 +25,12 @@ import (
	"tailscale.com/tailcfg"
	"tailscale.com/tka"
	"tailscale.com/tstest"
	"tailscale.com/tstest/deptest"
	"tailscale.com/types/logger"
	"tailscale.com/types/opt"
	"tailscale.com/types/persist"
	"tailscale.com/types/preftype"
	"tailscale.com/util/set"
	"tailscale.com/version/distro"
)

@@ -1568,3 +1570,31 @@ func TestDocs(t *testing.T) {
	}
	walk(t, root)
}

func TestDeps(t *testing.T) {
	deptest.DepChecker{
		GOOS:   "linux",
		GOARCH: "arm64",
		WantDeps: set.Of(
			"tailscale.com/feature/capture/dissector", // want the Lua by default
		),
		BadDeps: map[string]string{
			"tailscale.com/feature/capture": "don't link capture code",
			"tailscale.com/net/packet":      "why we passing packets in the CLI?",
			"tailscale.com/net/flowtrack":   "why we tracking flows in the CLI?",
		},
	}.Check(t)
}

func TestDepsNoCapture(t *testing.T) {
	deptest.DepChecker{
		GOOS:   "linux",
		GOARCH: "arm64",
		Tags:   "ts_omit_capture",
		BadDeps: map[string]string{
			"tailscale.com/feature/capture":           "don't link capture code",
			"tailscale.com/feature/capture/dissector": "don't like the Lua",
		},
	}.Check(t)

}

80
cmd/tailscale/cli/debug-capture.go
Normal file
@@ -0,0 +1,80 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !ios && !ts_omit_capture

package cli

import (
	"context"
	"flag"
	"fmt"
	"io"
	"os"
	"os/exec"

	"github.com/peterbourgon/ff/v3/ffcli"
	"tailscale.com/feature/capture/dissector"
)

func init() {
	debugCaptureCmd = mkDebugCaptureCmd
}

func mkDebugCaptureCmd() *ffcli.Command {
	return &ffcli.Command{
		Name:       "capture",
		ShortUsage: "tailscale debug capture",
		Exec:       runCapture,
		ShortHelp:  "Stream pcaps for debugging",
		FlagSet: (func() *flag.FlagSet {
			fs := newFlagSet("capture")
			fs.StringVar(&captureArgs.outFile, "o", "", "path to stream the pcap (or - for stdout), leave empty to start wireshark")
			return fs
		})(),
	}
}

var captureArgs struct {
	outFile string
}

func runCapture(ctx context.Context, args []string) error {
	stream, err := localClient.StreamDebugCapture(ctx)
	if err != nil {
		return err
	}
	defer stream.Close()

	switch captureArgs.outFile {
	case "-":
		fmt.Fprintln(Stderr, "Press Ctrl-C to stop the capture.")
		_, err = io.Copy(os.Stdout, stream)
		return err
	case "":
		lua, err := os.CreateTemp("", "ts-dissector")
		if err != nil {
			return err
		}
		defer os.Remove(lua.Name())
		io.WriteString(lua, dissector.Lua)
		if err := lua.Close(); err != nil {
			return err
		}

		wireshark := exec.CommandContext(ctx, "wireshark", "-X", "lua_script:"+lua.Name(), "-k", "-i", "-")
		wireshark.Stdin = stream
		wireshark.Stdout = os.Stdout
		wireshark.Stderr = os.Stderr
		return wireshark.Run()
	}

	f, err := os.OpenFile(captureArgs.outFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	fmt.Fprintln(Stderr, "Press Ctrl-C to stop the capture.")
	_, err = io.Copy(f, stream)
	return err
}
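As the new file above shows, `tailscale debug capture -o capture.pcap` streams the pcap to a file, `-o -` streams it to stdout, and with no `-o` the command writes the bundled Lua dissector to a temp file and launches Wireshark reading the capture from stdin (`-i -`).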
@@ -20,7 +20,6 @@ import (
	"net/netip"
	"net/url"
	"os"
	"os/exec"
	"runtime"
	"runtime/debug"
	"strconv"
@@ -30,12 +29,12 @@ import (
	"github.com/peterbourgon/ff/v3/ffcli"
	"golang.org/x/net/http/httpproxy"
	"golang.org/x/net/http2"
	"tailscale.com/client/tailscale"
	"tailscale.com/client/tailscale/apitype"
	"tailscale.com/control/controlhttp"
	"tailscale.com/hostinfo"
	"tailscale.com/internal/noiseconn"
	"tailscale.com/ipn"
	"tailscale.com/localclient/tailscale"
	"tailscale.com/localclient/tailscale/apitype"
	"tailscale.com/net/netmon"
	"tailscale.com/net/tsaddr"
	"tailscale.com/net/tshttpproxy"
@@ -45,307 +44,302 @@ import (
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/util/must"
	"tailscale.com/wgengine/capture"
)

var debugCmd = &ffcli.Command{
	Name:       "debug",
	Exec:       runDebug,
	ShortUsage: "tailscale debug <debug-flags | subcommand>",
	ShortHelp:  "Debug commands",
	LongHelp:   hidden + `"tailscale debug" contains misc debug facilities; it is not a stable interface.`,
	FlagSet: (func() *flag.FlagSet {
		fs := newFlagSet("debug")
		fs.StringVar(&debugArgs.file, "file", "", "get, delete:NAME, or NAME")
		fs.StringVar(&debugArgs.cpuFile, "cpu-profile", "", "if non-empty, grab a CPU profile for --profile-seconds seconds and write it to this file; - for stdout")
		fs.StringVar(&debugArgs.memFile, "mem-profile", "", "if non-empty, grab a memory profile and write it to this file; - for stdout")
		fs.IntVar(&debugArgs.cpuSec, "profile-seconds", 15, "number of seconds to run a CPU profile for, when --cpu-profile is non-empty")
		return fs
	})(),
	Subcommands: []*ffcli.Command{
		{
			Name:       "derp-map",
			ShortUsage: "tailscale debug derp-map",
			Exec:       runDERPMap,
			ShortHelp:  "Print DERP map",
		},
		{
			Name:       "component-logs",
			ShortUsage: "tailscale debug component-logs [" + strings.Join(ipn.DebuggableComponents, "|") + "]",
			Exec:       runDebugComponentLogs,
			ShortHelp:  "Enable/disable debug logs for a component",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("component-logs")
				fs.DurationVar(&debugComponentLogsArgs.forDur, "for", time.Hour, "how long to enable debug logs for; zero or negative means to disable")
				return fs
			})(),
		},
		{
			Name:       "daemon-goroutines",
			ShortUsage: "tailscale debug daemon-goroutines",
			Exec:       runDaemonGoroutines,
			ShortHelp:  "Print tailscaled's goroutines",
		},
		{
			Name:       "daemon-logs",
			ShortUsage: "tailscale debug daemon-logs",
			Exec:       runDaemonLogs,
			ShortHelp:  "Watch tailscaled's server logs",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("daemon-logs")
				fs.IntVar(&daemonLogsArgs.verbose, "verbose", 0, "verbosity level")
				fs.BoolVar(&daemonLogsArgs.time, "time", false, "include client time")
				return fs
			})(),
		},
		{
			Name:       "metrics",
			ShortUsage: "tailscale debug metrics",
			Exec:       runDaemonMetrics,
			ShortHelp:  "Print tailscaled's metrics",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("metrics")
				fs.BoolVar(&metricsArgs.watch, "watch", false, "print JSON dump of delta values")
				return fs
			})(),
		},
		{
			Name:       "env",
			ShortUsage: "tailscale debug env",
			Exec:       runEnv,
			ShortHelp:  "Print cmd/tailscale environment",
		},
		{
			Name:       "stat",
			ShortUsage: "tailscale debug stat <files...>",
			Exec:       runStat,
			ShortHelp:  "Stat a file",
		},
		{
			Name:       "hostinfo",
			ShortUsage: "tailscale debug hostinfo",
			Exec:       runHostinfo,
			ShortHelp:  "Print hostinfo",
		},
		{
			Name:       "local-creds",
			ShortUsage: "tailscale debug local-creds",
			Exec:       runLocalCreds,
			ShortHelp:  "Print how to access Tailscale LocalAPI",
		},
		{
			Name:       "restun",
			ShortUsage: "tailscale debug restun",
			Exec:       localAPIAction("restun"),
			ShortHelp:  "Force a magicsock restun",
		},
		{
			Name:       "rebind",
			ShortUsage: "tailscale debug rebind",
			Exec:       localAPIAction("rebind"),
			ShortHelp:  "Force a magicsock rebind",
		},
		{
			Name:       "derp-set-on-demand",
			ShortUsage: "tailscale debug derp-set-on-demand",
			Exec:       localAPIAction("derp-set-homeless"),
			ShortHelp:  "Enable DERP on-demand mode (breaks reachability)",
		},
		{
			Name:       "derp-unset-on-demand",
			ShortUsage: "tailscale debug derp-unset-on-demand",
			Exec:       localAPIAction("derp-unset-homeless"),
			ShortHelp:  "Disable DERP on-demand mode",
		},
		{
			Name:       "break-tcp-conns",
			ShortUsage: "tailscale debug break-tcp-conns",
			Exec:       localAPIAction("break-tcp-conns"),
			ShortHelp:  "Break any open TCP connections from the daemon",
		},
		{
			Name:       "break-derp-conns",
			ShortUsage: "tailscale debug break-derp-conns",
			Exec:       localAPIAction("break-derp-conns"),
			ShortHelp:  "Break any open DERP connections from the daemon",
		},
		{
			Name:       "pick-new-derp",
			ShortUsage: "tailscale debug pick-new-derp",
			Exec:       localAPIAction("pick-new-derp"),
			ShortHelp:  "Switch to some other random DERP home region for a short time",
		},
		{
			Name:       "force-prefer-derp",
			ShortUsage: "tailscale debug force-prefer-derp",
			Exec:       forcePreferDERP,
			ShortHelp:  "Prefer the given region ID if reachable (until restart, or 0 to clear)",
		},
		{
			Name:       "force-netmap-update",
			ShortUsage: "tailscale debug force-netmap-update",
			Exec:       localAPIAction("force-netmap-update"),
			ShortHelp:  "Force a full no-op netmap update (for load testing)",
		},
		{
			// TODO(bradfitz,maisem): eventually promote this out of debug
			Name:       "reload-config",
			ShortUsage: "tailscale debug reload-config",
			Exec:       reloadConfig,
			ShortHelp:  "Reload config",
		},
		{
			Name:       "control-knobs",
			ShortUsage: "tailscale debug control-knobs",
			Exec:       debugControlKnobs,
			ShortHelp:  "See current control knobs",
		},
		{
			Name:       "prefs",
			ShortUsage: "tailscale debug prefs",
			Exec:       runPrefs,
			ShortHelp:  "Print prefs",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("prefs")
				fs.BoolVar(&prefsArgs.pretty, "pretty", false, "If true, pretty-print output")
				return fs
			})(),
		},
		{
			Name:       "watch-ipn",
			ShortUsage: "tailscale debug watch-ipn",
			Exec:       runWatchIPN,
			ShortHelp:  "Subscribe to IPN message bus",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("watch-ipn")
				fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages")
				fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status")
				fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags")
				fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap")
				fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever")
				return fs
			})(),
		},
		{
			Name:       "netmap",
			ShortUsage: "tailscale debug netmap",
			Exec:       runNetmap,
			ShortHelp:  "Print the current network map",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("netmap")
				fs.BoolVar(&netmapArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap")
				return fs
			})(),
		},
		{
			Name: "via",
			ShortUsage: "tailscale debug via <site-id> <v4-cidr>\n" +
				"tailscale debug via <v6-route>",
			Exec:      runVia,
			ShortHelp: "Convert between site-specific IPv4 CIDRs and IPv6 'via' routes",
		},
		{
			Name:       "ts2021",
			ShortUsage: "tailscale debug ts2021",
			Exec:       runTS2021,
			ShortHelp:  "Debug ts2021 protocol connectivity",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("ts2021")
				fs.StringVar(&ts2021Args.host, "host", "controlplane.tailscale.com", "hostname of control plane")
				fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol version")
				fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose")
				return fs
			})(),
		},
		{
			Name:       "set-expire",
			ShortUsage: "tailscale debug set-expire --in=1m",
			Exec:       runSetExpire,
			ShortHelp:  "Manipulate node key expiry for testing",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("set-expire")
				fs.DurationVar(&setExpireArgs.in, "in", 0, "if non-zero, set node key to expire this duration from now")
				return fs
			})(),
		},
		{
			Name:       "dev-store-set",
			ShortUsage: "tailscale debug dev-store-set",
			Exec:       runDevStoreSet,
			ShortHelp:  "Set a key/value pair during development",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("store-set")
				fs.BoolVar(&devStoreSetArgs.danger, "danger", false, "accept danger")
				return fs
			})(),
		},
		{
			Name:       "derp",
			ShortUsage: "tailscale debug derp",
			Exec:       runDebugDERP,
			ShortHelp:  "Test a DERP configuration",
		},
		{
			Name:       "capture",
			ShortUsage: "tailscale debug capture",
			Exec:       runCapture,
			ShortHelp:  "Stream pcaps for debugging",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("capture")
				fs.StringVar(&captureArgs.outFile, "o", "", "path to stream the pcap (or - for stdout), leave empty to start wireshark")
				return fs
			})(),
		},
		{
			Name:       "portmap",
			ShortUsage: "tailscale debug portmap",
			Exec:       debugPortmap,
			ShortHelp:  "Run portmap debugging",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("portmap")
				fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping")
				fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`)
				fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`)
				fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`)
				fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`)
				return fs
			})(),
		},
		{
			Name:       "peer-endpoint-changes",
			ShortUsage: "tailscale debug peer-endpoint-changes <hostname-or-IP>",
			Exec:       runPeerEndpointChanges,
			ShortHelp:  "Print debug information about a peer's endpoint changes",
		},
		{
			Name:       "dial-types",
			ShortUsage: "tailscale debug dial-types <hostname-or-IP> <port>",
			Exec:       runDebugDialTypes,
			ShortHelp:  "Print debug information about connecting to a given host or IP",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("dial-types")
				fs.StringVar(&debugDialTypesArgs.network, "network", "tcp", `network type to dial ("tcp", "udp", etc.)`)
				return fs
			})(),
		},
		{
			Name:       "resolve",
			ShortUsage: "tailscale debug resolve <hostname>",
			Exec:       runDebugResolve,
			ShortHelp:  "Does a DNS lookup",
			FlagSet: (func() *flag.FlagSet {
				fs := newFlagSet("resolve")
				fs.StringVar(&resolveArgs.net, "net", "ip", "network type to resolve (ip, ip4, ip6)")
				return fs
			})(),
		},
		{
			Name:       "go-buildinfo",
			ShortUsage: "tailscale debug go-buildinfo",
			ShortHelp:  "Print Go's runtime/debug.BuildInfo",
			Exec:       runGoBuildInfo,
		},
	},
var (
	debugCaptureCmd func() *ffcli.Command // or nil
)

func debugCmd() *ffcli.Command {
	return &ffcli.Command{
		Name:       "debug",
		Exec:       runDebug,
		ShortUsage: "tailscale debug <debug-flags | subcommand>",
		ShortHelp:  "Debug commands",
		LongHelp:   hidden + `"tailscale debug" contains misc debug facilities; it is not a stable interface.`,
		FlagSet: (func() *flag.FlagSet {
			fs := newFlagSet("debug")
			fs.StringVar(&debugArgs.file, "file", "", "get, delete:NAME, or NAME")
			fs.StringVar(&debugArgs.cpuFile, "cpu-profile", "", "if non-empty, grab a CPU profile for --profile-seconds seconds and write it to this file; - for stdout")
			fs.StringVar(&debugArgs.memFile, "mem-profile", "", "if non-empty, grab a memory profile and write it to this file; - for stdout")
			fs.IntVar(&debugArgs.cpuSec, "profile-seconds", 15, "number of seconds to run a CPU profile for, when --cpu-profile is non-empty")
			return fs
		})(),
		Subcommands: nonNilCmds([]*ffcli.Command{
			{
				Name:       "derp-map",
				ShortUsage: "tailscale debug derp-map",
				Exec:       runDERPMap,
				ShortHelp:  "Print DERP map",
			},
			{
				Name:       "component-logs",
				ShortUsage: "tailscale debug component-logs [" + strings.Join(ipn.DebuggableComponents, "|") + "]",
				Exec:       runDebugComponentLogs,
				ShortHelp:  "Enable/disable debug logs for a component",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("component-logs")
					fs.DurationVar(&debugComponentLogsArgs.forDur, "for", time.Hour, "how long to enable debug logs for; zero or negative means to disable")
					return fs
				})(),
			},
			{
				Name:       "daemon-goroutines",
				ShortUsage: "tailscale debug daemon-goroutines",
				Exec:       runDaemonGoroutines,
				ShortHelp:  "Print tailscaled's goroutines",
			},
			{
				Name:       "daemon-logs",
				ShortUsage: "tailscale debug daemon-logs",
				Exec:       runDaemonLogs,
				ShortHelp:  "Watch tailscaled's server logs",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("daemon-logs")
					fs.IntVar(&daemonLogsArgs.verbose, "verbose", 0, "verbosity level")
					fs.BoolVar(&daemonLogsArgs.time, "time", false, "include client time")
					return fs
				})(),
			},
			{
				Name:       "metrics",
				ShortUsage: "tailscale debug metrics",
				Exec:       runDaemonMetrics,
				ShortHelp:  "Print tailscaled's metrics",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("metrics")
					fs.BoolVar(&metricsArgs.watch, "watch", false, "print JSON dump of delta values")
					return fs
				})(),
			},
			{
				Name:       "env",
				ShortUsage: "tailscale debug env",
				Exec:       runEnv,
				ShortHelp:  "Print cmd/tailscale environment",
			},
			{
				Name:       "stat",
				ShortUsage: "tailscale debug stat <files...>",
				Exec:       runStat,
				ShortHelp:  "Stat a file",
			},
			{
				Name:       "hostinfo",
				ShortUsage: "tailscale debug hostinfo",
				Exec:       runHostinfo,
				ShortHelp:  "Print hostinfo",
			},
			{
				Name:       "local-creds",
				ShortUsage: "tailscale debug local-creds",
				Exec:       runLocalCreds,
				ShortHelp:  "Print how to access Tailscale LocalAPI",
			},
			{
				Name:       "restun",
				ShortUsage: "tailscale debug restun",
				Exec:       localAPIAction("restun"),
				ShortHelp:  "Force a magicsock restun",
			},
			{
				Name:       "rebind",
				ShortUsage: "tailscale debug rebind",
				Exec:       localAPIAction("rebind"),
				ShortHelp:  "Force a magicsock rebind",
			},
			{
				Name:       "derp-set-on-demand",
				ShortUsage: "tailscale debug derp-set-on-demand",
				Exec:       localAPIAction("derp-set-homeless"),
				ShortHelp:  "Enable DERP on-demand mode (breaks reachability)",
			},
			{
				Name:       "derp-unset-on-demand",
				ShortUsage: "tailscale debug derp-unset-on-demand",
				Exec:       localAPIAction("derp-unset-homeless"),
				ShortHelp:  "Disable DERP on-demand mode",
			},
			{
				Name:       "break-tcp-conns",
				ShortUsage: "tailscale debug break-tcp-conns",
				Exec:       localAPIAction("break-tcp-conns"),
				ShortHelp:  "Break any open TCP connections from the daemon",
			},
			{
				Name:       "break-derp-conns",
				ShortUsage: "tailscale debug break-derp-conns",
				Exec:       localAPIAction("break-derp-conns"),
				ShortHelp:  "Break any open DERP connections from the daemon",
			},
			{
				Name:       "pick-new-derp",
				ShortUsage: "tailscale debug pick-new-derp",
				Exec:       localAPIAction("pick-new-derp"),
				ShortHelp:  "Switch to some other random DERP home region for a short time",
			},
			{
				Name:       "force-prefer-derp",
				ShortUsage: "tailscale debug force-prefer-derp",
				Exec:       forcePreferDERP,
				ShortHelp:  "Prefer the given region ID if reachable (until restart, or 0 to clear)",
			},
			{
				Name:       "force-netmap-update",
				ShortUsage: "tailscale debug force-netmap-update",
				Exec:       localAPIAction("force-netmap-update"),
				ShortHelp:  "Force a full no-op netmap update (for load testing)",
			},
			{
				// TODO(bradfitz,maisem): eventually promote this out of debug
				Name:       "reload-config",
				ShortUsage: "tailscale debug reload-config",
				Exec:       reloadConfig,
				ShortHelp:  "Reload config",
			},
			{
				Name:       "control-knobs",
				ShortUsage: "tailscale debug control-knobs",
				Exec:       debugControlKnobs,
				ShortHelp:  "See current control knobs",
			},
			{
				Name:       "prefs",
				ShortUsage: "tailscale debug prefs",
				Exec:       runPrefs,
				ShortHelp:  "Print prefs",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("prefs")
					fs.BoolVar(&prefsArgs.pretty, "pretty", false, "If true, pretty-print output")
					return fs
				})(),
			},
			{
				Name:       "watch-ipn",
				ShortUsage: "tailscale debug watch-ipn",
				Exec:       runWatchIPN,
				ShortHelp:  "Subscribe to IPN message bus",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("watch-ipn")
					fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages")
					fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status")
					fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags")
					fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap")
					fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever")
					return fs
				})(),
			},
			{
				Name:       "netmap",
				ShortUsage: "tailscale debug netmap",
				Exec:       runNetmap,
				ShortHelp:  "Print the current network map",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("netmap")
					fs.BoolVar(&netmapArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap")
					return fs
				})(),
			},
			{
				Name: "via",
				ShortUsage: "tailscale debug via <site-id> <v4-cidr>\n" +
					"tailscale debug via <v6-route>",
				Exec:      runVia,
				ShortHelp: "Convert between site-specific IPv4 CIDRs and IPv6 'via' routes",
			},
			{
				Name:       "ts2021",
				ShortUsage: "tailscale debug ts2021",
				Exec:       runTS2021,
				ShortHelp:  "Debug ts2021 protocol connectivity",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("ts2021")
					fs.StringVar(&ts2021Args.host, "host", "controlplane.tailscale.com", "hostname of control plane")
					fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol version")
					fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose")
					return fs
				})(),
			},
			{
				Name:       "set-expire",
				ShortUsage: "tailscale debug set-expire --in=1m",
				Exec:       runSetExpire,
				ShortHelp:  "Manipulate node key expiry for testing",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("set-expire")
					fs.DurationVar(&setExpireArgs.in, "in", 0, "if non-zero, set node key to expire this duration from now")
					return fs
				})(),
			},
			{
				Name:       "dev-store-set",
				ShortUsage: "tailscale debug dev-store-set",
				Exec:       runDevStoreSet,
				ShortHelp:  "Set a key/value pair during development",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("store-set")
					fs.BoolVar(&devStoreSetArgs.danger, "danger", false, "accept danger")
					return fs
				})(),
			},
			{
				Name:       "derp",
				ShortUsage: "tailscale debug derp",
				Exec:       runDebugDERP,
				ShortHelp:  "Test a DERP configuration",
			},
			ccall(debugCaptureCmd),
			{
				Name:       "portmap",
				ShortUsage: "tailscale debug portmap",
				Exec:       debugPortmap,
				ShortHelp:  "Run portmap debugging",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("portmap")
					fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping")
					fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`)
					fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`)
					fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`)
					fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`)
					return fs
				})(),
			},
			{
				Name:       "peer-endpoint-changes",
				ShortUsage: "tailscale debug peer-endpoint-changes <hostname-or-IP>",
				Exec:       runPeerEndpointChanges,
				ShortHelp:  "Print debug information about a peer's endpoint changes",
			},
			{
				Name:       "dial-types",
				ShortUsage: "tailscale debug dial-types <hostname-or-IP> <port>",
				Exec:       runDebugDialTypes,
				ShortHelp:  "Print debug information about connecting to a given host or IP",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("dial-types")
					fs.StringVar(&debugDialTypesArgs.network, "network", "tcp", `network type to dial ("tcp", "udp", etc.)`)
					return fs
				})(),
			},
			{
				Name:       "resolve",
				ShortUsage: "tailscale debug resolve <hostname>",
				Exec:       runDebugResolve,
				ShortHelp:  "Does a DNS lookup",
				FlagSet: (func() *flag.FlagSet {
					fs := newFlagSet("resolve")
					fs.StringVar(&resolveArgs.net, "net", "ip", "network type to resolve (ip, ip4, ip6)")
					return fs
				})(),
			},
			{
				Name:       "go-buildinfo",
				ShortUsage: "tailscale debug go-buildinfo",
				ShortHelp:  "Print Go's runtime/debug.BuildInfo",
				Exec:       runGoBuildInfo,
			},
		}...),
	}
}

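The rewritten debugCmd() leans on two helpers, nonNilCmds and ccall, whose definitions are not in the hunks shown here. Below is a plausible sketch consistent with the call sites above (ccall(debugCaptureCmd) and the variadic nonNilCmds([]*ffcli.Command{...}...)), offered as an assumption rather than the actual implementation:

```go
package cli

import "github.com/peterbourgon/ff/v3/ffcli"

// ccall calls fn if the feature's file was compiled in (so its init set
// the hook), else returns the zero value. Hypothetical reconstruction.
func ccall[T any](fn func() T) T {
	var zero T
	if fn == nil {
		return zero
	}
	return fn()
}

// nonNilCmds drops the nil entries left behind by omitted features, so a
// build with ts_omit_capture simply has no "capture" subcommand.
func nonNilCmds(cmds ...*ffcli.Command) []*ffcli.Command {
	out := cmds[:0]
	for _, c := range cmds {
		if c != nil {
			out = append(out, c)
		}
	}
	return out
}
```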
func runGoBuildInfo(ctx context.Context, args []string) error {
@@ -1036,50 +1030,6 @@ func runSetExpire(ctx context.Context, args []string) error {
	return localClient.DebugSetExpireIn(ctx, setExpireArgs.in)
}

var captureArgs struct {
	outFile string
}

func runCapture(ctx context.Context, args []string) error {
	stream, err := localClient.StreamDebugCapture(ctx)
	if err != nil {
		return err
	}
	defer stream.Close()

	switch captureArgs.outFile {
	case "-":
		fmt.Fprintln(Stderr, "Press Ctrl-C to stop the capture.")
		_, err = io.Copy(os.Stdout, stream)
		return err
	case "":
		lua, err := os.CreateTemp("", "ts-dissector")
		if err != nil {
			return err
		}
		defer os.Remove(lua.Name())
		lua.Write([]byte(capture.DissectorLua))
		if err := lua.Close(); err != nil {
			return err
		}

		wireshark := exec.CommandContext(ctx, "wireshark", "-X", "lua_script:"+lua.Name(), "-k", "-i", "-")
		wireshark.Stdin = stream
		wireshark.Stdout = os.Stdout
		wireshark.Stderr = os.Stderr
		return wireshark.Run()
	}

	f, err := os.OpenFile(captureArgs.outFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	fmt.Fprintln(Stderr, "Press Ctrl-C to stop the capture.")
	_, err = io.Copy(f, stream)
	return err
}

var debugPortmapArgs struct {
	duration    time.Duration
	gatewayAddr string

@@ -10,6 +10,7 @@ import (

	"github.com/peterbourgon/ff/v3/ffcli"
	"tailscale.com/ipn"
	"tailscale.com/localclient/tailscale/apitype"
)

var downCmd = &ffcli.Command{
@@ -23,10 +24,12 @@ var downCmd = &ffcli.Command{

var downArgs struct {
	acceptedRisks string
	reason        string
}

func newDownFlagSet() *flag.FlagSet {
	downf := newFlagSet("down")
	downf.StringVar(&downArgs.reason, "reason", "", "reason for the disconnect, if required by a policy")
	registerAcceptRiskFlag(downf, &downArgs.acceptedRisks)
	return downf
}
@@ -50,6 +53,7 @@ func runDown(ctx context.Context, args []string) error {
		fmt.Fprintf(Stderr, "Tailscale was already stopped.\n")
		return nil
	}
	ctx = apitype.RequestReasonKey.WithValue(ctx, downArgs.reason)
	_, err = localClient.EditPrefs(ctx, &ipn.MaskedPrefs{
		Prefs: ipn.Prefs{
			WantRunning: false,

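The new -reason flag is plumbed to the daemon by attaching it to the request context via apitype.RequestReasonKey.WithValue. RequestReasonKey's definition is not in the hunks shown; below is a minimal sketch of a typed context key with that shape, with every name not visible in the diff marked hypothetical:

```go
package apitype

import "context"

// requestReason is a hypothetical unexported key type; only the
// RequestReasonKey.WithValue call site appears in the diff above.
type requestReason struct{}

// ReasonKey is a guessed wrapper type providing the typed WithValue method.
type ReasonKey struct{}

// WithValue returns a context carrying the user-supplied reason.
func (ReasonKey) WithValue(ctx context.Context, reason string) context.Context {
	return context.WithValue(ctx, requestReason{}, reason)
}

// Value extracts the reason, if any, from ctx.
func (ReasonKey) Value(ctx context.Context) string {
	reason, _ := ctx.Value(requestReason{}).(string)
	return reason
}

var RequestReasonKey ReasonKey
```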
@@ -25,9 +25,9 @@ import (
	"github.com/mattn/go-isatty"
	"github.com/peterbourgon/ff/v3/ffcli"
	"golang.org/x/time/rate"
	"tailscale.com/client/tailscale/apitype"
	"tailscale.com/cmd/tailscale/cli/ffcomplete"
	"tailscale.com/envknob"
	"tailscale.com/localclient/tailscale/apitype"
	"tailscale.com/net/tsaddr"
	"tailscale.com/syncs"
	"tailscale.com/tailcfg"

@@ -19,7 +19,7 @@ import (
var funnelCmd = func() *ffcli.Command {
	se := &serveEnv{lc: &localClient}
	// previously used to serve legacy newFunnelCommand unless useWIPCode is true
	// change is limited to make a revert easier and full cleanup to come after the relase.
	// change is limited to make a revert easier and full cleanup to come after the release.
	// TODO(tylersmalley): cleanup and removal of newFunnelCommand as of 2023-10-16
	return newServeV2Command(se, funnel)
}

@@ -16,9 +16,9 @@ import (
	"time"

	"github.com/peterbourgon/ff/v3/ffcli"
	"tailscale.com/client/tailscale"
	"tailscale.com/cmd/tailscale/cli/ffcomplete"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/localclient/tailscale"
	"tailscale.com/tailcfg"
)

@@ -23,9 +23,9 @@ import (
	"strings"

	"github.com/peterbourgon/ff/v3/ffcli"
	"tailscale.com/client/tailscale"
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/localclient/tailscale"
	"tailscale.com/tailcfg"
	"tailscale.com/util/slicesx"
	"tailscale.com/version"

@@ -18,9 +18,9 @@ import (
	"testing"

	"github.com/peterbourgon/ff/v3/ffcli"
	"tailscale.com/client/tailscale"
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/localclient/tailscale"
	"tailscale.com/tailcfg"
	"tailscale.com/tstest"
	"tailscale.com/types/logger"

@@ -23,9 +23,9 @@ import (
	"strings"

	"github.com/peterbourgon/ff/v3/ffcli"
	"tailscale.com/client/tailscale"
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/localclient/tailscale"
	"tailscale.com/tailcfg"
	"tailscale.com/util/mak"
	"tailscale.com/util/slicesx"

@@ -139,7 +139,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet {
		// Some flags are only for "up", not "login".
		upf.BoolVar(&upArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)")
		upf.BoolVar(&upArgs.reset, "reset", false, "reset unspecified settings to their default values")
		upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication")
		upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication (WARNING: this will bring down the Tailscale connection and thus should not be done remotely over SSH or RDP)")
		registerAcceptRiskFlag(upf, &upArgs.acceptedRisks)
	}

@@ -60,7 +60,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli
L github.com/vishvananda/netns from github.com/tailscale/netlink+
github.com/x448/float16 from github.com/fxamacker/cbor/v2
💣 go4.org/mem from tailscale.com/client/tailscale+
💣 go4.org/mem from tailscale.com/control/controlbase+
go4.org/netipx from tailscale.com/net/tsaddr
W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+
k8s.io/client-go/util/homedir from tailscale.com/cmd/tailscale/cli
@@ -70,8 +70,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12
tailscale.com from tailscale.com/version
💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+
tailscale.com/client/tailscale from tailscale.com/client/web+
tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+
tailscale.com/client/tailscale from tailscale.com/cmd/tailscale/cli
tailscale.com/client/web from tailscale.com/cmd/tailscale/cli
tailscale.com/clientupdate from tailscale.com/client/web+
LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate
@@ -85,32 +84,33 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/derp from tailscale.com/derp/derphttp
tailscale.com/derp/derphttp from tailscale.com/net/netcheck
tailscale.com/disco from tailscale.com/derp
tailscale.com/drive from tailscale.com/client/tailscale+
tailscale.com/envknob from tailscale.com/client/tailscale+
tailscale.com/drive from tailscale.com/cmd/tailscale/cli+
tailscale.com/envknob from tailscale.com/client/web+
tailscale.com/envknob/featureknob from tailscale.com/client/web
tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli
tailscale.com/health from tailscale.com/net/tlsdial+
tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli
tailscale.com/hostinfo from tailscale.com/client/web+
tailscale.com/internal/noiseconn from tailscale.com/cmd/tailscale/cli
tailscale.com/ipn from tailscale.com/client/tailscale+
tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+
tailscale.com/ipn from tailscale.com/client/web+
tailscale.com/ipn/ipnstate from tailscale.com/client/web+
tailscale.com/kube/kubetypes from tailscale.com/envknob
tailscale.com/licenses from tailscale.com/client/web+
tailscale.com/localclient/tailscale from tailscale.com/client/web+
tailscale.com/localclient/tailscale/apitype from tailscale.com/client/tailscale+
tailscale.com/metrics from tailscale.com/derp+
tailscale.com/net/bakedroots from tailscale.com/net/tlsdial
tailscale.com/net/captivedetection from tailscale.com/net/netcheck
tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback
tailscale.com/net/dnscache from tailscale.com/control/controlhttp+
tailscale.com/net/dnsfallback from tailscale.com/control/controlhttp+
tailscale.com/net/flowtrack from tailscale.com/net/packet
tailscale.com/net/netaddr from tailscale.com/ipn+
tailscale.com/net/netcheck from tailscale.com/cmd/tailscale/cli
tailscale.com/net/neterror from tailscale.com/net/netcheck+
tailscale.com/net/netknob from tailscale.com/net/netns
💣 tailscale.com/net/netmon from tailscale.com/cmd/tailscale/cli+
💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+
tailscale.com/net/netutil from tailscale.com/client/tailscale+
tailscale.com/net/packet from tailscale.com/wgengine/capture
tailscale.com/net/netutil from tailscale.com/client/web+
tailscale.com/net/ping from tailscale.com/net/netcheck
tailscale.com/net/portmapper from tailscale.com/cmd/tailscale/cli+
tailscale.com/net/sockstats from tailscale.com/control/controlhttp+
@@ -120,12 +120,12 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
tailscale.com/paths from tailscale.com/client/tailscale+
💣 tailscale.com/safesocket from tailscale.com/client/tailscale+
tailscale.com/paths from tailscale.com/cmd/tailscale/cli+
💣 tailscale.com/safesocket from tailscale.com/cmd/tailscale/cli+
tailscale.com/syncs from tailscale.com/cmd/tailscale/cli+
tailscale.com/tailcfg from tailscale.com/client/tailscale+
tailscale.com/tailcfg from tailscale.com/client/web+
tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+
tailscale.com/tka from tailscale.com/client/tailscale+
tailscale.com/tka from tailscale.com/cmd/tailscale/cli+
tailscale.com/tsconst from tailscale.com/net/netmon+
tailscale.com/tstime from tailscale.com/control/controlhttp+
tailscale.com/tstime/mono from tailscale.com/tstime/rate
@@ -133,8 +133,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/tsweb/varz from tailscale.com/util/usermetric
tailscale.com/types/dnstype from tailscale.com/tailcfg+
tailscale.com/types/empty from tailscale.com/ipn
tailscale.com/types/ipproto from tailscale.com/net/flowtrack+
tailscale.com/types/key from tailscale.com/client/tailscale+
tailscale.com/types/ipproto from tailscale.com/ipn+
tailscale.com/types/key from tailscale.com/cmd/tailscale/cli+
tailscale.com/types/lazy from tailscale.com/util/testenv+
tailscale.com/types/logger from tailscale.com/client/web+
tailscale.com/types/netmap from tailscale.com/ipn+
@@ -185,7 +185,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+
tailscale.com/version from tailscale.com/client/web+
tailscale.com/version/distro from tailscale.com/client/web+
tailscale.com/wgengine/capture from tailscale.com/cmd/tailscale/cli
tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap
golang.org/x/crypto/argon2 from tailscale.com/tka
golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+
@@ -196,6 +195,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+
golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+
golang.org/x/crypto/hkdf from crypto/tls+
golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+
golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+
golang.org/x/crypto/nacl/box from tailscale.com/types/key
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
golang.org/x/crypto/pbkdf2 from software.sslmate.com/src/go-pkcs12
@@ -211,6 +212,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
golang.org/x/net/http2/hpack from net/http+
golang.org/x/net/icmp from tailscale.com/net/ping
golang.org/x/net/idna from golang.org/x/net/http/httpguts+
golang.org/x/net/internal/iana from golang.org/x/net/icmp+
golang.org/x/net/internal/socket from golang.org/x/net/icmp+
golang.org/x/net/internal/socks from golang.org/x/net/proxy
golang.org/x/net/ipv4 from github.com/miekg/dns+
golang.org/x/net/ipv6 from github.com/miekg/dns+
golang.org/x/net/proxy from tailscale.com/net/netns
@@ -249,6 +253,18 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
crypto/ed25519 from crypto/tls+
crypto/elliptic from crypto/ecdsa+
crypto/hmac from crypto/tls+
crypto/internal/alias from crypto/aes+
crypto/internal/bigmod from crypto/ecdsa+
crypto/internal/boring from crypto/aes+
crypto/internal/boring/bbig from crypto/ecdsa+
crypto/internal/boring/sig from crypto/internal/boring
crypto/internal/edwards25519 from crypto/ed25519
crypto/internal/edwards25519/field from crypto/ecdh+
crypto/internal/hpke from crypto/tls
crypto/internal/mlkem768 from crypto/tls
crypto/internal/nistec from crypto/ecdh+
crypto/internal/nistec/fiat from crypto/internal/nistec
crypto/internal/randutil from crypto/dsa+
crypto/md5 from crypto/tls+
crypto/rand from crypto/ed25519+
crypto/rc4 from crypto/tls
@@ -259,6 +275,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
crypto/subtle from crypto/aes+
crypto/tls from github.com/miekg/dns+
crypto/x509 from crypto/tls+
D crypto/x509/internal/macos from crypto/x509
crypto/x509/pkix from crypto/x509+
DW database/sql/driver from github.com/google/uuid
W debug/dwarf from debug/pe
@@ -287,6 +304,44 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
image from github.com/skip2/go-qrcode+
image/color from github.com/skip2/go-qrcode+
image/png from github.com/skip2/go-qrcode
internal/abi from crypto/x509/internal/macos+
internal/asan from syscall
internal/bisect from internal/godebug
internal/bytealg from bytes+
internal/byteorder from crypto/aes+
internal/chacha8rand from math/rand/v2+
internal/concurrent from unique
internal/coverage/rtcov from runtime
internal/cpu from crypto/aes+
internal/filepathlite from os+
internal/fmtsort from fmt+
internal/goarch from crypto/aes+
internal/godebug from archive/tar+
internal/godebugs from internal/godebug+
internal/goexperiment from runtime
internal/goos from crypto/x509+
internal/itoa from internal/poll+
internal/msan from syscall
internal/nettrace from net+
internal/oserror from io/fs+
internal/poll from net+
internal/profilerecord from runtime
internal/race from internal/poll+
internal/reflectlite from context+
internal/runtime/atomic from internal/runtime/exithook+
internal/runtime/exithook from runtime
L internal/runtime/syscall from runtime+
internal/saferio from debug/pe+
internal/singleflight from net
internal/stringslite from embed+
internal/syscall/execenv from os+
LD internal/syscall/unix from crypto/rand+
W internal/syscall/windows from crypto/rand+
W internal/syscall/windows/registry from mime+
W internal/syscall/windows/sysdll from internal/syscall/windows+
internal/testlog from os
internal/unsafeheader from internal/reflectlite+
internal/weak from unique
io from archive/tar+
io/fs from archive/tar+
io/ioutil from github.com/mitchellh/go-ps+
@@ -308,6 +363,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
net/http/httptrace from golang.org/x/net/http2+
net/http/httputil from tailscale.com/client/web+
net/http/internal from net/http+
net/http/internal/ascii from net/http+
net/netip from go4.org/netipx+
net/textproto from golang.org/x/net/http/httpguts+
net/url from crypto/x509+
@@ -320,7 +376,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
reflect from archive/tar+
regexp from github.com/coreos/go-iptables/iptables+
regexp/syntax from regexp
runtime from archive/tar+
runtime/debug from tailscale.com+
runtime/internal/math from runtime
runtime/internal/sys from runtime
slices from tailscale.com/client/web+
sort from compress/flate+
strconv from archive/tar+
@@ -336,3 +395,4 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
unicode/utf16 from crypto/x509+
unicode/utf8 from bufio+
unique from net/netip
unsafe from bytes+

@@ -152,10 +152,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio
W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs
W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+
github.com/tailscale/golang-x-crypto/acme from tailscale.com/ipn/ipnlocal
LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh
LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal+
LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh
github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+
github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper
github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+
@@ -185,7 +181,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+
L github.com/vishvananda/netns from github.com/tailscale/netlink+
github.com/x448/float16 from github.com/fxamacker/cbor/v2
💣 go4.org/mem from tailscale.com/client/tailscale+
💣 go4.org/mem from tailscale.com/control/controlbase+
go4.org/netipx from github.com/tailscale/wf+
W 💣 golang.zx2c4.com/wintun from github.com/tailscale/wireguard-go/tun+
W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/cmd/tailscaled+
@@ -234,8 +230,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/appc from tailscale.com/ipn/ipnlocal
💣 tailscale.com/atomicfile from tailscale.com/ipn+
LD tailscale.com/chirp from tailscale.com/cmd/tailscaled
tailscale.com/client/tailscale from tailscale.com/client/web+
tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+
tailscale.com/client/web from tailscale.com/ipn/ipnlocal
tailscale.com/clientupdate from tailscale.com/client/web+
LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate
@@ -252,14 +246,15 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal
💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal
tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal
tailscale.com/drive from tailscale.com/client/tailscale+
tailscale.com/drive from tailscale.com/drive/driveimpl+
tailscale.com/drive/driveimpl from tailscale.com/cmd/tailscaled
tailscale.com/drive/driveimpl/compositedav from tailscale.com/drive/driveimpl
tailscale.com/drive/driveimpl/dirfs from tailscale.com/drive/driveimpl+
tailscale.com/drive/driveimpl/shared from tailscale.com/drive/driveimpl+
tailscale.com/envknob from tailscale.com/client/tailscale+
tailscale.com/envknob from tailscale.com/client/web+
tailscale.com/envknob/featureknob from tailscale.com/client/web+
tailscale.com/feature from tailscale.com/feature/wakeonlan+
tailscale.com/feature/capture from tailscale.com/feature/condregister
tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled
L tailscale.com/feature/tap from tailscale.com/feature/condregister
tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister
@@ -267,13 +262,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal
tailscale.com/hostinfo from tailscale.com/client/web+
tailscale.com/internal/noiseconn from tailscale.com/control/controlclient
tailscale.com/ipn from tailscale.com/client/tailscale+
tailscale.com/ipn from tailscale.com/client/web+
tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+
💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+
tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+
tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled
tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+
tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver
tailscale.com/ipn/ipnstate from tailscale.com/client/web+
tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+
tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+
L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store
@@ -283,6 +278,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore
tailscale.com/kube/kubetypes from tailscale.com/envknob
tailscale.com/licenses from tailscale.com/client/web
tailscale.com/localclient/tailscale from tailscale.com/client/web+
tailscale.com/localclient/tailscale/apitype from tailscale.com/client/web+
tailscale.com/log/filelogger from tailscale.com/logpolicy
tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal
tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+
@@ -310,7 +307,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
💣 tailscale.com/net/netmon from tailscale.com/cmd/tailscaled+
💣 tailscale.com/net/netns from tailscale.com/cmd/tailscaled+
W 💣 tailscale.com/net/netstat from tailscale.com/portlist
tailscale.com/net/netutil from tailscale.com/client/tailscale+
tailscale.com/net/netutil from tailscale.com/client/web+
tailscale.com/net/packet from tailscale.com/net/connstats+
tailscale.com/net/packet/checksum from tailscale.com/net/tstun
tailscale.com/net/ping from tailscale.com/net/netcheck+
@@ -328,19 +325,21 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+
tailscale.com/omit from tailscale.com/ipn/conffile
tailscale.com/paths from tailscale.com/client/tailscale+
tailscale.com/paths from tailscale.com/cmd/tailscaled+
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal
tailscale.com/posture from tailscale.com/ipn/ipnlocal
tailscale.com/proxymap from tailscale.com/tsd+
💣 tailscale.com/safesocket from tailscale.com/client/tailscale+
💣 tailscale.com/safesocket from tailscale.com/cmd/tailscaled+
LD tailscale.com/sessionrecording from tailscale.com/ssh/tailssh
LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled
tailscale.com/syncs from tailscale.com/cmd/tailscaled+
tailscale.com/tailcfg from tailscale.com/client/tailscale+
tailscale.com/tailcfg from tailscale.com/client/web+
tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+
tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal
LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh
tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock
tailscale.com/tka from tailscale.com/client/tailscale+
tailscale.com/tempfork/httprec from tailscale.com/control/controlclient
tailscale.com/tka from tailscale.com/control/controlclient+
tailscale.com/tsconst from tailscale.com/net/netmon+
tailscale.com/tsd from tailscale.com/cmd/tailscaled+
tailscale.com/tstime from tailscale.com/control/controlclient+
@@ -352,14 +351,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/types/empty from tailscale.com/ipn+
tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled
tailscale.com/types/ipproto from tailscale.com/net/flowtrack+
tailscale.com/types/key from tailscale.com/client/tailscale+
tailscale.com/types/key from tailscale.com/cmd/tailscaled+
tailscale.com/types/lazy from tailscale.com/ipn/ipnlocal+
tailscale.com/types/logger from tailscale.com/appc+
tailscale.com/types/logid from tailscale.com/cmd/tailscaled+
tailscale.com/types/netlogtype from tailscale.com/net/connstats+
tailscale.com/types/netmap from tailscale.com/control/controlclient+
tailscale.com/types/nettype from tailscale.com/ipn/localapi+
tailscale.com/types/opt from tailscale.com/client/tailscale+
tailscale.com/types/opt from tailscale.com/control/controlknobs+
tailscale.com/types/persist from tailscale.com/control/controlclient+
tailscale.com/types/preftype from tailscale.com/ipn+
tailscale.com/types/ptr from tailscale.com/control/controlclient+
@@ -380,7 +379,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/util/groupmember from tailscale.com/client/web+
💣 tailscale.com/util/hashx from tailscale.com/util/deephash
tailscale.com/util/httphdr from tailscale.com/ipn/ipnlocal+
tailscale.com/util/httpm from tailscale.com/client/tailscale+
tailscale.com/util/httpm from tailscale.com/client/web+
tailscale.com/util/lineiter from tailscale.com/hostinfo+
L tailscale.com/util/linuxfw from tailscale.com/net/netns+
tailscale.com/util/mak from tailscale.com/control/controlclient+
@@ -422,7 +421,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/version/distro from tailscale.com/client/web+
W tailscale.com/wf from tailscale.com/cmd/tailscaled
tailscale.com/wgengine from tailscale.com/cmd/tailscaled+
|
||||
tailscale.com/wgengine/capture from tailscale.com/ipn/ipnlocal+
|
||||
tailscale.com/wgengine/filter from tailscale.com/control/controlclient+
|
||||
tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+
|
||||
💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+
|
||||
@@ -438,19 +436,22 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
golang.org/x/crypto/argon2 from tailscale.com/tka
|
||||
golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+
|
||||
golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+
|
||||
LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf+
|
||||
LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
|
||||
golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+
|
||||
golang.org/x/crypto/chacha20poly1305 from crypto/tls+
|
||||
golang.org/x/crypto/cryptobyte from crypto/ecdsa+
|
||||
golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+
|
||||
golang.org/x/crypto/curve25519 from github.com/tailscale/golang-x-crypto/ssh+
|
||||
golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+
|
||||
golang.org/x/crypto/hkdf from crypto/tls+
|
||||
golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+
|
||||
golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+
|
||||
golang.org/x/crypto/nacl/box from tailscale.com/types/key
|
||||
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
|
||||
golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device
|
||||
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
|
||||
golang.org/x/crypto/sha3 from crypto/internal/mlkem768+
|
||||
LD golang.org/x/crypto/ssh from github.com/pkg/sftp+
|
||||
LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh
|
||||
golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+
|
||||
golang.org/x/exp/maps from tailscale.com/ipn/store/mem+
|
||||
golang.org/x/net/bpf from github.com/mdlayher/genetlink+
|
||||
@@ -462,6 +463,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
golang.org/x/net/http2/hpack from golang.org/x/net/http2+
|
||||
golang.org/x/net/icmp from tailscale.com/net/ping+
|
||||
golang.org/x/net/idna from golang.org/x/net/http/httpguts+
|
||||
golang.org/x/net/internal/iana from golang.org/x/net/icmp+
|
||||
golang.org/x/net/internal/socket from golang.org/x/net/icmp+
|
||||
golang.org/x/net/internal/socks from golang.org/x/net/proxy
|
||||
golang.org/x/net/ipv4 from github.com/miekg/dns+
|
||||
golang.org/x/net/ipv6 from github.com/miekg/dns+
|
||||
golang.org/x/net/proxy from tailscale.com/net/netns
|
||||
@@ -501,6 +505,18 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
crypto/ed25519 from crypto/tls+
|
||||
crypto/elliptic from crypto/ecdsa+
|
||||
crypto/hmac from crypto/tls+
|
||||
crypto/internal/alias from crypto/aes+
|
||||
crypto/internal/bigmod from crypto/ecdsa+
|
||||
crypto/internal/boring from crypto/aes+
|
||||
crypto/internal/boring/bbig from crypto/ecdsa+
|
||||
crypto/internal/boring/sig from crypto/internal/boring
|
||||
crypto/internal/edwards25519 from crypto/ed25519
|
||||
crypto/internal/edwards25519/field from crypto/ecdh+
|
||||
crypto/internal/hpke from crypto/tls
|
||||
crypto/internal/mlkem768 from crypto/tls
|
||||
crypto/internal/nistec from crypto/ecdh+
|
||||
crypto/internal/nistec/fiat from crypto/internal/nistec
|
||||
crypto/internal/randutil from crypto/dsa+
|
||||
crypto/md5 from crypto/tls+
|
||||
crypto/rand from crypto/ed25519+
|
||||
crypto/rc4 from crypto/tls+
|
||||
@@ -511,6 +527,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
crypto/subtle from crypto/aes+
|
||||
crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+
|
||||
crypto/x509 from crypto/tls+
|
||||
D crypto/x509/internal/macos from crypto/x509
|
||||
crypto/x509/pkix from crypto/x509+
|
||||
DW database/sql/driver from github.com/google/uuid
|
||||
W debug/dwarf from debug/pe
|
||||
@@ -528,7 +545,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+
|
||||
errors from archive/tar+
|
||||
expvar from tailscale.com/derp+
|
||||
flag from net/http/httptest+
|
||||
flag from tailscale.com/cmd/tailscaled+
|
||||
fmt from archive/tar+
|
||||
hash from compress/zlib+
|
||||
hash/adler32 from compress/zlib+
|
||||
@@ -536,6 +553,45 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
hash/maphash from go4.org/mem
|
||||
html from html/template+
|
||||
html/template from github.com/gorilla/csrf
|
||||
internal/abi from crypto/x509/internal/macos+
|
||||
internal/asan from syscall
|
||||
internal/bisect from internal/godebug
|
||||
internal/bytealg from bytes+
|
||||
internal/byteorder from crypto/aes+
|
||||
internal/chacha8rand from math/rand/v2+
|
||||
internal/concurrent from unique
|
||||
internal/coverage/rtcov from runtime
|
||||
internal/cpu from crypto/aes+
|
||||
internal/filepathlite from os+
|
||||
internal/fmtsort from fmt+
|
||||
internal/goarch from crypto/aes+
|
||||
internal/godebug from archive/tar+
|
||||
internal/godebugs from internal/godebug+
|
||||
internal/goexperiment from runtime
|
||||
internal/goos from crypto/x509+
|
||||
internal/itoa from internal/poll+
|
||||
internal/msan from syscall
|
||||
internal/nettrace from net+
|
||||
internal/oserror from io/fs+
|
||||
internal/poll from net+
|
||||
internal/profile from net/http/pprof
|
||||
internal/profilerecord from runtime+
|
||||
internal/race from internal/poll+
|
||||
internal/reflectlite from context+
|
||||
internal/runtime/atomic from internal/runtime/exithook+
|
||||
internal/runtime/exithook from runtime
|
||||
L internal/runtime/syscall from runtime+
|
||||
internal/saferio from debug/pe+
|
||||
internal/singleflight from net
|
||||
internal/stringslite from embed+
|
||||
internal/syscall/execenv from os+
|
||||
LD internal/syscall/unix from crypto/rand+
|
||||
W internal/syscall/windows from crypto/rand+
|
||||
W internal/syscall/windows/registry from mime+
|
||||
W internal/syscall/windows/sysdll from internal/syscall/windows+
|
||||
internal/testlog from os
|
||||
internal/unsafeheader from internal/reflectlite+
|
||||
internal/weak from unique
|
||||
io from archive/tar+
|
||||
io/fs from archive/tar+
|
||||
io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+
|
||||
@@ -554,10 +610,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
mime/quotedprintable from mime/multipart
|
||||
net from crypto/tls+
|
||||
net/http from expvar+
|
||||
net/http/httptest from tailscale.com/control/controlclient
|
||||
net/http/httptrace from github.com/prometheus-community/pro-bing+
|
||||
net/http/httputil from github.com/aws/smithy-go/transport/http+
|
||||
net/http/internal from net/http+
|
||||
net/http/internal/ascii from net/http+
|
||||
net/http/pprof from tailscale.com/cmd/tailscaled+
|
||||
net/netip from github.com/tailscale/wireguard-go/conn+
|
||||
net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+
|
||||
@@ -571,7 +627,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
reflect from archive/tar+
|
||||
regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn+
|
||||
regexp/syntax from regexp
|
||||
runtime from archive/tar+
|
||||
runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+
|
||||
runtime/internal/math from runtime
|
||||
runtime/internal/sys from runtime
|
||||
runtime/pprof from net/http/pprof+
|
||||
runtime/trace from net/http/pprof
|
||||
slices from tailscale.com/appc+
|
||||
@@ -589,3 +648,4 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
unicode/utf16 from crypto/x509+
|
||||
unicode/utf8 from bufio+
|
||||
unique from net/netip
|
||||
unsafe from bytes+
|
||||
|
||||
@@ -17,7 +17,6 @@ func TestOmitSSH(t *testing.T) {
Tags: "ts_omit_ssh",
BadDeps: map[string]string{
"tailscale.com/ssh/tailssh": msg,
"golang.org/x/crypto/ssh": msg,
"tailscale.com/sessionrecording": msg,
"github.com/anmitsu/go-shlex": msg,
"github.com/creack/pty": msg,

@@ -30,7 +30,6 @@ import (
"syscall"
"time"

"tailscale.com/client/tailscale"
"tailscale.com/cmd/tailscaled/childproc"
"tailscale.com/control/controlclient"
"tailscale.com/drive/driveimpl"
@@ -42,6 +41,7 @@ import (
"tailscale.com/ipn/ipnlocal"
"tailscale.com/ipn/ipnserver"
"tailscale.com/ipn/store"
"tailscale.com/localclient/tailscale"
"tailscale.com/logpolicy"
"tailscale.com/logtail"
"tailscale.com/net/dns"

@@ -22,6 +22,8 @@ func TestDeps(t *testing.T) {
BadDeps: map[string]string{
"testing": "do not use testing package in production code",
"gvisor.dev/gvisor/pkg/hostarch": "will crash on non-4K page sizes; see https://github.com/tailscale/tailscale/issues/8658",
"net/http/httptest": "do not use httptest in production code",
"net/http/internal/testcert": "do not use httptest in production code",
},
}.Check(t)

@@ -22,8 +22,8 @@ import (
"log"
"time"

"tailscale.com/client/tailscale"
"tailscale.com/ipn/ipnstate"
"tailscale.com/localclient/tailscale"
"tailscale.com/tka"
"tailscale.com/types/key"
)

@@ -35,11 +35,11 @@ import (

"gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
"tailscale.com/client/tailscale"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/envknob"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/localclient/tailscale"
"tailscale.com/localclient/tailscale/apitype"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
"tailscale.com/types/key"

@@ -30,8 +30,8 @@ import (
"time"

"tailscale.com/atomicfile"
"tailscale.com/client/tailscale"
"tailscale.com/hostinfo"
"tailscale.com/localclient/tailscale"
"tailscale.com/util/mak"
"tailscale.com/util/must"
"tailscale.com/util/set"

@@ -21,6 +21,7 @@ import (
"tailscale.com/types/netmap"
"tailscale.com/types/persist"
"tailscale.com/types/structs"
"tailscale.com/util/clientmetric"
"tailscale.com/util/execqueue"
)

@@ -131,6 +132,8 @@ type Auto struct {
// the server.
lastUpdateGen updateGen

lastStatus atomic.Pointer[Status]

paused bool // whether we should stop making HTTP requests
unpauseWaiters []chan bool // chans that gets sent true (once) on wake, or false on Shutdown
loggedIn bool // true if currently logged in
@@ -596,21 +599,90 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM
// not logged in.
nm = nil
}
new := Status{
newSt := &Status{
URL: url,
Persist: p,
NetMap: nm,
Err: err,
state: state,
}
c.lastStatus.Store(newSt)

// Launch a new goroutine to avoid blocking the caller while the observer
// does its thing, which may result in a call back into the client.
metricQueued.Add(1)
c.observerQueue.Add(func() {
c.observer.SetControlClientStatus(c, new)
if canSkipStatus(newSt, c.lastStatus.Load()) {
metricSkippable.Add(1)
if !c.direct.controlKnobs.DisableSkipStatusQueue.Load() {
metricSkipped.Add(1)
return
}
}
c.observer.SetControlClientStatus(c, *newSt)

// Best effort stop retaining the memory now that we've sent it to the
// observer (LocalBackend). We CAS here because the caller goroutine is
// doing a Store which we want to win a race. This is only a memory
// optimization and is not for correctness.
//
// If the CAS fails, that means somebody else's Store replaced our
// pointer (so mission accomplished: our netmap is no longer retained in
// any case) and that Store caller will be responsible for removing
// their own netmap (or losing their race too, down the chain).
// Eventually the last caller will win this CAS and zero lastStatus.
c.lastStatus.CompareAndSwap(newSt, nil)
})
}

var (
metricQueued = clientmetric.NewCounter("controlclient_auto_status_queued")
metricSkippable = clientmetric.NewCounter("controlclient_auto_status_queue_skippable")
metricSkipped = clientmetric.NewCounter("controlclient_auto_status_queue_skipped")
)

// canSkipStatus reports whether we can skip sending s1, knowing
// that s2 is enqueued sometime in the future after s1.
//
// s1 must be non-nil. s2 may be nil.
func canSkipStatus(s1, s2 *Status) bool {
if s2 == nil {
// Nothing in the future.
return false
}
if s1 == s2 {
// If the last item in the queue is the same as s1,
// we can't skip it.
return false
}
if s1.Err != nil || s1.URL != "" {
// If s1 has an error or a URL, we shouldn't skip it, lest the error go
// away in s2 or in-between. We want to make sure all the subsystems see
// it. Plus there aren't many of these, so not worth skipping.
return false
}
if !s1.Persist.Equals(s2.Persist) || s1.state != s2.state {
// If s1 has a different Persist or state than s2,
// don't skip it. We only care about skipping the typical
// entries where the only difference is the NetMap.
return false
}
// If nothing above precludes it, and both s1 and s2 have NetMaps, then
// we can skip it, because s2's NetMap is a newer version and we can
// jump straight from whatever state we had before to s2's state,
// without passing through s1's state first. A NetMap is regrettably a
// full snapshot of the state, not an incremental delta. We're slowly
// moving towards passing around only deltas around internally at all
// layers, but this is explicitly the case where we didn't have a delta
// path for the message we received over the wire and had to resort
// to the legacy full NetMap path. And then we can get behind processing
// these full NetMap snapshots in LocalBackend/wgengine/magicsock/netstack
// and this path (when it returns true) lets us skip over useless work
// and not get behind in the queue. This matters in particular for tailnets
// that are both very large + very churny.
return s1.NetMap != nil && s2.NetMap != nil
}

func (c *Auto) Login(flags LoginFlags) {
c.logf("client.Login(%v)", flags)

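The skip logic above coalesces bursty netmap-only updates: once a newer Status has been stored, an older queued Status that differs from it only by its NetMap can be dropped, and the observer jumps straight to the newest snapshot. A minimal sketch of the behavior (hypothetical; it would have to live inside package controlclient, where Status and canSkipStatus are unexported, with fmt and tailscale.com/types/netmap imported):

    func exampleCoalesce() {
        older := &Status{NetMap: &netmap.NetworkMap{}}  // stale snapshot still in the queue
        newest := &Status{NetMap: &netmap.NetworkMap{}} // most recently stored snapshot
        fmt.Println(canSkipStatus(older, newest))  // true: jump straight to newest
        fmt.Println(canSkipStatus(older, nil))     // false: nothing newer is queued
        fmt.Println(canSkipStatus(newest, newest)) // false: never skip the queue's last item
    }
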
@@ -4,8 +4,13 @@
package controlclient

import (
"io"
"reflect"
"slices"
"testing"

"tailscale.com/types/netmap"
"tailscale.com/types/persist"
)

func fieldsOf(t reflect.Type) (fields []string) {
@@ -62,3 +67,83 @@ func TestStatusEqual(t *testing.T) {
}
}
}

// tests [canSkipStatus].
func TestCanSkipStatus(t *testing.T) {
st := new(Status)
nm1 := &netmap.NetworkMap{}
nm2 := &netmap.NetworkMap{}

tests := []struct {
name string
s1, s2 *Status
want bool
}{
{
name: "nil-s2",
s1: st,
s2: nil,
want: false,
},
{
name: "equal",
s1: st,
s2: st,
want: false,
},
{
name: "s1-error",
s1: &Status{Err: io.EOF, NetMap: nm1},
s2: &Status{NetMap: nm2},
want: false,
},
{
name: "s1-url",
s1: &Status{URL: "foo", NetMap: nm1},
s2: &Status{NetMap: nm2},
want: false,
},
{
name: "s1-persist-diff",
s1: &Status{Persist: new(persist.Persist).View(), NetMap: nm1},
s2: &Status{NetMap: nm2},
want: false,
},
{
name: "s1-state-diff",
s1: &Status{state: 123, NetMap: nm1},
s2: &Status{NetMap: nm2},
want: false,
},
{
name: "s1-no-netmap1",
s1: &Status{NetMap: nil},
s2: &Status{NetMap: nm2},
want: false,
},
{
name: "s1-no-netmap2",
s1: &Status{NetMap: nm1},
s2: &Status{NetMap: nil},
want: false,
},
{
name: "skip",
s1: &Status{NetMap: nm1},
s2: &Status{NetMap: nm2},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := canSkipStatus(tt.s1, tt.s2); got != tt.want {
t.Errorf("canSkipStatus = %v, want %v", got, tt.want)
}
})
}

want := []string{"Err", "URL", "NetMap", "Persist", "state"}
if f := fieldsOf(reflect.TypeFor[Status]()); !slices.Equal(f, want) {
t.Errorf("Status fields = %q; this code was only written to handle fields %q", f, want)
}
}

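The trailing assertion is an exhaustiveness guard: adding a field to Status changes fieldsOf's output and fails the test until canSkipStatus is audited for the new field. Only fieldsOf's signature appears in this diff; one plausible body (an assumption, not shown in the change) is:

    func fieldsOf(t reflect.Type) (fields []string) {
        // Collect struct field names in declaration order.
        for i := 0; i < t.NumField(); i++ {
            fields = append(fields, t.Field(i).Name)
        }
        return fields
    }
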
@@ -15,7 +15,6 @@ import (
"log"
"net"
"net/http"
"net/http/httptest"
"net/netip"
"net/url"
"os"
@@ -42,6 +41,7 @@ import (
"tailscale.com/net/tsdial"
"tailscale.com/net/tshttpproxy"
"tailscale.com/tailcfg"
"tailscale.com/tempfork/httprec"
"tailscale.com/tka"
"tailscale.com/tstime"
"tailscale.com/types/key"
@@ -1384,7 +1384,7 @@ func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr
handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout)
defer cancel()
hreq = hreq.WithContext(handlerCtx)
rec := httptest.NewRecorder()
rec := httprec.NewRecorder()
c2nHandler.ServeHTTP(rec, hreq)
cancel()

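The recorder swap pairs with the TestDeps change above: once net/http/httptest is banned from production builds, answerC2NPing needs an in-process http.ResponseWriter from elsewhere, hence tailscale.com/tempfork/httprec. A rough sketch of what such a recorder involves (an assumption; httprec is presumably a trimmed fork of httptest.ResponseRecorder, and only NewRecorder is visible in this diff):

    // recorder is a minimal in-memory http.ResponseWriter.
    type recorder struct {
        code   int
        header http.Header
        body   bytes.Buffer
    }

    func newRecorder() *recorder {
        return &recorder{code: http.StatusOK, header: make(http.Header)}
    }

    func (r *recorder) Header() http.Header         { return r.header }
    func (r *recorder) Write(p []byte) (int, error) { return r.body.Write(p) }
    func (r *recorder) WriteHeader(code int)        { r.code = code }
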
@@ -300,6 +300,15 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
if dm := resp.DERPMap; dm != nil {
ms.vlogf("netmap: new map contains DERP map")

// Guard against the control server accidentally sending
// a nil region definition, which at least Headscale was
// observed to send.
for rid, r := range dm.Regions {
if r == nil {
delete(dm.Regions, rid)
}
}

// Zero-valued fields in a DERPMap mean that we're not changing
// anything and are using the previous value(s).
if ldm := ms.lastDERPMap; ldm != nil {

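Note that the guard deletes from dm.Regions while ranging over it, which the Go spec permits: an entry deleted during map iteration simply won't be visited again. The same pruning, factored into a standalone helper for illustration (hypothetical; the change inlines the loop in updateStateFromResponse):

    // sanitizeDERPMap removes nil region definitions in place.
    func sanitizeDERPMap(regions map[int]*tailcfg.DERPRegion) {
        for rid, r := range regions {
            if r == nil {
                delete(regions, rid) // safe during range per the Go spec
            }
        }
    }
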
@@ -6,6 +6,8 @@
package controlknobs

import (
"fmt"
"reflect"
"sync/atomic"

"tailscale.com/syncs"
@@ -103,6 +105,11 @@ type Knobs struct {
// DisableCaptivePortalDetection is whether the node should not perform captive portal detection
// automatically when the network state changes.
DisableCaptivePortalDetection atomic.Bool

// DisableSkipStatusQueue is whether the node should disable skipping
// of queued netmap.NetworkMap between the controlclient and LocalBackend.
// See tailscale/tailscale#14768.
DisableSkipStatusQueue atomic.Bool
}

// UpdateFromNodeAttributes updates k (if non-nil) based on the provided self
@@ -132,6 +139,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) {
disableLocalDNSOverrideViaNRPT = has(tailcfg.NodeAttrDisableLocalDNSOverrideViaNRPT)
disableCryptorouting = has(tailcfg.NodeAttrDisableMagicSockCryptoRouting)
disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection)
disableSkipStatusQueue = has(tailcfg.NodeAttrDisableSkipStatusQueue)
)

if has(tailcfg.NodeAttrOneCGNATEnable) {
@@ -159,6 +167,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) {
k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT)
k.DisableCryptorouting.Store(disableCryptorouting)
k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection)
k.DisableSkipStatusQueue.Store(disableSkipStatusQueue)
}

// AsDebugJSON returns k as something that can be marshalled with json.Marshal
@@ -167,25 +176,19 @@ func (k *Knobs) AsDebugJSON() map[string]any {
if k == nil {
return nil
}
return map[string]any{
"DisableUPnP": k.DisableUPnP.Load(),
"KeepFullWGConfig": k.KeepFullWGConfig.Load(),
"RandomizeClientPort": k.RandomizeClientPort.Load(),
"OneCGNAT": k.OneCGNAT.Load(),
"ForceBackgroundSTUN": k.ForceBackgroundSTUN.Load(),
"DisableDeltaUpdates": k.DisableDeltaUpdates.Load(),
"PeerMTUEnable": k.PeerMTUEnable.Load(),
"DisableDNSForwarderTCPRetries": k.DisableDNSForwarderTCPRetries.Load(),
"SilentDisco": k.SilentDisco.Load(),
"LinuxForceIPTables": k.LinuxForceIPTables.Load(),
"LinuxForceNfTables": k.LinuxForceNfTables.Load(),
"SeamlessKeyRenewal": k.SeamlessKeyRenewal.Load(),
"ProbeUDPLifetime": k.ProbeUDPLifetime.Load(),
"AppCStoreRoutes": k.AppCStoreRoutes.Load(),
"UserDialUseRoutes": k.UserDialUseRoutes.Load(),
"DisableSplitDNSWhenNoCustomResolvers": k.DisableSplitDNSWhenNoCustomResolvers.Load(),
"DisableLocalDNSOverrideViaNRPT": k.DisableLocalDNSOverrideViaNRPT.Load(),
"DisableCryptorouting": k.DisableCryptorouting.Load(),
"DisableCaptivePortalDetection": k.DisableCaptivePortalDetection.Load(),
ret := map[string]any{}
rt := reflect.TypeFor[Knobs]()
rv := reflect.ValueOf(k).Elem() // of *k
for i := 0; i < rt.NumField(); i++ {
name := rt.Field(i).Name
switch v := rv.Field(i).Addr().Interface().(type) {
case *atomic.Bool:
ret[name] = v.Load()
case *syncs.AtomicValue[opt.Bool]:
ret[name] = v.Load()
default:
panic(fmt.Sprintf("unknown field type %T for %v", v, name))
}
}
return ret
}

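The AsDebugJSON rewrite trades the hand-maintained map for reflection over Knobs, so a newly added knob (such as DisableSkipStatusQueue above) shows up in debug output automatically, and a field of an unhandled type panics instead of being silently omitted. A small usage sketch, assuming only the exported API shown in this diff:

    package main

    import (
        "encoding/json"
        "fmt"

        "tailscale.com/control/controlknobs"
    )

    func main() {
        k := new(controlknobs.Knobs)
        k.DisableSkipStatusQueue.Store(true)
        j, err := json.Marshal(k.AsDebugJSON()) // every knob field, no per-knob bookkeeping
        if err != nil {
            panic(err)
        }
        fmt.Println(string(j))
    }
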
@@ -6,6 +6,8 @@ package controlknobs
import (
"reflect"
"testing"

"tailscale.com/types/logger"
)

func TestAsDebugJSON(t *testing.T) {
@@ -18,4 +20,5 @@ func TestAsDebugJSON(t *testing.T) {
if want := reflect.TypeFor[Knobs]().NumField(); len(got) != want {
t.Errorf("AsDebugJSON map has %d fields; want %v", len(got), want)
}
t.Logf("Got: %v", logger.AsJSON(got))
}

@@ -36,9 +36,9 @@ import (

"go4.org/mem"
"golang.org/x/sync/errgroup"
"tailscale.com/client/tailscale"
"tailscale.com/disco"
"tailscale.com/envknob"
"tailscale.com/localclient/tailscale"
"tailscale.com/metrics"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
@@ -1827,6 +1827,14 @@ func (c *sclient) setWriteDeadline() {
// of connected peers.
d = privilegedWriteTimeout
}
if d == 0 {
// A zero value should disable the write deadline per
// --tcp-write-timeout docs. The flag should only be applicable for
// non-mesh connections, again per its docs. If mesh happened to use a
// zero value constant above it would be a bug, so we don't bother
// with a condition on c.canMesh.
return
}
// Ignore the error from setting the write deadline. In practice,
// setting the deadline will only fail if the connection is closed
// or closing, so the subsequent Write() will fail anyway.

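Background for the early return above: in Go's net package, a zero time.Time passed to SetWriteDeadline means "no deadline", so a zero --tcp-write-timeout can also be expressed explicitly rather than by returning early. A sketch of the general pattern (hypothetical helper, not part of this change):

    func setOptionalWriteDeadline(c net.Conn, d time.Duration) {
        if d == 0 {
            c.SetWriteDeadline(time.Time{}) // zero time.Time disables the deadline
            return
        }
        c.SetWriteDeadline(time.Now().Add(d))
    }
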
@@ -98,6 +98,7 @@ func ServeNoContent(w http.ResponseWriter, r *http.Request) {
w.Header().Set(NoContentResponseHeader, "response "+challenge)
}
}
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0")
w.WriteHeader(http.StatusNoContent)
}

@@ -105,7 +106,7 @@ func isChallengeChar(c rune) bool {
// Semi-randomly chosen as a limited set of valid characters
return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9') ||
c == '.' || c == '-' || c == '_'
c == '.' || c == '-' || c == '_' || c == ':'
}

const (

@@ -55,8 +55,7 @@ func CanRunTailscaleSSH() error {
func CanUseExitNode() error {
switch dist := distro.Get(); dist {
case distro.Synology, // see https://github.com/tailscale/tailscale/issues/1995
distro.QNAP,
distro.Unraid:
distro.QNAP:
return errors.New("Tailscale exit nodes cannot be used on " + string(dist))
}

@@ -13,21 +13,44 @@ import (
"sync"
"time"

_ "embed"

"tailscale.com/feature"
"tailscale.com/ipn/localapi"
"tailscale.com/net/packet"
"tailscale.com/util/set"
)

//go:embed ts-dissector.lua
var DissectorLua string
func init() {
feature.Register("capture")
localapi.Register("debug-capture", serveLocalAPIDebugCapture)
}

// Callback describes a function which is called to
// record packets when debugging packet-capture.
// Such callbacks must not take ownership of the
// provided data slice: it may only copy out of it
// within the lifetime of the function.
type Callback func(Path, time.Time, []byte, packet.CaptureMeta)
func serveLocalAPIDebugCapture(h *localapi.Handler, w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
if !h.PermitWrite {
http.Error(w, "debug access denied", http.StatusForbidden)
return
}
if r.Method != "POST" {
http.Error(w, "POST required", http.StatusMethodNotAllowed)
return
}

w.WriteHeader(http.StatusOK)
w.(http.Flusher).Flush()

b := h.LocalBackend()
s := b.GetOrSetCaptureSink(newSink)

unregister := s.RegisterOutput(w)

select {
case <-ctx.Done():
case <-s.WaitCh():
}
unregister()

b.ClearCaptureSink()
}

var bufferPool = sync.Pool{
New: func() any {
@@ -57,29 +80,8 @@ func writePktHeader(w *bytes.Buffer, when time.Time, length int) {
binary.Write(w, binary.LittleEndian, uint32(length)) // total length
}

// Path describes where in the data path the packet was captured.
type Path uint8

// Valid Path values.
const (
// FromLocal indicates the packet was logged as it traversed the FromLocal path:
// i.e.: A packet from the local system into the TUN.
FromLocal Path = 0
// FromPeer indicates the packet was logged upon reception from a remote peer.
FromPeer Path = 1
// SynthesizedToLocal indicates the packet was generated from within tailscaled,
// and is being routed to the local machine's network stack.
SynthesizedToLocal Path = 2
// SynthesizedToPeer indicates the packet was generated from within tailscaled,
// and is being routed to a remote Wireguard peer.
SynthesizedToPeer Path = 3

// PathDisco indicates the packet is information about a disco frame.
PathDisco Path = 254
)

// New creates a new capture sink.
func New() *Sink {
// newSink creates a new capture sink.
func newSink() packet.CaptureSink {
ctx, c := context.WithCancel(context.Background())
return &Sink{
ctx: ctx,
@@ -126,6 +128,10 @@ func (s *Sink) RegisterOutput(w io.Writer) (unregister func()) {
}
}

func (s *Sink) CaptureCallback() packet.CaptureCallback {
return s.LogPacket
}

// NumOutputs returns the number of outputs registered with the sink.
func (s *Sink) NumOutputs() int {
s.mu.Lock()
@@ -174,7 +180,7 @@ func customDataLen(meta packet.CaptureMeta) int {
// LogPacket is called to insert a packet into the capture.
//
// This function does not take ownership of the provided data slice.
func (s *Sink) LogPacket(path Path, when time.Time, data []byte, meta packet.CaptureMeta) {
func (s *Sink) LogPacket(path packet.CapturePath, when time.Time, data []byte, meta packet.CaptureMeta) {
select {
case <-s.ctx.Done():
return

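The new init() is what allows the capture feature to be compiled out: cmd/tailscaled reaches it only through tailscale.com/feature/condregister (see the depaware list above), so a build tag can drop the package together with its localapi handler. The wiring presumably resembles this sketch (the ts_omit_capture tag name is an assumption, modeled on the ts_omit_ssh tag exercised by TestOmitSSH above):

    //go:build !ts_omit_capture

    package condregister

    // The blank import runs the capture package's init, which calls
    // feature.Register and localapi.Register as shown above.
    import _ "tailscale.com/feature/capture"
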
12
feature/capture/dissector/dissector.go
Normal file
@@ -0,0 +1,12 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package dissector contains the Lua dissector for Tailscale packets.
package dissector

import (
_ "embed"
)

//go:embed ts-dissector.lua
var Lua string

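Embedding the dissector as a string lets Go callers ship it without a separate data file. For example, a consumer could write it out for Wireshark to load (hypothetical snippet; only dissector.Lua is shown in this diff):

    package main

    import (
        "os"

        "tailscale.com/feature/capture/dissector"
    )

    func main() {
        // Write the embedded Lua so it can be loaded with, e.g.:
        //   wireshark -X lua_script:ts-dissector.lua
        if err := os.WriteFile("ts-dissector.lua", []byte(dissector.Lua), 0o644); err != nil {
            panic(err)
        }
    }
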
Some files were not shown because too many files have changed in this diff