Compare commits

1 Commit

Author SHA1 Message Date
Marwan Sulaiman
dea35a2f38 cmd/tailscale: combine funnel and serve under dev flag
This PR combines the funnel and serve code under the same path.
However, it uses the new code, which means features added to the
funnel command are automatically picked up by serve, but any features
still missing are missing from both.

Updates #8489

Signed-off-by: Marwan Sulaiman <marwan@tailscale.com>
2023-08-29 15:18:36 +01:00
197 changed files with 2502 additions and 8522 deletions

View File

@@ -45,7 +45,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL

View File

@@ -10,6 +10,6 @@ jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: "Build Docker image"
run: docker build .

View File

@@ -17,7 +17,7 @@ jobs:
id-token: "write"
contents: "read"
steps:
- uses: "actions/checkout@v4"
- uses: "actions/checkout@v3"
with:
ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}"
- uses: "DeterminateSystems/nix-installer-action@main"

View File

@@ -22,7 +22,7 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v4

View File

@@ -23,7 +23,7 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:

View File

@@ -14,7 +14,7 @@ jobs:
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Install govulncheck
run: ./tool/go install golang.org/x/vuln/cmd/govulncheck@latest

View File

@@ -91,7 +91,7 @@ jobs:
|| contains(matrix.image, 'parrotsec')
|| contains(matrix.image, 'kalilinux')
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: run installer
run: scripts/installer.sh
# Package installation can fail in docker because systemd is not running

View File

@@ -51,7 +51,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Restore Cache
uses: actions/cache@v3
with:
@@ -74,7 +74,6 @@ jobs:
env:
GOARCH: ${{ matrix.goarch }}
- name: build variant CLIs
if: matrix.buildflags == '' # skip on race builder
run: |
export TS_USE_TOOLCHAIN=1
./build_dist.sh --extra-small ./cmd/tailscaled
@@ -84,7 +83,7 @@ jobs:
env:
GOARCH: ${{ matrix.goarch }}
- name: get qemu # for tstest/archtest
if: matrix.goarch == 'amd64' && matrix.buildflags == ''
if: matrix.goarch == 'amd64' && matrix.variant == ''
run: |
sudo apt-get -y update
sudo apt-get -y install qemu-user
@@ -95,7 +94,7 @@ jobs:
env:
GOARCH: ${{ matrix.goarch }}
- name: bench all
run: ./tool/go test ${{matrix.buildflags}} -bench=. -benchtime=1x -run=^$ $(for x in $(git grep -l "^func Benchmark" | xargs dirname | sort | uniq); do echo "./$x"; done)
run: PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} -bench=. -benchtime=1x -run=^$
env:
GOARCH: ${{ matrix.goarch }}
- name: check that no tracked files changed
@@ -116,7 +115,7 @@ jobs:
runs-on: windows-2022
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Install Go
uses: actions/setup-go@v4
@@ -142,12 +141,10 @@ jobs:
${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ runner.os }}-go-2-
- name: test
run: go run ./cmd/testwrapper ./...
- name: bench all
# Don't use -bench=. -benchtime=1x.
# Somewhere in the layers (powershell?)
# the equals signs cause great confusion.
run: go test ./... -bench . -benchtime 1x -run "^$"
run: go run ./cmd/testwrapper ./... -bench . -benchtime 1x
vm:
runs-on: ["self-hosted", "linux", "vm"]
@@ -155,7 +152,7 @@ jobs:
if: github.repository == 'tailscale/tailscale'
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Run VM tests
run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004
env:
@@ -204,7 +201,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Restore Cache
uses: actions/cache@v3
with:
@@ -241,7 +238,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: build some
run: ./tool/go build ./ipn/... ./wgengine/ ./types/... ./control/controlclient
env:
@@ -255,7 +252,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
# Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed
# and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch
# some Android breakages early.
@@ -270,7 +267,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Restore Cache
uses: actions/cache@v3
with:
@@ -304,7 +301,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: test tailscale_go
run: ./tool/go test -tags=tailscale_go,ts_enable_sockstats ./net/sockstats/...
@@ -372,7 +369,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: check depaware
run: |
export PATH=$(./tool/go env GOROOT)/bin:$PATH
@@ -382,7 +379,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: check that 'go generate' is clean
run: |
pkgs=$(./tool/go list ./... | grep -v dnsfallback)
@@ -395,7 +392,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: check that 'go mod tidy' is clean
run: |
./tool/go mod tidy
@@ -407,7 +404,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: check licenses
run: ./scripts/check_license_headers.sh .
@@ -423,7 +420,7 @@ jobs:
goarch: "386"
steps:
- name: checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: install staticcheck
run: GOBIN=~/.local/bin ./tool/go install honnef.co/go/tools/cmd/staticcheck
- name: run staticcheck

View File

@@ -21,7 +21,7 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Run update-flakes
run: ./update-flake.sh

View File

@@ -41,16 +41,6 @@ We always require the latest Go release, currently Go 1.21. (While we build
releases with our [Go fork](https://github.com/tailscale/go/), its use is not
required.)
To include the embedded web client (accessed via the `tailscale web` command),
first build the client assets using:
```
./tool/yarn --cwd client/web install
./tool/yarn --cwd client/web build
```
Build the `tailscale` and `tailscaled` binaries:
```
go install tailscale.com/cmd/tailscale{,d}
```
@@ -67,6 +57,17 @@ If your distro has conventions that preclude the use of
`build_dist.sh`, please do the equivalent of what it does in your
distro's way, so that bug reports contain useful version information.
## Building the web client
To include the embedded web client (accessed via the `tailscale web` command),
you'll need to build the client assets using:
```
./tool/yarn --cwd client/web build
```
Do this before building the `tailscale.com/cmd/tailscale` binary.
## Bugs
Please file any issues about this code or the hosted service on

View File

@@ -392,20 +392,6 @@ func (lc *LocalClient) DebugAction(ctx context.Context, action string) error {
return nil
}
// DebugResultJSON invokes a debug action and returns its result as something JSON-able.
// These are development tools and subject to change or removal over time.
func (lc *LocalClient) DebugResultJSON(ctx context.Context, action string) (any, error) {
body, err := lc.send(ctx, "POST", "/localapi/v0/debug?action="+url.QueryEscape(action), 200, nil)
if err != nil {
return nil, fmt.Errorf("error %w: %s", err, body)
}
var x any
if err := json.Unmarshal(body, &x); err != nil {
return nil, err
}
return x, nil
}
// DebugPortmapOpts contains options for the DebugPortmap command.
type DebugPortmapOpts struct {
// Duration is how long the mapping should be created for. It defaults
@@ -1108,6 +1094,29 @@ func (lc *LocalClient) NetworkLockDisable(ctx context.Context, secret []byte) er
return nil
}
// StreamServe returns an io.ReadCloser that streams serve/Funnel
// connections made to the provided HostPort.
//
// If Serve and Funnel were not already enabled for the HostPort in the ServeConfig,
// the backend enables it for the duration of the context's lifespan and
// then turns it back off once the context is closed. If either is already enabled,
// it remains that way, but logs are still streamed.
func (lc *LocalClient) StreamServe(ctx context.Context, hp ipn.ServeStreamRequest) (io.ReadCloser, error) {
req, err := http.NewRequestWithContext(ctx, "POST", "http://"+apitype.LocalAPIHost+"/localapi/v0/stream-serve", jsonBody(hp))
if err != nil {
return nil, err
}
res, err := lc.doLocalRequestNiceError(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
res.Body.Close()
return nil, errors.New(res.Status)
}
return res.Body, nil
}
// GetServeConfig returns the current serve config.
//
// If the serve config is empty, it returns (nil, nil).
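
A minimal usage sketch of the new StreamServe method, based on the signature shown in the hunk above. The package name `example`, the helper `streamServeLogs`, and the assumption that `req` is an already-populated ipn.ServeStreamRequest are hypothetical; the request's fields are not shown in this diff.

```go
package example

import (
	"context"
	"io"
	"os"

	"tailscale.com/client/tailscale"
	"tailscale.com/ipn"
)

// streamServeLogs is a hypothetical helper: it streams serve/Funnel
// connection logs for req until ctx is canceled. Per the doc comment on
// StreamServe, the backend enables serve/Funnel for the lifetime of ctx
// if they were not already enabled, then turns them back off.
func streamServeLogs(ctx context.Context, lc *tailscale.LocalClient, req ipn.ServeStreamRequest) error {
	body, err := lc.StreamServe(ctx, req)
	if err != nil {
		return err
	}
	defer body.Close()
	_, err = io.Copy(os.Stdout, body) // copy streamed connection logs to stdout
	return err
}
```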

View File

@@ -6,7 +6,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link rel="shortcut icon" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAQAAADZc7J/AAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAAmJLR0QA/4ePzL8AAAAHdElNRQflAx4QGA4EvmzDAAAA30lEQVRIx2NgGAWMCKa8JKM4A8Ovt88ekyLCDGOoyDBJMjExMbFy8zF8/EKsCAMDE8yAPyIwFps48SJIBpAL4AZwvoSx/r0lXgQpDN58EWL5x/7/H+vL20+JFxluQKVe5b3Ke5V+0kQQCamfoYKBg4GDwUKI8d0BYkWQkrLKewYBKPPDHUFiRaiZkBgmwhj/F5IgggyUJ6i8V3mv0kCayDAAeEsklXqGAgYGhgV3CnGrwVciYSYk0kokhgS44/JxqqFpiYSZbEgskd4dEBRk1GD4wdB5twKXmlHAwMDAAACdEZau06NQUwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAyMC0wNy0xNVQxNTo1Mzo0MCswMDowMCVXsDIAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMjAtMDctMTVUMTU6NTM6NDArMDA6MDBUCgiOAAAAAElFTkSuQmCC" />
<script type="module" crossorigin src="./assets/index-4d1f45ea.js"></script>
<script type="module" crossorigin src="./assets/index-f8beba53.js"></script>
<link rel="stylesheet" href="./assets/index-8612dca6.css">
</head>
<body>

View File

@@ -4,8 +4,6 @@
package web
import (
"embed"
"io/fs"
"log"
"net/http"
"net/http/httputil"
@@ -14,42 +12,11 @@ import (
"os/exec"
"path/filepath"
"strings"
"tailscale.com/util/must"
)
// This contains all files needed to build the frontend assets.
// Because we assign this to the blank identifier, it does not actually embed the files.
// However, this does cause `go mod vendor` to include the files when vendoring the package.
// External packages that use the web client can `go mod vendor`, run `yarn build` to
// build the assets, then those asset bundles will be embedded.
//
//go:embed yarn.lock index.html *.js *.json src/*
var _ embed.FS
//go:embed build/*
var embeddedFS embed.FS
// staticfiles serves static files from the build directory.
var staticfiles http.Handler
func init() {
buildFiles := must.Get(fs.Sub(embeddedFS, "build"))
staticfiles = http.FileServer(http.FS(buildFiles))
}
func assetsHandler(devMode bool) (_ http.Handler, cleanup func()) {
if devMode {
// When in dev mode, proxy asset requests to the Vite dev server.
cleanup := startDevServer()
return devServerProxy(), cleanup
}
return staticfiles, nil
}
// startDevServer starts the JS dev server that does on-demand rebuilding
// and serving of web client JS and CSS resources.
func startDevServer() (cleanup func()) {
func (s *Server) startDevServer() (cleanup func()) {
root := gitRootDir()
webClientPath := filepath.Join(root, "client", "web")
@@ -78,8 +45,10 @@ func startDevServer() (cleanup func()) {
}
}
// devServerProxy returns a reverse proxy to the vite dev server.
func devServerProxy() *httputil.ReverseProxy {
func (s *Server) addProxyToDevServer() {
if !s.devMode {
return // only using Vite proxy in dev mode
}
// We use Vite to develop on the web client.
// Vite starts up its own local server for development,
// which we proxy requests to from Server.ServeHTTP.
@@ -93,9 +62,8 @@ func devServerProxy() *httputil.ReverseProxy {
w.Write([]byte("\n\nError: " + err.Error()))
}
viteTarget, _ := url.Parse("http://127.0.0.1:4000")
devProxy := httputil.NewSingleHostReverseProxy(viteTarget)
devProxy.ErrorHandler = handleErr
return devProxy
s.devProxy = httputil.NewSingleHostReverseProxy(viteTarget)
s.devProxy.ErrorHandler = handleErr
}
func gitRootDir() string {

View File

@@ -16,6 +16,8 @@ import (
"net/url"
)
const qnapPrefix = "/cgi-bin/qpkg/Tailscale/index.cgi/"
// authorizeQNAP authenticates the logged-in QNAP user and verifies
// that they are authorized to use the web client. It returns true if the
// request was handled and no further processing is required.

View File

@@ -1,49 +1,14 @@
let csrfToken: string
let unraidCsrfToken: string | undefined // required for unraid POST requests (#8062)
// apiFetch wraps the standard JS fetch function with csrf header
// management and param additions specific to the web client.
//
// apiFetch adds the `api` prefix to the request URL,
// so endpoint should be provided without the `api` prefix
// (i.e. provide `/data` rather than `api/data`).
// apiFetch wraps the standard JS fetch function
// with csrf header management.
export function apiFetch(
endpoint: string,
method: "GET" | "POST",
body?: any,
params?: Record<string, string>
input: RequestInfo | URL,
init?: RequestInit | undefined
): Promise<Response> {
const urlParams = new URLSearchParams(window.location.search)
const nextParams = new URLSearchParams(params)
const token = urlParams.get("SynoToken")
if (token) {
nextParams.set("SynoToken", token)
}
const search = nextParams.toString()
const url = `api${endpoint}${search ? `?${search}` : ""}`
var contentType: string
if (unraidCsrfToken && method === "POST") {
const params = new URLSearchParams()
params.append("csrf_token", unraidCsrfToken)
if (body) {
params.append("ts_data", JSON.stringify(body))
}
body = params.toString()
contentType = "application/x-www-form-urlencoded;charset=UTF-8"
} else {
body = body ? JSON.stringify(body) : undefined
contentType = "application/json"
}
return fetch(url, {
method: method,
headers: {
Accept: "application/json",
"Content-Type": contentType,
"X-CSRF-Token": csrfToken,
},
body,
return fetch(input, {
...init,
headers: withCsrfToken(init?.headers),
}).then((r) => {
updateCsrfToken(r)
if (!r.ok) {
@@ -55,13 +20,13 @@ export function apiFetch(
})
}
function withCsrfToken(h?: HeadersInit): HeadersInit {
return { ...h, "X-CSRF-Token": csrfToken }
}
function updateCsrfToken(r: Response) {
const tok = r.headers.get("X-CSRF-Token")
if (tok) {
csrfToken = tok
}
}
export function setUnraidCsrfToken(token?: string) {
unraidCsrfToken = token
}

View File

@@ -5,7 +5,7 @@ import useNodeData from "src/hooks/node-data"
export default function App() {
// TODO(sonia): use isPosting value from useNodeData
// to fill loading states.
const { data, refreshData, updateNode } = useNodeData()
const { data, updateNode } = useNodeData()
return (
<div className="py-14">
@@ -15,11 +15,7 @@ export default function App() {
) : (
<>
<main className="container max-w-lg mx-auto mb-8 py-6 px-8 bg-white rounded-md shadow-2xl">
<Header
data={data}
refreshData={refreshData}
updateNode={updateNode}
/>
<Header data={data} updateNode={updateNode} />
<IP data={data} />
<State data={data} updateNode={updateNode} />
</main>

View File

@@ -1,6 +1,5 @@
import cx from "classnames"
import React from "react"
import { apiFetch } from "src/api"
import { NodeData, NodeUpdate } from "src/hooks/node-data"
// TODO(tailscale/corp#13775): legacy.tsx contains a set of components
@@ -10,11 +9,9 @@ import { NodeData, NodeUpdate } from "src/hooks/node-data"
export function Header({
data,
refreshData,
updateNode,
}: {
data: NodeData
refreshData: () => void
updateNode: (update: NodeUpdate) => void
}) {
return (
@@ -92,11 +89,7 @@ export function Header({
</button>{" "}
|{" "}
<button
onClick={() =>
apiFetch("/local/v0/logout", "POST")
.then(refreshData)
.catch((err) => alert("Logout failed: " + err.message))
}
onClick={() => updateNode({ ForceLogout: true })}
className="hover:text-gray-700"
>
Logout

View File

@@ -1,5 +1,5 @@
import { useCallback, useEffect, useState } from "react"
import { apiFetch, setUnraidCsrfToken } from "src/api"
import { apiFetch } from "src/api"
export type NodeData = {
Profile: UserProfile
@@ -35,17 +35,21 @@ export default function useNodeData() {
const [data, setData] = useState<NodeData>()
const [isPosting, setIsPosting] = useState<boolean>(false)
const refreshData = useCallback(
() =>
apiFetch("/data", "GET")
.then((r) => r.json())
.then((d: NodeData) => {
setData(d)
setUnraidCsrfToken(d.IsUnraid ? d.UnraidToken : undefined)
})
.catch((error) => console.error(error)),
[setData]
)
const fetchNodeData = useCallback(() => {
const urlParams = new URLSearchParams(window.location.search)
const nextParams = new URLSearchParams()
const token = urlParams.get("SynoToken")
if (token) {
nextParams.set("SynoToken", token)
}
const search = nextParams.toString()
const url = `api/data${search ? `?${search}` : ""}`
apiFetch(url)
.then((r) => r.json())
.then((d) => setData(d))
.catch((error) => console.error(error))
}, [setData])
const updateNode = useCallback(
(update: NodeUpdate) => {
@@ -73,7 +77,33 @@ export default function useNodeData() {
: data.AdvertiseExitNode,
}
apiFetch("/data", "POST", update, { up: "true" })
const urlParams = new URLSearchParams(window.location.search)
const nextParams = new URLSearchParams({ up: "true" })
const token = urlParams.get("SynoToken")
if (token) {
nextParams.set("SynoToken", token)
}
const search = nextParams.toString()
const url = `api/data${search ? `?${search}` : ""}`
var body, contentType: string
if (data.IsUnraid) {
const params = new URLSearchParams()
params.append("csrf_token", data.UnraidToken)
params.append("ts_data", JSON.stringify(update))
body = params.toString()
contentType = "application/x-www-form-urlencoded;charset=UTF-8"
} else {
body = JSON.stringify(update)
contentType = "application/json"
}
apiFetch(url, {
method: "POST",
headers: { Accept: "application/json", "Content-Type": contentType },
body: body,
})
.then((r) => r.json())
.then((r) => {
setIsPosting(false)
@@ -85,7 +115,7 @@ export default function useNodeData() {
if (url) {
window.open(url, "_blank")
}
refreshData()
fetchNodeData()
})
.catch((err) => alert("Failed operation: " + err.message))
},
@@ -95,11 +125,11 @@ export default function useNodeData() {
useEffect(
() => {
// Initial data load.
refreshData()
fetchNodeData()
// Refresh on browser tab focus.
const onVisibilityChange = () => {
document.visibilityState === "visible" && refreshData()
document.visibilityState === "visible" && fetchNodeData()
}
window.addEventListener("visibilitychange", onVisibilityChange)
return () => {
@@ -111,5 +141,5 @@ export default function useNodeData() {
[]
)
return { data, refreshData, updateNode, isPosting }
return { data, updateNode, isPosting }
}

View File

@@ -15,6 +15,8 @@ import (
"tailscale.com/util/groupmember"
)
const synologyPrefix = "/webman/3rdparty/Tailscale/index.cgi/"
// authorizeSynology authenticates the logged-in Synology user and verifies
// that they are authorized to use the web client. It returns true if the
// request was handled and no further processing is required.

View File

@@ -7,20 +7,22 @@ package web
import (
"context"
"crypto/rand"
"embed"
"encoding/json"
"fmt"
"io"
"io/fs"
"log"
"net/http"
"net/http/httputil"
"net/netip"
"os"
"path/filepath"
"slices"
"strings"
"sync"
"github.com/gorilla/csrf"
"tailscale.com/client/tailscale"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/envknob"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
@@ -28,20 +30,47 @@ import (
"tailscale.com/net/netutil"
"tailscale.com/tailcfg"
"tailscale.com/util/httpm"
"tailscale.com/util/must"
"tailscale.com/version/distro"
)
// This contains all files needed to build the frontend assets.
// Because we assign this to the blank identifier, it does not actually embed the files.
// However, this does cause `go mod vendor` to include the files when vendoring the package.
// External packages that use the web client can `go mod vendor`, run `yarn build` to
// build the assets, then those asset bundles will be embedded.
//
//go:embed yarn.lock index.html *.js *.json src/*
var _ embed.FS
//go:embed build/*
var embeddedFS embed.FS
// staticfiles serves static files from the build directory.
var staticfiles http.Handler
// Server is the backend server for a Tailscale web client.
type Server struct {
lc *tailscale.LocalClient
devMode bool
devMode bool
devProxy *httputil.ReverseProxy // only filled when devMode is on
cgiMode bool
pathPrefix string
cgiPath string
apiHandler http.Handler // csrf-protected api handler
assetsHandler http.Handler // serves frontend assets
apiHandler http.Handler // serves api endpoints; csrf-protected
selfMu sync.Mutex // protects self field
// self is a cached NodeView of the active self node,
// refreshed by watching the IPN notification bus
// (see Server.watchSelf).
//
// self's hostname and Tailscale IP are used to verify
// that incoming requests to the web client api are coming
// from the web client frontend and not some other source.
// Particularly to protect against DNS rebinding attacks.
// self should not be used to fill data for frontend views.
self tailcfg.NodeView
}
// ServerOpts contains options for constructing a new Server.
@@ -51,8 +80,8 @@ type ServerOpts struct {
// CGIMode indicates if the server is running as a CGI script.
CGIMode bool
// PathPrefix is the URL prefix added to requests by CGI or reverse proxy.
PathPrefix string
// If running in CGIMode, CGIPath is the URL path prefix to the CGI script.
CGIPath string
// LocalClient is the tailscale.LocalClient to use for this web server.
// If nil, a new one will be created.
@@ -66,12 +95,24 @@ func NewServer(ctx context.Context, opts ServerOpts) (s *Server, cleanup func())
opts.LocalClient = &tailscale.LocalClient{}
}
s = &Server{
devMode: opts.DevMode,
lc: opts.LocalClient,
cgiMode: opts.CGIMode,
pathPrefix: opts.PathPrefix,
devMode: opts.DevMode,
lc: opts.LocalClient,
cgiMode: opts.CGIMode,
cgiPath: opts.CGIPath,
}
s.assetsHandler, cleanup = assetsHandler(opts.DevMode)
cleanup = func() {}
if s.devMode {
cleanup = s.startDevServer()
s.addProxyToDevServer()
}
var wg sync.WaitGroup
defer wg.Wait()
wg.Add(1)
go func() {
defer wg.Done()
go s.watchSelf(ctx)
}()
// Create handler for "/api" requests with CSRF protection.
// We don't require secure cookies, since the web client is regularly used
@@ -85,13 +126,81 @@ func NewServer(ctx context.Context, opts ServerOpts) (s *Server, cleanup func())
return s, cleanup
}
func init() {
buildFiles := must.Get(fs.Sub(embeddedFS, "build"))
staticfiles = http.FileServer(http.FS(buildFiles))
}
// watchSelf watches the IPN notification bus to refresh
// the Server's self node cache.
func (s *Server) watchSelf(ctx context.Context) {
watchCtx, cancelWatch := context.WithCancel(ctx)
defer cancelWatch()
watcher, err := s.lc.WatchIPNBus(watchCtx, ipn.NotifyInitialNetMap|ipn.NotifyNoPrivateKeys)
if err != nil {
log.Fatalf("lost connection to tailscaled: %v", err)
}
defer watcher.Close()
for {
n, err := watcher.Next()
if err != nil {
log.Fatalf("lost connection to tailscaled: %v", err)
}
if state := n.State; state != nil && *state == ipn.NeedsLogin {
s.updateSelf(tailcfg.NodeView{})
continue
}
if n.NetMap == nil {
continue
}
s.updateSelf(n.NetMap.SelfNode)
}
}
// updateSelf grabs the lock and updates s.self.
// Then logs if anything changed.
func (s *Server) updateSelf(self tailcfg.NodeView) {
s.selfMu.Lock()
prev := s.self
s.self = self
s.selfMu.Unlock()
var old, new tailcfg.StableNodeID
if prev.Valid() {
old = prev.StableID()
}
if s.self.Valid() {
new = s.self.StableID()
}
if old != new {
if new.IsZero() {
log.Printf("self node logout")
} else {
log.Printf("self node login")
}
}
}
// ServeHTTP processes all requests for the Tailscale web client.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
handler := s.serve
// if path prefix is defined, strip it from requests.
if s.pathPrefix != "" {
handler = enforcePrefix(s.pathPrefix, handler)
// if running in cgi mode, strip the cgi path prefix
if s.cgiMode {
prefix := s.cgiPath
if prefix == "" {
switch distro.Get() {
case distro.Synology:
prefix = synologyPrefix
case distro.QNAP:
prefix = qnapPrefix
}
}
if prefix != "" {
handler = enforcePrefix(prefix, handler)
}
}
handler(w, r)
@@ -124,11 +233,14 @@ func (s *Server) serve(w http.ResponseWriter, r *http.Request) {
// Pass API requests through to the API handler.
s.apiHandler.ServeHTTP(w, r)
return
case s.devMode:
// When in dev mode, proxy non-api requests to the Vite dev server.
s.devProxy.ServeHTTP(w, r)
return
default:
if !s.devMode {
s.lc.IncrementCounter(context.Background(), "web_client_page_load", 1)
}
s.assetsHandler.ServeHTTP(w, r)
// Otherwise, serve static files from the embedded filesystem.
s.lc.IncrementCounter(context.Background(), "web_client_page_load", 1)
staticfiles.ServeHTTP(w, r)
return
}
}
@@ -139,8 +251,8 @@ func (s *Server) serve(w http.ResponseWriter, r *http.Request) {
func (s *Server) serveAPI(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-CSRF-Token", csrf.Token(r))
path := strings.TrimPrefix(r.URL.Path, "/api")
switch {
case path == "/data":
switch path {
case "/data":
switch r.Method {
case httpm.GET:
s.serveGetNodeDataJSON(w, r)
@@ -150,9 +262,6 @@ func (s *Server) serveAPI(w http.ResponseWriter, r *http.Request) {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
}
return
case strings.HasPrefix(path, "/local/"):
s.proxyRequestToLocalAPI(w, r)
return
}
http.Error(w, "invalid endpoint", http.StatusNotFound)
}
@@ -293,6 +402,7 @@ func (s *Server) servePostNodeUpdate(w http.ResponseWriter, r *http.Request) {
} else {
io.WriteString(w, "{}")
}
return
}
func (s *Server) tailscaleUp(ctx context.Context, st *ipnstate.Status, postData nodeUpdate) (authURL string, retErr error) {
@@ -354,70 +464,21 @@ func (s *Server) tailscaleUp(ctx context.Context, st *ipnstate.Status, postData
}
}
// proxyRequestToLocalAPI proxies the web API request to the localapi.
//
// The web API request path is expected to exactly match a localapi path,
// with prefix /api/local/ rather than /localapi/.
//
// If the localapi path is not included in localapiAllowlist,
// the request is rejected.
func (s *Server) proxyRequestToLocalAPI(w http.ResponseWriter, r *http.Request) {
path := strings.TrimPrefix(r.URL.Path, "/api/local")
if r.URL.Path == path { // missing prefix
http.Error(w, "invalid request", http.StatusBadRequest)
return
}
if !slices.Contains(localapiAllowlist, path) {
http.Error(w, fmt.Sprintf("%s not allowed from localapi proxy", path), http.StatusForbidden)
return
}
localAPIURL := "http://" + apitype.LocalAPIHost + "/localapi" + path
req, err := http.NewRequestWithContext(r.Context(), r.Method, localAPIURL, r.Body)
if err != nil {
http.Error(w, "failed to construct request", http.StatusInternalServerError)
return
}
// Make request to tailscaled localapi.
resp, err := s.lc.DoLocalRequest(req)
if err != nil {
http.Error(w, err.Error(), resp.StatusCode)
return
}
defer resp.Body.Close()
// Send response back to web frontend.
w.Header().Set("Content-Type", resp.Header.Get("Content-Type"))
w.WriteHeader(resp.StatusCode)
if _, err := io.Copy(w, resp.Body); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
// localapiAllowlist is an allowlist of localapi endpoints the
// web client is allowed to proxy to the client's localapi.
//
// Rather than exposing all localapi endpoints over the proxy,
// this limits to just the ones actually used from the web
// client frontend.
//
// TODO(sonia,will): Shouldn't expand this beyond the existing
// localapi endpoints until the larger web client auth story
// is worked out (tailscale/corp#14335).
var localapiAllowlist = []string{
"/v0/logout",
}
// csrfKey returns a key that can be used for CSRF protection.
// If an error occurs during key creation, the error is logged and the active process terminated.
// If the server is running in CGI mode, the key is cached to disk and reused between requests.
// If an error occurs during key storage, the error is logged and the active process terminated.
func (s *Server) csrfKey() []byte {
csrfFile := filepath.Join(os.TempDir(), "tailscale-web-csrf.key")
var csrfFile string
// if running in CGI mode, try to read from disk, but ignore errors
if s.cgiMode {
confdir, err := os.UserConfigDir()
if err != nil {
confdir = os.TempDir()
}
csrfFile = filepath.Join(confdir, "tailscale", "web-csrf.key")
key, _ := os.ReadFile(csrfFile)
if len(key) == 32 {
return key
@@ -427,11 +488,14 @@ func (s *Server) csrfKey() []byte {
// create a new key
key := make([]byte, 32)
if _, err := rand.Read(key); err != nil {
log.Fatalf("error generating CSRF key: %v", err)
log.Fatal("error generating CSRF key: %w", err)
}
// if running in CGI mode, try to write the newly created key to disk, and exit if it fails.
if s.cgiMode {
if err := os.Mkdir(filepath.Dir(csrfFile), 0700); err != nil && !os.IsExist(err) {
log.Fatalf("unable to store CSRF key: %v", err)
}
if err := os.WriteFile(csrfFile, key, 0600); err != nil {
log.Fatalf("unable to store CSRF key: %v", err)
}
@@ -445,19 +509,6 @@ func (s *Server) csrfKey() []byte {
// Unlike http.StripPrefix, it does not return a 404 if the prefix is not present.
// Instead, it returns a redirect to the prefix path.
func enforcePrefix(prefix string, h http.HandlerFunc) http.HandlerFunc {
if prefix == "" {
return h
}
// ensure that prefix always has both a leading and trailing slash so
// that relative links for JS and CSS assets work correctly.
if !strings.HasPrefix(prefix, "/") {
prefix = "/" + prefix
}
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
return func(w http.ResponseWriter, r *http.Request) {
if !strings.HasPrefix(r.URL.Path, prefix) {
http.Redirect(w, r, prefix, http.StatusFound)
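
A minimal sketch of how a CGI wrapper might construct the server with the new ServerOpts shape, where CGIPath replaces the old PathPrefix. The import path tailscale.com/client/web is inferred from the client/web paths elsewhere in this diff, and the prefix value is hypothetical; when CGIPath is left empty, ServeHTTP falls back to the Synology/QNAP prefixes shown above.

```go
package example

import (
	"context"
	"log"
	"net/http/cgi"

	"tailscale.com/client/web" // import path inferred from client/web in this diff
)

// serveCGI is a hypothetical CGI entry point for the web client.
func serveCGI(ctx context.Context) {
	s, cleanup := web.NewServer(ctx, web.ServerOpts{
		CGIMode: true,
		// CGIPath is the URL path prefix to the CGI script; leaving it
		// empty lets ServeHTTP pick the Synology/QNAP default prefix.
		CGIPath: "/cgi-bin/tailscale/index.cgi/", // hypothetical prefix
	})
	defer cleanup()
	if err := cgi.Serve(s); err != nil { // *web.Server implements http.Handler
		log.Fatal(err)
	}
}
```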

View File

@@ -4,16 +4,8 @@
package web
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"tailscale.com/client/tailscale"
"tailscale.com/net/memnet"
)
func TestQnapAuthnURL(t *testing.T) {
@@ -70,62 +62,3 @@ func TestQnapAuthnURL(t *testing.T) {
})
}
}
// TestServeAPI tests the web client api's handling of
// 1. invalid endpoint errors
// 2. localapi proxy allowlist
func TestServeAPI(t *testing.T) {
lal := memnet.Listen("local-tailscaled.sock:80")
defer lal.Close()
// Serve dummy localapi. Just returns "success".
localapi := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "success")
})}
defer localapi.Close()
go localapi.Serve(lal)
s := &Server{lc: &tailscale.LocalClient{Dial: lal.Dial}}
tests := []struct {
name string
reqPath string
wantResp string
wantStatus int
}{{
name: "invalid_endpoint",
reqPath: "/not-an-endpoint",
wantResp: "invalid endpoint",
wantStatus: http.StatusNotFound,
}, {
name: "not_in_localapi_allowlist",
reqPath: "/local/v0/not-allowlisted",
wantResp: "/v0/not-allowlisted not allowed from localapi proxy",
wantStatus: http.StatusForbidden,
}, {
name: "in_localapi_allowlist",
reqPath: "/local/v0/logout",
wantResp: "success", // Successfully allowed to hit localapi.
wantStatus: http.StatusOK,
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := httptest.NewRequest("POST", "/api"+tt.reqPath, nil)
w := httptest.NewRecorder()
s.serveAPI(w, r)
res := w.Result()
defer res.Body.Close()
if gotStatus := res.StatusCode; tt.wantStatus != gotStatus {
t.Errorf("wrong status; want=%q, got=%q", tt.wantStatus, gotStatus)
}
body, err := io.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
gotResp := strings.TrimSuffix(string(body), "\n") // trim trailing newline
if tt.wantResp != gotResp {
t.Errorf("wrong response; want=%q, got=%q", tt.wantResp, gotResp)
}
})
}
}

View File

@@ -7,16 +7,13 @@
package clientupdate
import (
"archive/tar"
"bufio"
"bytes"
"compress/gzip"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"maps"
"net/http"
"os"
"os/exec"
@@ -60,8 +57,14 @@ func versionToTrack(v string) (string, error) {
return "unstable", nil
}
// Arguments contains arguments needed to run an update.
type Arguments struct {
type updater struct {
UpdateArgs
track string
update func() error
}
// UpdateArgs contains arguments needed to run an update.
type UpdateArgs struct {
// Version can be a specific version number or one of the predefined track
// constants:
//
@@ -73,7 +76,7 @@ type Arguments struct {
// Leaving this empty is the same as using CurrentTrack.
Version string
// AppStore forces a local app store check, even if the current binary was
// not installed via an app store. TODO(cpalmer): Remove this.
// not installed via an app store.
AppStore bool
// Logf is a logger for update progress messages.
Logf logger.Logf
@@ -86,31 +89,30 @@ type Arguments struct {
PkgsAddr string
}
func (args Arguments) validate() error {
func (args UpdateArgs) validate() error {
if args.Confirm == nil {
return errors.New("missing Confirm callback in Arguments")
return errors.New("missing Confirm callback in UpdateArgs")
}
if args.Logf == nil {
return errors.New("missing Logf callback in Arguments")
return errors.New("missing Logf callback in UpdateArgs")
}
return nil
}
type Updater struct {
Arguments
track string
// Update is a platform-specific method that updates the installation. May be
// nil (not all platforms support updates from within Tailscale).
Update func() error
}
func NewUpdater(args Arguments) (*Updater, error) {
up := Updater{
Arguments: args,
// Update runs a single update attempt using the platform-specific mechanism.
//
// On Windows, this copies the calling binary and re-executes it to apply the
// update. The calling binary should handle an "update" subcommand and call
// this function again for the re-executed binary to proceed.
func Update(args UpdateArgs) error {
if err := args.validate(); err != nil {
return err
}
up.Update = up.getUpdateFunction()
if up.Update == nil {
return nil, errors.ErrUnsupported
if args.PkgsAddr == "" {
args.PkgsAddr = "https://pkgs.tailscale.com"
}
up := &updater{
UpdateArgs: args,
}
switch up.Version {
case StableTrack, UnstableTrack:
@@ -125,82 +127,56 @@ func NewUpdater(args Arguments) (*Updater, error) {
var err error
up.track, err = versionToTrack(args.Version)
if err != nil {
return nil, err
return err
}
}
if up.Arguments.PkgsAddr == "" {
up.Arguments.PkgsAddr = "https://pkgs.tailscale.com"
}
return &up, nil
}
type updateFunction func() error
func (up *Updater) getUpdateFunction() updateFunction {
switch runtime.GOOS {
case "windows":
return up.updateWindows
up.update = up.updateWindows
case "linux":
switch distro.Get() {
case distro.Synology:
return up.updateSynology
up.update = up.updateSynology
case distro.Debian: // includes Ubuntu
return up.updateDebLike
up.update = up.updateDebLike
case distro.Arch:
return up.updateArchLike
up.update = up.updateArchLike
case distro.Alpine:
return up.updateAlpineLike
up.update = up.updateAlpineLike
}
switch {
case haveExecutable("pacman"):
return up.updateArchLike
up.update = up.updateArchLike
case haveExecutable("apt-get"): // TODO(awly): add support for "apt"
// The distro.Debian switch case above should catch most apt-based
// systems, but add this fallback just in case.
return up.updateDebLike
up.update = up.updateDebLike
case haveExecutable("dnf"):
return up.updateFedoraLike("dnf")
up.update = up.updateFedoraLike("dnf")
case haveExecutable("yum"):
return up.updateFedoraLike("yum")
up.update = up.updateFedoraLike("yum")
case haveExecutable("apk"):
return up.updateAlpineLike
}
// If nothing matched, fall back to tarball updates.
if up.Update == nil {
return up.updateLinuxBinary
up.update = up.updateAlpineLike
}
case "darwin":
switch {
case !up.Arguments.AppStore && !version.IsSandboxedMacOS():
return nil
case !up.Arguments.AppStore && strings.HasSuffix(os.Getenv("HOME"), "/io.tailscale.ipn.macsys/Data"):
return up.updateMacSys
case !args.AppStore && !version.IsSandboxedMacOS():
return errors.ErrUnsupported
case !args.AppStore && strings.HasSuffix(os.Getenv("HOME"), "/io.tailscale.ipn.macsys/Data"):
up.update = up.updateMacSys
default:
return up.updateMacAppStore
up.update = up.updateMacAppStore
}
case "freebsd":
return up.updateFreeBSD
up.update = up.updateFreeBSD
}
return nil
if up.update == nil {
return errors.ErrUnsupported
}
return up.update()
}
// Update runs a single update attempt using the platform-specific mechanism.
//
// On Windows, this copies the calling binary and re-executes it to apply the
// update. The calling binary should handle an "update" subcommand and call
// this function again for the re-executed binary to proceed.
func Update(args Arguments) error {
if err := args.validate(); err != nil {
return err
}
up, err := NewUpdater(args)
if err != nil {
return err
}
return up.Update()
}
func (up *Updater) confirm(ver string) bool {
func (up *updater) confirm(ver string) bool {
if version.Short() == ver {
up.Logf("already running %v; no update needed", ver)
return false
@@ -213,14 +189,13 @@ func (up *Updater) confirm(ver string) bool {
const synoinfoConfPath = "/etc/synoinfo.conf"
func (up *Updater) updateSynology() error {
func (up *updater) updateSynology() error {
if up.Version != "" {
return errors.New("installing a specific version on Synology is not supported")
}
// Get the latest version and list of SPKs from pkgs.tailscale.com.
dsmVersion := distro.DSMVersion()
osName := fmt.Sprintf("dsm%d", dsmVersion)
osName := fmt.Sprintf("dsm%d", distro.DSMVersion())
arch, err := synoArch(runtime.GOARCH, synoinfoConfPath)
if err != nil {
return err
@@ -261,20 +236,8 @@ func (up *Updater) updateSynology() error {
// just spits out a JSON result when done.
out, err := cmd.CombinedOutput()
if err != nil {
if dsmVersion == 6 && bytes.Contains(out, []byte("error = [290]")) {
return fmt.Errorf("synopkg install failed: %w\noutput:\n%s\nplease make sure that packages from 'Any publisher' are allowed in the Package Center (Package Center -> Settings -> Trust Level -> Any publisher)", err, out)
}
return fmt.Errorf("synopkg install failed: %w\noutput:\n%s", err, out)
}
if dsmVersion == 6 {
// DSM6 does not automatically restart the package on install. Do it
// manually.
cmd := exec.Command("nohup", "synopkg", "start", "Tailscale")
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("synopkg start failed: %w\noutput:\n%s", err, out)
}
}
return nil
}
@@ -336,15 +299,7 @@ func parseSynoinfo(path string) (string, error) {
return "", fmt.Errorf(`missing "unique=" field in %q`, path)
}
func (up *Updater) updateDebLike() error {
if err := requireRoot(); err != nil {
return err
}
if err := exec.Command("dpkg", "--status", "tailscale").Run(); err != nil && isExitError(err) {
// Tailscale was not installed via apt, update via tarball download
// instead.
return up.updateLinuxBinary()
}
func (up *updater) updateDebLike() error {
ver, err := requestedTailscaleVersion(up.Version, up.track)
if err != nil {
return err
@@ -353,6 +308,10 @@ func (up *Updater) updateDebLike() error {
return nil
}
if err := requireRoot(); err != nil {
return err
}
if updated, err := updateDebianAptSourcesList(up.track); err != nil {
return err
} else if updated {
@@ -442,12 +401,7 @@ func updateDebianAptSourcesListBytes(was []byte, dstTrack string) (newContent []
return buf.Bytes(), nil
}
func (up *Updater) updateArchLike() error {
if err := exec.Command("pacman", "--query", "tailscale").Run(); err != nil && isExitError(err) {
// Tailscale was not installed via pacman, update via tarball download
// instead.
return up.updateLinuxBinary()
}
func (up *updater) updateArchLike() error {
// Arch maintainer asked us not to implement "tailscale update" or
// auto-updates on Arch-based distros:
// https://github.com/tailscale/tailscale/issues/6995#issuecomment-1687080106
@@ -460,16 +414,11 @@ const yumRepoConfigFile = "/etc/yum.repos.d/tailscale.repo"
// updateFedoraLike updates tailscale on any distros in the Fedora family,
// specifically anything that uses "dnf" or "yum" package managers. The actual
// package manager is passed via packageManager.
func (up *Updater) updateFedoraLike(packageManager string) func() error {
func (up *updater) updateFedoraLike(packageManager string) func() error {
return func() (err error) {
if err := requireRoot(); err != nil {
return err
}
if err := exec.Command(packageManager, "info", "--installed", "tailscale").Run(); err != nil && isExitError(err) {
// Tailscale was not installed via yum/dnf, update via tarball
// download instead.
return up.updateLinuxBinary()
}
defer func() {
if err != nil {
err = fmt.Errorf(`%w; you can try updating using "%s upgrade tailscale"`, err, packageManager)
@@ -541,18 +490,13 @@ func updateYUMRepoTrack(repoFile, dstTrack string) (rewrote bool, err error) {
return true, os.WriteFile(repoFile, newContent.Bytes(), 0644)
}
func (up *Updater) updateAlpineLike() (err error) {
func (up *updater) updateAlpineLike() (err error) {
if up.Version != "" {
return errors.New("installing a specific version on Alpine-based distros is not supported")
}
if err := requireRoot(); err != nil {
return err
}
if err := exec.Command("apk", "info", "--installed", "tailscale").Run(); err != nil && isExitError(err) {
// Tailscale was not installed via apk, update via tarball download
// instead.
return up.updateLinuxBinary()
}
defer func() {
if err != nil {
@@ -603,11 +547,11 @@ func parseAlpinePackageVersion(out []byte) (string, error) {
return "", errors.New("tailscale version not found in output")
}
func (up *Updater) updateMacSys() error {
func (up *updater) updateMacSys() error {
return errors.New("NOTREACHED: On MacSys builds, `tailscale update` is handled in Swift to launch the GUI updater")
}
func (up *Updater) updateMacAppStore() error {
func (up *updater) updateMacAppStore() error {
out, err := exec.Command("defaults", "read", "/Library/Preferences/com.apple.commerce.plist", "AutoUpdate").CombinedOutput()
if err != nil {
return fmt.Errorf("can't check App Store auto-update setting: %w, output: %q", err, string(out))
@@ -668,7 +612,7 @@ var (
markTempFileFunc func(string) error // or nil on non-Windows
)
func (up *Updater) updateWindows() error {
func (up *updater) updateWindows() error {
if msi := os.Getenv(winMSIEnv); msi != "" {
up.Logf("installing %v ...", msi)
if err := up.installMSI(msi); err != nil {
@@ -738,7 +682,7 @@ func (up *Updater) updateWindows() error {
panic("unreachable")
}
func (up *Updater) installMSI(msi string) error {
func (up *updater) installMSI(msi string) error {
var err error
for tries := 0; tries < 2; tries++ {
cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/promptrestart", "/qn")
@@ -805,7 +749,7 @@ func makeSelfCopy() (tmpPathExe string, err error) {
return f2.Name(), f2.Close()
}
func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) {
func (up *updater) downloadURLToFile(pathSrc, fileDst string) (ret error) {
c, err := distsign.NewClient(up.Logf, up.PkgsAddr)
if err != nil {
return err
@@ -813,18 +757,13 @@ func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) {
return c.Download(context.Background(), pathSrc, fileDst)
}
func (up *Updater) updateFreeBSD() (err error) {
func (up *updater) updateFreeBSD() (err error) {
if up.Version != "" {
return errors.New("installing a specific version on FreeBSD is not supported")
}
if err := requireRoot(); err != nil {
return err
}
if err := exec.Command("pkg", "query", "%n", "tailscale").Run(); err != nil && isExitError(err) {
// Tailscale was not installed via pkg and we don't pre-compile
// binaries for it.
return errors.New("Tailscale was not installed via pkg, binary updates on FreeBSD are not supported; please reinstall Tailscale using pkg or update manually")
}
defer func() {
if err != nil {
@@ -854,165 +793,6 @@ func (up *Updater) updateFreeBSD() (err error) {
return nil
}
func (up *Updater) updateLinuxBinary() error {
ver, err := requestedTailscaleVersion(up.Version, up.track)
if err != nil {
return err
}
if !up.confirm(ver) {
return nil
}
// Root is needed to overwrite binaries and restart systemd unit.
if err := requireRoot(); err != nil {
return err
}
dlPath, err := up.downloadLinuxTarball(ver)
if err != nil {
return err
}
up.Logf("Extracting %q", dlPath)
if err := up.unpackLinuxTarball(dlPath); err != nil {
return err
}
if err := os.Remove(dlPath); err != nil {
up.Logf("failed to clean up %q: %v", dlPath, err)
}
if err := restartSystemdUnit(context.Background()); err != nil {
if errors.Is(err, errors.ErrUnsupported) {
up.Logf("Tailscale binaries updated successfully.\nPlease restart tailscaled to finish the update.")
} else {
up.Logf("Tailscale binaries updated successfully, but failed to restart tailscaled: %s.\nPlease restart tailscaled to finish the update.", err)
}
} else {
up.Logf("Success")
}
return nil
}
func (up *Updater) downloadLinuxTarball(ver string) (string, error) {
dlDir, err := os.UserCacheDir()
if err != nil {
return "", err
}
dlDir = filepath.Join(dlDir, "tailscale-update")
if err := os.MkdirAll(dlDir, 0700); err != nil {
return "", err
}
pkgsPath := fmt.Sprintf("%s/tailscale_%s_%s.tgz", up.track, ver, runtime.GOARCH)
dlPath := filepath.Join(dlDir, path.Base(pkgsPath))
if err := up.downloadURLToFile(pkgsPath, dlPath); err != nil {
return "", err
}
return dlPath, nil
}
func (up *Updater) unpackLinuxTarball(path string) error {
tailscale, tailscaled, err := binaryPaths()
if err != nil {
return err
}
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
gr, err := gzip.NewReader(f)
if err != nil {
return err
}
defer gr.Close()
tr := tar.NewReader(gr)
files := make(map[string]int)
wantFiles := map[string]int{
"tailscale": 1,
"tailscaled": 1,
}
for {
th, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed extracting %q: %w", path, err)
}
// TODO(awly): try to also extract tailscaled.service. The tricky part
// is fixing up binary paths in that file if they differ from where
// local tailscale/tailscaled are installed. Also, this may not be a
// systemd distro.
switch filepath.Base(th.Name) {
case "tailscale":
files["tailscale"]++
if err := writeFile(tr, tailscale+".new", 0755); err != nil {
return fmt.Errorf("failed extracting the new tailscale binary from %q: %w", path, err)
}
case "tailscaled":
files["tailscaled"]++
if err := writeFile(tr, tailscaled+".new", 0755); err != nil {
return fmt.Errorf("failed extracting the new tailscaled binary from %q: %w", path, err)
}
}
}
if !maps.Equal(files, wantFiles) {
return fmt.Errorf("%q has missing or duplicate files: got %v, want %v", path, files, wantFiles)
}
// Only place the files in final locations after everything extracted correctly.
if err := os.Rename(tailscale+".new", tailscale); err != nil {
return err
}
up.Logf("Updated %s", tailscale)
if err := os.Rename(tailscaled+".new", tailscaled); err != nil {
return err
}
up.Logf("Updated %s", tailscaled)
return nil
}
func writeFile(r io.Reader, path string, perm os.FileMode) error {
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove existing file at %q: %w", path, err)
}
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
if err != nil {
return err
}
defer f.Close()
if _, err := io.Copy(f, r); err != nil {
return err
}
return f.Close()
}
// Var allows overriding this in tests.
var binaryPaths = func() (tailscale, tailscaled string, err error) {
// This can be either tailscale or tailscaled.
this, err := os.Executable()
if err != nil {
return "", "", err
}
otherName := "tailscaled"
if filepath.Base(this) == "tailscaled" {
otherName = "tailscale"
}
// Try to find the other binary in the same directory.
other := filepath.Join(filepath.Dir(this), otherName)
_, err = os.Stat(other)
if os.IsNotExist(err) {
// If it's not in the same directory, try to find it in $PATH.
other, err = exec.LookPath(otherName)
}
if err != nil {
return "", "", fmt.Errorf("cannot find %q in neither %q nor $PATH: %w", otherName, filepath.Dir(this), err)
}
if otherName == "tailscaled" {
return this, other, nil
} else {
return other, this, nil
}
}
func haveExecutable(name string) bool {
path, err := exec.LookPath(name)
return err == nil && path != ""
@@ -1087,8 +867,3 @@ func requireRoot() error {
return errors.New("must be root")
}
}
func isExitError(err error) bool {
var exitErr *exec.ExitError
return errors.As(err, &exitErr)
}
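
A minimal sketch of calling the refactored updater: the exported Updater/NewUpdater pair is replaced by a single clientupdate.Update(UpdateArgs) entry point. The import path tailscale.com/clientupdate and the Confirm callback signature are assumptions (the field's type is not shown in this diff); Logf and Confirm are the two fields that validate() requires.

```go
package example

import (
	"log"

	"tailscale.com/clientupdate" // import path assumed from the package name
)

// runUpdate is a hypothetical caller of the new single entry point.
func runUpdate() error {
	return clientupdate.Update(clientupdate.UpdateArgs{
		// An empty Version is documented above as equivalent to CurrentTrack.
		Version: "",
		Logf:    log.Printf,
		// Confirm's exact signature is not shown in this diff; it is assumed
		// to report whether updating to the given version should proceed.
		Confirm: func(ver string) bool { return true },
	})
}
```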

View File

@@ -4,14 +4,9 @@
package clientupdate
import (
"archive/tar"
"compress/gzip"
"fmt"
"io/fs"
"maps"
"os"
"path/filepath"
"strings"
"testing"
)
@@ -507,257 +502,3 @@ unique="synology_88f6281_213air"
})
}
}
func TestUnpackLinuxTarball(t *testing.T) {
oldBinaryPaths := binaryPaths
t.Cleanup(func() { binaryPaths = oldBinaryPaths })
tests := []struct {
desc string
tarball map[string]string
before map[string]string
after map[string]string
wantErr bool
}{
{
desc: "success",
before: map[string]string{
"tailscale": "v1",
"tailscaled": "v1",
},
tarball: map[string]string{
"/usr/bin/tailscale": "v2",
"/usr/bin/tailscaled": "v2",
},
after: map[string]string{
"tailscale": "v2",
"tailscaled": "v2",
},
},
{
desc: "don't touch unrelated files",
before: map[string]string{
"tailscale": "v1",
"tailscaled": "v1",
"foo": "bar",
},
tarball: map[string]string{
"/usr/bin/tailscale": "v2",
"/usr/bin/tailscaled": "v2",
},
after: map[string]string{
"tailscale": "v2",
"tailscaled": "v2",
"foo": "bar",
},
},
{
desc: "unmodified",
before: map[string]string{
"tailscale": "v1",
"tailscaled": "v1",
},
tarball: map[string]string{
"/usr/bin/tailscale": "v1",
"/usr/bin/tailscaled": "v1",
},
after: map[string]string{
"tailscale": "v1",
"tailscaled": "v1",
},
},
{
desc: "ignore extra tarball files",
before: map[string]string{
"tailscale": "v1",
"tailscaled": "v1",
},
tarball: map[string]string{
"/usr/bin/tailscale": "v2",
"/usr/bin/tailscaled": "v2",
"/systemd/tailscaled.service": "v2",
},
after: map[string]string{
"tailscale": "v2",
"tailscaled": "v2",
},
},
{
desc: "tarball missing tailscaled",
before: map[string]string{
"tailscale": "v1",
"tailscaled": "v1",
},
tarball: map[string]string{
"/usr/bin/tailscale": "v2",
},
after: map[string]string{
"tailscale": "v1",
"tailscale.new": "v2",
"tailscaled": "v1",
},
wantErr: true,
},
{
desc: "duplicate tailscale binary",
before: map[string]string{
"tailscale": "v1",
"tailscaled": "v1",
},
tarball: map[string]string{
"/usr/bin/tailscale": "v2",
"/usr/sbin/tailscale": "v2",
"/usr/bin/tailscaled": "v2",
},
after: map[string]string{
"tailscale": "v1",
"tailscale.new": "v2",
"tailscaled": "v1",
"tailscaled.new": "v2",
},
wantErr: true,
},
{
desc: "empty archive",
before: map[string]string{
"tailscale": "v1",
"tailscaled": "v1",
},
tarball: map[string]string{},
after: map[string]string{
"tailscale": "v1",
"tailscaled": "v1",
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
// Swap out binaryPaths function to point at dummy file paths.
tmp := t.TempDir()
tailscalePath := filepath.Join(tmp, "tailscale")
tailscaledPath := filepath.Join(tmp, "tailscaled")
binaryPaths = func() (string, string, error) {
return tailscalePath, tailscaledPath, nil
}
for name, content := range tt.before {
if err := os.WriteFile(filepath.Join(tmp, name), []byte(content), 0755); err != nil {
t.Fatal(err)
}
}
tarPath := filepath.Join(tmp, "tailscale.tgz")
genTarball(t, tarPath, tt.tarball)
up := &Updater{Arguments: Arguments{Logf: t.Logf}}
err := up.unpackLinuxTarball(tarPath)
if err != nil {
if !tt.wantErr {
t.Fatalf("unexpected error: %v", err)
}
} else if tt.wantErr {
t.Fatalf("unpack succeeded, expected an error")
}
gotAfter := make(map[string]string)
err = filepath.WalkDir(tmp, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.Type().IsDir() {
return nil
}
if path == tarPath {
return nil
}
content, err := os.ReadFile(path)
if err != nil {
return err
}
path = filepath.ToSlash(path)
base := filepath.ToSlash(tmp)
gotAfter[strings.TrimPrefix(path, base+"/")] = string(content)
return nil
})
if err != nil {
t.Fatal(err)
}
if !maps.Equal(gotAfter, tt.after) {
t.Errorf("files after unpack: %+v, want %+v", gotAfter, tt.after)
}
})
}
}
func genTarball(t *testing.T, path string, files map[string]string) {
f, err := os.Create(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
gw := gzip.NewWriter(f)
defer gw.Close()
tw := tar.NewWriter(gw)
defer tw.Close()
for file, content := range files {
if err := tw.WriteHeader(&tar.Header{
Name: file,
Size: int64(len(content)),
Mode: 0755,
}); err != nil {
t.Fatal(err)
}
if _, err := tw.Write([]byte(content)); err != nil {
t.Fatal(err)
}
}
}
func TestWriteFileOverwrite(t *testing.T) {
path := filepath.Join(t.TempDir(), "test")
for i := 0; i < 2; i++ {
content := fmt.Sprintf("content %d", i)
if err := writeFile(strings.NewReader(content), path, 0600); err != nil {
t.Fatal(err)
}
got, err := os.ReadFile(path)
if err != nil {
t.Fatal(err)
}
if string(got) != content {
t.Errorf("got content: %q, want: %q", got, content)
}
}
}
func TestWriteFileSymlink(t *testing.T) {
// Test for a malicious symlink at the destination path.
// f2 points to f1 and writeFile(f2) should not end up overwriting f1.
tmp := t.TempDir()
f1 := filepath.Join(tmp, "f1")
if err := os.WriteFile(f1, []byte("old"), 0600); err != nil {
t.Fatal(err)
}
f2 := filepath.Join(tmp, "f2")
if err := os.Symlink(f1, f2); err != nil {
t.Fatal(err)
}
if err := writeFile(strings.NewReader("new"), f2, 0600); err != nil {
t.Errorf("writeFile(%q) failed: %v", f2, err)
}
want := map[string]string{
f1: "old",
f2: "new",
}
for f, content := range want {
got, err := os.ReadFile(f)
if err != nil {
t.Fatal(err)
}
if string(got) != content {
t.Errorf("%q: got content %q, want %q", f, got, content)
}
}
}

View File

@@ -247,48 +247,6 @@ func (c *Client) Download(ctx context.Context, srcPath, dstPath string) error {
return nil
}
// ValidateLocalBinary fetches the latest signature associated with the binary
// at srcURLPath and uses it to validate the file located on disk via
// localFilePath. ValidateLocalBinary returns an error if anything goes wrong
// with the signature download or with signature validation.
func (c *Client) ValidateLocalBinary(srcURLPath, localFilePath string) error {
// Always fetch a fresh signing key.
sigPub, err := c.signingKeys()
if err != nil {
return err
}
srcURL := c.url(srcURLPath)
sigURL := srcURL + ".sig"
localFile, err := os.Open(localFilePath)
if err != nil {
return err
}
defer localFile.Close()
h := NewPackageHash()
_, err = io.Copy(h, localFile)
if err != nil {
return err
}
hash, hashLen := h.Sum(nil), h.Len()
c.logf("Downloading %q", sigURL)
sig, err := fetch(sigURL, signatureSizeLimit)
if err != nil {
return err
}
msg := binary.LittleEndian.AppendUint64(hash, uint64(hashLen))
if !VerifyAny(sigPub, msg, sig) {
return fmt.Errorf("signature %q for file %q does not validate with the current release signing key; either you are under attack, or attempting to download an old version of Tailscale which was signed with an older signing key", sigURL, localFilePath)
}
c.logf("Signature OK")
return nil
}
// signingKeys fetches current signing keys from the server and validates them
// against the roots. Should be called before validation of any downloaded file
// to get the fresh keys.

View File

@@ -119,121 +119,6 @@ func TestDownload(t *testing.T) {
}
}
func TestValidateLocalBinary(t *testing.T) {
srv := newTestServer(t)
c := srv.client(t)
tests := []struct {
desc string
before func(*testing.T)
src string
wantErr bool
}{
{
desc: "missing file",
before: func(*testing.T) {},
src: "hello",
wantErr: true,
},
{
desc: "success",
before: func(*testing.T) {
srv.addSigned("hello", []byte("world"))
},
src: "hello",
},
{
desc: "contents changed",
before: func(*testing.T) {
srv.addSigned("hello", []byte("new world"))
},
src: "hello",
wantErr: true,
},
{
desc: "no signature",
before: func(*testing.T) {
srv.add("hello", []byte("world"))
},
src: "hello",
wantErr: true,
},
{
desc: "bad signature",
before: func(*testing.T) {
srv.add("hello", []byte("world"))
srv.add("hello.sig", []byte("potato"))
},
src: "hello",
wantErr: true,
},
{
desc: "signed with untrusted key",
before: func(t *testing.T) {
srv.add("hello", []byte("world"))
srv.add("hello.sig", newSigningKeyPair(t).sign([]byte("world")))
},
src: "hello",
wantErr: true,
},
{
desc: "signed with root key",
before: func(t *testing.T) {
srv.add("hello", []byte("world"))
srv.add("hello.sig", ed25519.Sign(srv.roots[0].k, []byte("world")))
},
src: "hello",
wantErr: true,
},
{
desc: "bad signing key signature",
before: func(t *testing.T) {
srv.add("distsign.pub.sig", []byte("potato"))
srv.addSigned("hello", []byte("world"))
},
src: "hello",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
srv.reset()
// First just do a successful Download.
want := []byte("world")
srv.addSigned("hello", want)
dst := filepath.Join(t.TempDir(), tt.src)
err := c.Download(context.Background(), tt.src, dst)
if err != nil {
t.Fatalf("unexpected error from Download(%q): %v", tt.src, err)
}
got, err := os.ReadFile(dst)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(want, got) {
t.Errorf("Download(%q): got %q, want %q", tt.src, got, want)
}
// Now we reset srv with the test case and validate against the local dst.
srv.reset()
tt.before(t)
err = c.ValidateLocalBinary(tt.src, dst)
if err != nil {
if tt.wantErr {
return
}
t.Fatalf("unexpected error from ValidateLocalBinary(%q): %v", tt.src, err)
}
if tt.wantErr {
t.Fatalf("ValidateLocalBinary(%q) succeeded, expected an error", tt.src)
}
})
}
}
func TestRotateRoot(t *testing.T) {
srv := newTestServer(t)
c1 := srv.client(t)

View File

@@ -1,3 +0,0 @@
-----BEGIN ROOT PUBLIC KEY-----
Psrabv2YNiEDhPlnLVSMtB5EKACm7zxvKxfvYD4i7X8=
-----END ROOT PUBLIC KEY-----

View File

@@ -1,37 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package clientupdate
import (
"context"
"errors"
"fmt"
"github.com/coreos/go-systemd/v22/dbus"
)
func restartSystemdUnit(ctx context.Context) error {
c, err := dbus.NewWithContext(ctx)
if err != nil {
// Likely not a systemd-managed distro.
return errors.ErrUnsupported
}
defer c.Close()
if err := c.ReloadContext(ctx); err != nil {
return fmt.Errorf("failed to reload tailsacled.service: %w", err)
}
ch := make(chan string, 1)
if _, err := c.RestartUnitContext(ctx, "tailscaled.service", "replace", ch); err != nil {
return fmt.Errorf("failed to restart tailsacled.service: %w", err)
}
select {
case res := <-ch:
if res != "done" {
return fmt.Errorf("systemd service restart failed with result %q", res)
}
case <-ctx.Done():
return ctx.Err()
}
return nil
}
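For context, a minimal caller sketch for restartSystemdUnit (not part of this diff; it assumes Go 1.21+ for errors.ErrUnsupported and that it lives in the same clientupdate package):

package clientupdate

import (
	"context"
	"errors"
	"log"
	"time"
)

// restartDaemon is a hypothetical helper: bound the wait so a stuck systemd
// job cannot hang the caller, and treat non-systemd systems as a soft failure.
func restartDaemon() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := restartSystemdUnit(ctx); err != nil {
		if errors.Is(err, errors.ErrUnsupported) {
			log.Printf("not a systemd-managed system; skipping service restart")
			return
		}
		log.Printf("restarting tailscaled.service: %v", err)
	}
}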

View File

@@ -1,15 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !linux
package clientupdate
import (
"context"
"errors"
)
func restartSystemdUnit(ctx context.Context) error {
return errors.ErrUnsupported
}

View File

@@ -16,8 +16,6 @@
// - TS_ROUTES: subnet routes to advertise.
// - TS_DEST_IP: proxy all incoming Tailscale traffic to the given
// destination.
// - TS_TAILNET_TARGET_IP: proxy all incoming non-Tailscale traffic to the given
// destination.
// - TS_TAILSCALED_EXTRA_ARGS: extra arguments to 'tailscaled'.
// - TS_EXTRA_ARGS: extra arguments to 'tailscale login', these are not
// reset on restart.
@@ -90,9 +88,8 @@ func main() {
AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""),
Hostname: defaultEnv("TS_HOSTNAME", ""),
Routes: defaultEnv("TS_ROUTES", ""),
ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""),
ProxyTo: defaultEnv("TS_DEST_IP", ""),
TailnetTargetIP: defaultEnv("TS_TAILNET_TARGET_IP", ""),
ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""),
DaemonExtraArgs: defaultEnv("TS_TAILSCALED_EXTRA_ARGS", ""),
ExtraArgs: defaultEnv("TS_EXTRA_ARGS", ""),
InKubernetes: os.Getenv("KUBERNETES_SERVICE_HOST") != "",
@@ -110,17 +107,16 @@ func main() {
if cfg.ProxyTo != "" && cfg.UserspaceMode {
log.Fatal("TS_DEST_IP is not supported with TS_USERSPACE")
}
if cfg.TailnetTargetIP != "" && cfg.UserspaceMode {
log.Fatal("TS_TAILNET_TARGET_IP is not supported with TS_USERSPACE")
if cfg.ProxyTo != "" && cfg.ServeConfigPath != "" {
log.Fatal("TS_DEST_IP is not supported with TS_SERVE_CONFIG")
}
if !cfg.UserspaceMode {
if err := ensureTunFile(cfg.Root); err != nil {
log.Fatalf("Unable to create tuntap device file: %v", err)
}
if cfg.ProxyTo != "" || cfg.Routes != "" || cfg.TailnetTargetIP != "" {
if err := ensureIPForwarding(cfg.Root, cfg.ProxyTo, cfg.TailnetTargetIP, cfg.Routes); err != nil {
if cfg.ProxyTo != "" || cfg.Routes != "" {
if err := ensureIPForwarding(cfg.Root, cfg.ProxyTo, cfg.Routes); err != nil {
log.Printf("Failed to enable IP forwarding: %v", err)
log.Printf("To run tailscale as a proxy or router container, IP forwarding must be enabled.")
if cfg.InKubernetes {
@@ -274,7 +270,7 @@ authLoop:
}
var (
wantProxy = cfg.ProxyTo != "" || cfg.TailnetTargetIP != ""
wantProxy = cfg.ProxyTo != ""
wantDeviceInfo = cfg.InKubernetes && cfg.KubeSecret != "" && cfg.KubernetesCanPatch
startupTasksDone = false
currentIPs deephash.Sum // tailscale IPs assigned to device
@@ -301,13 +297,9 @@ authLoop:
log.Fatalf("tailscaled left running state (now in state %q), exiting", *n.State)
}
if n.NetMap != nil {
addrs := n.NetMap.SelfNode.Addresses().AsSlice()
newCurrentIPs := deephash.Hash(&addrs)
ipsHaveChanged := newCurrentIPs != currentIPs
if cfg.ProxyTo != "" && len(addrs) > 0 && ipsHaveChanged {
log.Printf("Installing proxy rules")
if err := installIngressForwardingRule(ctx, cfg.ProxyTo, addrs); err != nil {
log.Fatalf("installing ingress proxy rules: %v", err)
if cfg.ProxyTo != "" && len(n.NetMap.Addresses) > 0 && deephash.Update(&currentIPs, &n.NetMap.Addresses) {
if err := installIPTablesRule(ctx, cfg.ProxyTo, n.NetMap.Addresses); err != nil {
log.Fatalf("installing proxy rules: %v", err)
}
}
if cfg.ServeConfigPath != "" && len(n.NetMap.DNS.CertDomains) > 0 {
@@ -320,13 +312,6 @@ authLoop:
}
}
}
if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) > 0 {
if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs); err != nil {
log.Fatalf("installing egress proxy rules: %v", err)
}
}
currentIPs = newCurrentIPs
deviceInfo := []any{n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name()}
if cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" && deephash.Update(&currentDeviceInfo, &deviceInfo) {
if err := storeDeviceInfo(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
@@ -585,25 +570,14 @@ func ensureTunFile(root string) error {
}
// ensureIPForwarding enables IPv4/IPv6 forwarding for the container.
func ensureIPForwarding(root, clusterProxyTarget, tailnetTargetiP, routes string) error {
func ensureIPForwarding(root, proxyTo, routes string) error {
var (
v4Forwarding, v6Forwarding bool
)
if clusterProxyTarget != "" {
proxyIP, err := netip.ParseAddr(clusterProxyTarget)
if proxyTo != "" {
proxyIP, err := netip.ParseAddr(proxyTo)
if err != nil {
return fmt.Errorf("invalid cluster destination IP: %v", err)
}
if proxyIP.Is4() {
v4Forwarding = true
} else {
v6Forwarding = true
}
}
if tailnetTargetiP != "" {
proxyIP, err := netip.ParseAddr(tailnetTargetiP)
if err != nil {
return fmt.Errorf("invalid tailnet destination IP: %v", err)
return fmt.Errorf("invalid proxy destination IP: %v", err)
}
if proxyIP.Is4() {
v4Forwarding = true
@@ -653,53 +627,7 @@ func ensureIPForwarding(root, clusterProxyTarget, tailnetTargetiP, routes string
return nil
}
func installEgressForwardingRule(ctx context.Context, dstStr string, tsIPs []netip.Prefix) error {
dst, err := netip.ParseAddr(dstStr)
if err != nil {
return err
}
argv0 := "iptables"
if dst.Is6() {
argv0 = "ip6tables"
}
var local string
for _, pfx := range tsIPs {
if !pfx.IsSingleIP() {
continue
}
if pfx.Addr().Is4() != dst.Is4() {
continue
}
local = pfx.Addr().String()
break
}
if local == "" {
return fmt.Errorf("no tailscale IP matching family of %s found in %v", dstStr, tsIPs)
}
// Technically, if the control server ever changes the IPs assigned to this
// node, we'll slowly accumulate iptables rules. This shouldn't happen, so
// for now we'll live with it.
// Set up a rule that ensures that all packets
// except those received on the tailscale0 interface are forwarded to the
// destination address.
cmdDNAT := exec.CommandContext(ctx, argv0, "-t", "nat", "-I", "PREROUTING", "1", "!", "-i", "tailscale0", "-j", "DNAT", "--to-destination", dstStr)
cmdDNAT.Stdout = os.Stdout
cmdDNAT.Stderr = os.Stderr
if err := cmdDNAT.Run(); err != nil {
return fmt.Errorf("executing iptables failed: %w", err)
}
// Set up a rule that ensures that all packets sent to the destination
// address will have the proxy's IP set as source IP
cmdSNAT := exec.CommandContext(ctx, argv0, "-t", "nat", "-I", "POSTROUTING", "1", "--destination", dstStr, "-j", "SNAT", "--to-source", local)
cmdSNAT.Stdout = os.Stdout
cmdSNAT.Stderr = os.Stderr
if err := cmdSNAT.Run(); err != nil {
return fmt.Errorf("setting up SNAT via iptables failed: %w", err)
}
return nil
}
func installIngressForwardingRule(ctx context.Context, dstStr string, tsIPs []netip.Prefix) error {
func installIPTablesRule(ctx context.Context, dstStr string, tsIPs []netip.Prefix) error {
dst, err := netip.ParseAddr(dstStr)
if err != nil {
return err
@@ -736,17 +664,10 @@ func installIngressForwardingRule(ctx context.Context, dstStr string, tsIPs []ne
// settings is all the configuration for containerboot.
type settings struct {
AuthKey string
Hostname string
Routes string
// ProxyTo is the destination IP to which all incoming
// Tailscale traffic should be proxied. If empty, no proxying
// is done. This is typically a locally reachable IP.
ProxyTo string
// TailnetTargetIP is the destination IP to which all incoming
// non-Tailscale traffic should be proxied. If empty, no
// proxying is done. This is typically a Tailscale IP.
TailnetTargetIP string
AuthKey string
Hostname string
Routes string
ProxyTo string
ServeConfigPath string
DaemonExtraArgs string
ExtraArgs string

View File

@@ -134,14 +134,11 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
},
},
},
@@ -155,14 +152,11 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
},
},
},
@@ -176,14 +170,11 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
},
},
},
@@ -197,14 +188,11 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
},
},
},
@@ -218,7 +206,7 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=1.2.3.0/24,10.20.30.0/24",
},
},
{
@@ -227,9 +215,6 @@ func TestContainerBoot(t *testing.T) {
"proc/sys/net/ipv4/ip_forward": "0",
"proc/sys/net/ipv6/conf/all/forwarding": "0",
},
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false --advertise-routes=1.2.3.0/24,10.20.30.0/24",
},
},
},
},
@@ -244,7 +229,7 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=1.2.3.0/24,10.20.30.0/24",
},
},
{
@@ -253,9 +238,6 @@ func TestContainerBoot(t *testing.T) {
"proc/sys/net/ipv4/ip_forward": "1",
"proc/sys/net/ipv6/conf/all/forwarding": "0",
},
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false --advertise-routes=1.2.3.0/24,10.20.30.0/24",
},
},
},
},
@@ -270,7 +252,7 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=::/64,1::/64",
},
},
{
@@ -279,9 +261,6 @@ func TestContainerBoot(t *testing.T) {
"proc/sys/net/ipv4/ip_forward": "0",
"proc/sys/net/ipv6/conf/all/forwarding": "1",
},
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false --advertise-routes=::/64,1::/64",
},
},
},
},
@@ -296,7 +275,7 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=::/64,1.2.3.0/24",
},
},
{
@@ -305,14 +284,11 @@ func TestContainerBoot(t *testing.T) {
"proc/sys/net/ipv4/ip_forward": "1",
"proc/sys/net/ipv6/conf/all/forwarding": "1",
},
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false --advertise-routes=::/64,1.2.3.0/24",
},
},
},
},
{
Name: "ingres proxy",
Name: "proxy",
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_DEST_IP": "1.2.3.4",
@@ -322,42 +298,17 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
"/usr/bin/iptables -t nat -I PREROUTING 1 -d 100.64.0.1 -j DNAT --to-destination 1.2.3.4",
},
},
},
},
{
Name: "egress proxy",
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_TAILNET_TARGET_IP": "100.99.99.99",
"TS_USERSPACE": "false",
},
Phases: []phase{
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
"/usr/bin/iptables -t nat -I PREROUTING 1 ! -i tailscale0 -j DNAT --to-destination 100.99.99.99",
"/usr/bin/iptables -t nat -I POSTROUTING 1 --destination 100.99.99.99 -j SNAT --to-source 100.64.0.1",
},
},
},
},
{
Name: "authkey_once",
Env: map[string]string{
@@ -375,14 +326,11 @@ func TestContainerBoot(t *testing.T) {
State: ptr.To(ipn.NeedsLogin),
},
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
},
},
},
@@ -399,7 +347,7 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
WantKubeSecret: map[string]string{
"authkey": "tskey-key",
@@ -407,9 +355,6 @@ func TestContainerBoot(t *testing.T) {
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
WantKubeSecret: map[string]string{
"authkey": "tskey-key",
"device_fqdn": "test-node.test.ts.net",
@@ -434,15 +379,12 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
WantKubeSecret: map[string]string{},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
Notify: runningNotify,
WantKubeSecret: map[string]string{},
},
},
@@ -460,15 +402,12 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
WantKubeSecret: map[string]string{},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
Notify: runningNotify,
WantKubeSecret: map[string]string{},
},
},
@@ -498,7 +437,7 @@ func TestContainerBoot(t *testing.T) {
State: ptr.To(ipn.NeedsLogin),
},
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
WantKubeSecret: map[string]string{
"authkey": "tskey-key",
@@ -506,9 +445,6 @@ func TestContainerBoot(t *testing.T) {
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
WantKubeSecret: map[string]string{
"device_fqdn": "test-node.test.ts.net",
"device_id": "myID",
@@ -530,7 +466,7 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --authkey=tskey-key",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
},
WantKubeSecret: map[string]string{
"authkey": "tskey-key",
@@ -538,9 +474,6 @@ func TestContainerBoot(t *testing.T) {
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
WantKubeSecret: map[string]string{
"authkey": "tskey-key",
"device_fqdn": "test-node.test.ts.net",
@@ -578,14 +511,11 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --socks5-server=localhost:1080 --outbound-http-proxy-listen=localhost:8080",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
},
},
},
@@ -598,14 +528,11 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=true",
},
},
{
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=true",
},
},
},
},
@@ -619,14 +546,10 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --experiments=widgets",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login --widget=rotated",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --widget=rotated",
},
},
{
}, {
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
},
},
},
@@ -639,14 +562,10 @@ func TestContainerBoot(t *testing.T) {
{
WantCmds: []string{
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock login",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --hostname=my-server",
},
},
{
}, {
Notify: runningNotify,
WantCmds: []string{
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false --hostname=my-server",
},
},
},
},
@@ -687,7 +606,7 @@ func TestContainerBoot(t *testing.T) {
t.Fatalf("starting containerboot: %v", err)
}
defer func() {
cmd.Process.Signal(unix.SIGKILL)
cmd.Process.Signal(unix.SIGTERM)
cmd.Process.Wait()
}()
@@ -875,17 +794,10 @@ func (l *localAPI) Notify(n *ipn.Notify) {
}
func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/localapi/v0/serve-config":
if r.Method != "POST" {
panic(fmt.Sprintf("unsupported method %q", r.Method))
}
return
case "/localapi/v0/watch-ipn-bus":
if r.Method != "GET" {
panic(fmt.Sprintf("unsupported method %q", r.Method))
}
default:
if r.Method != "GET" {
panic(fmt.Sprintf("unsupported method %q", r.Method))
}
if r.URL.Path != "/localapi/v0/watch-ipn-bus" {
panic(fmt.Sprintf("unsupported path %q", r.URL.Path))
}

View File

@@ -136,7 +136,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/types/structs from tailscale.com/ipn+
tailscale.com/types/tkatype from tailscale.com/types/key+
tailscale.com/types/views from tailscale.com/ipn/ipnstate+
tailscale.com/util/clientmetric from tailscale.com/net/tshttpproxy+
W tailscale.com/util/clientmetric from tailscale.com/net/tshttpproxy
tailscale.com/util/cloudenv from tailscale.com/hostinfo+
W tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy
tailscale.com/util/cmpx from tailscale.com/cmd/derper+

View File

@@ -50,7 +50,7 @@ func addWebSocketSupport(s *derp.Server, base http.Handler) http.Handler {
return
}
counterWebSocketAccepts.Add(1)
wc := wsconn.NetConn(r.Context(), c, websocket.MessageBinary, r.RemoteAddr)
wc := wsconn.NetConn(r.Context(), c, websocket.MessageBinary)
brw := bufio.NewReadWriter(bufio.NewReader(wc), bufio.NewWriter(wc))
s.Accept(r.Context(), wc, brw, r.RemoteAddr)
})

View File

@@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Setup Go environment
uses: actions/setup-go@v3.2.0

View File

@@ -9,7 +9,6 @@ import (
"context"
"fmt"
"strings"
"sync"
"go.uber.org/zap"
"golang.org/x/exp/slices"
@@ -22,8 +21,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/ipn"
"tailscale.com/types/opt"
"tailscale.com/util/clientmetric"
"tailscale.com/util/set"
)
type IngressReconciler struct {
@@ -32,20 +29,8 @@ type IngressReconciler struct {
recorder record.EventRecorder
ssr *tailscaleSTSReconciler
logger *zap.SugaredLogger
mu sync.Mutex // protects following
// managedIngresses is a set of all ingress resources that we're currently
// managing. This is only used for metrics.
managedIngresses set.Slice[types.UID]
}
var (
// gaugeIngressResources tracks the number of ingress resources that we're
// currently managing.
gaugeIngressResources = clientmetric.NewGauge("k8s_ingress_resources")
)
func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
logger := a.logger.With("ingress-ns", req.Namespace, "ingress-name", req.Name)
logger.Debugf("starting reconcile")
@@ -72,10 +57,6 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
ix := slices.Index(ing.Finalizers, FinalizerName)
if ix < 0 {
logger.Debugf("no finalizer, nothing to do")
a.mu.Lock()
defer a.mu.Unlock()
a.managedIngresses.Remove(ing.UID)
gaugeIngressResources.Set(int64(a.managedIngresses.Len()))
return nil
}
@@ -96,10 +77,6 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
// cleanup removes the tailscale finalizer, which will make all future
// reconciles exit early.
logger.Infof("unexposed ingress from tailnet")
a.mu.Lock()
defer a.mu.Unlock()
a.managedIngresses.Remove(ing.UID)
gaugeIngressResources.Set(int64(a.managedIngresses.Len()))
return nil
}
@@ -120,14 +97,6 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
return fmt.Errorf("failed to add finalizer: %w", err)
}
}
a.mu.Lock()
a.managedIngresses.Add(ing.UID)
gaugeIngressResources.Set(int64(a.managedIngresses.Len()))
a.mu.Unlock()
if !a.ssr.IsHTTPSEnabledOnTailnet() {
a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work")
}
// magic443 is a fake hostname that we can use to tell containerboot to swap
// out with the real hostname once it's known.
@@ -221,7 +190,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
ChildResourceLabels: crl,
}
if _, err := a.ssr.Provision(ctx, logger, sts); err != nil {
if err := a.ssr.Provision(ctx, logger, sts); err != nil {
return fmt.Errorf("failed to provision: %w", err)
}

View File

@@ -73,7 +73,7 @@ func main() {
if shouldRunAuthProxy {
launchAuthProxy(zlog, restConfig, s)
}
startReconcilers(zlog, s, tsNamespace, restConfig, tsClient, image, priorityClassName, tags)
startReconcilers(zlog, tsNamespace, restConfig, tsClient, image, priorityClassName, tags)
}
// initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the
@@ -182,7 +182,7 @@ waitOnline:
// startReconcilers starts the controller-runtime manager and registers the
// ServiceReconciler.
func startReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string, restConfig *rest.Config, tsClient *tailscale.Client, image, priorityClassName, tags string) {
func startReconcilers(zlog *zap.SugaredLogger, tsNamespace string, restConfig *rest.Config, tsClient *tailscale.Client, image, priorityClassName, tags string) {
var (
isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false)
)
@@ -225,7 +225,6 @@ func startReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace stri
eventRecorder := mgr.GetEventRecorderFor("tailscale-operator")
ssr := &tailscaleSTSReconciler{
Client: mgr.GetClient(),
tsnetServer: s,
tsClient: tsClient,
defaultTags: strings.Split(tags, ","),
operatorNamespace: tsNamespace,

View File

@@ -7,7 +7,6 @@ package main
import (
"context"
"fmt"
"strings"
"sync"
"testing"
@@ -154,111 +153,6 @@ func TestLoadBalancerClass(t *testing.T) {
}
expectEqual(t, fc, want)
}
func TestTailnetTargetIPAnnotation(t *testing.T) {
fc := fake.NewFakeClient()
ft := &fakeTSClient{}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
tailnetTargetIP := "100.66.66.66"
sr := &ServiceReconciler{
Client: fc,
ssr: &tailscaleSTSReconciler{
Client: fc,
tsClient: ft,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
},
logger: zl.Sugar(),
}
// Create a service that we should manage, and check that the initial round
// of objects looks right.
mustCreate(t, fc, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
// The apiserver is supposed to set the UID, but the fake client
// doesn't. So, set it explicitly because other code later depends
// on it being set.
UID: types.UID("1234-UID"),
Annotations: map[string]string{
AnnotationTailnetTargetIP: tailnetTargetIP,
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Selector: map[string]string{
"foo": "bar",
},
},
})
expectReconciled(t, sr, "default", "test")
fullName, shortName := findGenName(t, fc, "default", "test")
expectEqual(t, fc, expectedSecret(fullName))
expectEqual(t, fc, expectedHeadlessService(shortName))
expectEqual(t, fc, expectedEgressSTS(shortName, fullName, tailnetTargetIP, "default-test", ""))
want := &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
Finalizers: []string{"tailscale.com/finalizer"},
UID: types.UID("1234-UID"),
Annotations: map[string]string{
AnnotationTailnetTargetIP: tailnetTargetIP,
},
},
Spec: corev1.ServiceSpec{
ExternalName: fmt.Sprintf("%s.operator-ns.svc", shortName),
Type: corev1.ServiceTypeExternalName,
Selector: nil,
},
}
expectEqual(t, fc, want)
expectEqual(t, fc, expectedSecret(fullName))
expectEqual(t, fc, expectedHeadlessService(shortName))
expectEqual(t, fc, expectedEgressSTS(shortName, fullName, tailnetTargetIP, "default-test", ""))
// Change the tailscale-target-ip annotation which should update the
// StatefulSet
tailnetTargetIP = "100.77.77.77"
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
s.ObjectMeta.Annotations = map[string]string{
AnnotationTailnetTargetIP: tailnetTargetIP,
}
})
// Remove the tailscale-target-ip annotation which should make the
// operator clean up
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
s.ObjectMeta.Annotations = map[string]string{}
})
expectReconciled(t, sr, "default", "test")
// // synchronous StatefulSet deletion triggers a requeue. But, the StatefulSet
// // didn't create any child resources since this is all faked, so the
// // deletion goes through immediately.
expectReconciled(t, sr, "default", "test")
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
// // The deletion triggers another reconcile, to finish the cleanup.
expectReconciled(t, sr, "default", "test")
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
expectMissing[corev1.Service](t, fc, "operator-ns", shortName)
expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
// At the moment we don't revert changes to the user-created Service -
// we don't have a reliable way to tell what it was before, and we don't
// really expect it to be re-used.
}
func TestAnnotations(t *testing.T) {
fc := fake.NewFakeClient()
@@ -887,8 +781,8 @@ func expectedSTS(stsName, secretName, hostname, priorityClassName string) *appsv
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"tailscale.com/operator-last-set-hostname": hostname,
"tailscale.com/operator-last-set-cluster-ip": "10.20.30.40",
"tailscale.com/operator-last-set-hostname": hostname,
"tailscale.com/operator-last-set-ip": "10.20.30.40",
},
DeletionGracePeriodSeconds: ptr.To[int64](10),
Labels: map[string]string{"app": "1234-UID"},
@@ -931,75 +825,6 @@ func expectedSTS(stsName, secretName, hostname, priorityClassName string) *appsv
},
}
}
func expectedEgressSTS(stsName, secretName, tailnetTargetIP, hostname, priorityClassName string) *appsv1.StatefulSet {
return &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: stsName,
Namespace: "operator-ns",
Labels: map[string]string{
"tailscale.com/managed": "true",
"tailscale.com/parent-resource": "test",
"tailscale.com/parent-resource-ns": "default",
"tailscale.com/parent-resource-type": "svc",
},
},
Spec: appsv1.StatefulSetSpec{
Replicas: ptr.To[int32](1),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "1234-UID"},
},
ServiceName: stsName,
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"tailscale.com/operator-last-set-hostname": hostname,
"tailscale.com/operator-last-set-ts-tailnet-target-ip": tailnetTargetIP,
},
DeletionGracePeriodSeconds: ptr.To[int64](10),
Labels: map[string]string{"app": "1234-UID"},
},
Spec: corev1.PodSpec{
ServiceAccountName: "proxies",
PriorityClassName: priorityClassName,
InitContainers: []corev1.Container{
{
Name: "sysctler",
Image: "busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", "sysctl -w net.ipv4.ip_forward=1 net.ipv6.conf.all.forwarding=1"},
SecurityContext: &corev1.SecurityContext{
Privileged: ptr.To(true),
},
},
},
Containers: []corev1.Container{
{
Name: "tailscale",
Image: "tailscale/tailscale",
Env: []corev1.EnvVar{
{Name: "TS_USERSPACE", Value: "false"},
{Name: "TS_AUTH_ONCE", Value: "true"},
{Name: "TS_KUBE_SECRET", Value: secretName},
{Name: "TS_HOSTNAME", Value: hostname},
{Name: "TS_TAILNET_TARGET_IP", Value: tailnetTargetIP},
},
SecurityContext: &corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{"NET_ADMIN"},
},
},
ImagePullPolicy: "Always",
},
},
},
},
},
}
}
func findGenName(t *testing.T, client client.Client, ns, name string) (full, noSuffix string) {
t.Helper()

View File

@@ -25,7 +25,6 @@ import (
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
"tailscale.com/types/logger"
"tailscale.com/util/clientmetric"
"tailscale.com/util/set"
)
@@ -43,8 +42,6 @@ func addWhoIsToRequest(r *http.Request, who *apitype.WhoIsResponse) *http.Reques
return r.WithContext(context.WithValue(r.Context(), whoIsKey{}, who))
}
var counterNumRequestsProxied = clientmetric.NewCounter("k8s_auth_proxy_requests_proxied")
// launchAuthProxy launches the auth proxy, which is a small HTTP server that
// authenticates requests using the Tailscale LocalAPI and then proxies them to
// the kube-apiserver.
@@ -87,7 +84,6 @@ func (h *authProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, "failed to authenticate caller", http.StatusInternalServerError)
return
}
counterNumRequestsProxied.Add(1)
h.rp.ServeHTTP(w, addWhoIsToRequest(r, who))
}

View File

@@ -24,7 +24,6 @@ import (
"tailscale.com/client/tailscale"
"tailscale.com/ipn"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
"tailscale.com/types/opt"
"tailscale.com/util/dnsname"
"tailscale.com/util/mak"
@@ -39,19 +38,17 @@ const (
FinalizerName = "tailscale.com/finalizer"
// Annotations settable by users on services.
AnnotationExpose = "tailscale.com/expose"
AnnotationTags = "tailscale.com/tags"
AnnotationHostname = "tailscale.com/hostname"
AnnotationTailnetTargetIP = "tailscale.com/ts-tailnet-target-ip"
AnnotationExpose = "tailscale.com/expose"
AnnotationTags = "tailscale.com/tags"
AnnotationHostname = "tailscale.com/hostname"
// Annotations settable by users on ingresses.
AnnotationFunnel = "tailscale.com/funnel"
// Annotations set by the operator on pods to trigger restarts when the
// hostname or IP changes.
podAnnotationLastSetClusterIP = "tailscale.com/operator-last-set-cluster-ip"
podAnnotationLastSetHostname = "tailscale.com/operator-last-set-hostname"
podAnnotationLastSetTailnetTargetIP = "tailscale.com/operator-last-set-ts-tailnet-target-ip"
podAnnotationLastSetIP = "tailscale.com/operator-last-set-ip"
podAnnotationLastSetHostname = "tailscale.com/operator-last-set-hostname"
)
type tailscaleSTSConfig struct {
@@ -60,11 +57,7 @@ type tailscaleSTSConfig struct {
ChildResourceLabels map[string]string
ServeConfig *ipn.ServeConfig
// Tailscale target in cluster we are setting up ingress for
ClusterTargetIP string
// Tailscale IP of a Tailscale service we are setting up egress for
TailnetTargetIP string
TargetIP string
Hostname string
Tags []string // if empty, use defaultTags
@@ -72,7 +65,6 @@ type tailscaleSTSConfig struct {
type tailscaleSTSReconciler struct {
client.Client
tsnetServer *tsnet.Server
tsClient tsClient
defaultTags []string
operatorNamespace string
@@ -80,30 +72,25 @@ type tailscaleSTSReconciler struct {
proxyPriorityClassName string
}
// IsHTTPSEnabledOnTailnet reports whether HTTPS is enabled on the tailnet.
func (a *tailscaleSTSReconciler) IsHTTPSEnabledOnTailnet() bool {
return len(a.tsnetServer.CertDomains()) > 0
}
// Provision ensures that the StatefulSet for the given service is running and
// up to date.
func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) {
func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) error {
// Do full reconcile.
hsvc, err := a.reconcileHeadlessService(ctx, logger, sts)
if err != nil {
return nil, fmt.Errorf("failed to reconcile headless service: %w", err)
return fmt.Errorf("failed to reconcile headless service: %w", err)
}
secretName, err := a.createOrGetSecret(ctx, logger, sts, hsvc)
if err != nil {
return nil, fmt.Errorf("failed to create or get API key secret: %w", err)
return fmt.Errorf("failed to create or get API key secret: %w", err)
}
_, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName)
if err != nil {
return nil, fmt.Errorf("failed to reconcile statefulset: %w", err)
return fmt.Errorf("failed to reconcile statefulset: %w", err)
}
return hsvc, nil
return nil
}
// Cleanup removes all resources that were created by Provision with
@@ -188,15 +175,15 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
Labels: stsC.ChildResourceLabels,
},
}
var orig *corev1.Secret // unmodified copy of secret
alreadyExists := false
if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil {
logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName())
orig = secret.DeepCopy()
alreadyExists = true
} else if !apierrors.IsNotFound(err) {
return "", err
}
if orig == nil {
if !alreadyExists {
// Secret doesn't exist yet, create one. Initially it contains
// only the Tailscale authkey, but once Tailscale starts it'll
// also store the daemon state.
@@ -231,8 +218,8 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
}
mak.Set(&secret.StringData, "serve-config", string(j))
}
if orig != nil {
if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil {
if alreadyExists {
if err := a.Update(ctx, secret); err != nil {
return "", err
}
} else {
@@ -318,17 +305,11 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
Name: "TS_HOSTNAME",
Value: sts.Hostname,
})
if sts.ClusterTargetIP != "" {
if sts.TargetIP != "" {
container.Env = append(container.Env, corev1.EnvVar{
Name: "TS_DEST_IP",
Value: sts.ClusterTargetIP,
Value: sts.TargetIP,
})
} else if sts.TailnetTargetIP != "" {
container.Env = append(container.Env, corev1.EnvVar{
Name: "TS_TAILNET_TARGET_IP",
Value: sts.TailnetTargetIP,
})
} else if sts.ServeConfig != nil {
container.Env = append(container.Env, corev1.EnvVar{
Name: "TS_SERVE_CONFIG",
@@ -369,13 +350,10 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
// container when the value changes. We do this by adding an annotation to
// the pod template that contains the last value we set.
ss.Spec.Template.Annotations = map[string]string{
podAnnotationLastSetHostname: sts.Hostname,
"tailscale.com/operator-last-set-hostname": sts.Hostname,
}
if sts.ClusterTargetIP != "" {
ss.Spec.Template.Annotations[podAnnotationLastSetClusterIP] = sts.ClusterTargetIP
}
if sts.TailnetTargetIP != "" {
ss.Spec.Template.Annotations[podAnnotationLastSetTailnetTargetIP] = sts.TailnetTargetIP
if sts.TargetIP != "" {
ss.Spec.Template.Annotations["tailscale.com/operator-last-set-ip"] = sts.TargetIP
}
ss.Spec.Template.Labels = map[string]string{
"app": sts.ParentResourceUID,

View File

@@ -10,17 +10,13 @@ import (
"fmt"
"net/netip"
"strings"
"sync"
"go.uber.org/zap"
"golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/util/clientmetric"
"tailscale.com/util/set"
)
type ServiceReconciler struct {
@@ -28,26 +24,8 @@ type ServiceReconciler struct {
ssr *tailscaleSTSReconciler
logger *zap.SugaredLogger
isDefaultLoadBalancer bool
mu sync.Mutex // protects following
// managedIngressProxies is a set of all ingress proxies that we're
// currently managing. This is only used for metrics.
managedIngressProxies set.Slice[types.UID]
// managedEgressProxies is a set of all egress proxies that we're currently
// managing. This is only used for metrics.
managedEgressProxies set.Slice[types.UID]
}
var (
// gaugeEgressProxies tracks the number of egress proxies that we're
// currently managing.
gaugeEgressProxies = clientmetric.NewGauge("k8s_egress_proxies")
// gaugeIngressProxies tracks the number of ingress proxies that we're
// currently managing.
gaugeIngressProxies = clientmetric.NewGauge("k8s_ingress_proxies")
)
func childResourceLabels(name, ns, typ string) map[string]string {
// You might wonder why we're not using owner references, since they seem to be
// built for exactly this. Unfortunately, Kubernetes does not support
@@ -77,8 +55,8 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
} else if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get svc: %w", err)
}
if !svc.DeletionTimestamp.IsZero() || !a.shouldExpose(svc) && !a.hasTailnetTargetAnnotation(svc) {
logger.Debugf("service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up")
if !svc.DeletionTimestamp.IsZero() || !a.shouldExpose(svc) {
logger.Debugf("service is being deleted or should not be exposed, cleaning up")
return reconcile.Result{}, a.maybeCleanup(ctx, logger, svc)
}
@@ -93,12 +71,6 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
ix := slices.Index(svc.Finalizers, FinalizerName)
if ix < 0 {
logger.Debugf("no finalizer, nothing to do")
a.mu.Lock()
defer a.mu.Unlock()
a.managedIngressProxies.Remove(svc.UID)
a.managedEgressProxies.Remove(svc.UID)
gaugeIngressProxies.Set(int64(a.managedIngressProxies.Len()))
gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len()))
return nil
}
@@ -119,13 +91,6 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
// cleanup removes the tailscale finalizer, which will make all future
// reconciles exit early.
logger.Infof("unexposed service from tailnet")
a.mu.Lock()
defer a.mu.Unlock()
a.managedIngressProxies.Remove(svc.UID)
a.managedEgressProxies.Remove(svc.UID)
gaugeIngressProxies.Set(int64(a.managedIngressProxies.Len()))
gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len()))
return nil
}
@@ -157,44 +122,24 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
tags = strings.Split(tstr, ",")
}
clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP)
if err != nil {
return fmt.Errorf("failed to parse cluster IP: %w", err)
}
sts := &tailscaleSTSConfig{
ParentResourceName: svc.Name,
ParentResourceUID: string(svc.UID),
TargetIP: svc.Spec.ClusterIP,
Hostname: hostname,
Tags: tags,
ChildResourceLabels: crl,
}
a.mu.Lock()
if a.shouldExpose(svc) {
sts.ClusterTargetIP = svc.Spec.ClusterIP
a.managedIngressProxies.Add(svc.UID)
gaugeIngressProxies.Set(int64(a.managedIngressProxies.Len()))
} else if a.hasTailnetTargetAnnotation(svc) {
sts.TailnetTargetIP = svc.Annotations[AnnotationTailnetTargetIP]
a.managedEgressProxies.Add(svc.UID)
gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len()))
}
a.mu.Unlock()
var hsvc *corev1.Service
if hsvc, err = a.ssr.Provision(ctx, logger, sts); err != nil {
if err := a.ssr.Provision(ctx, logger, sts); err != nil {
return fmt.Errorf("failed to provision: %w", err)
}
if a.hasTailnetTargetAnnotation(svc) {
headlessSvcName := hsvc.Name + "." + hsvc.Namespace + ".svc"
if svc.Spec.ExternalName != headlessSvcName || svc.Spec.Type != corev1.ServiceTypeExternalName {
svc.Spec.ExternalName = headlessSvcName
svc.Spec.Selector = nil
svc.Spec.Type = corev1.ServiceTypeExternalName
if err := a.Update(ctx, svc); err != nil {
return fmt.Errorf("failed to update service: %w", err)
}
}
return nil
}
if !a.hasLoadBalancerClass(svc) {
logger.Debugf("service is not a LoadBalancer, so not updating ingress")
return nil
@@ -218,10 +163,6 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
ingress := []corev1.LoadBalancerIngress{
{Hostname: tsHost},
}
clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP)
if err != nil {
return fmt.Errorf("failed to parse cluster IP: %w", err)
}
for _, ip := range tsIPs {
addr, err := netip.ParseAddr(ip)
if err != nil {
@@ -245,7 +186,7 @@ func (a *ServiceReconciler) shouldExpose(svc *corev1.Service) bool {
return false
}
return a.hasLoadBalancerClass(svc) || a.hasExposeAnnotation(svc)
return a.hasLoadBalancerClass(svc) || a.hasAnnotation(svc)
}
func (a *ServiceReconciler) hasLoadBalancerClass(svc *corev1.Service) bool {
@@ -255,14 +196,7 @@ func (a *ServiceReconciler) hasLoadBalancerClass(svc *corev1.Service) bool {
svc.Spec.LoadBalancerClass == nil && a.isDefaultLoadBalancer)
}
// hasExposeAnnotation reports whether Service has the tailscale.com/expose
// annotation set
func (a *ServiceReconciler) hasExposeAnnotation(svc *corev1.Service) bool {
return svc != nil && svc.Annotations[AnnotationExpose] == "true"
}
// hasTailnetTargetAnnotation reports whether Service has a
// tailscale.com/ts-tailnet-target-ip annotation set
func (a *ServiceReconciler) hasTailnetTargetAnnotation(svc *corev1.Service) bool {
return svc != nil && svc.Annotations[AnnotationTailnetTargetIP] != ""
func (a *ServiceReconciler) hasAnnotation(svc *corev1.Service) bool {
return svc != nil &&
svc.Annotations[AnnotationExpose] == "true"
}

View File

@@ -16,12 +16,10 @@ import (
"log"
"net"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/peterbourgon/ff/v3"
"golang.org/x/net/dns/dnsmessage"
"inet.af/tcpproxy"
"tailscale.com/client/tailscale"
@@ -34,6 +32,14 @@ import (
"tailscale.com/util/clientmetric"
)
var (
ports = flag.String("ports", "443", "comma-separated list of ports to proxy")
forwards = flag.String("forwards", "", "comma-separated list of ports to transparently forward, protocol/number/destination. For example, --forwards=tcp/22/github.com,tcp/5432/sql.example.com")
wgPort = flag.Int("wg-listen-port", 0, "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select")
promoteHTTPS = flag.Bool("promote-https", true, "promote HTTP to HTTPS")
debugPort = flag.Int("debug-port", 8080, "Listening port for debug/metrics endpoint")
)
var tsMBox = dnsmessage.MustNewName("support.tailscale.com.")
// portForward is the state for a single port forwarding entry, as passed to the --forwards flag.
@@ -68,19 +74,7 @@ func parseForward(value string) (*portForward, error) {
}
func main() {
fs := flag.NewFlagSet("sniproxy", flag.ContinueOnError)
var (
ports = fs.String("ports", "443", "comma-separated list of ports to proxy")
forwards = fs.String("forwards", "", "comma-separated list of ports to transparently forward, protocol/number/destination. For example, --forwards=tcp/22/github.com,tcp/5432/sql.example.com")
wgPort = fs.Int("wg-listen-port", 0, "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select")
promoteHTTPS = fs.Bool("promote-https", true, "promote HTTP to HTTPS")
debugPort = fs.Int("debug-port", 8893, "Listening port for debug/metrics endpoint")
)
err := ff.Parse(fs, os.Args[1:], ff.WithEnvVarPrefix("TS_APPC"))
if err != nil {
log.Fatal("ff.Parse")
}
flag.Parse()
if *ports == "" {
log.Fatal("no ports")
}
@@ -132,6 +126,7 @@ func main() {
})
go s.forward(ln, forw)
}
ln, err := s.ts.Listen("udp", ":53")

View File

@@ -130,7 +130,6 @@ change in the future.
netlockCmd,
licensesCmd,
exitNodeCmd,
updateCmd,
},
FlagSet: rootfs,
Exec: func(context.Context, []string) error { return flag.ErrHelp },
@@ -146,6 +145,8 @@ change in the future.
switch {
case slices.Contains(args, "debug"):
rootCmd.Subcommands = append(rootCmd.Subcommands, debugCmd)
case slices.Contains(args, "update"):
rootCmd.Subcommands = append(rootCmd.Subcommands, updateCmd)
}
if runtime.GOOS == "linux" && distro.Get() == distro.Synology {
rootCmd.Subcommands = append(rootCmd.Subcommands, configureHostCmd)

View File

@@ -556,10 +556,6 @@ func TestPrefsFromUpArgs(t *testing.T) {
NetfilterMode: preftype.NetfilterOn,
CorpDNS: true,
AllowSingleHosts: true,
AutoUpdate: ipn.AutoUpdatePrefs{
Check: true,
Apply: false,
},
},
},
{
@@ -573,10 +569,6 @@ func TestPrefsFromUpArgs(t *testing.T) {
AllowSingleHosts: true,
RouteAll: true,
NetfilterMode: preftype.NetfilterOn,
AutoUpdate: ipn.AutoUpdatePrefs{
Check: true,
Apply: false,
},
},
},
{
@@ -592,10 +584,6 @@ func TestPrefsFromUpArgs(t *testing.T) {
netip.MustParsePrefix("::/0"),
},
NetfilterMode: preftype.NetfilterOn,
AutoUpdate: ipn.AutoUpdatePrefs{
Check: true,
Apply: false,
},
},
},
{
@@ -682,10 +670,6 @@ func TestPrefsFromUpArgs(t *testing.T) {
WantRunning: true,
NetfilterMode: preftype.NetfilterNoDivert,
NoSNAT: true,
AutoUpdate: ipn.AutoUpdatePrefs{
Check: true,
Apply: false,
},
},
},
{
@@ -699,10 +683,6 @@ func TestPrefsFromUpArgs(t *testing.T) {
WantRunning: true,
NetfilterMode: preftype.NetfilterOff,
NoSNAT: true,
AutoUpdate: ipn.AutoUpdatePrefs{
Check: true,
Apply: false,
},
},
},
{
@@ -718,10 +698,6 @@ func TestPrefsFromUpArgs(t *testing.T) {
AdvertiseRoutes: []netip.Prefix{
netip.MustParsePrefix("fd7a:115c:a1e0:b1a::bb:10.0.0.0/112"),
},
AutoUpdate: ipn.AutoUpdatePrefs{
Check: true,
Apply: false,
},
},
},
{

View File

@@ -138,11 +138,6 @@ var debugCmd = &ffcli.Command{
Exec: localAPIAction("break-derp-conns"),
ShortHelp: "break any open DERP connections from the daemon",
},
{
Name: "control-knobs",
Exec: debugControlKnobs,
ShortHelp: "see current control knobs",
},
{
Name: "prefs",
Exec: runPrefs,
@@ -920,17 +915,3 @@ func runPeerEndpointChanges(ctx context.Context, args []string) error {
fmt.Printf("%s", dst.String())
return nil
}
func debugControlKnobs(ctx context.Context, args []string) error {
if len(args) > 0 {
return errors.New("unexpected arguments")
}
v, err := localClient.DebugResultJSON(ctx, "control-knobs")
if err != nil {
return err
}
e := json.NewEncoder(os.Stdout)
e.SetIndent("", " ")
e.Encode(v)
return nil
}

View File

@@ -14,7 +14,6 @@ import (
"strings"
"github.com/peterbourgon/ff/v3/ffcli"
"tailscale.com/envknob"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg"
@@ -26,8 +25,8 @@ var funnelCmd = func() *ffcli.Command {
// This flag is used to switch to an in-development
// implementation of the tailscale funnel command.
// See https://github.com/tailscale/tailscale/issues/7844
if envknob.UseWIPCode() {
return newServeDevCommand(se, funnel)
if os.Getenv("TAILSCALE_FUNNEL_DEV") == "on" {
return newFunnelDevCommand(se)
}
return newFunnelCommand(se)
}

View File

@@ -0,0 +1,48 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package cli
import (
"flag"
"strings"
"github.com/peterbourgon/ff/v3/ffcli"
)
// newFunnelDevCommand returns a new "funnel" subcommand using e as its environment.
// The funnel subcommand is used to turn on/off the Funnel service.
// Funnel is off by default.
// Funnel allows you to publish a 'tailscale serve' server publicly,
// open to the entire internet.
// newFunnelDevCommand shares the same serveEnv as the "serve" subcommand.
// See newServeCommand and serve.go for more details.
func newFunnelDevCommand(e *serveEnv) *ffcli.Command {
return &ffcli.Command{
Name: "funnel",
ShortHelp: "Turn on/off Funnel service",
ShortUsage: strings.Join([]string{
"funnel <port>",
"funnel status [--json]",
}, "\n "),
LongHelp: strings.Join([]string{
"Funnel allows you to expose your local",
"server publicly to the entire internet.",
"Note that it only supports https servers at this point.",
"This command is in development and is unsupported",
}, "\n"),
Exec: e.runServeDev(true),
UsageFunc: usageFunc,
Subcommands: []*ffcli.Command{
{
Name: "status",
Exec: e.runServeStatus,
ShortHelp: "show current serve/Funnel status",
FlagSet: e.newFlags("funnel-status", func(fs *flag.FlagSet) {
fs.BoolVar(&e.json, "json", false, "output JSON")
}),
UsageFunc: usageFunc,
},
},
}
}
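As a rough usage sketch (not from this diff), a command built by newFunnelDevCommand is driven like any other ffcli subcommand; the serveEnv passed in is assumed to be whatever the CLI normally constructs:

package cli

import (
	"context"
	"flag"
)

// runFunnelDev is a hypothetical driver for the subcommand above: parse the
// arguments, run the selected subcommand, and treat a help request as success.
func runFunnelDev(ctx context.Context, e *serveEnv, args []string) error {
	cmd := newFunnelDevCommand(e)
	if err := cmd.ParseAndRun(ctx, args); err != nil && err != flag.ErrHelp {
		return err
	}
	return nil
}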

View File

@@ -53,7 +53,7 @@ func runNetcheck(ctx context.Context, args []string) error {
return err
}
c := &netcheck.Client{
PortMapper: portmapper.NewClient(logf, netMon, nil, nil, nil),
PortMapper: portmapper.NewClient(logf, netMon, nil, nil),
UseDNSCache: false, // always resolve, don't cache
}
if netcheckArgs.verbose {
@@ -153,11 +153,7 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error {
if len(report.RegionLatency) == 0 {
printf("\t* Nearest DERP: unknown (no response to latency probes)\n")
} else {
if report.PreferredDERP != 0 {
printf("\t* Nearest DERP: %v\n", dm.Regions[report.PreferredDERP].RegionName)
} else {
printf("\t* Nearest DERP: [none]\n")
}
printf("\t* Nearest DERP: %v\n", dm.Regions[report.PreferredDERP].RegionName)
printf("\t* DERP latency:\n")
var rids []int
for rid := range dm.Regions {

View File

@@ -25,7 +25,6 @@ import (
"github.com/peterbourgon/ff/v3/ffcli"
"tailscale.com/client/tailscale"
"tailscale.com/envknob"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg"
@@ -38,8 +37,8 @@ var serveCmd = func() *ffcli.Command {
// This flag is used to switch to an in-development
// implementation of the tailscale funnel command.
// See https://github.com/tailscale/tailscale/issues/7844
if envknob.UseWIPCode() {
return newServeDevCommand(se, serve)
if os.Getenv("TAILSCALE_FUNNEL_DEV") == "on" {
return newServeDevCommand(se)
}
return newServeCommand(se)
}
@@ -120,10 +119,6 @@ EXAMPLES
}
}
// errHelp is standard error text that prompts users to
// run `serve --help` for information on how to use serve.
var errHelp = errors.New("try `tailscale serve --help` for usage info")
func (e *serveEnv) newFlags(name string, setup func(fs *flag.FlagSet)) *flag.FlagSet {
onError, out := flag.ExitOnError, Stderr
if e.testFlagOut != nil {
@@ -149,6 +144,7 @@ type localServeClient interface {
QueryFeature(ctx context.Context, feature string) (*tailcfg.QueryFeatureResponse, error)
WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, error)
IncrementCounter(ctx context.Context, name string, delta int) error
StreamServe(ctx context.Context, req ipn.ServeStreamRequest) (io.ReadCloser, error) // TODO: testing :)
}
// serveEnv is the environment the serve command runs within. All I/O should be
@@ -158,18 +154,9 @@ type localServeClient interface {
//
// It also contains the flags, as registered with newServeCommand.
type serveEnv struct {
// v1 flags
// flags
json bool // output JSON (status only for now)
// v2 specific flags
bg bool // background mode
setPath string // serve path
https string // HTTPS port
http string // HTTP port
tcp string // TCP port
tlsTerminatedTCP string // a TLS terminated TCP port
subcmd serveMode // subcommand
lc localServeClient // localClient interface, specific to serve
// optional stuff for tests:
@@ -256,7 +243,7 @@ func (e *serveEnv) runServe(ctx context.Context, args []string) error {
if len(args) < 2 || ((srcType == "https" || srcType == "http") && !turnOff && len(args) < 3) {
fmt.Fprintf(os.Stderr, "error: invalid number of arguments\n\n")
return errHelp
return flag.ErrHelp
}
if srcType == "https" && !turnOff {
@@ -298,7 +285,7 @@ func (e *serveEnv) runServe(ctx context.Context, args []string) error {
default:
fmt.Fprintf(os.Stderr, "error: invalid serve type %q\n", srcType)
fmt.Fprint(os.Stderr, "must be one of: http:<port>, https:<port>, tcp:<port> or tls-terminated-tcp:<port>\n\n", srcType)
return errHelp
return flag.ErrHelp
}
}
@@ -334,13 +321,13 @@ func (e *serveEnv) handleWebServe(ctx context.Context, srvPort uint16, useTLS bo
}
if !filepath.IsAbs(source) {
fmt.Fprintf(os.Stderr, "error: path must be absolute\n\n")
return errHelp
return flag.ErrHelp
}
source = filepath.Clean(source)
fi, err := os.Stat(source)
if err != nil {
fmt.Fprintf(os.Stderr, "error: invalid path: %v\n\n", err)
return errHelp
return flag.ErrHelp
}
if fi.IsDir() && !strings.HasSuffix(mount, "/") {
// dir mount points must end in /
@@ -366,7 +353,7 @@ func (e *serveEnv) handleWebServe(ctx context.Context, srvPort uint16, useTLS bo
if sc.IsTCPForwardingOnPort(srvPort) {
fmt.Fprintf(os.Stderr, "error: cannot serve web; already serving TCP\n")
return errHelp
return flag.ErrHelp
}
mak.Set(&sc.TCP, srvPort, &ipn.TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS})
@@ -554,18 +541,18 @@ func (e *serveEnv) handleTCPServe(ctx context.Context, srcType string, srcPort u
terminateTLS = true
default:
fmt.Fprintf(os.Stderr, "error: invalid TCP source %q\n\n", dest)
return errHelp
return flag.ErrHelp
}
dstURL, err := url.Parse(dest)
if err != nil {
fmt.Fprintf(os.Stderr, "error: invalid TCP source %q: %v\n\n", dest, err)
return errHelp
return flag.ErrHelp
}
host, dstPortStr, err := net.SplitHostPort(dstURL.Host)
if err != nil {
fmt.Fprintf(os.Stderr, "error: invalid TCP source %q: %v\n\n", dest, err)
return errHelp
return flag.ErrHelp
}
switch host {
@@ -574,12 +561,12 @@ func (e *serveEnv) handleTCPServe(ctx context.Context, srcType string, srcPort u
default:
fmt.Fprintf(os.Stderr, "error: invalid TCP source %q\n", dest)
fmt.Fprint(os.Stderr, "must be one of: localhost or 127.0.0.1\n\n", dest)
return errHelp
return flag.ErrHelp
}
if p, err := strconv.ParseUint(dstPortStr, 10, 16); p == 0 || err != nil {
fmt.Fprintf(os.Stderr, "error: invalid port %q\n\n", dstPortStr)
return errHelp
return flag.ErrHelp
}
cursc, err := e.lc.GetServeConfig(ctx)

View File

@@ -5,751 +5,110 @@ package cli
import (
"context"
"errors"
"flag"
"fmt"
"log"
"net"
"net/url"
"io"
"os"
"os/signal"
"path"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
"github.com/peterbourgon/ff/v3/ffcli"
"tailscale.com/client/tailscale"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg"
"tailscale.com/util/mak"
"tailscale.com/version"
)
type execFunc func(ctx context.Context, args []string) error
type commandInfo struct {
Name string
ShortHelp string
LongHelp string
}
var serveHelpCommon = strings.TrimSpace(`
<target> can be a port number (e.g., 3000), a partial URL (e.g., localhost:3000), or a
full URL including a path (e.g., http://localhost:3000/foo, https+insecure://localhost:3000/foo).
EXAMPLES
- Mount a local web server at 127.0.0.1:3000 in the foreground:
$ tailscale %s localhost:3000
- Mount a local web server at 127.0.0.1:3000 in the background:
$ tailscale %s --bg localhost:3000
For more examples and use cases visit our docs site https://tailscale.com/kb/1247/funnel-serve-use-cases
`)
type serveMode int
const (
serve serveMode = iota
funnel
)
type serveType int
const (
serveTypeHTTPS serveType = iota
serveTypeHTTP
serveTypeTCP
serveTypeTLSTerminatedTCP
)
var infoMap = map[serveMode]commandInfo{
serve: {
// newServeDevCommand returns a new "serve" subcommand using e as its environment.
func newServeDevCommand(e *serveEnv) *ffcli.Command {
return &ffcli.Command{
Name: "serve",
ShortHelp: "Serve content and local servers on your tailnet",
LongHelp: strings.Join([]string{
"Serve enables you to share a local server securely within your tailnet.\n",
"To share a local server on the internet, use `tailscale funnel`\n\n",
}, "\n"),
},
funnel: {
Name: "funnel",
ShortHelp: "Serve content and local servers on the internet",
LongHelp: strings.Join([]string{
"Funnel enables you to share a local server on the internet using Tailscale.\n",
"To share only within your tailnet, use `tailscale serve`\n\n",
}, "\n"),
},
}
func buildShortUsage(subcmd string) string {
return strings.Join([]string{
subcmd + " [flags] <target> [off]",
subcmd + " status [--json]",
subcmd + " reset",
}, "\n ")
}
// newServeDevCommand returns a new "serve" subcommand using e as its environment.
func newServeDevCommand(e *serveEnv, subcmd serveMode) *ffcli.Command {
if subcmd != serve && subcmd != funnel {
log.Fatalf("newServeDevCommand called with unknown subcmd %q", subcmd)
}
info := infoMap[subcmd]
return &ffcli.Command{
Name: info.Name,
ShortHelp: info.ShortHelp,
ShortUsage: strings.Join([]string{
fmt.Sprintf("%s <target>", info.Name),
fmt.Sprintf("%s status [--json]", info.Name),
fmt.Sprintf("%s reset", info.Name),
"serve <port>",
"serve status [--json]",
}, "\n "),
LongHelp: info.LongHelp + fmt.Sprintf(strings.TrimSpace(serveHelpCommon), info.Name, info.Name),
Exec: e.runServeCombined(subcmd),
FlagSet: e.newFlags("serve-set", func(fs *flag.FlagSet) {
fs.BoolVar(&e.bg, "bg", false, "run the command in the background")
fs.StringVar(&e.setPath, "set-path", "", "set a path for a specific target and run in the background")
fs.StringVar(&e.https, "https", "", "default; HTTPS listener")
fs.StringVar(&e.http, "http", "", "HTTP listener")
fs.StringVar(&e.tcp, "tcp", "", "TCP listener")
fs.StringVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", "", "TLS terminated TCP listener")
}),
LongHelp: strings.TrimSpace(`
The 'tailscale serve' set of commands allows you to serve
content and local servers from your Tailscale node to
your tailnet.
`),
Exec: e.runServeDev(false),
UsageFunc: usageFunc,
Subcommands: []*ffcli.Command{
{
Name: "status",
Exec: e.runServeStatus,
ShortHelp: "view current proxy configuration",
FlagSet: e.newFlags("serve-status", func(fs *flag.FlagSet) {
ShortHelp: "show current serve/Funnel status",
FlagSet: e.newFlags("funnel-status", func(fs *flag.FlagSet) {
fs.BoolVar(&e.json, "json", false, "output JSON")
}),
UsageFunc: usageFunc,
},
{
Name: "reset",
ShortHelp: "reset current serve/funnel config",
Exec: e.runServeReset,
FlagSet: e.newFlags("serve-reset", nil),
UsageFunc: usageFunc,
},
},
}
}
func validateArgs(subcmd serveMode, args []string) error {
switch len(args) {
case 0:
return flag.ErrHelp
case 1, 2:
if isLegacyInvocation(subcmd, args) {
fmt.Fprintf(os.Stderr, "error: the CLI for serve and funnel has changed.")
fmt.Fprintf(os.Stderr, "Please see https://tailscale.com/kb/1242/tailscale-serve for more information.")
return errHelp
}
default:
fmt.Fprintf(os.Stderr, "error: invalid number of arguments (%d)", len(args))
return errHelp
}
return nil
}
// runServeCombined is the entry point for the "tailscale {serve,funnel}" commands.
func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc {
e.subcmd = subcmd
// runServeDev is the entry point for the "tailscale serve|funnel" subcommand.
//
// Note: funnel is only supported on a single DNS name for now. (2023-08-18)
func (e *serveEnv) runServeDev(funnel bool) execFunc {
return func(ctx context.Context, args []string) error {
if err := validateArgs(subcmd, args); err != nil {
return err
}
ctx, cancel := signal.NotifyContext(ctx, os.Interrupt)
defer cancel()
if len(args) != 1 {
return flag.ErrHelp
}
var source string
port64, err := strconv.ParseUint(args[0], 10, 16)
if err == nil {
source = fmt.Sprintf("http://127.0.0.1:%d", port64)
} else {
source, err = expandProxyTarget(args[0])
}
if err != nil {
return err
}
st, err := e.getLocalClientStatusWithoutPeers(ctx)
if err != nil {
return fmt.Errorf("getting client status: %w", err)
}
funnel := subcmd == funnel
if funnel {
// verify node has funnel capabilities
if err := e.verifyFunnelEnabled(ctx, st, 443); err != nil {
return err
}
}
mount, err := cleanURLPath(e.setPath)
if err != nil {
return fmt.Errorf("failed to clean the mount point: %w", err)
}
if e.setPath != "" {
// TODO(marwan-at-work): either
// 1. Warn the user that this is a side effect.
// 2. Force the user to pass --bg
// 3. Allow set-path to be in the foreground.
e.bg = true
}
srvType, srvPort, err := srvTypeAndPortFromFlags(e)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n\n", err)
return errHelp
}
sc, err := e.lc.GetServeConfig(ctx)
if err != nil {
return fmt.Errorf("error getting serve config: %w", err)
}
// nil if no config
if sc == nil {
sc = new(ipn.ServeConfig)
}
dnsName := strings.TrimSuffix(st.Self.DNSName, ".")
hp := ipn.HostPort(dnsName + ":443") // TODO(marwan-at-work): support the 2 other ports
// set parent serve config to always be persisted
// at the top level, but a nested config might be
// the one that gets manipulated depending on
// foreground or background.
parentSC := sc
turnOff := "off" == args[len(args)-1]
if !turnOff && srvType == serveTypeHTTPS {
// Running serve with https requires that the tailnet has enabled
// https cert provisioning. Send users through an interactive flow
// to enable this if not already done.
//
// TODO(sonia,tailscale/corp#10577): The interactive feature flow
// is behind a control flag. If the tailnet doesn't have the flag
// on, enableFeatureInteractive will error. For now, we hide that
// error and maintain the previous behavior (prior to 2023-08-15)
// of letting them edit the serve config before enabling certs.
if err := e.enableFeatureInteractive(ctx, "serve", func(caps []string) bool {
return slices.Contains(caps, tailcfg.CapabilityHTTPS)
}); err != nil {
return fmt.Errorf("error enabling https feature: %w", err)
}
}
var watcher *tailscale.IPNBusWatcher
if !e.bg && !turnOff {
// if foreground mode, create a WatchIPNBus session
// and use the nested config for all following operations
// TODO(marwan-at-work): nested-config validations should happen here or previous to this point.
watcher, err = e.lc.WatchIPNBus(ctx, ipn.NotifyInitialState)
if err != nil {
return err
}
defer watcher.Close()
n, err := watcher.Next()
if err != nil {
return err
}
if n.SessionID == "" {
return errors.New("missing SessionID")
}
fsc := &ipn.ServeConfig{}
mak.Set(&sc.Foreground, n.SessionID, fsc)
sc = fsc
}
var msg string
if turnOff {
err = e.unsetServe(sc, dnsName, srvType, srvPort, mount)
} else {
err = e.setServe(sc, st, dnsName, srvType, srvPort, mount, args[0], funnel)
msg = e.messageForPort(sc, st, dnsName, srvPort)
}
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n\n", err)
return errHelp
}
if err := e.lc.SetServeConfig(ctx, parentSC); err != nil {
return err
}
if msg != "" {
fmt.Fprintln(os.Stderr, msg)
}
if watcher != nil {
for {
_, err = watcher.Next()
if err != nil {
if errors.Is(err, context.Canceled) {
return nil
}
return err
}
}
}
return nil
}
}
func (e *serveEnv) setServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool) error {
// update serve config based on the type
switch srvType {
case serveTypeHTTPS, serveTypeHTTP:
useTLS := srvType == serveTypeHTTPS
err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target)
if err != nil {
return fmt.Errorf("failed apply web serve: %w", err)
}
case serveTypeTCP, serveTypeTLSTerminatedTCP:
err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target)
if err != nil {
return fmt.Errorf("failed to apply TCP serve: %w", err)
}
default:
return fmt.Errorf("invalid type %q", srvType)
}
// update the serve config based on if funnel is enabled
e.applyFunnel(sc, dnsName, srvPort, allowFunnel)
return nil
}
// messageForPort returns a message for the given port based on the
// serve config and status.
func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvPort uint16) string {
var output strings.Builder
hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort))))
if sc.AllowFunnel[hp] == true {
output.WriteString("Available on the internet:\n")
} else {
output.WriteString("Available within your tailnet:\n")
}
scheme := "https"
if sc.IsServingHTTP(srvPort) {
scheme = "http"
}
portPart := ":" + fmt.Sprint(srvPort)
if scheme == "http" && srvPort == 80 ||
scheme == "https" && srvPort == 443 {
portPart = ""
}
output.WriteString(fmt.Sprintf("%s://%s%s\n\n", scheme, dnsName, portPart))
if !e.bg {
output.WriteString("Press Ctrl+C to exit.")
return output.String()
}
srvTypeAndDesc := func(h *ipn.HTTPHandler) (string, string) {
switch {
case h.Path != "":
return "path", h.Path
case h.Proxy != "":
return "proxy", h.Proxy
case h.Text != "":
return "text", "\"" + elipticallyTruncate(h.Text, 20) + "\""
}
return "", ""
}
if sc.Web[hp] != nil {
var mounts []string
for k := range sc.Web[hp].Handlers {
mounts = append(mounts, k)
}
sort.Slice(mounts, func(i, j int) bool {
return len(mounts[i]) < len(mounts[j])
// In the streaming case, the process stays running in the
// foreground and prints out connections to the HostPort.
//
// The local backend handles updating the ServeConfig as
// necessary, then restores it to its original state once
// the process's context is closed or the client turns off
// Tailscale.
return e.streamServe(ctx, ipn.ServeStreamRequest{
Funnel: funnel,
HostPort: hp,
Source: source,
MountPoint: "/", // TODO(marwan-at-work): support multiple mount points
})
maxLen := len(mounts[len(mounts)-1])
for _, m := range mounts {
h := sc.Web[hp].Handlers[m]
t, d := srvTypeAndDesc(h)
output.WriteString(fmt.Sprintf("%s %s%s %-5s %s\n", "|--", m, strings.Repeat(" ", maxLen-len(m)), t, d))
}
} else if sc.TCP[srvPort] != nil {
h := sc.TCP[srvPort]
tlsStatus := "TLS over TCP"
if h.TerminateTLS != "" {
tlsStatus = "TLS terminated"
}
output.WriteString(fmt.Sprintf("|-- tcp://%s (%s)\n", hp, tlsStatus))
for _, a := range st.TailscaleIPs {
ipp := net.JoinHostPort(a.String(), strconv.Itoa(int(srvPort)))
output.WriteString(fmt.Sprintf("|-- tcp://%s\n", ipp))
}
output.WriteString(fmt.Sprintf("|--> tcp://%s\n", h.TCPForward))
}
output.WriteString("\nServe started and running in the background.\n")
output.WriteString(fmt.Sprintf("To disable the proxy, run: tailscale %s off", infoMap[e.subcmd].Name))
return output.String()
}
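To make that message concrete, here is an illustrative sketch (not from the repository) of what messageForPort produces in background mode; the dnsName and proxy target mirror the test fixtures later in this compare, and e/st stand for an existing *serveEnv (with bg set) and *ipnstate.Status.
func exampleServeMessage(e *serveEnv, st *ipnstate.Status) {
	// Hypothetical fixture, matching the "serve --bg localhost:3000" test case below.
	sc := &ipn.ServeConfig{
		TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
		Web: map[ipn.HostPort]*ipn.WebServerConfig{
			"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
				"/": {Proxy: "http://127.0.0.1:3000"},
			}},
		},
	}
	fmt.Println(e.messageForPort(sc, st, "foo.test.ts.net", 443))
	// Prints roughly:
	//   Available within your tailnet:
	//
	//   https://foo.test.ts.net
	//
	//   |-- / proxy http://127.0.0.1:3000
	//
	//   Serve started and running in the background.
	//   To disable the proxy, run: tailscale serve off
}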
func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string) error {
h := new(ipn.HTTPHandler)
switch {
case strings.HasPrefix(target, "text:"):
text := strings.TrimPrefix(target, "text:")
if text == "" {
return errors.New("unable to serve; text cannot be an empty string")
}
h.Text = text
case filepath.IsAbs(target):
if version.IsSandboxedMacOS() {
// don't allow path serving for now on macOS (2022-11-15)
return errors.New("path serving is not supported if sandboxed on macOS")
}
target = filepath.Clean(target)
fi, err := os.Stat(target)
if err != nil {
return errors.New("invalid path")
}
// TODO: need to understand this further
if fi.IsDir() && !strings.HasSuffix(mount, "/") {
// dir mount points must end in /
// for relative file links to work
mount += "/"
}
h.Path = target
default:
t, err := expandProxyTargetDev(target)
if err != nil {
return err
}
h.Proxy = t
}
// TODO: validation needs to check nested foreground configs
if sc.IsTCPForwardingOnPort(srvPort) {
return errors.New("cannot serve web; already serving TCP")
}
mak.Set(&sc.TCP, srvPort, &ipn.TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS})
hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort))))
if _, ok := sc.Web[hp]; !ok {
mak.Set(&sc.Web, hp, new(ipn.WebServerConfig))
}
mak.Set(&sc.Web[hp].Handlers, mount, h)
// TODO: handle multiple web handlers from foreground mode
for k, v := range sc.Web[hp].Handlers {
if v == h {
continue
}
// If the new mount point ends in / and another mount point
// shares the same prefix, remove the other handler.
// (e.g. /foo/ overwrites /foo)
// The opposite example is also handled.
m1 := strings.TrimSuffix(mount, "/")
m2 := strings.TrimSuffix(k, "/")
if m1 == m2 {
delete(sc.Web[hp].Handlers, k)
}
}
return nil
}
func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string) error {
var terminateTLS bool
switch srcType {
case serveTypeTCP:
terminateTLS = false
case serveTypeTLSTerminatedTCP:
terminateTLS = true
default:
return fmt.Errorf("invalid TCP target %q", target)
}
dstURL, err := url.Parse(target)
func (e *serveEnv) streamServe(ctx context.Context, req ipn.ServeStreamRequest) error {
stream, err := e.lc.StreamServe(ctx, req)
if err != nil {
return fmt.Errorf("invalid TCP target %q: %v", target, err)
}
host, dstPortStr, err := net.SplitHostPort(dstURL.Host)
if err != nil {
return fmt.Errorf("invalid TCP target %q: %v", target, err)
return err
}
defer stream.Close()
switch host {
case "localhost", "127.0.0.1":
// ok
default:
return fmt.Errorf("invalid TCP target %q, must be one of localhost or 127.0.0.1", target)
}
if p, err := strconv.ParseUint(dstPortStr, 10, 16); p == 0 || err != nil {
return fmt.Errorf("invalid port %q", dstPortStr)
}
fwdAddr := "127.0.0.1:" + dstPortStr
// TODO: needs to account for multiple configs from foreground mode
if sc.IsServingWeb(srcPort) {
return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort)
}
mak.Set(&sc.TCP, srcPort, &ipn.TCPPortHandler{TCPForward: fwdAddr})
if terminateTLS {
sc.TCP[srcPort].TerminateTLS = dnsName
}
return nil
}
func (e *serveEnv) applyFunnel(sc *ipn.ServeConfig, dnsName string, srvPort uint16, allowFunnel bool) {
hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort))))
// TODO: Should we return an error? Should not be possible.
// nil if no config
if sc == nil {
sc = new(ipn.ServeConfig)
}
// TODO: should ensure there is no other conflicting funnel
// TODO: add error handling for if toggling for existing sc
if allowFunnel {
mak.Set(&sc.AllowFunnel, hp, true)
}
}
// unsetServe removes the serve config for the given serve port.
func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string) error {
switch srvType {
case serveTypeHTTPS, serveTypeHTTP:
err := e.removeWebServe(sc, dnsName, srvPort, mount)
if err != nil {
return fmt.Errorf("failed to remove web serve: %w", err)
}
case serveTypeTCP, serveTypeTLSTerminatedTCP:
err := e.removeTCPServe(sc, srvPort)
if err != nil {
return fmt.Errorf("failed to remove TCP serve: %w", err)
}
default:
return fmt.Errorf("invalid type %q", srvType)
}
// TODO(tylersmalley): remove funnel
return nil
}
func srvTypeAndPortFromFlags(e *serveEnv) (srvType serveType, srvPort uint16, err error) {
sourceMap := map[serveType]string{
serveTypeHTTP: e.http,
serveTypeHTTPS: e.https,
serveTypeTCP: e.tcp,
serveTypeTLSTerminatedTCP: e.tlsTerminatedTCP,
}
var srcTypeCount int
var srcValue string
for k, v := range sourceMap {
if v != "" {
srcTypeCount++
srvType = k
srcValue = v
}
}
if srcTypeCount > 1 {
return 0, 0, fmt.Errorf("cannot serve multiple types for a single mount point")
} else if srcTypeCount == 0 {
srvType = serveTypeHTTPS
srcValue = "443"
}
srvPort, err = parseServePort(srcValue)
if err != nil {
return 0, 0, fmt.Errorf("invalid port %q: %w", srcValue, err)
}
return srvType, srvPort, nil
}
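As a quick illustration (not part of the change), this sketch shows how the listener flags resolve to a serve type and port; the expected results match TestSrcTypeFromFlags further down in this compare.
func exampleSrvTypeResolution() {
	for _, env := range []*serveEnv{
		{},                         // no flags: defaults to HTTPS on 443
		{http: "80"},               // --http=80
		{tlsTerminatedTCP: "8080"}, // --tls-terminated-tcp=8080
		{http: "80", https: "443"}, // error: multiple listener types set
	} {
		srvType, srvPort, err := srvTypeAndPortFromFlags(env)
		fmt.Println(srvType, srvPort, err)
	}
}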
func isLegacyInvocation(subcmd serveMode, args []string) bool {
if subcmd == serve && len(args) == 2 {
prefixes := []string{"http", "https", "tcp", "tls-terminated-tcp"}
for _, prefix := range prefixes {
if strings.HasPrefix(args[0], prefix) {
return true
}
}
}
return false
}
// removeWebServe removes a web handler from the serve config
// and removes funnel if no remaining mounts exist for the serve port.
// The srvPort argument is the serving port and the mount argument is
// the mount point or registered path to remove.
func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, mount string) error {
if sc.IsTCPForwardingOnPort(srvPort) {
return errors.New("cannot remove web handler; currently serving TCP")
}
hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort))))
if !sc.WebHandlerExists(hp, mount) {
return errors.New("error: handler does not exist")
}
// delete existing handler, then cascade delete if empty
delete(sc.Web[hp].Handlers, mount)
if len(sc.Web[hp].Handlers) == 0 {
delete(sc.Web, hp)
delete(sc.TCP, srvPort)
}
// clear empty maps mostly for testing
if len(sc.Web) == 0 {
sc.Web = nil
}
if len(sc.TCP) == 0 {
sc.TCP = nil
}
// disable funnel if no remaining mounts exist for the serve port
if sc.Web == nil && sc.TCP == nil {
delete(sc.AllowFunnel, hp)
}
return nil
}
// removeTCPServe removes the TCP forwarding configuration for the
// given srvPort, or serving port.
func (e *serveEnv) removeTCPServe(sc *ipn.ServeConfig, src uint16) error {
if sc == nil {
return nil
}
if sc.GetTCPPortHandler(src) == nil {
return errors.New("error: serve config does not exist")
}
if sc.IsServingWeb(src) {
return fmt.Errorf("unable to remove; serving web, not TCP forwarding on serve port %d", src)
}
delete(sc.TCP, src)
// clear map mostly for testing
if len(sc.TCP) == 0 {
sc.TCP = nil
}
return nil
}
// expandProxyTargetDev expands the supported target values to be proxied
// allowing for input values to be a port number, a partial URL, or a full URL
// including a path.
//
// examples:
// - 3000
// - localhost:3000
// - http://localhost:3000
// - https://localhost:3000
// - https-insecure://localhost:3000
// - https-insecure://localhost:3000/foo
func expandProxyTargetDev(target string) (string, error) {
var (
scheme = "http"
host = "127.0.0.1"
)
// support target being a port number
if port, err := strconv.ParseUint(target, 10, 16); err == nil {
return fmt.Sprintf("%s://%s:%d", scheme, host, port), nil
}
// prepend scheme if not present
if !strings.Contains(target, "://") {
target = scheme + "://" + target
}
// make sure we can parse the target
u, err := url.ParseRequestURI(target)
if err != nil {
return "", fmt.Errorf("invalid URL %w", err)
}
// ensure a supported scheme
switch u.Scheme {
case "http", "https", "https+insecure":
default:
return "", errors.New("must be a URL starting with http://, https://, or https+insecure://")
}
// validate the port
port, err := strconv.ParseUint(u.Port(), 10, 16)
if err != nil || port == 0 {
return "", fmt.Errorf("invalid port %q", u.Port())
}
// validate the host.
switch u.Hostname() {
case "localhost", "127.0.0.1":
u.Host = fmt.Sprintf("%s:%d", host, port)
default:
return "", errors.New("only localhost or 127.0.0.1 proxies are currently supported")
}
return u.String(), nil
}
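A usage sketch (not from the repository) of the normalizations listed in the doc comment above, assuming it sits in the same cli package so the unexported function is reachable:
func exampleExpandProxyTargetDev() {
	for _, in := range []string{"3000", "localhost:3000", "https+insecure://localhost:3000/foo"} {
		out, err := expandProxyTargetDev(in)
		fmt.Println(in, "->", out, err)
	}
	// 3000 -> http://127.0.0.1:3000 <nil>
	// localhost:3000 -> http://127.0.0.1:3000 <nil>
	// https+insecure://localhost:3000/foo -> https+insecure://127.0.0.1:3000/foo <nil>
}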
// cleanURLPath ensures the path is clean and has a leading "/".
func cleanURLPath(urlPath string) (string, error) {
if urlPath == "" {
return "/", nil
}
// TODO(tylersmalley) verify still needed with path being a flag
urlPath = cleanMinGWPathConversionIfNeeded(urlPath)
if !strings.HasPrefix(urlPath, "/") {
urlPath = "/" + urlPath
}
c := path.Clean(urlPath)
if urlPath == c || urlPath == c+"/" {
return urlPath, nil
}
return "", fmt.Errorf("invalid mount point %q", urlPath)
}
func (s serveType) String() string {
switch s {
case serveTypeHTTP:
return "httpListener"
case serveTypeHTTPS:
return "httpsListener"
case serveTypeTCP:
return "tcpListener"
case serveTypeTLSTerminatedTCP:
return "tlsTerminatedTCPListener"
default:
return "unknownServeType"
}
fmt.Fprintf(os.Stderr, "Serve started on \"https://%s\".\n", strings.TrimSuffix(string(req.HostPort), ":443"))
fmt.Fprintf(os.Stderr, "Press Ctrl-C to stop.\n\n")
_, err = io.Copy(os.Stdout, stream)
return err
}

View File

@@ -1,955 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package cli
import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
"github.com/peterbourgon/ff/v3/ffcli"
"tailscale.com/ipn"
"tailscale.com/types/logger"
)
func TestServeDevConfigMutations(t *testing.T) {
// Stateful mutations, starting from an empty config.
type step struct {
command []string // serve args; nil means no command to run (only reset)
reset bool // if true, reset all ServeConfig state
want *ipn.ServeConfig // non-nil means we want a save of this value
wantErr func(error) (badErrMsg string) // nil means no error is wanted
line int // line number of addStep call, for error messages
debugBreak func()
}
var steps []step
add := func(s step) {
_, _, s.line, _ = runtime.Caller(1)
steps = append(steps, s)
}
// using port number
add(step{reset: true})
add(step{
command: cmd("funnel --bg 3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:443": true},
},
})
// funnel background
add(step{reset: true})
add(step{
command: cmd("funnel --bg localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:443": true},
},
})
// serve background
add(step{reset: true})
add(step{
command: cmd("serve --bg localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
// --set-path runs in background
add(step{reset: true})
add(step{
command: cmd("serve --set-path=/ localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
// using http listener
add(step{reset: true})
add(step{
command: cmd("serve --bg --http=80 localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:80": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
// using https listener with a valid port
add(step{reset: true})
add(step{
command: cmd("serve --bg --https=8443 localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{8443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:8443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
// https
add(step{reset: true})
add(step{ // allow omitting port (default to 80)
command: cmd("serve --http=80 --bg http://localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:80": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
add(step{ // support non Funnel port
command: cmd("serve --http=9999 --set-path=/abc http://localhost:3001"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}, 9999: {HTTP: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:80": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
"foo.test.ts.net:9999": {Handlers: map[string]*ipn.HTTPHandler{
"/abc": {Proxy: "http://127.0.0.1:3001"},
}},
},
},
})
add(step{
command: cmd("serve --http=9999 --set-path=/abc off"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:80": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
add(step{
command: cmd("serve --http=8080 --set-path=/abc http://127.0.0.1:3001"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}, 8080: {HTTP: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:80": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
"foo.test.ts.net:8080": {Handlers: map[string]*ipn.HTTPHandler{
"/abc": {Proxy: "http://127.0.0.1:3001"},
}},
},
},
})
// // https
add(step{reset: true})
add(step{
command: cmd("serve --https=443 --bg http://localhost:0"), // invalid port, too low
wantErr: anyErr(),
})
add(step{
command: cmd("serve --https=443 --bg http://localhost:65536"), // invalid port, too high
wantErr: anyErr(),
})
add(step{
command: cmd("serve --https=443 --bg http://somehost:3000"), // invalid host
wantErr: anyErr(),
})
add(step{
command: cmd("serve --https=443 --bg httpz://127.0.0.1"), // invalid scheme
wantErr: anyErr(),
})
add(step{ // allow omitting port (default to 443)
command: cmd("serve --https=443 --bg http://localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
add(step{ // support non Funnel port
command: cmd("serve --https=9999 --set-path=/abc http://localhost:3001"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}, 9999: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
"foo.test.ts.net:9999": {Handlers: map[string]*ipn.HTTPHandler{
"/abc": {Proxy: "http://127.0.0.1:3001"},
}},
},
},
})
add(step{
command: cmd("serve --https=9999 --set-path=/abc off"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
add(step{
command: cmd("serve --https=8443 --set-path=/abc http://127.0.0.1:3001"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}, 8443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
"foo.test.ts.net:8443": {Handlers: map[string]*ipn.HTTPHandler{
"/abc": {Proxy: "http://127.0.0.1:3001"},
}},
},
},
})
add(step{
command: cmd("serve --https=10000 --bg text:hi"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {HTTPS: true}, 8443: {HTTPS: true}, 10000: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
"foo.test.ts.net:8443": {Handlers: map[string]*ipn.HTTPHandler{
"/abc": {Proxy: "http://127.0.0.1:3001"},
}},
"foo.test.ts.net:10000": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Text: "hi"},
}},
},
},
})
add(step{
command: cmd("serve --https=443 --set-path=/foo off"),
want: nil, // nothing to save
wantErr: anyErr(),
}) // handler doesn't exist, so we get an error
add(step{
command: cmd("serve --https=10000 off"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}, 8443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
"foo.test.ts.net:8443": {Handlers: map[string]*ipn.HTTPHandler{
"/abc": {Proxy: "http://127.0.0.1:3001"},
}},
},
},
})
add(step{
command: cmd("serve --https=443 off"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{8443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:8443": {Handlers: map[string]*ipn.HTTPHandler{
"/abc": {Proxy: "http://127.0.0.1:3001"},
}},
},
},
})
add(step{
command: cmd("serve --https=8443 --set-path=/abc off"),
want: &ipn.ServeConfig{},
})
add(step{ // clean mount: "bar" becomes "/bar"
command: cmd("serve --https=443 --set-path=bar https://127.0.0.1:8443"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/bar": {Proxy: "https://127.0.0.1:8443"},
}},
},
},
})
// add(step{
// command: cmd("serve --https=443 --set-path=bar https://127.0.0.1:8443"),
// want: nil, // nothing to save
// })
add(step{ // try resetting using reset command
command: cmd("serve reset"),
want: &ipn.ServeConfig{},
})
add(step{
command: cmd("serve --https=443 --bg https+insecure://127.0.0.1:3001"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "https+insecure://127.0.0.1:3001"},
}},
},
},
})
add(step{reset: true})
add(step{
command: cmd("serve --https=443 --set-path=/foo localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/foo": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
add(step{ // test a second handler on the same port
command: cmd("serve --https=8443 --set-path=/foo localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}, 8443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/foo": {Proxy: "http://127.0.0.1:3000"},
}},
"foo.test.ts.net:8443": {Handlers: map[string]*ipn.HTTPHandler{
"/foo": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
add(step{reset: true})
add(step{ // support path in proxy
command: cmd("serve --https=443 --bg http://127.0.0.1:3000/foo/bar"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000/foo/bar"},
}},
},
},
})
// // tcp
add(step{reset: true})
add(step{ // must include scheme for tcp
command: cmd("serve --tls-terminated-tcp=443 --bg localhost:5432"),
wantErr: exactErr(errHelp, "errHelp"),
})
add(step{ // !somehost, must be localhost or 127.0.0.1
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:5432"),
wantErr: exactErr(errHelp, "errHelp"),
})
add(step{ // bad target port, too low
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:0"),
wantErr: exactErr(errHelp, "errHelp"),
})
add(step{ // bad target port, too high
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:65536"),
wantErr: exactErr(errHelp, "errHelp"),
})
add(step{
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://localhost:5432"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
TCPForward: "127.0.0.1:5432",
TerminateTLS: "foo.test.ts.net",
},
},
},
})
add(step{
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://127.0.0.1:8443"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
TCPForward: "127.0.0.1:8443",
TerminateTLS: "foo.test.ts.net",
},
},
},
})
// add(step{
// command: cmd("serve --tls-terminated-tcp=443 --bg tcp://127.0.0.1:8443"),
// want: nil, // nothing to save
// })
add(step{
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://localhost:8444"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
TCPForward: "127.0.0.1:8444",
TerminateTLS: "foo.test.ts.net",
},
},
},
})
add(step{
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://127.0.0.1:8445"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
TCPForward: "127.0.0.1:8445",
TerminateTLS: "foo.test.ts.net",
},
},
},
})
add(step{reset: true})
add(step{
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://localhost:123"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
TCPForward: "127.0.0.1:123",
TerminateTLS: "foo.test.ts.net",
},
},
},
})
add(step{ // handler doesn't exist, so we get an error
command: cmd("serve --tls-terminated-tcp=8443 off"),
wantErr: anyErr(),
})
add(step{
command: cmd("serve --tls-terminated-tcp=443 off"),
want: &ipn.ServeConfig{},
})
// // text
add(step{reset: true})
add(step{
command: cmd("serve --https=443 --bg text:hello"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Text: "hello"},
}},
},
},
})
// path
td := t.TempDir()
writeFile := func(suffix, contents string) {
if err := os.WriteFile(filepath.Join(td, suffix), []byte(contents), 0600); err != nil {
t.Fatal(err)
}
}
add(step{reset: true})
writeFile("foo", "this is foo")
add(step{
command: cmd("serve --https=443 --bg " + filepath.Join(td, "foo")),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Path: filepath.Join(td, "foo")},
}},
},
},
})
os.MkdirAll(filepath.Join(td, "subdir"), 0700)
writeFile("subdir/file-a", "this is A")
add(step{
command: cmd("serve --https=443 --set-path=/some/where " + filepath.Join(td, "subdir/file-a")),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Path: filepath.Join(td, "foo")},
"/some/where": {Path: filepath.Join(td, "subdir/file-a")},
}},
},
},
})
add(step{ // bad path
command: cmd("serve --https=443 --bg bad/path"),
wantErr: exactErr(errHelp, "errHelp"),
})
add(step{reset: true})
add(step{
command: cmd("serve --https=443 --bg " + filepath.Join(td, "subdir")),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Path: filepath.Join(td, "subdir/")},
}},
},
},
})
add(step{
command: cmd("serve --https=443 off"),
want: &ipn.ServeConfig{},
})
// // combos
add(step{reset: true})
add(step{
command: cmd("serve --bg localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
add(step{ // enable funnel for primary port
command: cmd("funnel --bg localhost:3000"),
want: &ipn.ServeConfig{
AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:443": true},
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
add(step{ // serving on secondary port doesn't change funnel on primary port
command: cmd("serve --https=8443 --set-path=/bar localhost:3001"),
want: &ipn.ServeConfig{
AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:443": true},
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}, 8443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
"foo.test.ts.net:8443": {Handlers: map[string]*ipn.HTTPHandler{
"/bar": {Proxy: "http://127.0.0.1:3001"},
}},
},
},
})
add(step{ // turn funnel on for secondary port
command: cmd("funnel --https=8443 --set-path=/bar localhost:3001"),
want: &ipn.ServeConfig{
AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:443": true, "foo.test.ts.net:8443": true},
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}, 8443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
"foo.test.ts.net:8443": {Handlers: map[string]*ipn.HTTPHandler{
"/bar": {Proxy: "http://127.0.0.1:3001"},
}},
},
},
})
// TODO(tylersmalley) resolve these failures
// add(step{ // turn funnel off for primary port 443
// command: cmd("serve --https=443 --set-path=/bar localhost:3001"),
// want: &ipn.ServeConfig{
// AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:8443": true},
// TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}, 8443: {HTTPS: true}},
// Web: map[ipn.HostPort]*ipn.WebServerConfig{
// "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
// "/": {Proxy: "http://127.0.0.1:3000"},
// }},
// "foo.test.ts.net:8443": {Handlers: map[string]*ipn.HTTPHandler{
// "/bar": {Proxy: "http://127.0.0.1:3001"},
// }},
// },
// },
// })
// add(step{ // remove secondary port
// command: cmd("https:8443 /bar off"),
// want: &ipn.ServeConfig{
// AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:8443": true},
// TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
// Web: map[ipn.HostPort]*ipn.WebServerConfig{
// "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
// "/": {Proxy: "http://127.0.0.1:3000"},
// }},
// },
// },
// })
// add(step{ // start a tcp forwarder on 8443
// command: cmd("tcp:8443 tcp://localhost:5432"),
// want: &ipn.ServeConfig{
// AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:8443": true},
// TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}, 8443: {TCPForward: "127.0.0.1:5432"}},
// Web: map[ipn.HostPort]*ipn.WebServerConfig{
// "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
// "/": {Proxy: "http://127.0.0.1:3000"},
// }},
// },
// },
// })
// add(step{ // remove primary port http handler
// command: cmd("https:443 / off"),
// want: &ipn.ServeConfig{
// AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:8443": true},
// TCP: map[uint16]*ipn.TCPPortHandler{8443: {TCPForward: "127.0.0.1:5432"}},
// },
// })
// add(step{ // remove tcp forwarder
// command: cmd("tls-terminated-tcp:8443 off"),
// want: &ipn.ServeConfig{
// AllowFunnel: map[ipn.HostPort]bool{"foo.test.ts.net:8443": true},
// },
// })
// add(step{ // turn off funnel
// command: cmd("funnel 8443 off"),
// want: &ipn.ServeConfig{},
// })
// // tricky steps
add(step{reset: true})
add(step{ // a directory with a trailing slash mount point
command: cmd("serve --https=443 --set-path=/dir " + filepath.Join(td, "subdir")),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/dir/": {Path: filepath.Join(td, "subdir/")},
}},
},
},
})
add(step{ // this should overwrite the previous one
command: cmd("serve --https=443 --set-path=/dir " + filepath.Join(td, "foo")),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/dir": {Path: filepath.Join(td, "foo")},
}},
},
},
})
add(step{reset: true}) // reset and do the opposite
add(step{ // a file without a trailing slash mount point
command: cmd("serve --https=443 --set-path=/dir " + filepath.Join(td, "foo")),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/dir": {Path: filepath.Join(td, "foo")},
}},
},
},
})
add(step{ // this should overwrite the previous one
command: cmd("serve --https=443 --set-path=/dir " + filepath.Join(td, "subdir")),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/dir/": {Path: filepath.Join(td, "subdir/")},
}},
},
},
})
// // error states
add(step{reset: true})
add(step{ // tcp forward 5432 on serve port 443
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://localhost:5432"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
TCPForward: "127.0.0.1:5432",
TerminateTLS: "foo.test.ts.net",
},
},
},
})
add(step{ // try to start a web handler on the same port
command: cmd("serve --https=443 --bg localhost:3000"),
wantErr: exactErr(errHelp, "errHelp"),
})
add(step{reset: true})
add(step{ // start a web handler on port 443
command: cmd("serve --https=443 --bg localhost:3000"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
},
},
})
add(step{ // try to start a tcp forwarder on the same serve port
command: cmd("serve --tls-terminated-tcp=443 --bg tcp://localhost:5432"),
wantErr: anyErr(),
})
lc := &fakeLocalServeClient{}
// And now run the steps above.
for i, st := range steps {
if st.debugBreak != nil {
st.debugBreak()
}
if st.reset {
t.Logf("Executing step #%d, line %v: [reset]", i, st.line)
lc.config = nil
}
if st.command == nil {
continue
}
t.Logf("Executing step #%d, line %v: %q ... ", i, st.line, st.command)
var stdout bytes.Buffer
var flagOut bytes.Buffer
e := &serveEnv{
lc: lc,
testFlagOut: &flagOut,
testStdout: &stdout,
}
lastCount := lc.setCount
var cmd *ffcli.Command
var args []string
mode := serve
if st.command[0] == "funnel" {
mode = funnel
}
cmd = newServeDevCommand(e, mode)
args = st.command[1:]
err := cmd.ParseAndRun(context.Background(), args)
if flagOut.Len() > 0 {
t.Logf("flag package output: %q", flagOut.Bytes())
}
if err != nil {
if st.wantErr == nil {
t.Fatalf("step #%d, line %v: unexpected error: %v", i, st.line, err)
}
if bad := st.wantErr(err); bad != "" {
t.Fatalf("step #%d, line %v: unexpected error: %v", i, st.line, bad)
}
continue
}
if st.wantErr != nil {
t.Fatalf("step #%d, line %v: got success (saved=%v), but wanted an error", i, st.line, lc.config != nil)
}
var got *ipn.ServeConfig = nil
if lc.setCount > lastCount {
got = lc.config
}
if !reflect.DeepEqual(got, st.want) {
t.Fatalf("[%d] %v: bad state. got:\n%v\n\nwant:\n%v\n",
i, st.command, logger.AsJSON(got), logger.AsJSON(st.want))
// NOTE: asJSON will omit empty fields, which might
// result in bad state got/want diffs being the same, even
// though the actual state is different. Use below to debug:
// t.Fatalf("[%d] %v: bad state. got:\n%+v\n\nwant:\n%+v\n",
// i, st.command, got, st.want)
}
}
}
func TestSrcTypeFromFlags(t *testing.T) {
tests := []struct {
name string
env *serveEnv
expectedType serveType
expectedPort uint16
expectedErr bool
}{
{
name: "only http set",
env: &serveEnv{http: "80"},
expectedType: serveTypeHTTP,
expectedPort: 80,
expectedErr: false,
},
{
name: "only https set",
env: &serveEnv{https: "10000"},
expectedType: serveTypeHTTPS,
expectedPort: 10000,
expectedErr: false,
},
{
name: "only tcp set",
env: &serveEnv{tcp: "8000"},
expectedType: serveTypeTCP,
expectedPort: 8000,
expectedErr: false,
},
{
name: "only tls-terminated-tcp set",
env: &serveEnv{tlsTerminatedTCP: "8080"},
expectedType: serveTypeTLSTerminatedTCP,
expectedPort: 8080,
expectedErr: false,
},
{
name: "defaults to https, port 443",
env: &serveEnv{},
expectedType: serveTypeHTTPS,
expectedPort: 443,
expectedErr: false,
},
{
name: "multiple types set",
env: &serveEnv{http: "80", https: "443"},
expectedPort: 0,
expectedErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
srcType, srcPort, err := srvTypeAndPortFromFlags(tt.env)
if (err != nil) != tt.expectedErr {
t.Errorf("Expected error: %v, got: %v", tt.expectedErr, err)
}
if srcType != tt.expectedType {
t.Errorf("Expected srcType: %s, got: %s", tt.expectedType.String(), srcType)
}
if srcPort != tt.expectedPort {
t.Errorf("Expected srcPort: %d, got: %d", tt.expectedPort, srcPort)
}
})
}
}
func TestExpandProxyTargetDev(t *testing.T) {
tests := []struct {
input string
expected string
wantErr bool
}{
{input: "8080", expected: "http://127.0.0.1:8080"},
{input: "localhost:8080", expected: "http://127.0.0.1:8080"},
{input: "http://localhost:8080", expected: "http://127.0.0.1:8080"},
{input: "http://127.0.0.1:8080", expected: "http://127.0.0.1:8080"},
{input: "http://127.0.0.1:8080/foo", expected: "http://127.0.0.1:8080/foo"},
{input: "https://localhost:8080", expected: "https://127.0.0.1:8080"},
{input: "https+insecure://localhost:8080", expected: "https+insecure://127.0.0.1:8080"},
// errors
{input: "localhost:9999999", wantErr: true},
{input: "ftp://localhost:8080", expected: "", wantErr: true},
{input: "https://tailscale.com:8080", expected: "", wantErr: true},
{input: "", expected: "", wantErr: true},
}
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
actual, err := expandProxyTargetDev(tt.input)
if tt.wantErr == true && err == nil {
t.Errorf("Expected an error but got none")
return
}
if tt.wantErr == false && err != nil {
t.Errorf("Got an error, but didn't expect one: %v", err)
return
}
if actual != tt.expected {
t.Errorf("Got: %q; expected: %q", actual, tt.expected)
}
})
}
}
func TestCleanURLPath(t *testing.T) {
tests := []struct {
input string
expected string
wantErr bool
}{
{input: "", expected: "/"},
{input: "/", expected: "/"},
{input: "/foo", expected: "/foo"},
{input: "/foo/", expected: "/foo/"},
{input: "/../bar", wantErr: true},
}
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
actual, err := cleanURLPath(tt.input)
if tt.wantErr == true && err == nil {
t.Errorf("Expected an error but got none")
return
}
if tt.wantErr == false && err != nil {
t.Errorf("Got an error, but didn't expect one: %v", err)
return
}
if actual != tt.expected {
t.Errorf("Got: %q; expected: %q", actual, tt.expected)
}
})
}
}
func TestIsLegacyInvocation(t *testing.T) {
tests := []struct {
subcmd serveMode
args []string
expected bool
}{
{subcmd: serve, args: []string{"https", "localhost:3000"}, expected: true},
{subcmd: serve, args: []string{"https:8443", "localhost:3000"}, expected: true},
{subcmd: serve, args: []string{"http", "localhost:3000"}, expected: true},
{subcmd: serve, args: []string{"http:80", "localhost:3000"}, expected: true},
{subcmd: serve, args: []string{"tcp:2222", "tcp://localhost:22"}, expected: true},
{subcmd: serve, args: []string{"tls-terminated-tcp:443", "tcp://localhost:80"}, expected: true},
// false
{subcmd: serve, args: []string{"3000"}, expected: false},
{subcmd: serve, args: []string{"localhost:3000"}, expected: false},
}
for _, tt := range tests {
args := strings.Join(tt.args, " ")
t.Run(fmt.Sprintf("%v %s", infoMap[tt.subcmd].Name, args), func(t *testing.T) {
actual := isLegacyInvocation(tt.subcmd, tt.args)
if actual != tt.expected {
t.Errorf("Got: %v; expected: %v", actual, tt.expected)
}
})
}
}

View File

@@ -9,6 +9,7 @@ import (
"errors"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
@@ -338,19 +339,19 @@ func TestServeConfigMutations(t *testing.T) {
add(step{reset: true})
add(step{ // must include scheme for tcp
command: cmd("tls-terminated-tcp:443 localhost:5432"),
wantErr: exactErr(errHelp, "errHelp"),
wantErr: exactErr(flag.ErrHelp, "flag.ErrHelp"),
})
add(step{ // !somehost, must be localhost or 127.0.0.1
command: cmd("tls-terminated-tcp:443 tcp://somehost:5432"),
wantErr: exactErr(errHelp, "errHelp"),
wantErr: exactErr(flag.ErrHelp, "flag.ErrHelp"),
})
add(step{ // bad target port, too low
command: cmd("tls-terminated-tcp:443 tcp://somehost:0"),
wantErr: exactErr(errHelp, "errHelp"),
wantErr: exactErr(flag.ErrHelp, "flag.ErrHelp"),
})
add(step{ // bad target port, too high
command: cmd("tls-terminated-tcp:443 tcp://somehost:65536"),
wantErr: exactErr(errHelp, "errHelp"),
wantErr: exactErr(flag.ErrHelp, "flag.ErrHelp"),
})
add(step{
command: cmd("tls-terminated-tcp:443 tcp://localhost:5432"),
@@ -471,7 +472,7 @@ func TestServeConfigMutations(t *testing.T) {
})
add(step{ // bad path
command: cmd("https:443 / bad/path"),
wantErr: exactErr(errHelp, "errHelp"),
wantErr: exactErr(flag.ErrHelp, "flag.ErrHelp"),
})
add(step{reset: true})
add(step{
@@ -665,7 +666,7 @@ func TestServeConfigMutations(t *testing.T) {
})
add(step{ // try to start a web handler on the same port
command: cmd("https:443 / localhost:3000"),
wantErr: exactErr(errHelp, "errHelp"),
wantErr: exactErr(flag.ErrHelp, "flag.ErrHelp"),
})
add(step{reset: true})
add(step{ // start a web handler on port 443
@@ -901,6 +902,11 @@ func (lc *fakeLocalServeClient) IncrementCounter(ctx context.Context, name strin
return nil // unused in tests
}
func (lc *fakeLocalServeClient) StreamServe(ctx context.Context, req ipn.ServeStreamRequest) (io.ReadCloser, error) {
// TODO: testing :)
return nil, nil
}
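One possible way to fill in the fake later, shown only as a sketch (the canned output and the strings import are assumptions, not part of this change):
func (lc *fakeLocalServeClient) streamServeCanned(context.Context, ipn.ServeStreamRequest) (io.ReadCloser, error) {
	// Return canned stream output so the foreground streaming path can be tested.
	return io.NopCloser(strings.NewReader("connection from 100.101.102.103\n")), nil
}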
// exactErr returns an error checker that wants exactly the provided want error.
// If optName is non-empty, it's used in the error message.
func exactErr(want error, optName ...string) func(error) string {

View File

@@ -11,7 +11,6 @@ import (
"net/netip"
"github.com/peterbourgon/ff/v3/ffcli"
"tailscale.com/clientupdate"
"tailscale.com/ipn"
"tailscale.com/net/netutil"
"tailscale.com/net/tsaddr"
@@ -47,8 +46,6 @@ type setArgsT struct {
acceptedRisks string
profileName string
forceDaemon bool
updateCheck bool
updateApply bool
}
func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet {
@@ -64,8 +61,6 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet {
setf.StringVar(&setArgs.hostname, "hostname", "", "hostname to use instead of the one provided by the OS")
setf.StringVar(&setArgs.advertiseRoutes, "advertise-routes", "", "routes to advertise to other nodes (comma-separated, e.g. \"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes")
setf.BoolVar(&setArgs.advertiseDefaultRoute, "advertise-exit-node", false, "offer to be an exit node for internet traffic for the tailnet")
setf.BoolVar(&setArgs.updateCheck, "update-check", true, "HIDDEN: notify about available Tailscale updates")
setf.BoolVar(&setArgs.updateApply, "auto-update", false, "HIDDEN: automatically update to the latest available version")
if safesocket.GOOSUsesPeerCreds(goos) {
setf.StringVar(&setArgs.opUser, "operator", "", "Unix username to allow to operate on tailscaled without sudo")
}
@@ -104,10 +99,6 @@ func runSet(ctx context.Context, args []string) (retErr error) {
Hostname: setArgs.hostname,
OperatorUser: setArgs.opUser,
ForceDaemon: setArgs.forceDaemon,
AutoUpdate: ipn.AutoUpdatePrefs{
Check: setArgs.updateCheck,
Apply: setArgs.updateApply,
},
},
}
@@ -152,12 +143,6 @@ func runSet(ctx context.Context, args []string) (retErr error) {
return err
}
}
if maskedPrefs.AutoUpdateSet {
_, err := clientupdate.NewUpdater(clientupdate.Arguments{})
if errors.Is(err, errors.ErrUnsupported) {
return errors.New("automatic updates are not supported on this platform")
}
}
checkPrefs := curPrefs.Clone()
checkPrefs.ApplyEdits(maskedPrefs)
if err := localClient.CheckPrefs(ctx, checkPrefs); err != nil {

View File

@@ -236,9 +236,6 @@ func runStatus(ctx context.Context, args []string) error {
printHealth()
}
printFunnelStatus(ctx)
if cv := st.ClientVersion; cv != nil && !cv.RunningLatest && cv.LatestVersion != "" {
printf("# New Tailscale version is available: %q, run `tailscale update` to update.\n", cv.LatestVersion)
}
return nil
}

View File

@@ -97,8 +97,6 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet {
}
upf := newFlagSet(cmd)
// When adding new flags, prefer to put them under "tailscale set" instead
// of here. Setting preferences via "tailscale up" is deprecated.
upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs")
upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`)
@@ -714,8 +712,6 @@ func init() {
addPrefFlagMapping("operator", "OperatorUser")
addPrefFlagMapping("ssh", "RunSSH")
addPrefFlagMapping("nickname", "ProfileName")
addPrefFlagMapping("update-check", "AutoUpdate")
addPrefFlagMapping("auto-update", "AutoUpdate")
}
func addPrefFlagMapping(flagName string, prefNames ...string) {

View File

@@ -60,7 +60,7 @@ func runUpdate(ctx context.Context, args []string) error {
if updateArgs.track != "" {
ver = updateArgs.track
}
err := clientupdate.Update(clientupdate.Arguments{
err := clientupdate.Update(clientupdate.UpdateArgs{
Version: ver,
AppStore: updateArgs.appStore,
Logf: func(format string, args ...any) { fmt.Printf(format+"\n", args...) },

View File

@@ -39,7 +39,6 @@ Tailscale, as opposed to a CLI or a native app.
webf.StringVar(&webArgs.listen, "listen", "localhost:8088", "listen address; use port 0 for automatic")
webf.BoolVar(&webArgs.cgi, "cgi", false, "run as CGI script")
webf.BoolVar(&webArgs.dev, "dev", false, "run web client in developer mode [this flag is in development, use is unsupported]")
webf.StringVar(&webArgs.prefix, "prefix", "", "URL prefix added to requests (for cgi or reverse proxies)")
return webf
})(),
Exec: runWeb,
@@ -49,7 +48,6 @@ var webArgs struct {
listen string
cgi bool
dev bool
prefix string
}
func tlsConfigFromEnvironment() *tls.Config {
@@ -83,7 +81,6 @@ func runWeb(ctx context.Context, args []string) error {
webServer, cleanup := web.NewServer(ctx, web.ServerOpts{
DevMode: webArgs.dev,
CGIMode: webArgs.cgi,
PathPrefix: webArgs.prefix,
LocalClient: &localClient,
})
defer cleanup()

View File

@@ -11,11 +11,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate
W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy
L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw
L github.com/coreos/go-systemd/v22/dbus from tailscale.com/clientupdate
W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil/authenticode+
W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode
github.com/fxamacker/cbor/v2 from tailscale.com/tka
L 💣 github.com/godbus/dbus/v5 from github.com/coreos/go-systemd/v22/dbus
github.com/golang/groupcache/lru from tailscale.com/net/dnscache
L github.com/google/nftables from tailscale.com/util/linuxfw
L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt
@@ -202,12 +200,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
golang.org/x/text/unicode/bidi from golang.org/x/net/idna+
golang.org/x/text/unicode/norm from golang.org/x/net/idna
golang.org/x/time/rate from tailscale.com/cmd/tailscale/cli+
archive/tar from tailscale.com/clientupdate
bufio from compress/flate+
bytes from bufio+
cmp from slices
compress/flate from compress/gzip+
compress/gzip from net/http+
compress/gzip from net/http
compress/zlib from image/png+
container/list from crypto/tls+
context from crypto/tls+

View File

@@ -76,7 +76,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http
L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm
L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw
L github.com/coreos/go-systemd/v22/dbus from tailscale.com/clientupdate
LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh
W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+
W 💣 github.com/dblohm7/wingoes/com from tailscale.com/cmd/tailscaled+
@@ -95,8 +94,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
L github.com/google/nftables/expr from github.com/google/nftables+
L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+
L github.com/google/nftables/xt from github.com/google/nftables/expr+
github.com/google/uuid from tailscale.com/clientupdate
github.com/hdevalence/ed25519consensus from tailscale.com/tka+
github.com/google/uuid from tailscale.com/ipn/ipnlocal
github.com/hdevalence/ed25519consensus from tailscale.com/tka
L 💣 github.com/illarion/gonotify from tailscale.com/net/dns
L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/net/tstun
L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4
@@ -217,8 +216,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
LD tailscale.com/chirp from tailscale.com/cmd/tailscaled
tailscale.com/client/tailscale from tailscale.com/derp
tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnlocal+
tailscale.com/clientupdate from tailscale.com/ipn/ipnlocal
tailscale.com/clientupdate/distsign from tailscale.com/clientupdate
tailscale.com/cmd/tailscaled/childproc from tailscale.com/ssh/tailssh+
tailscale.com/control/controlbase from tailscale.com/control/controlclient+
tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+
@@ -290,7 +287,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/paths from tailscale.com/ipn/ipnlocal+
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal
tailscale.com/safesocket from tailscale.com/client/tailscale+
tailscale.com/smallzstd from tailscale.com/control/controlclient+
tailscale.com/smallzstd from tailscale.com/cmd/tailscaled+
LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled
tailscale.com/syncs from tailscale.com/net/netcheck+
tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+
@@ -305,7 +302,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/tstime/rate from tailscale.com/wgengine/filter+
tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled
tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+
tailscale.com/types/empty from tailscale.com/ipn+
tailscale.com/types/empty from tailscale.com/control/controlclient+
tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled
tailscale.com/types/ipproto from tailscale.com/net/flowtrack+
tailscale.com/types/key from tailscale.com/control/controlbase+
@@ -315,7 +312,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/types/netlogtype from tailscale.com/net/connstats+
tailscale.com/types/netmap from tailscale.com/control/controlclient+
tailscale.com/types/nettype from tailscale.com/wgengine/magicsock+
tailscale.com/types/opt from tailscale.com/client/tailscale+
tailscale.com/types/opt from tailscale.com/control/controlclient+
tailscale.com/types/persist from tailscale.com/control/controlclient+
tailscale.com/types/preftype from tailscale.com/ipn+
tailscale.com/types/ptr from tailscale.com/hostinfo+
@@ -337,13 +334,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
L tailscale.com/util/linuxfw from tailscale.com/net/netns+
tailscale.com/util/mak from tailscale.com/control/controlclient+
tailscale.com/util/multierr from tailscale.com/control/controlclient+
tailscale.com/util/must from tailscale.com/logpolicy+
tailscale.com/util/must from tailscale.com/logpolicy
💣 tailscale.com/util/osdiag from tailscale.com/cmd/tailscaled+
W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag
tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal+
W tailscale.com/util/pidowner from tailscale.com/ipn/ipnauth
tailscale.com/util/racebuild from tailscale.com/logpolicy
tailscale.com/util/rands from tailscale.com/ipn/localapi+
tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock
tailscale.com/util/set from tailscale.com/health+
tailscale.com/util/singleflight from tailscale.com/control/controlclient+
@@ -353,7 +349,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+
tailscale.com/util/uniq from tailscale.com/wgengine/magicsock+
💣 tailscale.com/util/winutil from tailscale.com/control/controlclient+
W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag+
W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag
W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal
tailscale.com/version from tailscale.com/derp+
tailscale.com/version/distro from tailscale.com/hostinfo+
@@ -415,7 +411,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
golang.org/x/text/unicode/bidi from golang.org/x/net/idna+
golang.org/x/text/unicode/norm from golang.org/x/net/idna
golang.org/x/time/rate from gvisor.dev/gvisor/pkg/tcpip/stack+
archive/tar from tailscale.com/clientupdate
bufio from compress/flate+
bytes from bufio+
cmp from slices

View File

@@ -48,6 +48,7 @@ import (
"tailscale.com/net/tstun"
"tailscale.com/paths"
"tailscale.com/safesocket"
"tailscale.com/smallzstd"
"tailscale.com/syncs"
"tailscale.com/tsd"
"tailscale.com/tsweb/varz"
@@ -75,8 +76,6 @@ func defaultTunName() string {
// "utun" is recognized by wireguard-go/tun/tun_darwin.go
// as a magic value that uses/creates any free number.
return "utun"
case "plan9":
return "userspace-networking"
case "linux":
switch distro.Get() {
case distro.Synology:
@@ -201,10 +200,6 @@ func main() {
}
}
if fd, ok := envknob.LookupInt("TS_PARENT_DEATH_FD"); ok && fd > 2 {
go dieOnPipeReadErrorOfFD(fd)
}
if printVersion {
fmt.Println(version.String())
os.Exit(0)
@@ -496,7 +491,6 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID
if err != nil {
return nil, fmt.Errorf("newNetstack: %w", err)
}
sys.Set(ns)
ns.ProcessLocalIPs = onlyNetstack
ns.ProcessSubnets = onlyNetstack || handleSubnetsInNetstack()
@@ -551,6 +545,9 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID
if root := lb.TailscaleVarRoot(); root != "" {
dnsfallback.SetCachePath(filepath.Join(root, "derpmap.cached.json"), logf)
}
lb.SetDecompressor(func() (controlclient.Decompressor, error) {
return smallzstd.NewDecoder(nil)
})
configureTaildrop(logf, lb)
if err := ns.Start(lb); err != nil {
log.Fatalf("failed to start netstack: %v", err)
@@ -608,7 +605,6 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo
NetMon: sys.NetMon.Get(),
Dialer: sys.Dialer.Get(),
SetSubsystem: sys.Set,
ControlKnobs: sys.ControlKnobs(),
}
onlyNetstack = name == "userspace-networking"
@@ -771,14 +767,3 @@ func beChild(args []string) error {
}
return f(args[1:])
}
// dieOnPipeReadErrorOfFD reads from the pipe named by fd and exits the process
// when the pipe becomes readable. We use this in tests as a somewhat more
// portable mechanism for the Linux PR_SET_PDEATHSIG, which we wish existed on
// macOS. This helps us clean up straggler tailscaled processes when the parent
// test driver dies unexpectedly.
func dieOnPipeReadErrorOfFD(fd int) {
f := os.NewFile(uintptr(fd), "TS_PARENT_DEATH_FD")
f.Read(make([]byte, 1))
os.Exit(1)
}
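
For context on how this hook gets wired up: the parent keeps the write end of a pipe open and hands the read end to the child, so the child's blocking Read returns (and it exits) as soon as the parent goes away. Below is a minimal parent-side sketch assuming standard os/exec plumbing; the child command and the fd number 3 are illustrative, not taken from this diff.

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		log.Fatal(err)
	}
	cmd := exec.Command("tailscaled")
	cmd.ExtraFiles = []*os.File{r} // ExtraFiles[0] becomes fd 3 in the child
	cmd.Env = append(os.Environ(), "TS_PARENT_DEATH_FD=3")
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	r.Close()       // the child now holds its own copy of the read end
	defer w.Close() // while this process is alive, the child's Read blocks;
	// when this process exits, the kernel closes w and the child's Read
	// returns, so the child calls os.Exit(1) and cleans itself up.
	_ = cmd.Wait()
}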

View File

@@ -19,8 +19,7 @@ import (
const FlakyTestLogMessage = "flakytest: this is a known flaky test"
// FlakeAttemptEnv is an environment variable that is set by cmd/testwrapper
// when a flaky test is being (re)tried. It contains the attempt number,
// starting at 1.
// when a flaky test is retried. It contains the attempt number, starting at 1.
const FlakeAttemptEnv = "TS_TESTWRAPPER_ATTEMPT"
var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/tailscale/[a-zA-Z0-9_.-]+/issues/\d+\z`)
@@ -34,11 +33,7 @@ func Mark(t testing.TB, issue string) {
if !issueRegexp.MatchString(issue) {
t.Fatalf("bad issue format: %q", issue)
}
if _, ok := os.LookupEnv(FlakeAttemptEnv); ok {
// We're being run under cmd/testwrapper so send our sentinel message
// to stderr. (We avoid doing this when the env is absent to avoid
// spamming people running tests without the wrapper)
fmt.Fprintln(os.Stderr, FlakyTestLogMessage)
}
fmt.Fprintln(os.Stderr, FlakyTestLogMessage) // sentinel value for testwrapper
t.Logf("flakytest: issue tracking this flaky test: %s", issue)
}
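
A minimal caller of this API, mirroring the fixture exercised by testwrapper's own tests later in this diff (the package and test names are illustrative, and the issue URL is a placeholder just as in that fixture):

package example_test

import (
	"os"
	"testing"

	"tailscale.com/cmd/testwrapper/flakytest"
)

func TestSometimesFlaky(t *testing.T) {
	flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/0") // placeholder issue URL
	if attempt := os.Getenv(flakytest.FlakeAttemptEnv); attempt != "" {
		t.Logf("running under cmd/testwrapper, attempt %s", attempt)
	}
	// ... test body ...
}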

View File

@@ -8,7 +8,6 @@
package main
import (
"bufio"
"bytes"
"context"
"encoding/json"
@@ -30,8 +29,7 @@ import (
const maxAttempts = 3
type testAttempt struct {
pkg string // "tailscale.com/types/key"
testName string // "TestFoo"
name testName
outcome string // "pass", "fail", "skip"
logs bytes.Buffer
isMarkedFlaky bool // set if the test is marked as flaky
@@ -39,6 +37,11 @@ type testAttempt struct {
pkgFinished bool
}
type testName struct {
pkg string // "tailscale.com/types/key"
name string // "TestFoo"
}
type packageTests struct {
// pattern is the package pattern to run.
// Must be a single pattern, not a list of patterns.
@@ -60,10 +63,9 @@ var debug = os.Getenv("TS_TESTWRAPPER_DEBUG") != ""
// runTests runs the tests in pt and sends the results on ch. It sends a
// testAttempt for each test and a final testAttempt per pkg with pkgFinished
// set to true. Package build errors will not emit a testAttempt (as no valid
// JSON is produced) but the [os/exec.ExitError] will be returned.
// set to true.
// It calls close(ch) when it's done.
func runTests(ctx context.Context, attempt int, pt *packageTests, otherArgs []string, ch chan<- *testAttempt) error {
func runTests(ctx context.Context, attempt int, pt *packageTests, otherArgs []string, ch chan<- *testAttempt) {
defer close(ch)
args := []string{"test", "-json", pt.pattern}
args = append(args, otherArgs...)
@@ -89,12 +91,17 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, otherArgs []st
log.Printf("error starting test: %v", err)
os.Exit(1)
}
done := make(chan struct{})
go func() {
defer close(done)
cmd.Wait()
}()
s := bufio.NewScanner(r)
resultMap := make(map[string]map[string]*testAttempt) // pkg -> test -> testAttempt
for s.Scan() {
jd := json.NewDecoder(r)
resultMap := make(map[testName]*testAttempt)
for {
var goOutput goTestOutput
if err := json.Unmarshal(s.Bytes(), &goOutput); err != nil {
if err := jd.Decode(&goOutput); err != nil {
if errors.Is(err, io.EOF) || errors.Is(err, os.ErrClosed) {
break
}
@@ -104,39 +111,32 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, otherArgs []st
// The build error will be printed to stderr.
// See: https://github.com/golang/go/issues/35169
if _, ok := err.(*json.SyntaxError); ok {
fmt.Println(s.Text())
jd = json.NewDecoder(r)
continue
}
panic(err)
}
pkg := goOutput.Package
pkgTests := resultMap[pkg]
if goOutput.Test == "" {
switch goOutput.Action {
case "fail", "pass", "skip":
for _, test := range pkgTests {
if test.outcome == "" {
test.outcome = "fail"
ch <- test
}
}
ch <- &testAttempt{
pkg: goOutput.Package,
name: testName{
pkg: goOutput.Package,
},
outcome: goOutput.Action,
pkgFinished: true,
}
}
continue
}
if pkgTests == nil {
pkgTests = make(map[string]*testAttempt)
resultMap[pkg] = pkgTests
name := testName{
pkg: goOutput.Package,
name: goOutput.Test,
}
testName := goOutput.Test
if test, _, isSubtest := strings.Cut(goOutput.Test, "/"); isSubtest {
testName = test
name.name = test
if goOutput.Action == "output" {
resultMap[pkg][testName].logs.WriteString(goOutput.Output)
resultMap[name].logs.WriteString(goOutput.Output)
}
continue
}
@@ -144,28 +144,21 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, otherArgs []st
case "start":
// ignore
case "run":
pkgTests[testName] = &testAttempt{
pkg: pkg,
testName: testName,
resultMap[name] = &testAttempt{
name: name,
}
case "skip", "pass", "fail":
pkgTests[testName].outcome = goOutput.Action
ch <- pkgTests[testName]
resultMap[name].outcome = goOutput.Action
ch <- resultMap[name]
case "output":
if strings.TrimSpace(goOutput.Output) == flakytest.FlakyTestLogMessage {
pkgTests[testName].isMarkedFlaky = true
resultMap[name].isMarkedFlaky = true
} else {
pkgTests[testName].logs.WriteString(goOutput.Output)
resultMap[name].logs.WriteString(goOutput.Output)
}
}
}
if err := cmd.Wait(); err != nil {
return err
}
if err := s.Err(); err != nil {
return fmt.Errorf("reading go test stdout: %w", err)
}
return nil
<-done
}
func main() {
@@ -247,32 +240,14 @@ func main() {
fmt.Printf("\n\nAttempt #%d: Retrying flaky tests:\n\n", thisRun.attempt)
}
failed := false
toRetry := make(map[string][]string) // pkg -> tests to retry
for _, pt := range thisRun.tests {
ch := make(chan *testAttempt)
runErr := make(chan error, 1)
go func() {
defer close(runErr)
runErr <- runTests(ctx, thisRun.attempt, pt, otherArgs, ch)
}()
var failed bool
go runTests(ctx, thisRun.attempt, pt, otherArgs, ch)
for tr := range ch {
// Go assigns the package name "command-line-arguments" when you
// `go test FILE` rather than `go test PKG`. It's more
// convenient for us to specify files in tests, so fix tr.pkg
// so that subsequent testwrapper attempts run correctly.
if tr.pkg == "command-line-arguments" {
tr.pkg = pattern
}
if tr.pkgFinished {
if tr.outcome == "fail" && len(toRetry[tr.pkg]) == 0 {
// If a package fails and we don't have any tests to
// retry, then we should fail. This typically happens
// when a package times out.
failed = true
}
printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt)
printPkgOutcome(tr.name.pkg, tr.outcome, thisRun.attempt)
continue
}
if *v || tr.outcome == "fail" {
@@ -282,28 +257,15 @@ func main() {
continue
}
if tr.isMarkedFlaky {
toRetry[tr.pkg] = append(toRetry[tr.pkg], tr.testName)
toRetry[tr.name.pkg] = append(toRetry[tr.name.pkg], tr.name.name)
} else {
failed = true
}
}
if failed {
fmt.Println("\n\nNot retrying flaky tests because non-flaky tests failed.")
os.Exit(1)
}
// If there's nothing to retry and no non-retryable tests have
// failed then we've probably hit a build error.
if err := <-runErr; len(toRetry) == 0 && err != nil {
var exit *exec.ExitError
if errors.As(err, &exit) {
if code := exit.ExitCode(); code > -1 {
os.Exit(exit.ExitCode())
}
}
log.Printf("testwrapper: %s", err)
os.Exit(1)
}
}
if failed {
fmt.Println("\n\nNot retrying flaky tests because non-flaky tests failed.")
os.Exit(1)
}
if len(toRetry) == 0 {
continue
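
Both the Scanner-based and Decoder-based variants above consume the same event stream produced by `go test -json`. The standalone sketch below shows that decoding loop, declaring only the event fields the wrapper actually reads (Action, Package, Test, Output); the real stream also carries fields such as Time and Elapsed, which this sketch ignores.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os/exec"
)

type testEvent struct {
	Action  string // "run", "pass", "fail", "skip", "output", ...
	Package string
	Test    string // empty for package-level events
	Output  string // set when Action == "output"
}

func main() {
	cmd := exec.Command("go", "test", "-json", "./...")
	out, err := cmd.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	dec := json.NewDecoder(out)
	for {
		var ev testEvent
		if err := dec.Decode(&ev); err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(err)
		}
		if ev.Test != "" && (ev.Action == "pass" || ev.Action == "fail" || ev.Action == "skip") {
			fmt.Printf("%s.%s: %s\n", ev.Package, ev.Test, ev.Action)
		}
	}
	_ = cmd.Wait() // go test exits nonzero when tests fail; ignored in this sketch
}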

View File

@@ -1,218 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package main_test
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"sync"
"testing"
)
var (
buildPath string
buildErr error
buildOnce sync.Once
)
func cmdTestwrapper(t *testing.T, args ...string) *exec.Cmd {
buildOnce.Do(func() {
buildPath, buildErr = buildTestWrapper()
})
if buildErr != nil {
t.Fatalf("building testwrapper: %s", buildErr)
}
return exec.Command(buildPath, args...)
}
func buildTestWrapper() (string, error) {
dir, err := os.MkdirTemp("", "testwrapper")
if err != nil {
return "", fmt.Errorf("making temp dir: %w", err)
}
_, err = exec.Command("go", "build", "-o", dir, ".").Output()
if err != nil {
return "", fmt.Errorf("go build: %w", err)
}
return filepath.Join(dir, "testwrapper"), nil
}
func TestRetry(t *testing.T) {
t.Parallel()
testfile := filepath.Join(t.TempDir(), "retry_test.go")
code := []byte(`package retry_test
import (
"os"
"testing"
"tailscale.com/cmd/testwrapper/flakytest"
)
func TestOK(t *testing.T) {}
func TestFlakeRun(t *testing.T) {
flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/0") // random issue
e := os.Getenv(flakytest.FlakeAttemptEnv)
if e == "" {
t.Skip("not running in testwrapper")
}
if e == "1" {
t.Fatal("First run in testwrapper, failing so that test is retried. This is expected.")
}
}
`)
if err := os.WriteFile(testfile, code, 0o644); err != nil {
t.Fatalf("writing package: %s", err)
}
out, err := cmdTestwrapper(t, "-v", testfile).CombinedOutput()
if err != nil {
t.Fatalf("go run . %s: %s with output:\n%s", testfile, err, out)
}
want := []byte("ok\t" + testfile + " [attempt=2]")
if !bytes.Contains(out, want) {
t.Fatalf("wanted output containing %q but got:\n%s", want, out)
}
if okRuns := bytes.Count(out, []byte("=== RUN TestOK")); okRuns != 1 {
t.Fatalf("expected TestOK to be run once but was run %d times in output:\n%s", okRuns, out)
}
if flakeRuns := bytes.Count(out, []byte("=== RUN TestFlakeRun")); flakeRuns != 2 {
t.Fatalf("expected TestFlakeRun to be run twice but was run %d times in output:\n%s", flakeRuns, out)
}
if testing.Verbose() {
t.Logf("success - output:\n%s", out)
}
}
func TestNoRetry(t *testing.T) {
t.Parallel()
testfile := filepath.Join(t.TempDir(), "noretry_test.go")
code := []byte(`package noretry_test
import (
"testing"
"tailscale.com/cmd/testwrapper/flakytest"
)
func TestFlakeRun(t *testing.T) {
flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/0") // random issue
t.Error("shouldn't be retried")
}
func TestAlwaysError(t *testing.T) {
t.Error("error")
}
`)
if err := os.WriteFile(testfile, code, 0o644); err != nil {
t.Fatalf("writing package: %s", err)
}
out, err := cmdTestwrapper(t, "-v", testfile).Output()
if err == nil {
t.Fatalf("go run . %s: expected error but it succeeded with output:\n%s", testfile, out)
}
if code, ok := errExitCode(err); ok && code != 1 {
t.Fatalf("expected exit code 1 but got %d", code)
}
want := []byte("Not retrying flaky tests because non-flaky tests failed.")
if !bytes.Contains(out, want) {
t.Fatalf("wanted output containing %q but got:\n%s", want, out)
}
if flakeRuns := bytes.Count(out, []byte("=== RUN TestFlakeRun")); flakeRuns != 1 {
t.Fatalf("expected TestFlakeRun to be run once but was run %d times in output:\n%s", flakeRuns, out)
}
if testing.Verbose() {
t.Logf("success - output:\n%s", out)
}
}
func TestBuildError(t *testing.T) {
t.Parallel()
// Construct our broken package.
testfile := filepath.Join(t.TempDir(), "builderror_test.go")
code := []byte("package builderror_test\n\nderp")
err := os.WriteFile(testfile, code, 0o644)
if err != nil {
t.Fatalf("writing package: %s", err)
}
buildErr := []byte("builderror_test.go:3:1: expected declaration, found derp\nFAIL command-line-arguments [setup failed]")
// Confirm `go test` exits with code 1.
goOut, err := exec.Command("go", "test", testfile).CombinedOutput()
if code, ok := errExitCode(err); !ok || code != 1 {
t.Fatalf("go test %s: expected error with exit code 0 but got: %v", testfile, err)
}
if !bytes.Contains(goOut, buildErr) {
t.Fatalf("go test %s: expected build error containing %q but got:\n%s", testfile, buildErr, goOut)
}
// Confirm `testwrapper` exits with code 1.
twOut, err := cmdTestwrapper(t, testfile).CombinedOutput()
if code, ok := errExitCode(err); !ok || code != 1 {
t.Fatalf("testwrapper %s: expected error with exit code 0 but got: %v", testfile, err)
}
if !bytes.Contains(twOut, buildErr) {
t.Fatalf("testwrapper %s: expected build error containing %q but got:\n%s", testfile, buildErr, twOut)
}
if testing.Verbose() {
t.Logf("success - output:\n%s", twOut)
}
}
func TestTimeout(t *testing.T) {
t.Parallel()
// Construct our broken package.
testfile := filepath.Join(t.TempDir(), "timeout_test.go")
code := []byte(`package noretry_test
import (
"testing"
"time"
)
func TestTimeout(t *testing.T) {
time.Sleep(500 * time.Millisecond)
}
`)
err := os.WriteFile(testfile, code, 0o644)
if err != nil {
t.Fatalf("writing package: %s", err)
}
out, err := cmdTestwrapper(t, testfile, "-timeout=20ms").CombinedOutput()
if code, ok := errExitCode(err); !ok || code != 1 {
t.Fatalf("testwrapper %s: expected error with exit code 0 but got: %v; output was:\n%s", testfile, err, out)
}
if want := "panic: test timed out after 20ms"; !bytes.Contains(out, []byte(want)) {
t.Fatalf("testwrapper %s: expected build error containing %q but got:\n%s", testfile, buildErr, out)
}
if testing.Verbose() {
t.Logf("success - output:\n%s", out)
}
}
func errExitCode(err error) (int, bool) {
var exit *exec.ExitError
if errors.As(err, &exit) {
return exit.ExitCode(), true
}
return 0, false
}

View File

@@ -35,6 +35,7 @@ import (
"tailscale.com/net/netns"
"tailscale.com/net/tsdial"
"tailscale.com/safesocket"
"tailscale.com/smallzstd"
"tailscale.com/tailcfg"
"tailscale.com/tsd"
"tailscale.com/wgengine"
@@ -102,7 +103,6 @@ func newIPN(jsConfig js.Value) map[string]any {
eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{
Dialer: dialer,
SetSubsystem: sys.Set,
ControlKnobs: sys.ControlKnobs(),
})
if err != nil {
log.Fatal(err)
@@ -113,7 +113,6 @@ func newIPN(jsConfig js.Value) map[string]any {
if err != nil {
log.Fatalf("netstack.Create: %v", err)
}
sys.Set(ns)
ns.ProcessLocalIPs = true
ns.ProcessSubnets = true
@@ -134,6 +133,9 @@ func newIPN(jsConfig js.Value) map[string]any {
if err := ns.Start(lb); err != nil {
log.Fatalf("failed to start netstack: %v", err)
}
lb.SetDecompressor(func() (controlclient.Decompressor, error) {
return smallzstd.NewDecoder(nil)
})
srv.SetLocalBackend(lb)
jsIPN := &jsIPN{
@@ -324,11 +326,7 @@ func (i *jsIPN) logout() {
if i.lb.State() == ipn.NoState {
log.Printf("Backend not running")
}
go func() {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
i.lb.Logout(ctx)
}()
go i.lb.Logout()
}
func (i *jsIPN) ssh(host, username string, termConfig js.Value) map[string]any {

View File

@@ -17,36 +17,54 @@ import (
"tailscale.com/net/sockstats"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/empty"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/persist"
"tailscale.com/types/ptr"
"tailscale.com/types/structs"
)
type LoginGoal struct {
_ structs.Incomparable
token *tailcfg.Oauth2Token // oauth token to use when logging in
flags LoginFlags // flags to use when logging in
url string // auth url that needs to be visited
_ structs.Incomparable
wantLoggedIn bool // true if we *want* to be logged in
token *tailcfg.Oauth2Token // oauth token to use when logging in
flags LoginFlags // flags to use when logging in
url string // auth url that needs to be visited
loggedOutResult chan<- error
}
func (g *LoginGoal) sendLogoutError(err error) {
if g.loggedOutResult == nil {
return
}
select {
case g.loggedOutResult <- err:
default:
}
}
var _ Client = (*Auto)(nil)
// waitUnpause waits until either the client is unpaused or the Auto client is
// shut down. It reports whether the client should keep running (i.e. it's not
// closed).
func (c *Auto) waitUnpause(routineLogName string) (keepRunning bool) {
// waitUnpause waits until the client is unpaused then returns. It only
// returns an error if the client is closed.
func (c *Auto) waitUnpause(routineLogName string) error {
c.mu.Lock()
if !c.paused || c.closed {
defer c.mu.Unlock()
return !c.closed
if !c.paused {
c.mu.Unlock()
return nil
}
unpaused := c.unpausedChanLocked()
c.mu.Unlock()
c.logf("%s: awaiting unpause", routineLogName)
return <-unpaused
select {
case <-unpaused:
c.logf("%s: unpaused", routineLogName)
return nil
case <-c.quit:
return errors.New("quit")
}
}
// updateRoutine is responsible for informing the server of worthy changes to
@@ -60,7 +78,7 @@ func (c *Auto) updateRoutine() {
var lastUpdateGenInformed updateGen
for {
if !c.waitUnpause("updateRoutine") {
if err := c.waitUnpause("updateRoutine"); err != nil {
c.logf("updateRoutine: exiting")
return
}
@@ -70,11 +88,19 @@ func (c *Auto) updateRoutine() {
needUpdate := gen > 0 && gen != lastUpdateGenInformed && c.loggedIn
c.mu.Unlock()
if !needUpdate {
if needUpdate {
select {
case <-c.quit:
c.logf("updateRoutine: exiting")
return
default:
}
} else {
// Nothing to do, wait for a signal.
select {
case <-ctx.Done():
continue
case <-c.quit:
c.logf("updateRoutine: exiting")
return
case <-c.updateCh:
continue
}
@@ -112,37 +138,36 @@ type updateGen int64
// Auto connects to a tailcontrol server for a node.
// It's a concrete implementation of the Client interface.
type Auto struct {
direct *Direct // our interface to the server APIs
clock tstime.Clock
logf logger.Logf
closed bool
updateCh chan struct{} // readable when we should inform the server of a change
observer Observer // called to update Client status; always non-nil
observerQueue execQueue
direct *Direct // our interface to the server APIs
clock tstime.Clock
logf logger.Logf
expiry *time.Time
closed bool
updateCh chan struct{} // readable when we should inform the server of a change
newMapCh chan struct{} // readable when we must restart a map request
observer Observer // called to update Client status; always non-nil
unregisterHealthWatch func()
mu sync.Mutex // mutex guards the following fields
wantLoggedIn bool // whether the user wants to be logged in per last method call
urlToVisit string // the last url we were told to visit
expiry time.Time
// lastUpdateGen is the gen of last update we had an update worth sending to
// the server.
lastUpdateGen updateGen
paused bool // whether we should stop making HTTP requests
unpauseWaiters []chan bool // chans that get sent true (once) on wake, or false on Shutdown
loggedIn bool // true if currently logged in
loginGoal *LoginGoal // non-nil if some login activity is desired
inMapPoll bool // true once we get the first MapResponse in a stream; false when HTTP response ends
state State // TODO(bradfitz): delete this, make it computed by method from other state
paused bool // whether we should stop making HTTP requests
unpauseWaiters []chan struct{}
loggedIn bool // true if currently logged in
loginGoal *LoginGoal // non-nil if some login activity is desired
synced bool // true if our netmap is up-to-date
inSendStatus int // number of sendStatus calls currently in progress
state State
authCtx context.Context // context used for auth requests
mapCtx context.Context // context used for netmap and update requests
authCancel func() // cancel authCtx
mapCancel func() // cancel mapCtx
quit chan struct{} // when closed, goroutines should all exit
authDone chan struct{} // when closed, authRoutine is done
mapDone chan struct{} // when closed, mapRoutine is done
updateDone chan struct{} // when closed, updateRoutine is done
@@ -183,6 +208,8 @@ func NewNoStart(opts Options) (_ *Auto, err error) {
clock: opts.Clock,
logf: opts.Logf,
updateCh: make(chan struct{}, 1),
newMapCh: make(chan struct{}, 1),
quit: make(chan struct{}),
authDone: make(chan struct{}),
mapDone: make(chan struct{}),
updateDone: make(chan struct{}),
@@ -205,20 +232,21 @@ func NewNoStart(opts Options) (_ *Auto, err error) {
func (c *Auto) SetPaused(paused bool) {
c.mu.Lock()
defer c.mu.Unlock()
if paused == c.paused || c.closed {
if paused == c.paused {
return
}
c.logf("setPaused(%v)", paused)
c.paused = paused
if paused {
c.cancelMapCtxLocked()
c.cancelAuthCtxLocked()
return
// Only cancel the map routine. (The auth routine isn't expensive
// so it's fine to keep it running.)
c.cancelMapLocked()
} else {
for _, ch := range c.unpauseWaiters {
close(ch)
}
c.unpauseWaiters = nil
}
for _, ch := range c.unpauseWaiters {
ch <- true
}
c.unpauseWaiters = nil
}
// Start starts the client's goroutines.
@@ -252,16 +280,9 @@ func (c *Auto) updateControl() {
}
}
// cancelAuthCtx cancels the existing auth goroutine's context
// & creates a new one, causing it to restart.
func (c *Auto) cancelAuthCtx() {
func (c *Auto) cancelAuth() {
c.mu.Lock()
defer c.mu.Unlock()
c.cancelAuthCtxLocked()
}
// cancelAuthCtxLocked is like cancelAuthCtx, but assumes the caller holds c.mu.
func (c *Auto) cancelAuthCtxLocked() {
if c.authCancel != nil {
c.authCancel()
}
@@ -271,16 +292,8 @@ func (c *Auto) cancelAuthCtxLocked() {
}
}
// cancelMapCtx cancels the context for the existing mapPoll and liteUpdates
// goroutines and creates a new one, causing them to restart.
func (c *Auto) cancelMapCtx() {
c.mu.Lock()
defer c.mu.Unlock()
c.cancelMapCtxLocked()
}
// cancelMapCtxLocked is like cancelMapCtx, but assumes the caller holds c.mu.
func (c *Auto) cancelMapCtxLocked() {
// cancelMapLocked is like cancelMap, but assumes the caller holds c.mu.
func (c *Auto) cancelMapLocked() {
if c.mapCancel != nil {
c.mapCancel()
}
@@ -290,15 +303,32 @@ func (c *Auto) cancelMapCtxLocked() {
}
}
// cancelMap cancels the existing mapPoll and liteUpdates.
func (c *Auto) cancelMap() {
c.mu.Lock()
defer c.mu.Unlock()
c.cancelMapLocked()
}
// restartMap cancels the existing mapPoll and liteUpdates, and then starts a
// new one.
func (c *Auto) restartMap() {
c.mu.Lock()
c.cancelMapCtxLocked()
synced := c.inMapPoll
c.cancelMapLocked()
synced := c.synced
c.mu.Unlock()
c.logf("[v1] restartMap: synced=%v", synced)
select {
case c.newMapCh <- struct{}{}:
c.logf("[v1] restartMap: wrote to channel")
default:
// if channel write failed, then there was already
// an outstanding newMapCh request. One is enough,
// since it'll always use the latest endpoints.
c.logf("[v1] restartMap: channel was full")
}
c.updateControl()
}
@@ -307,20 +337,23 @@ func (c *Auto) authRoutine() {
bo := backoff.NewBackoff("authRoutine", c.logf, 30*time.Second)
for {
if !c.waitUnpause("authRoutine") {
c.logf("authRoutine: exiting")
return
}
c.mu.Lock()
goal := c.loginGoal
ctx := c.authCtx
if goal != nil {
c.logf("[v1] authRoutine: %s; wantLoggedIn=%v", c.state, true)
c.logf("[v1] authRoutine: %s; wantLoggedIn=%v", c.state, goal.wantLoggedIn)
} else {
c.logf("[v1] authRoutine: %s; goal=nil paused=%v", c.state, c.paused)
}
c.mu.Unlock()
select {
case <-c.quit:
c.logf("[v1] authRoutine: quit")
return
default:
}
report := func(err error, msg string) {
c.logf("[v1] %s: %v", msg, err)
// don't send status updates for context errors,
@@ -338,90 +371,111 @@ func (c *Auto) authRoutine() {
continue
}
c.mu.Lock()
c.urlToVisit = goal.url
if goal.url != "" {
c.state = StateURLVisitRequired
} else {
c.state = StateAuthenticating
}
c.mu.Unlock()
var url string
var err error
var f string
if goal.url != "" {
url, err = c.direct.WaitLoginURL(ctx, goal.url)
f = "WaitLoginURL"
} else {
url, err = c.direct.TryLogin(ctx, goal.token, goal.flags)
f = "TryLogin"
}
if err != nil {
health.SetAuthRoutineInError(err)
report(err, f)
bo.BackOff(ctx, err)
continue
}
if url != "" {
// goal.url ought to be empty here.
// However, not all control servers get this right,
// and logging about it here just generates noise.
c.mu.Lock()
c.urlToVisit = url
c.loginGoal = &LoginGoal{
flags: LoginDefault,
url: url,
if !goal.wantLoggedIn {
health.SetAuthRoutineInError(nil)
err := c.direct.TryLogout(ctx)
goal.sendLogoutError(err)
if err != nil {
report(err, "TryLogout")
bo.BackOff(ctx, err)
continue
}
c.state = StateURLVisitRequired
// success
c.mu.Lock()
c.loggedIn = false
c.loginGoal = nil
c.state = StateNotAuthenticated
c.synced = false
c.mu.Unlock()
c.sendStatus("authRoutine-url", err, url, nil)
if goal.url == url {
// The server sent us the same URL we already tried,
// backoff to avoid a busy loop.
bo.BackOff(ctx, errors.New("login URL not changing"))
c.sendStatus("authRoutine-wantout", nil, "", nil)
bo.BackOff(ctx, nil)
} else { // ie. goal.wantLoggedIn
c.mu.Lock()
if goal.url != "" {
c.state = StateURLVisitRequired
} else {
bo.BackOff(ctx, nil)
c.state = StateAuthenticating
}
continue
c.mu.Unlock()
var url string
var err error
var f string
if goal.url != "" {
url, err = c.direct.WaitLoginURL(ctx, goal.url)
f = "WaitLoginURL"
} else {
url, err = c.direct.TryLogin(ctx, goal.token, goal.flags)
f = "TryLogin"
}
if err != nil {
health.SetAuthRoutineInError(err)
report(err, f)
bo.BackOff(ctx, err)
continue
}
if url != "" {
// goal.url ought to be empty here.
// However, not all control servers get this right,
// and logging about it here just generates noise.
c.mu.Lock()
c.loginGoal = &LoginGoal{
wantLoggedIn: true,
flags: LoginDefault,
url: url,
}
c.state = StateURLVisitRequired
c.synced = false
c.mu.Unlock()
c.sendStatus("authRoutine-url", err, url, nil)
if goal.url == url {
// The server sent us the same URL we already tried,
// backoff to avoid a busy loop.
bo.BackOff(ctx, errors.New("login URL not changing"))
} else {
bo.BackOff(ctx, nil)
}
continue
}
// success
health.SetAuthRoutineInError(nil)
c.mu.Lock()
c.loggedIn = true
c.loginGoal = nil
c.state = StateAuthenticated
c.mu.Unlock()
c.sendStatus("authRoutine-success", nil, "", nil)
c.restartMap()
bo.BackOff(ctx, nil)
}
// success
health.SetAuthRoutineInError(nil)
c.mu.Lock()
c.urlToVisit = ""
c.loggedIn = true
c.loginGoal = nil
c.state = StateAuthenticated
c.mu.Unlock()
c.sendStatus("authRoutine-success", nil, "", nil)
c.restartMap()
bo.BackOff(ctx, nil)
}
}
// ExpiryForTests returns the credential expiration time, or the zero value if
// the expiration time isn't known. It's used in tests only.
func (c *Auto) ExpiryForTests() time.Time {
// Expiry returns the credential expiration time, or the zero time if
// the expiration time isn't known. Used in tests only.
func (c *Auto) Expiry() *time.Time {
c.mu.Lock()
defer c.mu.Unlock()
return c.expiry
}
// DirectForTest returns the underlying direct client object.
// It's used in tests only.
func (c *Auto) DirectForTest() *Direct {
// Direct returns the underlying direct client object. Used in tests
// only.
func (c *Auto) Direct() *Direct {
return c.direct
}
// unpausedChanLocked returns a new channel that gets sent
// either a true when unpaused or false on Auto.Shutdown.
// unpausedChanLocked returns a new channel that is closed when the
// current Auto pause is unpaused.
//
// c.mu must be held
func (c *Auto) unpausedChanLocked() <-chan bool {
unpaused := make(chan bool, 1)
func (c *Auto) unpausedChanLocked() <-chan struct{} {
unpaused := make(chan struct{})
c.unpauseWaiters = append(c.unpauseWaiters, unpaused)
return unpaused
}
@@ -432,18 +486,17 @@ type mapRoutineState struct {
bo *backoff.Backoff
}
var _ NetmapDeltaUpdater = mapRoutineState{}
func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) {
c := mrs.c
health.SetInPollNetMap(true)
c.mu.Lock()
ctx := c.mapCtx
c.inMapPoll = true
c.synced = true
if c.loggedIn {
c.state = StateSynchronized
}
c.expiry = nm.Expiry
c.expiry = ptr.To(nm.Expiry)
stillAuthed := c.loggedIn
c.logf("[v1] mapRoutine: netmap received: %s", c.state)
c.mu.Unlock()
@@ -455,28 +508,6 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) {
mrs.bo.BackOff(ctx, nil)
}
func (mrs mapRoutineState) UpdateNetmapDelta(muts []netmap.NodeMutation) bool {
c := mrs.c
c.mu.Lock()
goodState := c.loggedIn && c.inMapPoll
ndu, canDelta := c.observer.(NetmapDeltaUpdater)
c.mu.Unlock()
if !goodState || !canDelta {
return false
}
ctx, cancel := context.WithTimeout(c.mapCtx, 2*time.Second)
defer cancel()
var ok bool
err := c.observerQueue.RunSync(ctx, func() {
ok = ndu.UpdateNetmapDelta(muts)
})
return err == nil && ok
}
// mapRoutine is responsible for keeping a read-only streaming connection to the
// control server, and keeping the netmap up to date.
func (c *Auto) mapRoutine() {
@@ -487,7 +518,7 @@ func (c *Auto) mapRoutine() {
}
for {
if !c.waitUnpause("mapRoutine") {
if err := c.waitUnpause("mapRoutine"); err != nil {
c.logf("mapRoutine: exiting")
return
}
@@ -498,6 +529,13 @@ func (c *Auto) mapRoutine() {
ctx := c.mapCtx
c.mu.Unlock()
select {
case <-c.quit:
c.logf("mapRoutine: quit")
return
default:
}
report := func(err error, msg string) {
c.logf("[v1] %s: %v", msg, err)
err = fmt.Errorf("%s: %w", msg, err)
@@ -511,32 +549,39 @@ func (c *Auto) mapRoutine() {
if !loggedIn {
// Wait for something interesting to happen
c.mu.Lock()
c.inMapPoll = false
c.synced = false
// c.state is set by authRoutine()
c.mu.Unlock()
<-ctx.Done()
c.logf("[v1] mapRoutine: context done.")
continue
}
health.SetOutOfPollNetMap()
err := c.direct.PollNetMap(ctx, mrs)
health.SetOutOfPollNetMap()
c.mu.Lock()
c.inMapPoll = false
if c.state == StateSynchronized {
c.state = StateAuthenticated
}
paused := c.paused
c.mu.Unlock()
if paused {
mrs.bo.BackOff(ctx, nil)
c.logf("mapRoutine: paused")
select {
case <-ctx.Done():
c.logf("[v1] mapRoutine: context done.")
case <-c.newMapCh:
c.logf("[v1] mapRoutine: new map needed while idle.")
}
} else {
mrs.bo.BackOff(ctx, err)
health.SetInPollNetMap(false)
err := c.direct.PollNetMap(ctx, mrs)
health.SetInPollNetMap(false)
c.mu.Lock()
c.synced = false
if c.state == StateSynchronized {
c.state = StateAuthenticated
}
paused := c.paused
c.mu.Unlock()
if paused {
mrs.bo.BackOff(ctx, nil)
c.logf("mapRoutine: paused")
continue
}
report(err, "PollNetMap")
mrs.bo.BackOff(ctx, err)
continue
}
}
}
@@ -586,7 +631,6 @@ func (c *Auto) SetTKAHead(headHash string) {
c.updateControl()
}
// sendStatus can not be called with the c.mu held.
func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkMap) {
c.mu.Lock()
if c.closed {
@@ -595,77 +639,91 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM
}
state := c.state
loggedIn := c.loggedIn
inMapPoll := c.inMapPoll
synced := c.synced
c.inSendStatus++
c.mu.Unlock()
c.logf("[v1] sendStatus: %s: %v", who, state)
var p persist.PersistView
if nm != nil && loggedIn && inMapPoll {
p = c.direct.GetPersist()
var p *persist.PersistView
var loginFin, logoutFin *empty.Message
if state == StateAuthenticated {
loginFin = new(empty.Message)
}
if state == StateNotAuthenticated {
logoutFin = new(empty.Message)
}
if nm != nil && loggedIn && synced {
p = ptr.To(c.direct.GetPersist())
} else {
// don't send netmap status, as it's misleading when we're
// not logged in.
nm = nil
}
new := Status{
URL: url,
Persist: p,
NetMap: nm,
Err: err,
state: state,
LoginFinished: loginFin,
LogoutFinished: logoutFin,
URL: url,
Persist: p,
NetMap: nm,
State: state,
Err: err,
}
c.observer.SetControlClientStatus(new)
// Launch a new goroutine to avoid blocking the caller while the observer
// does its thing, which may result in a call back into the client.
c.observerQueue.Add(func() {
c.observer.SetControlClientStatus(c, new)
})
c.mu.Lock()
c.inSendStatus--
c.mu.Unlock()
}
func (c *Auto) Login(t *tailcfg.Oauth2Token, flags LoginFlags) {
c.logf("client.Login(%v, %v)", t != nil, flags)
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return
}
c.wantLoggedIn = true
c.loginGoal = &LoginGoal{
token: t,
flags: flags,
wantLoggedIn: true,
token: t,
flags: flags,
}
c.cancelMapCtxLocked()
c.cancelAuthCtxLocked()
c.mu.Unlock()
c.cancelAuth()
}
var ErrClientClosed = errors.New("client closed")
func (c *Auto) StartLogout() {
c.logf("client.StartLogout()")
c.mu.Lock()
c.loginGoal = &LoginGoal{
wantLoggedIn: false,
}
c.mu.Unlock()
c.cancelAuth()
}
func (c *Auto) Logout(ctx context.Context) error {
c.logf("client.Logout()")
errc := make(chan error, 1)
c.mu.Lock()
c.wantLoggedIn = false
c.loginGoal = nil
closed := c.closed
c.mu.Unlock()
if closed {
return ErrClientClosed
c.loginGoal = &LoginGoal{
wantLoggedIn: false,
loggedOutResult: errc,
}
c.mu.Unlock()
c.cancelAuth()
if err := c.direct.TryLogout(ctx); err != nil {
timer, timerChannel := c.clock.NewTimer(10 * time.Second)
defer timer.Stop()
select {
case err := <-errc:
return err
case <-ctx.Done():
return ctx.Err()
case <-timerChannel:
return context.DeadlineExceeded
}
c.mu.Lock()
c.loggedIn = false
c.state = StateNotAuthenticated
c.cancelAuthCtxLocked()
c.cancelMapCtxLocked()
c.mu.Unlock()
c.sendStatus("authRoutine-wantout", nil, "", nil)
return nil
}
func (c *Auto) SetExpirySooner(ctx context.Context, expiry time.Time) error {
@@ -687,32 +745,26 @@ func (c *Auto) Shutdown() {
c.logf("client.Shutdown()")
c.mu.Lock()
inSendStatus := c.inSendStatus
closed := c.closed
direct := c.direct
if !closed {
c.closed = true
c.observerQueue.shutdown()
c.cancelAuthCtxLocked()
c.cancelMapCtxLocked()
for _, w := range c.unpauseWaiters {
w <- false
}
c.unpauseWaiters = nil
}
c.mu.Unlock()
c.logf("client.Shutdown")
c.logf("client.Shutdown: inSendStatus=%v", inSendStatus)
if !closed {
c.unregisterHealthWatch()
close(c.quit)
c.cancelAuth()
<-c.authDone
c.cancelMap()
<-c.mapDone
<-c.updateDone
if direct != nil {
direct.Close()
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
c.observerQueue.wait(ctx)
c.logf("Client.Shutdown done.")
}
}
@@ -753,95 +805,3 @@ func (c *Auto) DoNoiseRequest(req *http.Request) (*http.Response, error) {
func (c *Auto) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
return c.direct.GetSingleUseNoiseRoundTripper(ctx)
}
type execQueue struct {
mu sync.Mutex
closed bool
inFlight bool // whether a goroutine is running q.run
doneWaiter chan struct{} // non-nil if waiter is waiting, then closed
queue []func()
}
func (q *execQueue) Add(f func()) {
q.mu.Lock()
defer q.mu.Unlock()
if q.closed {
return
}
if q.inFlight {
q.queue = append(q.queue, f)
} else {
q.inFlight = true
go q.run(f)
}
}
// RunSync waits for the queue to be drained and then synchronously runs f.
// It returns an error if the queue is closed before f is run or ctx expires.
func (q *execQueue) RunSync(ctx context.Context, f func()) error {
for {
if err := q.wait(ctx); err != nil {
return err
}
q.mu.Lock()
if q.inFlight {
q.mu.Unlock()
continue
}
defer q.mu.Unlock()
if q.closed {
return errors.New("closed")
}
f()
return nil
}
}
func (q *execQueue) run(f func()) {
f()
q.mu.Lock()
for len(q.queue) > 0 && !q.closed {
f := q.queue[0]
q.queue[0] = nil
q.queue = q.queue[1:]
q.mu.Unlock()
f()
q.mu.Lock()
}
q.inFlight = false
q.queue = nil
if q.doneWaiter != nil {
close(q.doneWaiter)
q.doneWaiter = nil
}
q.mu.Unlock()
}
func (q *execQueue) shutdown() {
q.mu.Lock()
defer q.mu.Unlock()
q.closed = true
}
// wait waits for the queue to be empty.
func (q *execQueue) wait(ctx context.Context) error {
q.mu.Lock()
waitCh := q.doneWaiter
if q.inFlight && waitCh == nil {
waitCh = make(chan struct{})
q.doneWaiter = waitCh
}
q.mu.Unlock()
if waitCh == nil {
return nil
}
select {
case <-waitCh:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
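
For reference, the execQueue removed above serializes callbacks onto a single background goroutine. The sketch below is a hedged usage example of that pattern; it assumes the execQueue definition shown above is in scope (plus imports of context, fmt, and time), and the surrounding function and printed strings are purely illustrative.

func exampleExecQueue() {
	var q execQueue // zero value is ready to use
	q.Add(func() { fmt.Println("first") })  // spawns the runner goroutine
	q.Add(func() { fmt.Println("second") }) // queued; runs after "first" finishes

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	_ = q.wait(ctx) // returns once the queue has drained (or ctx expires)
	q.shutdown()    // later Adds become no-ops
}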

View File

@@ -25,9 +25,6 @@ const (
// Client represents a client connection to the control server.
// Currently this is done through a pair of polling https requests in
// the Auto client, but that might change eventually.
//
// The Client must be comparable as it is used by the Observer to detect stale
// clients.
type Client interface {
// Shutdown closes this session, which should not be used any further
// afterwards.
@@ -37,6 +34,10 @@ type Client interface {
// LoginFinished flag (on success) or an auth URL (if further
// interaction is needed).
Login(*tailcfg.Oauth2Token, LoginFlags)
// StartLogout starts an asynchronous logout process.
// When it finishes, the Status callback will be called while
// AuthCantContinue()==true.
StartLogout()
// Logout starts a synchronous logout process. It doesn't return
// until the logout operation has been completed.
Logout(context.Context) error
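
A hedged sketch of a caller honoring that contract by bounding Logout with a deadline, echoing the five-second budget the wasm client uses elsewhere in this diff; the function name is illustrative, and imports of context, time, and tailscale.com/control/controlclient are assumed.

func logoutWithTimeout(cc controlclient.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return cc.Logout(ctx)
}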

View File

@@ -6,6 +6,8 @@ package controlclient
import (
"reflect"
"testing"
"tailscale.com/types/empty"
)
func fieldsOf(t reflect.Type) (fields []string) {
@@ -19,7 +21,7 @@ func fieldsOf(t reflect.Type) (fields []string) {
func TestStatusEqual(t *testing.T) {
// Verify that the Equal method stays in sync with reality
equalHandles := []string{"Err", "URL", "NetMap", "Persist", "state"}
equalHandles := []string{"LoginFinished", "LogoutFinished", "Err", "URL", "NetMap", "State", "Persist"}
if have := fieldsOf(reflect.TypeOf(Status{})); !reflect.DeepEqual(have, equalHandles) {
t.Errorf("Status.Equal check might be out of sync\nfields: %q\nhandled: %q\n",
have, equalHandles)
@@ -50,8 +52,18 @@ func TestStatusEqual(t *testing.T) {
true,
},
{
&Status{},
&Status{state: StateAuthenticated},
&Status{State: StateNew},
&Status{State: StateNew},
true,
},
{
&Status{State: StateNew},
&Status{State: StateAuthenticated},
false,
},
{
&Status{LoginFinished: nil},
&Status{LoginFinished: new(empty.Message)},
false,
},
}
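
The equalHandles check above leans on a small reflection helper to enumerate Status's fields, so the handled-field list and the Equal method can't silently drift apart. Below is a simplified, self-contained sketch of that pattern; unlike the real helper it does not skip the blank-identifier structs.Incomparable field, and the stand-in Status type is not the real one.

package main

import (
	"fmt"
	"reflect"
)

type Status struct { // stand-in for illustration; the real Status lives in controlclient
	Err error
	URL string
}

func fieldsOf(t reflect.Type) (fields []string) {
	for i := 0; i < t.NumField(); i++ {
		fields = append(fields, t.Field(i).Name)
	}
	return fields
}

func main() {
	fmt.Println(fieldsOf(reflect.TypeOf(Status{}))) // [Err URL]
}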

View File

@@ -22,9 +22,9 @@ import (
"os"
"reflect"
"runtime"
"slices"
"strings"
"sync"
"sync/atomic"
"time"
"go4.org/mem"
@@ -42,13 +42,14 @@ import (
"tailscale.com/net/tlsdial"
"tailscale.com/net/tsdial"
"tailscale.com/net/tshttpproxy"
"tailscale.com/smallzstd"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/tka"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/opt"
"tailscale.com/types/persist"
"tailscale.com/types/ptr"
"tailscale.com/types/tkatype"
@@ -63,10 +64,11 @@ type Direct struct {
httpc *http.Client // HTTP client used to talk to tailcontrol
dialer *tsdial.Dialer
dnsCache *dnscache.Resolver
controlKnobs *controlknobs.Knobs // always non-nil
serverURL string // URL of the tailcontrol server
serverURL string // URL of the tailcontrol server
clock tstime.Clock
lastPrintMap time.Time
newDecompressor func() (Decompressor, error)
keepAlive bool
logf logger.Logf
netMon *netmon.Monitor // or nil
discoPubKey key.DiscoPublic
@@ -102,11 +104,7 @@ type Direct struct {
// Observer is implemented by users of the control client (such as LocalBackend)
// to get notified of changes in the control client's status.
type Observer interface {
// SetControlClientStatus is called when the client has a new status to
// report. The Client is provided to allow the Observer to track which
// Client is reporting the status, allowing it to ignore stale status
// reports from previous Clients.
SetControlClientStatus(Client, Status)
SetControlClientStatus(Status)
}
type Options struct {
@@ -117,6 +115,8 @@ type Options struct {
Clock tstime.Clock
Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc
DiscoPublicKey key.DiscoPublic
NewDecompressor func() (Decompressor, error)
KeepAlive bool
Logf logger.Logf
HTTPTestClient *http.Client // optional HTTP client to use (for tests only)
NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only)
@@ -127,7 +127,6 @@ type Options struct {
OnControlTime func(time.Time) // optional func to notify callers of new time from control
Dialer *tsdial.Dialer // non-nil
C2NHandler http.Handler // or nil
ControlKnobs *controlknobs.Knobs // or nil to ignore
// Observer is called when there's a change in status to report
// from the control client.
@@ -193,19 +192,6 @@ type NetmapUpdater interface {
// the diff themselves between the previous full & next full network maps.
}
// NetmapDeltaUpdater is an optional interface that can be implemented by
// NetmapUpdater implementations to receive delta updates from the controlclient
// rather than just full updates.
type NetmapDeltaUpdater interface {
// UpdateNetmapDelta is called with discrete changes to the network map.
//
// The ok result is whether the implementation was able to apply the
// mutations. It might return false if its internal state doesn't
// support applying them or a NetmapUpdater it's wrapping doesn't
// implement the NetmapDeltaUpdater optional method.
UpdateNetmapDelta([]netmap.NodeMutation) (ok bool)
}
// NewDirect returns a new Direct client.
func NewDirect(opts Options) (*Direct, error) {
if opts.ServerURL == "" {
@@ -214,9 +200,6 @@ func NewDirect(opts Options) (*Direct, error) {
if opts.GetMachinePrivateKey == nil {
return nil, errors.New("controlclient.New: no GetMachinePrivateKey specified")
}
if opts.ControlKnobs == nil {
opts.ControlKnobs = &controlknobs.Knobs{}
}
opts.ServerURL = strings.TrimRight(opts.ServerURL, "/")
serverURL, err := url.Parse(opts.ServerURL)
if err != nil {
@@ -264,11 +247,12 @@ func NewDirect(opts Options) (*Direct, error) {
c := &Direct{
httpc: httpc,
controlKnobs: opts.ControlKnobs,
getMachinePrivKey: opts.GetMachinePrivateKey,
serverURL: opts.ServerURL,
clock: opts.Clock,
logf: opts.Logf,
newDecompressor: opts.NewDecompressor,
keepAlive: opts.KeepAlive,
persist: opts.Persist.View(),
authKey: opts.AuthKey,
discoPubKey: opts.DiscoPublicKey,
@@ -752,6 +736,18 @@ func resignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.Marsha
return newSig.Serialize(), nil
}
func sameEndpoints(a, b []tailcfg.Endpoint) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
// newEndpoints acquires c.mu and sets the local port and endpoints and reports
// whether they've changed.
//
@@ -761,11 +757,15 @@ func (c *Direct) newEndpoints(endpoints []tailcfg.Endpoint) (changed bool) {
defer c.mu.Unlock()
// Nothing new?
if slices.Equal(c.endpoints, endpoints) {
if sameEndpoints(c.endpoints, endpoints) {
return false // unchanged
}
c.logf("[v2] client.newEndpoints(%v)", endpoints)
c.endpoints = slices.Clone(endpoints)
var epStrs []string
for _, ep := range endpoints {
epStrs = append(epStrs, ep.Addr.String())
}
c.logf("[v2] client.newEndpoints(%v)", epStrs)
c.endpoints = append(c.endpoints[:0], endpoints...)
return true // changed
}
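
The hand-rolled sameEndpoints above is the pre-slices-package spelling of the slices.Equal call used by the newer newEndpoints; for comparable element types the two are interchangeable. A tiny self-contained illustration, using strings rather than tailcfg.Endpoint values:

package main

import (
	"fmt"
	"slices"
)

// sameStrings has the same shape as sameEndpoints, for a comparable element type.
func sameStrings(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

func main() {
	a := []string{"1.2.3.4:41641", "[::1]:41641"}
	b := slices.Clone(a)
	fmt.Println(sameStrings(a, b), slices.Equal(a, b)) // true true
}
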
@@ -878,7 +878,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap
request := &tailcfg.MapRequest{
Version: tailcfg.CurrentCapabilityVersion,
KeepAlive: true,
KeepAlive: c.keepAlive,
NodeKey: persist.PublicNodeKey(),
DiscoKey: c.discoPubKey,
Endpoints: epStrs,
@@ -905,7 +905,9 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap
old := request.DebugFlags
request.DebugFlags = append(old[:len(old):len(old)], extraDebugFlags...)
}
request.Compress = "zstd"
if c.newDecompressor != nil {
request.Compress = "zstd"
}
bodyData, err := encode(request, serverKey, serverNoiseKey, machinePrivKey)
if err != nil {
@@ -962,7 +964,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap
var mapResIdx int // 0 for first message, then 1+ for deltas
sess := newMapSession(persist.PrivateNodeKey(), nu, c.controlKnobs)
sess := newMapSession(persist.PrivateNodeKey(), nu)
defer sess.Close()
sess.cancel = cancel
sess.logf = c.logf
@@ -1189,14 +1191,19 @@ func (c *Direct) decodeMsg(msg []byte, v any, mkey key.MachinePrivate) error {
} else {
decrypted = msg
}
decoder, err := smallzstd.NewDecoder(nil)
if err != nil {
return err
}
defer decoder.Close()
b, err := decoder.DecodeAll(decrypted, nil)
if err != nil {
return err
var b []byte
if c.newDecompressor == nil {
b = decrypted
} else {
decoder, err := c.newDecompressor()
if err != nil {
return err
}
defer decoder.Close()
b, err = decoder.DecodeAll(decrypted, nil)
if err != nil {
return err
}
}
if debugMap() {
var buf bytes.Buffer
@@ -1303,6 +1310,68 @@ func initDevKnob() devKnobs {
var clock tstime.Clock = tstime.StdClock{}
// config from control.
var (
controlDisableDRPO atomic.Bool
controlKeepFullWGConfig atomic.Bool
controlRandomizeClientPort atomic.Bool
controlOneCGNAT syncs.AtomicValue[opt.Bool]
)
// DisableDRPO reports whether control says to disable the
// DERP route optimization (Issue 150).
func DisableDRPO() bool {
return controlDisableDRPO.Load()
}
// KeepFullWGConfig reports whether control says we should disable the lazy
// wireguard programming and instead give it the full netmap always.
func KeepFullWGConfig() bool {
return controlKeepFullWGConfig.Load()
}
// RandomizeClientPort reports whether control says we should randomize
// the client port.
func RandomizeClientPort() bool {
return controlRandomizeClientPort.Load()
}
// ControlOneCGNATSetting returns control's OneCGNAT setting, if any.
func ControlOneCGNATSetting() opt.Bool {
return controlOneCGNAT.Load()
}
func setControlKnobsFromNodeAttrs(selfNodeAttrs []string) {
var (
keepFullWG bool
disableDRPO bool
disableUPnP bool
randomizeClientPort bool
oneCGNAT opt.Bool
)
for _, attr := range selfNodeAttrs {
switch attr {
case tailcfg.NodeAttrDebugDisableWGTrim:
keepFullWG = true
case tailcfg.NodeAttrDebugDisableDRPO:
disableDRPO = true
case tailcfg.NodeAttrDisableUPnP:
disableUPnP = true
case tailcfg.NodeAttrRandomizeClientPort:
randomizeClientPort = true
case tailcfg.NodeAttrOneCGNATEnable:
oneCGNAT.Set(true)
case tailcfg.NodeAttrOneCGNATDisable:
oneCGNAT.Set(false)
}
}
controlKeepFullWGConfig.Store(keepFullWG)
controlDisableDRPO.Store(disableDRPO)
controlknobs.SetDisableUPnP(disableUPnP)
controlRandomizeClientPort.Store(randomizeClientPort)
controlOneCGNAT.Store(oneCGNAT)
}
// ipForwardingBroken reports whether the system's IP forwarding is disabled
// and will definitely not work for the routes provided.
//

View File

@@ -14,9 +14,7 @@ import (
"sort"
"strconv"
"sync"
"time"
"tailscale.com/control/controlknobs"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
@@ -40,8 +38,7 @@ import (
// one MapRequest).
type mapSession struct {
// Immutable fields.
netmapUpdater NetmapUpdater // called on changes (in addition to the optional hooks below)
controlKnobs *controlknobs.Knobs // or nil
nu NetmapUpdater // called on changes (in addition to the optional hooks below)
privateNodeKey key.NodePrivate
publicNodeKey key.NodePublic
logf logger.Logf
@@ -97,10 +94,9 @@ type mapSession struct {
// Modify its optional fields on the returned value before use.
//
// It must have its Close method called to release resources.
func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater, controlKnobs *controlknobs.Knobs) *mapSession {
func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater) *mapSession {
ms := &mapSession{
netmapUpdater: nu,
controlKnobs: controlKnobs,
nu: nu,
privateNodeKey: privateNodeKey,
publicNodeKey: privateNodeKey.Public(),
lastDNSConfig: new(tailcfg.DNSConfig),
@@ -188,7 +184,7 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t
if DevKnob.StripCaps() {
resp.Node.Capabilities = nil
}
ms.controlKnobs.UpdateFromNodeAttributes(resp.Node.Capabilities)
setControlKnobsFromNodeAttrs(resp.Node.Capabilities)
}
// Call Node.InitDisplayNames on any changed nodes.
@@ -198,16 +194,8 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t
ms.updateStateFromResponse(resp)
if ms.tryHandleIncrementally(resp) {
ms.onConciseNetMapSummary(ms.lastNetmapSummary) // every 5s log
return nil
}
// We have to rebuild the whole netmap (lots of garbage & work downstream of
// our UpdateFullNetmap call). This is the part we tried to avoid but
// some field mutations (especially rare ones) aren't yet handled.
nm := ms.netmap()
ms.lastNetmapSummary = nm.VeryConcise()
ms.onConciseNetMapSummary(ms.lastNetmapSummary)
@@ -216,25 +204,10 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t
ms.onSelfNodeChanged(nm)
}
ms.netmapUpdater.UpdateFullNetmap(nm)
ms.nu.UpdateFullNetmap(nm)
return nil
}
func (ms *mapSession) tryHandleIncrementally(res *tailcfg.MapResponse) bool {
if ms.controlKnobs != nil && ms.controlKnobs.DisableDeltaUpdates.Load() {
return false
}
nud, ok := ms.netmapUpdater.(NetmapDeltaUpdater)
if !ok {
return false
}
mutations, ok := netmap.MutationsFromMapResponse(res, time.Now())
if ok && len(mutations) > 0 {
return nud.UpdateNetmapDelta(mutations)
}
return ok
}
// updateStats are some stats from updateStateFromResponse, primarily for
// testing. It's meant to be cheap enough to always compute, though. It doesn't
// allocate.

View File

@@ -16,7 +16,6 @@ import (
"github.com/google/go-cmp/cmp"
"go4.org/mem"
"tailscale.com/control/controlknobs"
"tailscale.com/tailcfg"
"tailscale.com/tstest"
"tailscale.com/tstime"
@@ -393,7 +392,7 @@ func formatNodes(nodes []*tailcfg.Node) string {
}
func newTestMapSession(t testing.TB, nu NetmapUpdater) *mapSession {
ms := newMapSession(key.NewNode(), nu, new(controlknobs.Knobs))
ms := newMapSession(key.NewNode(), nu)
t.Cleanup(ms.Close)
ms.logf = t.Logf
return ms

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"reflect"
"tailscale.com/types/empty"
"tailscale.com/types/netmap"
"tailscale.com/types/persist"
"tailscale.com/types/structs"
@@ -37,10 +38,6 @@ const (
StateSynchronized // connected and received map update
)
func (s State) AppendText(b []byte) ([]byte, error) {
return append(b, s.String()...), nil
}
func (s State) MarshalText() ([]byte, error) {
return []byte(s.String()), nil
}
@@ -65,55 +62,34 @@ func (s State) String() string {
}
type Status struct {
_ structs.Incomparable
_ structs.Incomparable
LoginFinished *empty.Message // nonempty when login finishes
LogoutFinished *empty.Message // nonempty when logout finishes
Err error
URL string // interactive URL to visit to finish logging in
NetMap *netmap.NetworkMap // server-pushed configuration
// Err, if non-nil, is an error that occurred while logging in.
//
// If it's of type UserVisibleError then it's meant to be shown to users in
// their Tailscale client. Otherwise it's just logged to tailscaled's logs.
Err error
// URL, if non-empty, is the interactive URL to visit to finish logging in.
URL string
// NetMap is the latest server-pushed state of the tailnet network.
NetMap *netmap.NetworkMap
// Persist, when Valid, is the locally persisted configuration.
//
// TODO(bradfitz,maisem): clarify this.
Persist persist.PersistView
// state is the internal state. It should not be exposed outside this
// The internal state should not be exposed outside this
// package, but we have some automated tests elsewhere that need to
// use it via the StateForTest accessor.
// use them. Please don't use these fields.
// TODO(apenwarr): Unexport or remove these.
state State
State State
Persist *persist.PersistView // locally persisted configuration
}
// LoginFinished reports whether the controlclient is in its "StateAuthenticated"
// state where it's in a happy register state but not yet in a map poll.
//
// TODO(bradfitz): delete this and everything around Status.state.
func (s *Status) LoginFinished() bool { return s.state == StateAuthenticated }
// StateForTest returns the internal state of s for tests only.
func (s *Status) StateForTest() State { return s.state }
// SetStateForTest sets the internal state of s for tests only.
func (s *Status) SetStateForTest(state State) { s.state = state }
// Equal reports whether s and s2 are equal.
func (s *Status) Equal(s2 *Status) bool {
if s == nil && s2 == nil {
return true
}
return s != nil && s2 != nil &&
(s.LoginFinished == nil) == (s2.LoginFinished == nil) &&
(s.LogoutFinished == nil) == (s2.LogoutFinished == nil) &&
s.Err == s2.Err &&
s.URL == s2.URL &&
s.state == s2.state &&
reflect.DeepEqual(s.Persist, s2.Persist) &&
reflect.DeepEqual(s.NetMap, s2.NetMap)
reflect.DeepEqual(s.NetMap, s2.NetMap) &&
s.State == s2.State
}
func (s Status) String() string {
@@ -121,5 +97,5 @@ func (s Status) String() string {
if err != nil {
panic(err)
}
return s.state.String() + " " + string(b)
return s.State.String() + " " + string(b)
}

View File

@@ -51,7 +51,7 @@ func (d *Dialer) Dial(ctx context.Context) (*ClientConn, error) {
if err != nil {
return nil, err
}
netConn := wsconn.NetConn(context.Background(), wsConn, websocket.MessageBinary, wsURL.String())
netConn := wsconn.NetConn(context.Background(), wsConn, websocket.MessageBinary)
cbConn, err := cont(ctx, netConn)
if err != nil {
netConn.Close()

View File

@@ -146,7 +146,7 @@ func acceptWebsocket(ctx context.Context, w http.ResponseWriter, r *http.Request
return nil, fmt.Errorf("decoding base64 handshake parameter: %v", err)
}
conn := wsconn.NetConn(ctx, c, websocket.MessageBinary, r.RemoteAddr)
conn := wsconn.NetConn(ctx, c, websocket.MessageBinary)
nc, err := controlbase.Server(ctx, conn, private, init)
if err != nil {
conn.Close()

View File

@@ -8,101 +8,22 @@ package controlknobs
import (
"sync/atomic"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/types/opt"
"tailscale.com/envknob"
)
// Knobs is the set of knobs that the control plane's coordination server can
// adjust at runtime.
type Knobs struct {
// DisableUPnP indicates whether to attempt UPnP mapping.
DisableUPnP atomic.Bool
// disableUPnP indicates whether to attempt UPnP mapping.
var disableUPnPControl atomic.Bool
// DisableDRPO is whether control says to disable the
// DERP route optimization (Issue 150).
DisableDRPO atomic.Bool
var disableUPnpEnv = envknob.RegisterBool("TS_DISABLE_UPNP")
// KeepFullWGConfig is whether we should disable the lazy wireguard
// programming and instead give WireGuard the full netmap always, even for
// idle peers.
KeepFullWGConfig atomic.Bool
// RandomizeClientPort is whether control says we should randomize
// the client port.
RandomizeClientPort atomic.Bool
// OneCGNAT is whether the node should make one big CGNAT route
// in the OS rather than one /32 per peer.
OneCGNAT syncs.AtomicValue[opt.Bool]
// ForceBackgroundSTUN forces netcheck STUN queries to keep
// running in magicsock, even when idle.
ForceBackgroundSTUN atomic.Bool
// DisableDeltaUpdates is whether the node should not process
// incremental (delta) netmap updates and should treat all netmap
// changes as "full" ones as tailscaled did in 1.48.x and earlier.
DisableDeltaUpdates atomic.Bool
// DisableUPnP reports whether control last said that UPnP
// portmapping should be disabled.
func DisableUPnP() bool {
return disableUPnPControl.Load() || disableUPnpEnv()
}
// UpdateFromNodeAttributes updates k (if non-nil) based on the provided self
// node attributes (Node.Capabilities).
func (k *Knobs) UpdateFromNodeAttributes(selfNodeAttrs []string) {
if k == nil {
return
}
var (
keepFullWG bool
disableDRPO bool
disableUPnP bool
randomizeClientPort bool
disableDeltaUpdates bool
oneCGNAT opt.Bool
forceBackgroundSTUN bool
)
for _, attr := range selfNodeAttrs {
switch attr {
case tailcfg.NodeAttrDebugDisableWGTrim:
keepFullWG = true
case tailcfg.NodeAttrDebugDisableDRPO:
disableDRPO = true
case tailcfg.NodeAttrDisableUPnP:
disableUPnP = true
case tailcfg.NodeAttrRandomizeClientPort:
randomizeClientPort = true
case tailcfg.NodeAttrOneCGNATEnable:
oneCGNAT.Set(true)
case tailcfg.NodeAttrOneCGNATDisable:
oneCGNAT.Set(false)
case tailcfg.NodeAttrDebugForceBackgroundSTUN:
forceBackgroundSTUN = true
case tailcfg.NodeAttrDisableDeltaUpdates:
disableDeltaUpdates = true
}
}
k.KeepFullWGConfig.Store(keepFullWG)
k.DisableDRPO.Store(disableDRPO)
k.DisableUPnP.Store(disableUPnP)
k.RandomizeClientPort.Store(randomizeClientPort)
k.OneCGNAT.Store(oneCGNAT)
k.ForceBackgroundSTUN.Store(forceBackgroundSTUN)
k.DisableDeltaUpdates.Store(disableDeltaUpdates)
}
// AsDebugJSON returns k as something that can be marshalled with json.Marshal
// for debug.
func (k *Knobs) AsDebugJSON() map[string]any {
if k == nil {
return nil
}
return map[string]any{
"DisableUPnP": k.DisableUPnP.Load(),
"DisableDRPO": k.DisableDRPO.Load(),
"KeepFullWGConfig": k.KeepFullWGConfig.Load(),
"RandomizeClientPort": k.RandomizeClientPort.Load(),
"OneCGNAT": k.OneCGNAT.Load(),
"ForceBackgroundSTUN": k.ForceBackgroundSTUN.Load(),
"DisableDeltaUpdates": k.DisableDeltaUpdates.Load(),
}
// SetDisableUPnP sets whether control says that UPnP should be
// disabled.
func SetDisableUPnP(v bool) {
disableUPnPControl.Store(v)
}
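
As a rough usage sketch of the struct-based Knobs API shown in this hunk (not code from this change; selfNodeAttrs stands in for the self node's Capabilities from a map response):

knobs := new(controlknobs.Knobs)

// Apply whatever the coordination server said about this node.
knobs.UpdateFromNodeAttributes(selfNodeAttrs)

// Consumers then read the atomic fields directly.
if knobs.DisableUPnP.Load() {
	// skip UPnP port mapping
}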

View File

@@ -1,21 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlknobs
import (
"reflect"
"testing"
)
func TestAsDebugJSON(t *testing.T) {
var nilPtr *Knobs
if got := nilPtr.AsDebugJSON(); got != nil {
t.Errorf("AsDebugJSON(nil) = %v; want nil", got)
}
k := new(Knobs)
got := k.AsDebugJSON()
if want := reflect.TypeOf(Knobs{}).NumField(); len(got) != want {
t.Errorf("AsDebugJSON map has %d fields; want %v", len(got), want)
}
}

View File

@@ -730,9 +730,8 @@ func firstStr(a, b string) string {
}
// dialNodeUsingProxy connects to n using a CONNECT to the HTTP(s) proxy in proxyURL.
func (c *Client) dialNodeUsingProxy(ctx context.Context, n *tailcfg.DERPNode, proxyURL *url.URL) (_ net.Conn, err error) {
func (c *Client) dialNodeUsingProxy(ctx context.Context, n *tailcfg.DERPNode, proxyURL *url.URL) (proxyConn net.Conn, err error) {
pu := proxyURL
var proxyConn net.Conn
if pu.Scheme == "https" {
var d tls.Dialer
proxyConn, err = d.DialContext(ctx, "tcp", net.JoinHostPort(pu.Hostname(), firstStr(pu.Port(), "443")))

View File

@@ -27,6 +27,6 @@ func dialWebsocket(ctx context.Context, urlStr string) (net.Conn, error) {
return nil, err
}
log.Printf("websocket: connected to %v", urlStr)
netConn := wsconn.NetConn(context.Background(), c, websocket.MessageBinary, urlStr)
netConn := wsconn.NetConn(context.Background(), c, websocket.MessageBinary)
return netConn, nil
}

View File

@@ -389,24 +389,12 @@ func CanTaildrop() bool { return !Bool("TS_DISABLE_TAILDROP") }
// SSHPolicyFile returns the path, if any, to the SSHPolicy JSON file for development.
func SSHPolicyFile() string { return String("TS_DEBUG_SSH_POLICY_FILE") }
// SSHIgnoreTailnetPolicy reports whether to ignore the Tailnet SSH policy for development.
// SSHIgnoreTailnetPolicy is whether to ignore the Tailnet SSH policy for development.
func SSHIgnoreTailnetPolicy() bool { return Bool("TS_DEBUG_SSH_IGNORE_TAILNET_POLICY") }
// TKASkipSignatureCheck reports whether to skip node-key signature checking for development.
// TKASkipSignatureCheck is whether to skip node-key signature checking for development.
func TKASkipSignatureCheck() bool { return Bool("TS_UNSAFE_SKIP_NKS_VERIFICATION") }
// CrashOnUnexpected reports whether the Tailscale client should panic
// on unexpected conditions. If TS_DEBUG_CRASH_ON_UNEXPECTED is set, that's
// used. Otherwise the default value is true for unstable builds.
func CrashOnUnexpected() bool {
if v, ok := crashOnUnexpected().Get(); ok {
return v
}
return version.IsUnstableBuild()
}
var crashOnUnexpected = RegisterOptBool("TS_DEBUG_CRASH_ON_UNEXPECTED")
// NoLogsNoSupport reports whether the client's opted out of log uploads and
// technical support.
func NoLogsNoSupport() bool {

View File

@@ -115,4 +115,4 @@
in
flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system);
}
# nix-direnv cache busting line: sha256-TZP/FQqb21yiKMlIPXXSoN6HfiBAun+gPZHQ5cPc8L0=
# nix-direnv cache busting line: sha256-GACTVegfg57CKUMczmGuovr+zqcGzzpT6cEw+QLPvSs=

go.mod (3 changes)
View File

@@ -16,7 +16,6 @@ require (
github.com/aws/aws-sdk-go-v2/service/ssm v1.36.3
github.com/coreos/go-iptables v0.6.0
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/coreos/go-systemd/v22 v22.4.0
github.com/creack/pty v1.1.18
github.com/dave/jennifer v1.6.1
github.com/dblohm7/wingoes v0.0.0-20230821191801-fc76608aecf0
@@ -27,7 +26,7 @@ require (
github.com/go-json-experiment/json v0.0.0-20230321051131-ccbac49a6929
github.com/go-logr/zapr v1.2.4
github.com/go-ole/go-ole v1.2.6
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466
github.com/godbus/dbus/v5 v5.1.0
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
github.com/golangci/golangci-lint v1.52.2
github.com/google/go-cmp v0.5.9

View File

@@ -1 +1 @@
sha256-TZP/FQqb21yiKMlIPXXSoN6HfiBAun+gPZHQ5cPc8L0=
sha256-GACTVegfg57CKUMczmGuovr+zqcGzzpT6cEw+QLPvSs=

go.sum (8 changes)
View File

@@ -208,8 +208,6 @@ github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo
github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -358,9 +356,8 @@ github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -1181,7 +1178,6 @@ golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@@ -27,7 +27,7 @@ var (
sysErr = map[Subsystem]error{} // error key => err (or nil for no error)
watchers = set.HandleSet[func(Subsystem, error)]{} // opt func to run if error state changes
warnables = set.Set[*Warnable]{}
warnables = map[*Warnable]struct{}{} // set of warnables
timer *time.Timer
debugHandler = map[string]http.Handler{}
@@ -84,7 +84,7 @@ func NewWarnable(opts ...WarnableOpt) *Warnable {
}
mu.Lock()
defer mu.Unlock()
warnables.Add(w)
warnables[w] = struct{}{}
return w
}
@@ -279,31 +279,27 @@ func SetControlHealth(problems []string) {
// GotStreamedMapResponse notes that we got a tailcfg.MapResponse
// message in streaming mode, even if it's just a keep-alive message.
//
// This also notes that a map poll is in progress. To unset that, call
// SetOutOfPollNetMap().
func GotStreamedMapResponse() {
mu.Lock()
defer mu.Unlock()
lastStreamedMapResponse = time.Now()
if !inMapPoll {
inMapPoll = true
inMapPollSince = time.Now()
}
selfCheckLocked()
}
// SetOutOfPollNetMap records that the client is no longer in
// an HTTP map request long poll to the control plane.
func SetOutOfPollNetMap() {
// SetInPollNetMap records whether the client has an
// HTTP long poll open to the control plane.
func SetInPollNetMap(v bool) {
mu.Lock()
defer mu.Unlock()
if !inMapPoll {
if v == inMapPoll {
return
}
inMapPoll = false
lastMapPollEndedAt = time.Now()
selfCheckLocked()
inMapPoll = v
if v {
inMapPollSince = time.Now()
} else {
lastMapPollEndedAt = time.Now()
}
}
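
As a rough sketch of how these hooks combine, assuming the SetInPollNetMap(bool) variant shown above (readMapResponse and handle are hypothetical placeholders for the map-poll loop):

health.SetInPollNetMap(true) // a long poll to control is now open
defer health.SetInPollNetMap(false)

for {
	resp, err := readMapResponse()
	if err != nil {
		return err
	}
	// Every streamed message, including keep-alives, counts for liveness.
	health.GotStreamedMapResponse()
	handle(resp)
}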
// GetInPollNetMap reports whether the client has an open

View File

@@ -8,8 +8,6 @@ import (
"fmt"
"reflect"
"testing"
"tailscale.com/util/set"
)
func TestAppendWarnableDebugFlags(t *testing.T) {
@@ -37,5 +35,5 @@ func TestAppendWarnableDebugFlags(t *testing.T) {
func resetWarnables() {
mu.Lock()
defer mu.Unlock()
warnables = set.Set[*Warnable]{}
warnables = make(map[*Warnable]struct{})
}

View File

@@ -61,7 +61,7 @@ const (
// each one via RequestEngineStatus.
NotifyWatchEngineUpdates NotifyWatchOpt = 1 << iota
NotifyInitialState // if set, the first Notify message (sent immediately) will contain the current State + BrowseToURL + SessionID
NotifyInitialState // if set, the first Notify message (sent immediately) will contain the current State + BrowseToURL
NotifyInitialPrefs // if set, the first Notify message (sent immediately) will contain the current Prefs
NotifyInitialNetMap // if set, the first Notify message (sent immediately) will contain the current NetMap
@@ -77,12 +77,6 @@ type Notify struct {
_ structs.Incomparable
Version string // version number of IPN backend
// SessionID identifies the unique WatchIPNBus session.
// This field is only set in the first message when requesting
// NotifyInitialState. Clients must store it on their side as
// following notifications will not include this field.
SessionID string `json:",omitempty"`
// ErrMessage, if non-nil, contains a critical error message.
// For State InUseOtherUser, ErrMessage is not critical and just contains the details.
ErrMessage *string

View File

@@ -51,7 +51,6 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct {
NetfilterMode preftype.NetfilterMode
OperatorUser string
ProfileName string
AutoUpdate AutoUpdatePrefs
Persist *persist.Persist
}{})
@@ -76,12 +75,6 @@ func (src *ServeConfig) Clone() *ServeConfig {
}
}
dst.AllowFunnel = maps.Clone(src.AllowFunnel)
if dst.Foreground != nil {
dst.Foreground = map[string]*ServeConfig{}
for k, v := range src.Foreground {
dst.Foreground[k] = v.Clone()
}
}
return dst
}
@@ -90,7 +83,6 @@ var _ServeConfigCloneNeedsRegeneration = ServeConfig(struct {
TCP map[uint16]*TCPPortHandler
Web map[HostPort]*WebServerConfig
AllowFunnel map[HostPort]bool
Foreground map[string]*ServeConfig
}{})
// Clone makes a deep copy of TCPPortHandler.

View File

@@ -86,7 +86,6 @@ func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT }
func (v PrefsView) NetfilterMode() preftype.NetfilterMode { return v.ж.NetfilterMode }
func (v PrefsView) OperatorUser() string { return v.ж.OperatorUser }
func (v PrefsView) ProfileName() string { return v.ж.ProfileName }
func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate }
func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() }
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
@@ -112,7 +111,6 @@ var _PrefsViewNeedsRegeneration = Prefs(struct {
NetfilterMode preftype.NetfilterMode
OperatorUser string
ProfileName string
AutoUpdate AutoUpdatePrefs
Persist *persist.Persist
}{})
@@ -177,18 +175,11 @@ func (v ServeConfigView) AllowFunnel() views.Map[HostPort, bool] {
return views.MapOf(v.ж.AllowFunnel)
}
func (v ServeConfigView) Foreground() views.MapFn[string, *ServeConfig, ServeConfigView] {
return views.MapFnOf(v.ж.Foreground, func(t *ServeConfig) ServeConfigView {
return t.View()
})
}
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _ServeConfigViewNeedsRegeneration = ServeConfig(struct {
TCP map[uint16]*TCPPortHandler
Web map[HostPort]*WebServerConfig
AllowFunnel map[HostPort]bool
Foreground map[string]*ServeConfig
}{})
// View returns a readonly view of TCPPortHandler.

View File

@@ -4,7 +4,6 @@
package ipnlocal
import (
"bytes"
"encoding/json"
"errors"
"fmt"
@@ -15,16 +14,15 @@ import (
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"tailscale.com/clientupdate"
"tailscale.com/envknob"
"tailscale.com/net/sockstats"
"tailscale.com/tailcfg"
"tailscale.com/util/clientmetric"
"tailscale.com/util/goroutines"
"tailscale.com/version"
"tailscale.com/version/distro"
)
var c2nLogHeap func(http.ResponseWriter, *http.Request) // non-nil on most platforms (c2n_pprof.go)
@@ -40,15 +38,7 @@ func (b *LocalBackend) handleC2N(w http.ResponseWriter, r *http.Request) {
body, _ := io.ReadAll(r.Body)
w.Write(body)
case "/update":
switch r.Method {
case http.MethodGet:
b.handleC2NUpdateGet(w, r)
case http.MethodPost:
b.handleC2NUpdatePost(w, r)
default:
http.Error(w, "bad method", http.StatusMethodNotAllowed)
return
}
b.handleC2NUpdate(w, r)
case "/logtail/flush":
if r.Method != "POST" {
http.Error(w, "bad method", http.StatusMethodNotAllowed)
@@ -121,48 +111,39 @@ func (b *LocalBackend) handleC2N(w http.ResponseWriter, r *http.Request) {
}
}
func (b *LocalBackend) handleC2NUpdateGet(w http.ResponseWriter, r *http.Request) {
b.logf("c2n: GET /update received")
func (b *LocalBackend) handleC2NUpdate(w http.ResponseWriter, r *http.Request) {
// TODO(bradfitz): add some sort of semaphore that prevents two concurrent
// updates, or if one happened in the past 5 minutes, or something.
res := b.newC2NUpdateResponse()
res.Started = b.c2nUpdateStarted()
var res tailcfg.C2NUpdateResponse
res.Enabled = envknob.AllowsRemoteUpdate()
res.Supported = runtime.GOOS == "windows" || (runtime.GOOS == "linux" && distro.Get() == distro.Debian)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
}
switch r.Method {
case "GET", "POST":
default:
http.Error(w, "bad method", http.StatusMethodNotAllowed)
return
}
func (b *LocalBackend) handleC2NUpdatePost(w http.ResponseWriter, r *http.Request) {
b.logf("c2n: POST /update received")
res := b.newC2NUpdateResponse()
defer func() {
if res.Err != "" {
b.logf("c2n: POST /update failed: %s", res.Err)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
}()
if r.Method == "GET" {
return
}
if !res.Enabled {
res.Err = "not enabled"
return
}
if !res.Supported {
res.Err = "not supported"
return
}
// Check if update was already started, and mark as started.
if !b.trySetC2NUpdateStarted() {
res.Err = "update already started"
return
}
defer func() {
// Clear the started flag if something failed.
if res.Err != "" {
b.setC2NUpdateStarted(false)
}
}()
cmdTS, err := findCmdTailscale()
if err != nil {
res.Err = fmt.Sprintf("failed to find cmd/tailscale binary: %v", err)
@@ -184,64 +165,22 @@ func (b *LocalBackend) handleC2NUpdatePost(w http.ResponseWriter, r *http.Reques
res.Err = "cmd/tailscale version mismatch"
return
}
cmd := exec.Command(cmdTS, "update", "--yes")
buf := new(bytes.Buffer)
cmd.Stdout = buf
cmd.Stderr = buf
b.logf("c2n: running %q", strings.Join(cmd.Args, " "))
if err := cmd.Start(); err != nil {
res.Err = fmt.Sprintf("failed to start cmd/tailscale update: %v", err)
return
}
res.Started = true
// Run update asynchronously and respond that it started.
go func() {
if err := cmd.Wait(); err != nil {
b.logf("c2n: update command failed: %v, output: %s", err, buf)
} else {
b.logf("c2n: update complete")
}
b.setC2NUpdateStarted(false)
}()
}
func (b *LocalBackend) newC2NUpdateResponse() tailcfg.C2NUpdateResponse {
// If NewUpdater does not return an error, we can update the installation.
// Exception: When version.IsMacSysExt returns true, we don't support that
// yet. TODO(cpalmer, #6995): Implement it.
// TODO(bradfitz,andrew): There might be a race condition here on Windows:
// * We start the update process.
// * tailscale.exe copies itself and kicks off the update process
// * msiexec stops this process during the update before the selfCopy exits(?)
// * This doesn't return because the process is dead.
//
// Note that we create the Updater solely to check for errors; we do not
// invoke it here. For this purpose, it is ok to pass it a zero Arguments.
prefs := b.Prefs().AutoUpdate()
_, err := clientupdate.NewUpdater(clientupdate.Arguments{})
return tailcfg.C2NUpdateResponse{
Enabled: envknob.AllowsRemoteUpdate() || prefs.Apply,
Supported: err == nil && !version.IsMacSysExt(),
}
}
func (b *LocalBackend) c2nUpdateStarted() bool {
b.mu.Lock()
defer b.mu.Unlock()
return b.c2nUpdateStatus.started
}
func (b *LocalBackend) setC2NUpdateStarted(v bool) {
b.mu.Lock()
defer b.mu.Unlock()
b.c2nUpdateStatus.started = v
}
func (b *LocalBackend) trySetC2NUpdateStarted() bool {
b.mu.Lock()
defer b.mu.Unlock()
if b.c2nUpdateStatus.started {
return false
}
b.c2nUpdateStatus.started = true
return true
// This seems fairly unlikely, but worth checking.
defer cmd.Wait()
return
}
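
Pulling together the split-handler variant interleaved above, its POST /update path reads roughly like this sketch; it is only a condensation of lines already in this hunk (error detail trimmed, cmdTS comes from findCmdTailscale), not new behavior:

res := b.newC2NUpdateResponse()
defer func() { json.NewEncoder(w).Encode(res) }()

if !res.Enabled {
	res.Err = "not enabled"
	return
}
if !res.Supported {
	res.Err = "not supported"
	return
}
if !b.trySetC2NUpdateStarted() { // at most one update at a time
	res.Err = "update already started"
	return
}
defer func() {
	if res.Err != "" {
		b.setC2NUpdateStarted(false) // something failed before the update began
	}
}()

cmd := exec.Command(cmdTS, "update", "--yes")
if err := cmd.Start(); err != nil {
	res.Err = fmt.Sprintf("failed to start cmd/tailscale update: %v", err)
	return
}
res.Started = true
go func() {
	cmd.Wait()                   // the update runs in the background
	b.setC2NUpdateStarted(false) // and clears the flag when it finishes
}()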
// findCmdTailscale looks for the cmd/tailscale that corresponds to the

View File

@@ -11,7 +11,6 @@ import (
"fmt"
"io"
"log"
"maps"
"net"
"net/http"
"net/http/httputil"
@@ -34,7 +33,6 @@ import (
"gvisor.dev/gvisor/pkg/tcpip"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/control/controlclient"
"tailscale.com/control/controlknobs"
"tailscale.com/doctor"
"tailscale.com/doctor/permissions"
"tailscale.com/doctor/routetable"
@@ -80,7 +78,6 @@ import (
"tailscale.com/util/mak"
"tailscale.com/util/multierr"
"tailscale.com/util/osshare"
"tailscale.com/util/rands"
"tailscale.com/util/set"
"tailscale.com/util/systemd"
"tailscale.com/util/testenv"
@@ -146,17 +143,19 @@ type LocalBackend struct {
statsLogf logger.Logf // for printing peers stats on change
sys *tsd.System
e wgengine.Engine // non-nil; TODO(bradfitz): remove; use sys
store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys
dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys
pm *profileManager
store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys
dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys
backendLogID logid.PublicID
unregisterNetMon func()
unregisterHealthWatch func()
portpoll *portlist.Poller // may be nil
portpollOnce sync.Once // guards starting readPoller
gotPortPollRes chan struct{} // closed upon first readPoller result
varRoot string // or empty if SetVarRoot never called
logFlushFunc func() // or nil if SetLogFlusher wasn't called
em *expiryManager // non-nil
newDecompressor func() (controlclient.Decompressor, error)
varRoot string // or empty if SetVarRoot never called
logFlushFunc func() // or nil if SetLogFlusher wasn't called
em *expiryManager // non-nil
sshAtomicBool atomic.Bool
shutdownCalled bool // if Shutdown has been called
debugSink *capture.Sink
@@ -189,7 +188,6 @@ type LocalBackend struct {
// The mutex protects the following elements.
mu sync.Mutex
pm *profileManager // mu guards access
filterHash deephash.Sum
httpTestClient *http.Client // for controlclient. nil by default, used by tests.
ccGen clientGen // function for producing controlclient; lazily populated
@@ -203,8 +201,9 @@ type LocalBackend struct {
capFileSharing bool // whether netMap contains the file sharing capability
capTailnetLock bool // whether netMap contains the tailnet lock capability
// hostinfo is mutated in-place while mu is held.
hostinfo *tailcfg.Hostinfo
netMap *netmap.NetworkMap // not mutated in place once set (except for Peers slice)
hostinfo *tailcfg.Hostinfo
// netMap is not mutated in-place once set.
netMap *netmap.NetworkMap
nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil
nodeByAddr map[netip.Addr]tailcfg.NodeView
activeLogin string // last logged LoginName from netMap
@@ -239,16 +238,16 @@ type LocalBackend struct {
directFileRoot string
directFileDoFinalRename bool // false on macOS, true on several NAS platforms
componentLogUntil map[string]componentLogState
// c2nUpdateStatus is the status of c2n-triggered client update.
c2nUpdateStatus updateStatus
// ServeConfig fields. (also guarded by mu)
lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig
serveConfig ipn.ServeConfigView // or !Valid if none
activeWatchSessions set.Set[string] // of WatchIPN SessionID
lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig
serveConfig ipn.ServeConfigView // or !Valid if none
serveListeners map[netip.AddrPort]*serveListener // addrPort => serveListener
serveProxyHandlers sync.Map // string (HTTPHandler.Proxy) => *httputil.ReverseProxy
// serveStreamers is a map for those running Funnel in the foreground
// and streaming incoming requests.
serveStreamers map[uint16]map[uint32]func(ipn.FunnelRequestLog) // serve port => map of stream loggers (key is UUID)
// statusLock must be held before calling statusChanged.Wait() or
// statusChanged.Broadcast().
@@ -267,13 +266,6 @@ type LocalBackend struct {
// at the moment that tkaSyncLock is taken).
tkaSyncLock sync.Mutex
clock tstime.Clock
// Last ClientVersion received in MapResponse, guarded by mu.
lastClientVersion *tailcfg.ClientVersion
}
type updateStatus struct {
started bool
}
// clientGen is a func that creates a control plane client.
@@ -288,7 +280,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
e := sys.Engine.Get()
store := sys.StateStore.Get()
dialer := sys.Dialer.Get()
_ = sys.MagicSock.Get() // or panic
pm, err := newProfileManager(store, logf)
if err != nil {
@@ -312,24 +303,23 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
clock := tstime.StdClock{}
b := &LocalBackend{
ctx: ctx,
ctxCancel: cancel,
logf: logf,
keyLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now),
statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now),
sys: sys,
e: e,
dialer: dialer,
store: store,
pm: pm,
backendLogID: logID,
state: ipn.NoState,
portpoll: portpoll,
em: newExpiryManager(logf),
gotPortPollRes: make(chan struct{}),
loginFlags: loginFlags,
clock: clock,
activeWatchSessions: make(set.Set[string]),
ctx: ctx,
ctxCancel: cancel,
logf: logf,
keyLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now),
statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now),
sys: sys,
e: e,
dialer: dialer,
store: store,
pm: pm,
backendLogID: logID,
state: ipn.NoState,
portpoll: portpoll,
em: newExpiryManager(logf),
gotPortPollRes: make(chan struct{}),
loginFlags: loginFlags,
clock: clock,
}
netMon := sys.NetMon.Get()
@@ -405,7 +395,11 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim
var setEnabled func(bool)
switch component {
case "magicsock":
setEnabled = b.magicConn().SetDebugLoggingEnabled
mc, err := b.magicConn()
if err != nil {
return err
}
setEnabled = mc.SetDebugLoggingEnabled
case "sockstats":
if b.sockstatLogger != nil {
setEnabled = func(v bool) {
@@ -571,14 +565,7 @@ func (b *LocalBackend) Shutdown() {
b.mu.Unlock()
ctx, cancel := context.WithTimeout(b.ctx, 5*time.Second)
defer cancel()
t0 := time.Now()
err := b.Logout(ctx) // best effort
td := time.Since(t0).Round(time.Millisecond)
if err != nil {
b.logf("failed to log out ephemeral node on shutdown after %v: %v", td, err)
} else {
b.logf("logged out ephemeral node on shutdown")
}
b.LogoutSync(ctx) // best effort
b.mu.Lock()
}
cc := b.cc
@@ -670,9 +657,6 @@ func (b *LocalBackend) updateStatus(sb *ipnstate.StatusBuilder, extraLocked func
s.TUN = !b.sys.IsNetstack()
s.BackendState = b.state.String()
s.AuthURL = b.authURLSticky
if prefs := b.pm.CurrentPrefs(); prefs.Valid() && prefs.AutoUpdate().Check {
s.ClientVersion = b.lastClientVersion
}
if err := health.OverallError(); err != nil {
switch e := err.(type) {
case multierr.Error:
@@ -894,18 +878,22 @@ func (b *LocalBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
return nil
}
// SetDecompressor sets a decompression function, which must be a zstd
// reader.
//
// This exists because the iOS/Mac NetworkExtension is very resource
// constrained, and the zstd package is too heavy to fit in the
// constrained RSS limit.
func (b *LocalBackend) SetDecompressor(fn func() (controlclient.Decompressor, error)) {
b.newDecompressor = fn
}
// SetControlClientStatus is the callback invoked by the control client whenever it posts a new status.
// Among other things, this is where we update the netmap, packet filters, DNS and DERP maps.
func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st controlclient.Status) {
b.mu.Lock()
if b.cc != c {
b.logf("Ignoring SetControlClientStatus from old client")
b.mu.Unlock()
return
}
func (b *LocalBackend) SetControlClientStatus(st controlclient.Status) {
// The following do not depend on any data for which we need to lock b.
if st.Err != nil {
b.mu.Unlock()
// TODO(crawshaw): display in the UI.
if errors.Is(st.Err, io.EOF) {
b.logf("[v1] Received error: EOF")
return
@@ -922,6 +910,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
// Track the number of calls
currCall := b.numClientStatusCalls.Add(1)
b.mu.Lock()
// Handle node expiry in the netmap
if st.NetMap != nil {
now := b.clock.Now()
@@ -957,7 +947,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
// Call ourselves with the current status again; the logic in
// setClientStatus will take care of updating the expired field
// of peers in the netmap.
b.SetControlClientStatus(c, st)
b.SetControlClientStatus(st)
})
}
}
@@ -979,7 +969,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
b.blockEngineUpdates(false)
}
if st.LoginFinished() && wasBlocked {
if st.LoginFinished != nil && wasBlocked {
// Auth completed, unblock the engine
b.blockEngineUpdates(false)
b.authReconfig()
@@ -989,6 +979,20 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
// Lock b once and do only the things that require locking.
b.mu.Lock()
if st.LogoutFinished != nil {
if p := b.pm.CurrentPrefs(); !p.Persist().Valid() || p.Persist().UserProfile().LoginName() == "" {
b.mu.Unlock()
return
}
if err := b.pm.DeleteProfile(b.pm.CurrentProfile().ID); err != nil {
b.logf("error deleting profile: %v", err)
}
if err := b.resetForProfileChangeLockedOnEntry(); err != nil {
b.logf("resetForProfileChangeLockedOnEntry err: %v", err)
}
return
}
prefsChanged := false
prefs := b.pm.CurrentPrefs().AsStruct()
netMap := b.netMap
@@ -1003,8 +1007,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
prefs.ControlURL = prefs.ControlURLOrDefault()
prefsChanged = true
}
if st.Persist.Valid() {
if !prefs.Persist.View().Equals(st.Persist) {
if st.Persist != nil && st.Persist.Valid() {
if !prefs.Persist.View().Equals(*st.Persist) {
prefsChanged = true
prefs.Persist = st.Persist.AsStruct()
}
@@ -1013,7 +1017,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
b.authURL = st.URL
b.authURLSticky = st.URL
}
if wasBlocked && st.LoginFinished() {
if wasBlocked && st.LoginFinished != nil {
// Interactive login finished successfully (URL visited).
// After an interactive login, the user always wants
// WantRunning.
@@ -1098,7 +1102,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
}
b.e.SetNetworkMap(st.NetMap)
b.magicConn().SetDERPMap(st.NetMap.DERPMap)
b.e.SetDERPMap(st.NetMap.DERPMap)
// Update our cached DERP map
dnsfallback.UpdateCache(st.NetMap.DERPMap, b.logf)
@@ -1117,52 +1121,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
b.authReconfig()
}
var _ controlclient.NetmapDeltaUpdater = (*LocalBackend)(nil)
// UpdateNetmapDelta implements controlclient.NetmapDeltaUpdater.
func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) {
if !b.magicConn().UpdateNetmapDelta(muts) {
return false
}
b.mu.Lock()
defer b.mu.Unlock()
return b.updateNetmapDeltaLocked(muts)
}
func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (handled bool) {
if b.netMap == nil {
return false
}
peers := b.netMap.Peers
for _, m := range muts {
// LocalBackend only cares about some types of mutations.
// (magicsock cares about different ones.)
switch m.(type) {
case netmap.NodeMutationOnline, netmap.NodeMutationLastSeen:
default:
continue
}
nodeID := m.NodeIDBeingMutated()
idx := b.netMap.PeerIndexByNodeID(nodeID)
if idx == -1 {
continue
}
mut := peers[idx].AsStruct()
switch m := m.(type) {
case netmap.NodeMutationOnline:
mut.Online = ptr.To(m.Online)
case netmap.NodeMutationLastSeen:
mut.LastSeen = ptr.To(m.LastSeen)
}
peers[idx] = mut.View()
}
return true
}
// setExitNodeID updates prefs to reference an exit node by ID, rather
// than by IP. It returns whether prefs was mutated.
func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) {
@@ -1339,6 +1297,10 @@ func (b *LocalBackend) startIsNoopLocked(opts ipn.Options) bool {
// actually a supported operation (it should be, but it's very unclear
// from the following whether or not that is a safe transition).
func (b *LocalBackend) Start(opts ipn.Options) error {
if opts.LegacyMigrationPrefs == nil && !b.pm.CurrentPrefs().Valid() {
return errors.New("no prefs provided")
}
if opts.LegacyMigrationPrefs != nil {
b.logf("Start: %v", opts.LegacyMigrationPrefs.Pretty())
} else {
@@ -1346,11 +1308,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
}
b.mu.Lock()
if opts.LegacyMigrationPrefs == nil && !b.pm.CurrentPrefs().Valid() {
b.mu.Unlock()
return errors.New("no prefs provided")
}
if opts.UpdatePrefs != nil {
if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil {
b.mu.Unlock()
@@ -1369,7 +1326,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
// but meanwhile we can make Start cheaper here for such a
// case and not restart the world (which takes a few seconds).
// Instead, just send a notify with the state that iOS needs.
if b.startIsNoopLocked(opts) && profileID == b.lastProfileID && profileID != "" {
if b.startIsNoopLocked(opts) && profileID == b.lastProfileID {
b.logf("Start: already running; sending notify")
nm := b.netMap
state := b.state
@@ -1390,14 +1347,16 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
hostinfo.Userspace.Set(b.sys.IsNetstack())
hostinfo.UserspaceRouter.Set(b.sys.IsNetstackRouter())
// TODO(apenwarr): avoid the need to reinit controlclient.
// This will trigger a full relogin/reconfigure cycle every
// time a Handle reconnects to the backend. Ideally, we
// would send the new Prefs and everything would get back
// into sync with the minimal changes. But that's not how it
// is right now, which is a sign that the code is still too
// complicated.
prevCC := b.resetControlClientLocked()
if b.cc != nil {
// TODO(apenwarr): avoid the need to reinit controlclient.
// This will trigger a full relogin/reconfigure cycle every
// time a Handle reconnects to the backend. Ideally, we
// would send the new Prefs and everything would get back
// into sync with the minimal changes. But that's not how it
// is right now, which is a sign that the code is still too
// complicated.
b.resetControlClientLockedAsync()
}
httpTestClient := b.httpTestClient
if b.hostinfo != nil {
@@ -1426,7 +1385,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
wantRunning := prefs.WantRunning()
if wantRunning {
if err := b.initMachineKeyLocked(); err != nil {
b.mu.Unlock()
return fmt.Errorf("initMachineKeyLocked: %w", err)
}
}
@@ -1467,7 +1425,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
})
}
discoPublic := b.magicConn().DiscoPublicKey()
discoPublic := b.e.DiscoPublicKey()
var err error
@@ -1477,10 +1435,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
debugFlags = append([]string{"netstack"}, debugFlags...)
}
if prevCC != nil {
prevCC.Shutdown()
}
// TODO(apenwarr): The only way to change the ServerURL is to
// re-run b.Start(), because this is the only place we create a
// new controlclient. SetPrefs() allows you to overwrite ServerURL,
@@ -1492,6 +1446,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
ServerURL: serverURL,
AuthKey: opts.AuthKey,
Hostinfo: hostinfo,
KeepAlive: true,
NewDecompressor: b.newDecompressor,
HTTPTestClient: httpTestClient,
DiscoPublicKey: discoPublic,
DebugFlags: debugFlags,
@@ -1504,7 +1460,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
Observer: b,
C2NHandler: http.HandlerFunc(b.handleC2N),
DialPlan: &b.dialPlan, // pointer because it can't be copied
ControlKnobs: b.sys.ControlKnobs(),
// Don't warn about broken Linux IP forwarding when
// netstack is being used.
@@ -1515,13 +1470,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
}
b.mu.Lock()
// Even though we reset b.cc above, we might have raced with
// another Start() call. If so, shut down the previous one again
// as we do not know if it was created with the same options.
prevCC = b.resetControlClientLocked()
if prevCC != nil {
defer prevCC.Shutdown() // must be called after b.mu is unlocked
}
b.cc = cc
b.ccAuto, _ = cc.(*controlclient.Auto)
endpoints := b.endpoints
@@ -1545,7 +1493,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
}
cc.SetTKAHead(tkaHead)
b.magicConn().SetNetInfoCallback(b.setNetInfo)
b.e.SetNetInfoCallback(b.setNetInfo)
blid := b.backendLogID.String()
b.logf("Backend: logs: be:%v fe:%v", blid, opts.FrontendLogID)
@@ -1980,8 +1928,6 @@ func (b *LocalBackend) ResendHostinfoIfNeeded() {
func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWatchOpt, onWatchAdded func(), fn func(roNotify *ipn.Notify) (keepGoing bool)) {
ch := make(chan *ipn.Notify, 128)
sessionID := rands.HexString(16)
origFn := fn
if mask&ipn.NotifyNoPrivateKeys != 0 {
fn = func(n *ipn.Notify) bool {
@@ -2003,13 +1949,10 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa
var ini *ipn.Notify
b.mu.Lock()
b.activeWatchSessions.Add(sessionID)
const initialBits = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap
if mask&initialBits != 0 {
ini = &ipn.Notify{Version: version.Long()}
if mask&ipn.NotifyInitialState != 0 {
ini.SessionID = sessionID
ini.State = ptr.To(b.state)
if b.state == ipn.NeedsLogin {
ini.BrowseToURL = ptr.To(b.authURLSticky)
@@ -2029,7 +1972,6 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa
defer func() {
b.mu.Lock()
delete(b.notifyWatchers, handle)
delete(b.activeWatchSessions, sessionID)
b.mu.Unlock()
}()
@@ -2060,10 +2002,6 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa
go b.pollRequestEngineStatus(ctx)
}
// TODO(marwan-at-work): check err
// TODO(marwan-at-work): streaming background logs?
defer b.DeleteForegroundSession(sessionID)
for {
select {
case <-ctx.Done():
@@ -2219,9 +2157,6 @@ func (b *LocalBackend) tellClientToBrowseToURL(url string) {
// onClientVersion is called on MapResponse updates when a MapResponse contains
// a non-nil ClientVersion message.
func (b *LocalBackend) onClientVersion(v *tailcfg.ClientVersion) {
b.mu.Lock()
b.lastClientVersion = v
b.mu.Unlock()
switch runtime.GOOS {
case "darwin", "ios":
// These auto-update well enough, and we haven't converted the
@@ -2830,7 +2765,7 @@ func (b *LocalBackend) SetPrefs(newp *ipn.Prefs) {
// doesn't affect security or correctness. And we also don't expect people to
// modify their ServeConfig in raw mode.
func (b *LocalBackend) wantIngressLocked() bool {
return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel()
return b.serveConfig.Valid() && b.serveConfig.AllowFunnel().Len() > 0
}
// setPrefsLockedOnEntry requires b.mu be held to call it, but it
@@ -2896,7 +2831,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(caller string, newp *ipn.Prefs) ipn
}
if netMap != nil {
b.magicConn().SetDERPMap(netMap.DERPMap)
b.e.SetDERPMap(netMap.DERPMap)
}
if !oldp.WantRunning() && newp.WantRunning {
@@ -3128,7 +3063,7 @@ func (b *LocalBackend) authReconfig() {
return
}
oneCGNATRoute := shouldUseOneCGNATRoute(b.logf, b.sys.ControlKnobs(), version.OS())
oneCGNATRoute := shouldUseOneCGNATRoute(b.logf, version.OS())
rcfg := b.routerConfig(cfg, prefs, oneCGNATRoute)
dcfg := dnsConfigForNetmap(nm, prefs, b.logf, version.OS())
@@ -3146,13 +3081,11 @@ func (b *LocalBackend) authReconfig() {
//
// The versionOS is a Tailscale-style version ("iOS", "macOS") and not
// a runtime.GOOS.
func shouldUseOneCGNATRoute(logf logger.Logf, controlKnobs *controlknobs.Knobs, versionOS string) bool {
if controlKnobs != nil {
// Explicit enabling or disabling always takes precedence.
if v, ok := controlKnobs.OneCGNAT.Load().Get(); ok {
logf("[v1] shouldUseOneCGNATRoute: explicit=%v", v)
return v
}
func shouldUseOneCGNATRoute(logf logger.Logf, versionOS string) bool {
// Explicit enabling or disabling always takes precedence.
if v, ok := controlclient.ControlOneCGNATSetting().Get(); ok {
logf("[v1] shouldUseOneCGNATRoute: explicit=%v", v)
return v
}
// Also prefer to do this on the Mac, so that we don't need to constantly
@@ -3784,16 +3717,10 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State) {
}
// hasNodeKey reports whether a non-zero node key is present in the current
// prefs.
func (b *LocalBackend) hasNodeKey() bool {
// we can't use b.Prefs(), because it strips the keys, oops!
b.mu.Lock()
defer b.mu.Unlock()
return b.hasNodeKeyLocked()
}
func (b *LocalBackend) hasNodeKeyLocked() bool {
// we can't use b.Prefs(), because it strips the keys, oops!
p := b.pm.CurrentPrefs()
return p.Valid() && p.Persist().Valid() && !p.Persist().PrivateNodeKey().IsZero()
}
@@ -3802,17 +3729,19 @@ func (b *LocalBackend) hasNodeKeyLocked() bool {
func (b *LocalBackend) NodeKey() key.NodePublic {
b.mu.Lock()
defer b.mu.Unlock()
if !b.hasNodeKeyLocked() {
p := b.pm.CurrentPrefs()
if !p.Valid() || !p.Persist().Valid() || p.Persist().PrivateNodeKey().IsZero() {
return key.NodePublic{}
}
return b.pm.CurrentPrefs().Persist().PublicNodeKey()
return p.Persist().PublicNodeKey()
}
// nextStateLocked returns the state the backend seems to be in, based on
// nextState returns the state the backend seems to be in, based on
// its internal state.
//
// b.mu must be held
func (b *LocalBackend) nextStateLocked() ipn.State {
func (b *LocalBackend) nextState() ipn.State {
b.mu.Lock()
var (
cc = b.cc
netMap = b.netMap
@@ -3828,9 +3757,10 @@ func (b *LocalBackend) nextStateLocked() ipn.State {
wantRunning = p.WantRunning()
loggedOut = p.LoggedOut()
}
b.mu.Unlock()
switch {
case !wantRunning && !loggedOut && !blocked && b.hasNodeKeyLocked():
case !wantRunning && !loggedOut && !blocked && b.hasNodeKey():
return ipn.Stopped
case netMap == nil:
if (cc != nil && cc.AuthCantContinue()) || loggedOut {
@@ -3893,8 +3823,7 @@ func (b *LocalBackend) RequestEngineStatus() {
// TODO(apenwarr): use a channel or something to prevent reentrancy?
// Or maybe just call the state machine from fewer places.
func (b *LocalBackend) stateMachine() {
b.mu.Lock()
b.enterStateLockedOnEntry(b.nextStateLocked())
b.enterState(b.nextState())
}
// stopEngineAndWait deconfigures the local network data plane, and
@@ -3922,12 +3851,12 @@ func (b *LocalBackend) requestEngineStatusAndWait() {
b.statusLock.Unlock()
}
// resetControlClientLocked sets b.cc to nil and returns the old value. If the
// returned value is non-nil, the caller must call Shutdown on it after
// releasing b.mu.
func (b *LocalBackend) resetControlClientLocked() controlclient.Client {
// resetControlClientLockedAsync sets b.cc to nil, and starts a
// goroutine to Shutdown the old client. It does not wait for the
// shutdown to complete.
func (b *LocalBackend) resetControlClientLockedAsync() {
if b.cc == nil {
return nil
return
}
// When we clear the control client, stop any outstanding netmap expiry
@@ -3943,10 +3872,10 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client {
// will abort.
b.numClientStatusCalls.Add(1)
}
prev := b.cc
go b.cc.Shutdown()
b.cc = nil
b.ccAuto = nil
return prev
}
// ResetForClientDisconnect resets the backend for GUI clients running
@@ -3956,15 +3885,11 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client {
// don't want the user to have to reauthenticate in the future
// when they restart the GUI.
func (b *LocalBackend) ResetForClientDisconnect() {
b.logf("LocalBackend.ResetForClientDisconnect")
defer b.enterState(ipn.Stopped)
b.mu.Lock()
prevCC := b.resetControlClientLocked()
if prevCC != nil {
// Needs to happen without b.mu held.
defer prevCC.Shutdown()
}
defer b.mu.Unlock()
b.logf("LocalBackend.ResetForClientDisconnect")
b.resetControlClientLockedAsync()
b.setNetMapLocked(nil)
b.pm.Reset()
b.keyExpired = false
@@ -3972,7 +3897,6 @@ func (b *LocalBackend) ResetForClientDisconnect() {
b.authURLSticky = ""
b.activeLogin = ""
b.setAtomicValuesFromPrefsLocked(ipn.PrefsView{})
b.enterStateLockedOnEntry(ipn.Stopped)
}
func (b *LocalBackend) ShouldRunSSH() bool { return b.sshAtomicBool.Load() && envknob.CanSSHD() }
@@ -3987,30 +3911,27 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool {
return false
}
// Logout logs out the current profile, if any, and waits for the logout to
// complete.
func (b *LocalBackend) Logout(ctx context.Context) error {
b.mu.Lock()
if !b.hasNodeKeyLocked() {
// Already logged out.
b.mu.Unlock()
return nil
}
cc := b.cc
// Logout tells the controlclient that we want to log out, and
// transitions the local engine to the logged-out state without
// waiting for controlclient to be in that state.
func (b *LocalBackend) Logout() {
b.logout(context.Background(), false)
}
// Grab the current profile before we unlock the mutex, so that we can
// delete it later.
profile := b.pm.CurrentProfile()
func (b *LocalBackend) LogoutSync(ctx context.Context) error {
return b.logout(ctx, true)
}
func (b *LocalBackend) logout(ctx context.Context, sync bool) error {
b.mu.Lock()
cc := b.cc
b.mu.Unlock()
_, err := b.EditPrefs(&ipn.MaskedPrefs{
b.EditPrefs(&ipn.MaskedPrefs{
WantRunningSet: true,
LoggedOutSet: true,
Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true},
})
if err != nil {
return err
}
// Clear any previous dial plan(s), if set.
b.dialPlan.Store(nil)
@@ -4026,16 +3947,15 @@ func (b *LocalBackend) Logout(ctx context.Context) error {
return errors.New("no controlclient")
}
if err := cc.Logout(ctx); err != nil {
return err
var err error
if sync {
err = cc.Logout(ctx)
} else {
cc.StartLogout()
}
b.mu.Lock()
if err := b.pm.DeleteProfile(profile.ID); err != nil {
b.mu.Unlock()
b.logf("error deleting profile: %v", err)
return err
}
return b.resetForProfileChangeLockedOnEntry()
b.stateMachine()
return err
}
// assertClientLocked crashes if there is no controlclient in this backend.
@@ -4070,9 +3990,6 @@ func hasCapability(nm *netmap.NetworkMap, cap string) bool {
// Tailscale is turned off.
func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
b.dialer.SetNetMap(nm)
if ns, ok := b.sys.Netstack.GetOK(); ok {
ns.UpdateNetstackIPs(nm)
}
var login string
if nm != nil {
login = cmpx.Or(nm.UserProfiles[nm.User()].LoginName, "<missing-profile>")
@@ -4081,7 +3998,6 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
if login != b.activeLogin {
b.logf("active login: %v", login)
b.activeLogin = login
b.lastProfileID = b.pm.CurrentProfile().ID
}
b.pauseOrResumeControlClientLocked()
@@ -4151,10 +4067,6 @@ func (b *LocalBackend) setDebugLogsByCapabilityLocked(nm *netmap.NetworkMap) {
}
}
// reloadServeConfigLocked reloads the serve config from the store or resets the
// serve config to nil if not logged in. The "changed" parameter, when false, instructs
// the method to only run the reset-logic and not reload the store from memory to ensure
// foreground sessions are not removed if they are not saved on disk.
func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) {
if b.netMap == nil || !b.netMap.SelfNode.Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID == "" {
// We're not logged in, so we don't have a profile.
@@ -4163,7 +4075,6 @@ func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) {
b.serveConfig = ipn.ServeConfigView{}
return
}
confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID)
// TODO(maisem,bradfitz): prevent reading the config from disk
// if the profile has not changed.
@@ -4183,12 +4094,6 @@ func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) {
b.serveConfig = ipn.ServeConfigView{}
return
}
// remove inactive sessions
maps.DeleteFunc(conf.Foreground, func(s string, sc *ipn.ServeConfig) bool {
return !b.activeWatchSessions.Contains(s)
})
b.serveConfig = conf.View()
}
@@ -4206,7 +4111,7 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn.
b.reloadServeConfigLocked(prefs)
if b.serveConfig.Valid() {
servePorts := make([]uint16, 0, 3)
b.serveConfig.RangeOverTCPs(func(port uint16, _ ipn.TCPPortHandlerView) bool {
b.serveConfig.TCP().Range(func(port uint16, _ ipn.TCPPortHandlerView) bool {
if port > 0 {
servePorts = append(servePorts, uint16(port))
}
@@ -4239,7 +4144,7 @@ func (b *LocalBackend) setServeProxyHandlersLocked() {
return
}
var backends map[string]bool
b.serveConfig.RangeOverWebs(func(_ ipn.HostPort, conf ipn.WebServerConfigView) (cont bool) {
b.serveConfig.Web().Range(func(_ ipn.HostPort, conf ipn.WebServerConfigView) (cont bool) {
conf.Handlers().Range(func(_ string, h ipn.HTTPHandlerView) (cont bool) {
backend := h.Proxy()
if backend == "" {
@@ -4695,22 +4600,29 @@ func peerCanProxyDNS(p tailcfg.NodeView) bool {
}
func (b *LocalBackend) DebugRebind() error {
b.magicConn().Rebind()
mc, err := b.magicConn()
if err != nil {
return err
}
mc.Rebind()
return nil
}
func (b *LocalBackend) DebugReSTUN() error {
b.magicConn().ReSTUN("explicit-debug")
mc, err := b.magicConn()
if err != nil {
return err
}
mc.ReSTUN("explicit-debug")
return nil
}
// ControlKnobs returns the node's control knobs.
func (b *LocalBackend) ControlKnobs() *controlknobs.Knobs {
return b.sys.ControlKnobs()
}
func (b *LocalBackend) magicConn() *magicsock.Conn {
return b.sys.MagicSock.Get()
func (b *LocalBackend) magicConn() (*magicsock.Conn, error) {
mc, ok := b.sys.MagicSock.GetOK()
if !ok {
return nil, errors.New("failed to get magicsock from sys")
}
return mc, nil
}
type keyProvingNoiseRoundTripper struct {
@@ -5016,26 +4928,16 @@ func (b *LocalBackend) initTKALocked() error {
}
// resetForProfileChangeLockedOnEntry resets the backend for a profile change.
//
// b.mu must be held on entry. It is released on exit.
func (b *LocalBackend) resetForProfileChangeLockedOnEntry() error {
if b.shutdownCalled {
// Prevent a call back to Start during Shutdown, which calls Logout for
// ephemeral nodes, which can then call back here. But we're shutting
// down, so no need to do any work.
b.mu.Unlock()
return nil
}
b.setNetMapLocked(nil) // Reset netmap.
// Reset the NetworkMap in the engine
b.e.SetNetworkMap(new(netmap.NetworkMap))
if err := b.initTKALocked(); err != nil {
b.mu.Unlock()
return err
}
b.lastServeConfJSON = mem.B(nil)
b.serveConfig = ipn.ServeConfigView{}
b.enterStateLockedOnEntry(ipn.NoState) // Reset state; releases b.mu
b.enterStateLockedOnEntry(ipn.NoState) // Reset state.
health.SetLocalLogConfigHealth(nil)
return b.Start(ipn.Options{})
}
@@ -5086,10 +4988,7 @@ func (b *LocalBackend) ListProfiles() []ipn.LoginProfile {
// called to register it as new node.
func (b *LocalBackend) ResetAuth() error {
b.mu.Lock()
prevCC := b.resetControlClientLocked()
if prevCC != nil {
defer prevCC.Shutdown() // call must happen after release b.mu
}
b.resetControlClientLockedAsync()
if err := b.clearMachineKeyLocked(); err != nil {
b.mu.Unlock()
return err
@@ -5153,7 +5052,12 @@ func (b *LocalBackend) GetPeerEndpointChanges(ctx context.Context, ip netip.Addr
}
peer := pip.Node
chs, err := b.magicConn().GetEndpointChanges(peer)
mc, err := b.magicConn()
if err != nil {
return nil, fmt.Errorf("getting magicsock conn: %w", err)
}
chs, err := mc.GetEndpointChanges(peer)
if err != nil {
return nil, fmt.Errorf("getting endpoint changes: %w", err)
}
@@ -5170,5 +5074,9 @@ func (b *LocalBackend) DebugBreakTCPConns() error {
}
func (b *LocalBackend) DebugBreakDERPConns() error {
return b.magicConn().DebugBreakDERPConns()
mc, err := b.magicConn()
if err != nil {
return err
}
return mc.DebugBreakDERPConns()
}

View File

@@ -26,8 +26,6 @@ import (
"tailscale.com/types/logger"
"tailscale.com/types/logid"
"tailscale.com/types/netmap"
"tailscale.com/types/ptr"
"tailscale.com/util/set"
"tailscale.com/wgengine"
"tailscale.com/wgengine/filter"
"tailscale.com/wgengine/wgcfg"
@@ -828,6 +826,9 @@ type legacyBackend interface {
StartLoginInteractive()
// Login logs in with an OAuth2 token.
Login(token *tailcfg.Oauth2Token)
// Logout terminates the current login session and stops the
// wireguard engine.
Logout()
// SetPrefs installs a new set of user preferences, including
// WantRunning. This may cause the wireguard engine to
// reconfigure or stop.
@@ -845,9 +846,6 @@ var _ legacyBackend = (*LocalBackend)(nil)
func TestWatchNotificationsCallbacks(t *testing.T) {
b := new(LocalBackend)
// activeWatchSessions is typically set in NewLocalBackend
// so WatchNotifications expects it to be non-nil.
b.activeWatchSessions = make(set.Set[string])
n := new(ipn.Notify)
b.WatchNotifications(context.Background(), 0, func() {
b.mu.Lock()
@@ -880,75 +878,3 @@ func TestWatchNotificationsCallbacks(t *testing.T) {
t.Fatalf("unexpected number of watchers in new LocalBackend, want: 0 got: %v", len(b.notifyWatchers))
}
}
// tests LocalBackend.updateNetmapDeltaLocked
func TestUpdateNetmapDelta(t *testing.T) {
var b LocalBackend
if b.updateNetmapDeltaLocked(nil) {
t.Errorf("updateNetmapDeltaLocked() = true, want false with nil netmap")
}
b.netMap = &netmap.NetworkMap{}
for i := 0; i < 5; i++ {
b.netMap.Peers = append(b.netMap.Peers, (&tailcfg.Node{ID: (tailcfg.NodeID(i) + 1)}).View())
}
someTime := time.Unix(123, 0)
muts, ok := netmap.MutationsFromMapResponse(&tailcfg.MapResponse{
PeersChangedPatch: []*tailcfg.PeerChange{
{
NodeID: 1,
DERPRegion: 1,
},
{
NodeID: 2,
Online: ptr.To(true),
},
{
NodeID: 3,
Online: ptr.To(false),
},
{
NodeID: 4,
LastSeen: ptr.To(someTime),
},
},
}, someTime)
if !ok {
t.Fatal("netmap.MutationsFromMapResponse failed")
}
if !b.updateNetmapDeltaLocked(muts) {
t.Fatalf("updateNetmapDeltaLocked() = false, want true with new netmap")
}
wants := []*tailcfg.Node{
{
ID: 1,
DERP: "", // unmodified by the delta
},
{
ID: 2,
Online: ptr.To(true),
},
{
ID: 3,
Online: ptr.To(false),
},
{
ID: 4,
LastSeen: ptr.To(someTime),
},
}
for _, want := range wants {
idx := b.netMap.PeerIndexByNodeID(want.ID)
if idx == -1 {
t.Errorf("ID %v not found in netmap", want.ID)
continue
}
got := b.netMap.Peers[idx].AsStruct()
if !reflect.DeepEqual(got, want) {
t.Errorf("netmap.Peer %v wrong.\n got: %v\nwant: %v", want.ID, logger.AsJSON(got), logger.AsJSON(want))
}
}
}

View File

@@ -31,7 +31,7 @@ import (
type observerFunc func(controlclient.Status)
func (f observerFunc) SetControlClientStatus(_ controlclient.Client, s controlclient.Status) {
func (f observerFunc) SetControlClientStatus(s controlclient.Status) {
f(s)
}

View File

@@ -1234,7 +1234,11 @@ func (h *peerAPIHandler) handleServeMagicsock(w http.ResponseWriter, r *http.Req
http.Error(w, "denied; no debug access", http.StatusForbidden)
return
}
h.ps.b.magicConn().ServeHTTPDebug(w, r)
if mc, ok := h.ps.b.sys.MagicSock.GetOK(); ok {
mc.ServeHTTPDebug(w, r)
return
}
http.Error(w, "miswired", 500)
}
func (h *peerAPIHandler) handleServeMetrics(w http.ResponseWriter, r *http.Request) {

View File

@@ -28,8 +28,6 @@ var debug = envknob.RegisterBool("TS_DEBUG_PROFILES")
// profileManager is a wrapper around a StateStore that manages
// multiple profiles and the current profile.
//
// It is not safe for concurrent use.
type profileManager struct {
store ipn.StateStore
logf logger.Logf
@@ -441,7 +439,6 @@ func (pm *profileManager) NewProfile() {
// defaultPrefs is the default prefs for a new profile.
var defaultPrefs = func() ipn.PrefsView {
prefs := ipn.NewPrefs()
prefs.LoggedOut = true
prefs.WantRunning = false
prefs.ControlURL = winutil.GetPolicyString("LoginURL", "")

View File

@@ -5,9 +5,7 @@ package ipnlocal
import (
"context"
"crypto/sha256"
"crypto/tls"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
@@ -25,6 +23,7 @@ import (
"sync"
"time"
"github.com/google/uuid"
"tailscale.com/ipn"
"tailscale.com/logtail/backoff"
"tailscale.com/net/netutil"
@@ -35,11 +34,6 @@ import (
"tailscale.com/version"
)
// ErrETagMismatch signals that the given
// If-Match header does not match with the
// current etag of a resource.
var ErrETagMismatch = errors.New("etag mismatch")
// serveHTTPContextKey is the context.Value key for a *serveHTTPContext.
type serveHTTPContextKey struct{}
@@ -221,15 +215,10 @@ func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint1
}
// SetServeConfig establishes or replaces the current serve config.
// ETag is an optional parameter to enforce Optimistic Concurrency Control.
// If it is an empty string, then the config will be overwritten.
func (b *LocalBackend) SetServeConfig(config *ipn.ServeConfig, etag string) error {
func (b *LocalBackend) SetServeConfig(config *ipn.ServeConfig) error {
b.mu.Lock()
defer b.mu.Unlock()
return b.setServeConfigLocked(config, etag)
}
func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string) error {
prefs := b.pm.CurrentPrefs()
if config.IsFunnelOn() && prefs.ShieldsUp() {
return errors.New("Unable to turn on Funnel while shields-up is enabled")
@@ -242,24 +231,8 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string
if !nm.SelfNode.Valid() {
return errors.New("netMap SelfNode is nil")
}
// If etag is present, check that it has
// not changed from the last config.
if etag != "" {
// Note that we marshal b.serveConfig
// and not use b.lastServeConfJSON as that might
// be a Go nil value, which produces a different
// checksum from a JSON "null" value.
previousCfg, err := json.Marshal(b.serveConfig)
if err != nil {
return fmt.Errorf("error encoding previous config: %w", err)
}
sum := sha256.Sum256(previousCfg)
previousEtag := hex.EncodeToString(sum[:])
if etag != previousEtag {
return ErrETagMismatch
}
}
profileID := b.pm.CurrentProfile().ID
confKey := ipn.ServeConfigKey(profileID)
var bs []byte
if config != nil {
@@ -269,9 +242,6 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string
}
bs = j
}
profileID := b.pm.CurrentProfile().ID
confKey := ipn.ServeConfigKey(profileID)
if err := b.store.WriteState(confKey, bs); err != nil {
return fmt.Errorf("writing ServeConfig to StateStore: %w", err)
}
@@ -288,18 +258,164 @@ func (b *LocalBackend) ServeConfig() ipn.ServeConfigView {
return b.serveConfig
}
// DeleteForegroundSession deletes a ServeConfig's foreground session
// in the LocalBackend if it exists. It also ensures check, delete, and
// set operations happen within the same mutex lock to avoid any races.
func (b *LocalBackend) DeleteForegroundSession(sessionID string) error {
b.mu.Lock()
defer b.mu.Unlock()
if !b.serveConfig.Valid() || !b.serveConfig.Foreground().Has(sessionID) {
return nil
// StreamServe opens a stream that writes a log entry for each incoming
// connection made to the given HostPort out to the provided io.Writer.
//
// If Serve and Funnel were not already enabled for the HostPort in the ServeConfig,
// the backend enables them for the duration of the context's lifespan and
// then turns them back off once the context is closed. If either is already enabled,
// it remains that way, but logs are still streamed.
func (b *LocalBackend) StreamServe(ctx context.Context, w io.Writer, req ipn.ServeStreamRequest) (err error) {
f, ok := w.(http.Flusher)
if !ok {
return errors.New("writer not a flusher")
}
f.Flush()
port, err := req.HostPort.Port()
if err != nil {
return err
}
// Turn on Funnel for the given HostPort.
sc := b.ServeConfig().AsStruct()
if sc == nil {
sc = &ipn.ServeConfig{}
}
setHandler(sc, req)
if err := b.SetServeConfig(sc); err != nil {
return fmt.Errorf("errro setting serve config: %w", err)
}
// Defer turning off Funnel once stream ends.
defer func() {
sc := b.ServeConfig().AsStruct()
deleteHandler(sc, req, port)
err = errors.Join(err, b.SetServeConfig(sc))
}()
var writeErrs []error
writeToStream := func(log ipn.FunnelRequestLog) {
jsonLog, err := json.Marshal(log)
if err != nil {
writeErrs = append(writeErrs, err)
return
}
if _, err := fmt.Fprintf(w, "%s\n", jsonLog); err != nil {
writeErrs = append(writeErrs, err)
return
}
f.Flush()
}
// Hook up connections stream.
b.mu.Lock()
mak.NonNilMapForJSON(&b.serveStreamers)
if b.serveStreamers[port] == nil {
b.serveStreamers[port] = make(map[uint32]func(ipn.FunnelRequestLog))
}
id := uuid.New().ID()
b.serveStreamers[port][id] = writeToStream
b.mu.Unlock()
// Clean up streamer when done.
defer func() {
b.mu.Lock()
delete(b.serveStreamers[port], id)
b.mu.Unlock()
}()
select {
case <-ctx.Done():
// Triggered by foreground `tailscale funnel` process
// (the streamer) getting closed, or by turning off Tailscale.
}
return errors.Join(writeErrs...)
}
func setHandler(sc *ipn.ServeConfig, req ipn.ServeStreamRequest) {
if sc.TCP == nil {
sc.TCP = make(map[uint16]*ipn.TCPPortHandler)
}
if _, ok := sc.TCP[443]; !ok {
sc.TCP[443] = &ipn.TCPPortHandler{
HTTPS: true,
}
}
if sc.Web == nil {
sc.Web = make(map[ipn.HostPort]*ipn.WebServerConfig)
}
wsc, ok := sc.Web[req.HostPort]
if !ok {
wsc = &ipn.WebServerConfig{}
sc.Web[req.HostPort] = wsc
}
if wsc.Handlers == nil {
wsc.Handlers = make(map[string]*ipn.HTTPHandler)
}
wsc.Handlers[req.MountPoint] = &ipn.HTTPHandler{
Proxy: req.Source,
}
if req.Funnel {
if sc.AllowFunnel == nil {
sc.AllowFunnel = make(map[ipn.HostPort]bool)
}
sc.AllowFunnel[req.HostPort] = true
}
}
func deleteHandler(sc *ipn.ServeConfig, req ipn.ServeStreamRequest, port uint16) {
delete(sc.AllowFunnel, req.HostPort)
if sc.TCP != nil {
delete(sc.TCP, port)
}
if sc.Web == nil {
return
}
if sc.Web[req.HostPort] == nil {
return
}
wsc, ok := sc.Web[req.HostPort]
if !ok {
return
}
if wsc.Handlers == nil {
return
}
if _, ok := wsc.Handlers[req.MountPoint]; !ok {
return
}
delete(wsc.Handlers, req.MountPoint)
if len(wsc.Handlers) == 0 {
delete(sc.Web, req.HostPort)
}
}
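For illustration only (this sketch is mine, not part of the diff), here is roughly the ipn.ServeConfig that setHandler builds for a hypothetical foreground funnel request and that deleteHandler later tears down. It assumes it lives inside the ipnlocal package, since both helpers are unexported, and the host, source, and mount values are all invented.
package ipnlocal // sketch; uses the unexported setHandler/deleteHandler above
import "tailscale.com/ipn"
func exampleForegroundServeConfig() *ipn.ServeConfig {
	// Hypothetical request; all values are examples only.
	req := ipn.ServeStreamRequest{
		HostPort:   "example.ts.net:443",
		Source:     "http://127.0.0.1:3000",
		MountPoint: "/",
		Funnel:     true,
	}
	sc := new(ipn.ServeConfig)
	setHandler(sc, req)
	// sc now carries:
	//   TCP[443] = &ipn.TCPPortHandler{HTTPS: true}
	//   Web["example.ts.net:443"].Handlers["/"] = &ipn.HTTPHandler{Proxy: "http://127.0.0.1:3000"}
	//   AllowFunnel["example.ts.net:443"] = true
	if port, err := req.HostPort.Port(); err == nil {
		deleteHandler(sc, req, port) // removes those same entries again
	}
	return sc
}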
func (b *LocalBackend) maybeLogServeConnection(destPort uint16, srcAddr netip.AddrPort) {
b.mu.Lock()
streamers := b.serveStreamers[destPort]
b.mu.Unlock()
if len(streamers) == 0 {
return
}
var log ipn.FunnelRequestLog
log.SrcAddr = srcAddr
log.Time = b.clock.Now()
if node, user, ok := b.WhoIs(srcAddr); ok {
log.NodeName = node.ComputedName()
if node.IsTagged() {
log.NodeTags = node.Tags().AsSlice()
} else {
log.UserLoginName = user.LoginName
log.UserDisplayName = user.DisplayName
}
}
for _, stream := range streamers {
stream(log)
}
sc := b.serveConfig.AsStruct()
delete(sc.Foreground, sessionID)
return b.setServeConfigLocked(sc, "")
}
func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target ipn.HostPort, srcAddr netip.AddrPort, getConnOrReset func() (net.Conn, bool), sendRST func()) {
@@ -313,7 +429,7 @@ func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target
return
}
if !sc.HasFunnelForTarget(target) {
if !sc.AllowFunnel().Get(target) {
b.logf("localbackend: got ingress conn for unconfigured %q; rejecting", target)
sendRST()
return
@@ -371,7 +487,7 @@ func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort)
return nil
}
tcph, ok := sc.FindTCP(dport)
tcph, ok := sc.TCP().GetOk(dport)
if !ok {
b.logf("[unexpected] localbackend: got TCP conn without TCP config for port %v; from %v", dport, srcAddr)
return nil
@@ -404,6 +520,7 @@ func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort)
if backDst := tcph.TCPForward(); backDst != "" {
return func(conn net.Conn) error {
defer conn.Close()
b.maybeLogServeConnection(dport, srcAddr)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
backConn, err := b.dialer.SystemDial(ctx, "tcp", backDst)
cancel()
@@ -566,14 +683,15 @@ func (b *LocalBackend) addTailscaleIdentityHeaders(r *httputil.ProxyRequest) {
r.Out.Header.Set("Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers")
}
// serveWebHandler is an http.HandlerFunc that maps incoming requests to the
// correct *http.
func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) {
h, mountPoint, ok := b.getServeHandler(r)
if !ok {
http.NotFound(w, r)
return
}
if c, ok := getServeHTTPContext(r); ok {
b.maybeLogServeConnection(c.DestPort, c.SrcAddr)
}
if s := h.Text(); s != "" {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
io.WriteString(w, s)
@@ -709,7 +827,7 @@ func (b *LocalBackend) webServerConfig(hostname string, port uint16) (c ipn.WebS
if !b.serveConfig.Valid() {
return c, false
}
return b.serveConfig.FindWeb(key)
return b.serveConfig.Web().GetOk(key)
}
func (b *LocalBackend) getTLSServeCertForPort(port uint16) func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {

View File

@@ -6,11 +6,7 @@ package ipnlocal
import (
"bytes"
"context"
"crypto/sha256"
"crypto/tls"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/http/httptest"
@@ -28,7 +24,6 @@ import (
"tailscale.com/types/logid"
"tailscale.com/types/netmap"
"tailscale.com/util/cmpx"
"tailscale.com/util/mak"
"tailscale.com/util/must"
"tailscale.com/wgengine"
)
@@ -174,82 +169,50 @@ func TestGetServeHandler(t *testing.T) {
}
}
func getEtag(t *testing.T, b any) string {
t.Helper()
bts, err := json.Marshal(b)
func TestServeHTTPProxy(t *testing.T) {
sys := &tsd.System{}
e, err := wgengine.NewUserspaceEngine(t.Logf, wgengine.Config{SetSubsystem: sys.Set})
if err != nil {
t.Fatal(err)
}
sum := sha256.Sum256(bts)
return hex.EncodeToString(sum[:])
}
func TestServeConfigETag(t *testing.T) {
b := newTestBackend(t)
// a nil config with initial etag should succeed
err := b.SetServeConfig(nil, getEtag(t, nil))
sys.Set(e)
sys.Set(new(mem.Store))
b, err := NewLocalBackend(t.Logf, logid.PublicID{}, sys, 0)
if err != nil {
t.Fatal(err)
}
defer b.Shutdown()
dir := t.TempDir()
b.SetVarRoot(dir)
// a nil config with an invalid etag should fail
err = b.SetServeConfig(nil, "abc")
if !errors.Is(err, ErrETagMismatch) {
t.Fatal("expected an error but got nil")
}
pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
pm.currentProfile = &ipn.LoginProfile{ID: "id0"}
b.pm = pm
// a new config with no etag should succeed
conf := &ipn.ServeConfig{
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"example.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000"},
}},
b.netMap = &netmap.NetworkMap{
SelfNode: (&tailcfg.Node{
Name: "example.ts.net",
}).View(),
UserProfiles: map[tailcfg.UserID]tailcfg.UserProfile{
tailcfg.UserID(1): {
LoginName: "someone@example.com",
DisplayName: "Some One",
ProfilePicURL: "https://example.com/photo.jpg",
},
},
}
err = b.SetServeConfig(conf, getEtag(t, nil))
if err != nil {
t.Fatal(err)
b.nodeByAddr = map[netip.Addr]tailcfg.NodeView{
netip.MustParseAddr("100.150.151.152"): (&tailcfg.Node{
ComputedName: "some-peer",
User: tailcfg.UserID(1),
}).View(),
netip.MustParseAddr("100.150.151.153"): (&tailcfg.Node{
ComputedName: "some-tagged-peer",
Tags: []string{"tag:server", "tag:test"},
User: tailcfg.UserID(1),
}).View(),
}
confView := b.ServeConfig()
etag := getEtag(t, confView)
if etag == "" {
t.Fatal("expected to get an etag but got an empty string")
}
conf = confView.AsStruct()
mak.Set(&conf.AllowFunnel, "example.ts.net:443", true)
// replacing an existing config with an invalid etag should fail
err = b.SetServeConfig(conf, "invalid etag")
if !errors.Is(err, ErrETagMismatch) {
t.Fatalf("expected an etag mismatch error but got %v", err)
}
// replacing an existing config with a valid etag should succeed
err = b.SetServeConfig(conf, etag)
if err != nil {
t.Fatal(err)
}
// replacing an existing config with a previous etag should fail
err = b.SetServeConfig(nil, etag)
if !errors.Is(err, ErrETagMismatch) {
t.Fatalf("expected an etag mismatch error but got %v", err)
}
// replacing an existing config with the new etag should succeed
newCfg := b.ServeConfig()
etag = getEtag(t, newCfg)
err = b.SetServeConfig(nil, etag)
if err != nil {
t.Fatal(err)
}
}
func TestServeHTTPProxy(t *testing.T) {
b := newTestBackend(t)
// Start test serve endpoint.
testServ := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
@@ -269,7 +232,7 @@ func TestServeHTTPProxy(t *testing.T) {
}},
},
}
if err := b.SetServeConfig(conf, ""); err != nil {
if err := b.SetServeConfig(conf); err != nil {
t.Fatal(err)
}
@@ -346,52 +309,6 @@ func TestServeHTTPProxy(t *testing.T) {
}
}
func newTestBackend(t *testing.T) *LocalBackend {
sys := &tsd.System{}
e, err := wgengine.NewUserspaceEngine(t.Logf, wgengine.Config{SetSubsystem: sys.Set})
if err != nil {
t.Fatal(err)
}
sys.Set(e)
sys.Set(new(mem.Store))
b, err := NewLocalBackend(t.Logf, logid.PublicID{}, sys, 0)
if err != nil {
t.Fatal(err)
}
t.Cleanup(b.Shutdown)
dir := t.TempDir()
b.SetVarRoot(dir)
pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
pm.currentProfile = &ipn.LoginProfile{ID: "id0"}
b.pm = pm
b.netMap = &netmap.NetworkMap{
SelfNode: (&tailcfg.Node{
Name: "example.ts.net",
}).View(),
UserProfiles: map[tailcfg.UserID]tailcfg.UserProfile{
tailcfg.UserID(1): {
LoginName: "someone@example.com",
DisplayName: "Some One",
ProfilePicURL: "https://example.com/photo.jpg",
},
},
}
b.nodeByAddr = map[netip.Addr]tailcfg.NodeView{
netip.MustParseAddr("100.150.151.152"): (&tailcfg.Node{
ComputedName: "some-peer",
User: tailcfg.UserID(1),
}).View(),
netip.MustParseAddr("100.150.151.153"): (&tailcfg.Node{
ComputedName: "some-tagged-peer",
Tags: []string{"tag:server", "tag:test"},
User: tailcfg.UserID(1),
}).View(),
}
return b
}
func TestServeFileOrDirectory(t *testing.T) {
td := t.TempDir()
writeFile := func(suffix, contents string) {

View File

@@ -19,6 +19,7 @@ import (
"tailscale.com/tailcfg"
"tailscale.com/tsd"
"tailscale.com/tstest"
"tailscale.com/types/empty"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/logid"
@@ -162,18 +163,19 @@ func (cc *mockControl) send(err error, url string, loginFinished bool, nm *netma
cc.mu.Unlock()
}
if cc.opts.Observer != nil {
pv := cc.persist.View()
s := controlclient.Status{
URL: url,
NetMap: nm,
Persist: cc.persist.View(),
Persist: &pv,
Err: err,
}
if loginFinished {
s.SetStateForTest(controlclient.StateAuthenticated)
s.LoginFinished = &empty.Message{}
} else if url == "" && err == nil && nm == nil {
s.SetStateForTest(controlclient.StateNotAuthenticated)
s.LogoutFinished = &empty.Message{}
}
cc.opts.Observer.SetControlClientStatus(cc, s)
cc.opts.Observer.SetControlClientStatus(s)
}
}
@@ -217,6 +219,11 @@ func (cc *mockControl) Login(t *tailcfg.Oauth2Token, flags controlclient.LoginFl
cc.authBlocked = interact || newKeys
}
func (cc *mockControl) StartLogout() {
cc.logf("StartLogout")
cc.called("StartLogout")
}
func (cc *mockControl) Logout(ctx context.Context) error {
cc.logf("Logout")
cc.called("Logout")
@@ -324,10 +331,10 @@ func TestStateMachine(t *testing.T) {
(n.Prefs != nil && n.Prefs.Valid()) ||
n.BrowseToURL != nil ||
n.LoginFinished != nil {
logf("%v\n\n", n)
logf("\n%v\n\n", n)
notifies.put(n)
} else {
logf("(ignored) %v\n\n", n)
logf("\n(ignored) %v\n\n", n)
}
})
@@ -353,7 +360,7 @@ func TestStateMachine(t *testing.T) {
// Note: a totally fresh system has Prefs.LoggedOut=false by
// default. We are logged out, but not because the user asked
// for it, so it doesn't count as Prefs.LoggedOut==true.
c.Assert(prefs.LoggedOut(), qt.IsTrue)
c.Assert(prefs.LoggedOut(), qt.IsFalse)
c.Assert(prefs.WantRunning(), qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
@@ -374,7 +381,7 @@ func TestStateMachine(t *testing.T) {
cc.assertCalls()
c.Assert(nn[0].Prefs, qt.IsNotNil)
c.Assert(nn[1].State, qt.IsNotNil)
c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue)
c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse)
c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
@@ -412,7 +419,7 @@ func TestStateMachine(t *testing.T) {
nn := notifies.drain(1)
c.Assert(nn[0].Prefs, qt.IsNotNil)
c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue)
c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse)
c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
@@ -578,39 +585,59 @@ func TestStateMachine(t *testing.T) {
// User wants to logout.
store.awaitWrite()
t.Logf("\n\nLogout")
notifies.expect(5)
b.Logout(context.Background())
t.Logf("\n\nLogout (async)")
notifies.expect(2)
b.Logout()
{
nn := notifies.drain(5)
previousCC.assertCalls("pause", "Logout", "unpause", "Shutdown")
nn := notifies.drain(2)
cc.assertCalls("pause", "StartLogout")
c.Assert(nn[0].State, qt.IsNotNil)
c.Assert(*nn[0].State, qt.Equals, ipn.Stopped)
c.Assert(nn[1].Prefs, qt.IsNotNil)
c.Assert(ipn.Stopped, qt.Equals, *nn[0].State)
c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue)
c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse)
cc.assertCalls("New")
c.Assert(nn[2].State, qt.IsNotNil)
c.Assert(*nn[2].State, qt.Equals, ipn.NoState)
c.Assert(nn[3].Prefs, qt.IsNotNil) // emptyPrefs
c.Assert(nn[3].Prefs.LoggedOut(), qt.IsTrue)
c.Assert(nn[3].Prefs.WantRunning(), qt.IsFalse)
c.Assert(nn[4].State, qt.IsNotNil)
c.Assert(*nn[4].State, qt.Equals, ipn.NeedsLogin)
c.Assert(b.State(), qt.Equals, ipn.NeedsLogin)
c.Assert(ipn.Stopped, qt.Equals, b.State())
c.Assert(store.sawWrite(), qt.IsTrue)
}
// A second logout should be a no-op as we are in the NeedsLogin state.
t.Logf("\n\nLogout2")
// Let's make the logout succeed.
t.Logf("\n\nLogout (async) - succeed")
notifies.expect(3)
cc.send(nil, "", false, nil)
{
previousCC.assertShutdown(true)
nn := notifies.drain(3)
cc.assertCalls("New")
c.Assert(nn[0].State, qt.IsNotNil)
c.Assert(*nn[0].State, qt.Equals, ipn.NoState)
c.Assert(nn[1].Prefs, qt.IsNotNil) // emptyPrefs
c.Assert(nn[2].State, qt.IsNotNil)
c.Assert(*nn[2].State, qt.Equals, ipn.NeedsLogin)
c.Assert(b.Prefs().LoggedOut(), qt.IsFalse)
c.Assert(b.Prefs().WantRunning(), qt.IsFalse)
c.Assert(b.State(), qt.Equals, ipn.NeedsLogin)
}
// A second logout should reset all prefs.
t.Logf("\n\nLogout2 (async)")
notifies.expect(1)
b.Logout()
{
nn := notifies.drain(1)
c.Assert(nn[0].Prefs, qt.IsNotNil) // emptyPrefs
// BUG: the backend has already called StartLogout, and we're
// still logged out. So it shouldn't call it again.
cc.assertCalls("StartLogout")
cc.assertCalls()
c.Assert(b.Prefs().LoggedOut(), qt.IsTrue)
c.Assert(b.Prefs().WantRunning(), qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
// Let's acknowledge the second logout too.
t.Logf("\n\nLogout2 (async) - succeed")
notifies.expect(0)
b.Logout(context.Background())
cc.send(nil, "", false, nil)
{
notifies.drain(0)
cc.assertCalls()
@@ -619,11 +646,24 @@ func TestStateMachine(t *testing.T) {
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
// A third logout should also be a no-op as the cc should be in
// AuthCantContinue state.
t.Logf("\n\nLogout3")
notifies.expect(3)
b.Logout(context.Background())
// Try the synchronous logout feature.
t.Logf("\n\nLogout3 (sync)")
notifies.expect(0)
b.LogoutSync(context.Background())
// NOTE: This returns as soon as cc.Logout() returns, which is okay
// I guess, since that's supposed to be synchronous.
{
notifies.drain(0)
cc.assertCalls("Logout")
c.Assert(b.Prefs().LoggedOut(), qt.IsTrue)
c.Assert(b.Prefs().WantRunning(), qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
// Generate the third logout event.
t.Logf("\n\nLogout3 (sync) - succeed")
notifies.expect(0)
cc.send(nil, "", false, nil)
{
notifies.drain(0)
cc.assertCalls()

View File

@@ -12,7 +12,6 @@ import (
"io"
"log"
"net/netip"
"slices"
"sort"
"strings"
"sync"
@@ -70,17 +69,8 @@ type Status struct {
// trailing periods, and without any "_acme-challenge." prefix.
CertDomains []string
// Peer is the state of each peer, keyed by each peer's current public key.
Peer map[key.NodePublic]*PeerStatus
// User contains profile information about UserIDs referenced by
// PeerStatus.UserID, PeerStatus.AltSharerUserID, etc.
User map[tailcfg.UserID]tailcfg.UserProfile
// ClientVersion, when non-nil, contains information about the latest
// version of the Tailscale client that's available. Depending on
// the platform and client settings, it may not be available.
ClientVersion *tailcfg.ClientVersion
}
// TKAKey describes a key trusted by network lock.
@@ -198,7 +188,6 @@ type PeerStatusLite struct {
NodeKey key.NodePublic
}
// PeerStatus describes a peer node and its current state.
type PeerStatus struct {
ID tailcfg.StableNodeID
PublicKey key.NodePublic
@@ -292,9 +281,6 @@ type PeerStatus struct {
Location *tailcfg.Location `json:",omitempty"`
}
// StatusBuilder is a request to construct a Status. A new StatusBuilder is
// passed to various subsystems which then call methods on it to populate state.
// Call its Status method to return the final constructed Status.
type StatusBuilder struct {
WantPeers bool // whether caller wants peers
@@ -315,8 +301,6 @@ func (sb *StatusBuilder) MutateStatus(f func(*Status)) {
f(&sb.st)
}
// Status returns the status that has been built up so far from previous
// calls to MutateStatus, MutateSelfStatus, AddPeer, etc.
func (sb *StatusBuilder) Status() *Status {
sb.mu.Lock()
defer sb.mu.Unlock()
@@ -681,29 +665,23 @@ func (pr *PingResult) ToPingResponse(pingType tailcfg.PingType) *tailcfg.PingRes
}
}
// SortPeers sorts peers by either their DNS name, hostname, Tailscale IP,
// or ultimately their current public key.
func SortPeers(peers []*PeerStatus) {
slices.SortStableFunc(peers, (*PeerStatus).compare)
sort.Slice(peers, func(i, j int) bool { return sortKey(peers[i]) < sortKey(peers[j]) })
}
func (a *PeerStatus) compare(b *PeerStatus) int {
if a.DNSName != "" || b.DNSName != "" {
if v := strings.Compare(a.DNSName, b.DNSName); v != 0 {
return v
}
func sortKey(ps *PeerStatus) string {
if ps.DNSName != "" {
return ps.DNSName
}
if a.HostName != "" || b.HostName != "" {
if v := strings.Compare(a.HostName, b.HostName); v != 0 {
return v
}
if ps.HostName != "" {
return ps.HostName
}
if len(a.TailscaleIPs) > 0 && len(b.TailscaleIPs) > 0 {
if v := a.TailscaleIPs[0].Compare(b.TailscaleIPs[0]); v != 0 {
return v
}
// TODO(bradfitz): add PeerStatus.Less and avoid these allocs in a Less func.
if len(ps.TailscaleIPs) > 0 {
return ps.TailscaleIPs[0].String()
}
return a.PublicKey.Compare(b.PublicKey)
raw := ps.PublicKey.Raw32()
return string(raw[:])
}
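A small standalone sketch (mine, not from this change) showing the fallback order the sort key encodes: DNS name first, then hostname, then the first Tailscale IP, then the raw public key. The peer values and the ipnstate import path are assumptions.
package main // sketch of ipnstate.SortPeers key precedence; peer data is made up
import (
	"fmt"
	"net/netip"

	"tailscale.com/ipn/ipnstate"
)
func main() {
	peers := []*ipnstate.PeerStatus{
		{HostName: "bravo"},        // no DNSName: sorts by HostName
		{DNSName: "alpha.ts.net."}, // DNSName wins when present
		{TailscaleIPs: []netip.Addr{netip.MustParseAddr("100.64.0.7")}}, // no names: sorts by first IP
	}
	ipnstate.SortPeers(peers)
	for _, p := range peers {
		fmt.Println(p.DNSName, p.HostName, p.TailscaleIPs)
	}
	// Note: because the keys are plain strings, the IP-only peer sorts
	// first here ("100.64.0.7" < "alpha.ts.net." lexicographically).
}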
// DebugDERPRegionReport is the result of a "tailscale debug derp" command,

View File

@@ -7,7 +7,7 @@ package localapi
import (
"bytes"
"context"
"crypto/sha256"
"crypto/rand"
"encoding/hex"
"encoding/json"
"errors"
@@ -49,7 +49,6 @@ import (
"tailscale.com/util/httpm"
"tailscale.com/util/mak"
"tailscale.com/util/osdiag"
"tailscale.com/util/rands"
"tailscale.com/version"
)
@@ -99,6 +98,7 @@ var handler = map[string]localAPIHandler{
"set-expiry-sooner": (*Handler).serveSetExpirySooner,
"start": (*Handler).serveStart,
"status": (*Handler).serveStatus,
"stream-serve": (*Handler).serveStreamServe,
"tka/init": (*Handler).serveTKAInit,
"tka/log": (*Handler).serveTKALog,
"tka/modify": (*Handler).serveTKAModify,
@@ -118,6 +118,12 @@ var handler = map[string]localAPIHandler{
"query-feature": (*Handler).serveQueryFeature,
}
func randHex(n int) string {
b := make([]byte, n)
rand.Read(b)
return hex.EncodeToString(b)
}
var (
// The clientmetrics package is stateful, but we want to expose a simple
// imperative API to local clients, so we need to keep track of
@@ -312,7 +318,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) {
defer h.b.TryFlushLogs() // kick off upload after bugreport's done logging
logMarker := func() string {
return fmt.Sprintf("BUG-%v-%v-%v", h.backendLogID, h.clock.Now().UTC().Format("20060102150405Z"), rands.HexString(16))
return fmt.Sprintf("BUG-%v-%v-%v", h.backendLogID, h.clock.Now().UTC().Format("20060102150405Z"), randHex(8))
}
if envknob.NoLogsNoSupport() {
logMarker = func() string { return "BUG-NO-LOGS-NO-SUPPORT-this-node-has-had-its-logging-disabled" }
@@ -557,13 +563,6 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) {
err = h.b.DebugBreakTCPConns()
case "break-derp-conns":
err = h.b.DebugBreakDERPConns()
case "control-knobs":
k := h.b.ControlKnobs()
w.Header().Set("Content-Type", "application/json")
err = json.NewEncoder(w).Encode(k.AsDebugJSON())
if err == nil {
return
}
case "":
err = fmt.Errorf("missing parameter 'action'")
default:
@@ -703,7 +702,7 @@ func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) {
done := make(chan bool, 1)
var c *portmapper.Client
c = portmapper.NewClient(logger.WithPrefix(logf, "portmapper: "), h.netMon, debugKnobs, h.b.ControlKnobs(), func() {
c = portmapper.NewClient(logger.WithPrefix(logf, "portmapper: "), h.netMon, debugKnobs, func() {
logf("portmapping changed.")
logf("have mapping: %v", c.HaveMapping())
@@ -839,17 +838,9 @@ func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) {
http.Error(w, "serve config denied", http.StatusForbidden)
return
}
config := h.b.ServeConfig()
bts, err := json.Marshal(config)
if err != nil {
http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError)
return
}
sum := sha256.Sum256(bts)
etag := hex.EncodeToString(sum[:])
w.Header().Set("Etag", etag)
w.Header().Set("Content-Type", "application/json")
w.Write(bts)
config := h.b.ServeConfig()
json.NewEncoder(w).Encode(config)
case "POST":
if !h.PermitWrite {
http.Error(w, "serve config denied", http.StatusForbidden)
@@ -860,12 +851,7 @@ func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) {
writeErrorJSON(w, fmt.Errorf("decoding config: %w", err))
return
}
etag := r.Header.Get("If-Match")
if err := h.b.SetServeConfig(configIn, etag); err != nil {
if errors.Is(err, ipnlocal.ErrETagMismatch) {
http.Error(w, err.Error(), http.StatusPreconditionFailed)
return
}
if err := h.b.SetServeConfig(configIn); err != nil {
writeErrorJSON(w, fmt.Errorf("updating config: %w", err))
return
}
@@ -875,6 +861,31 @@ func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) {
}
}
// serveStreamServe handles foreground serve and funnel streams. This is
// currently in development per https://github.com/tailscale/tailscale/issues/8489
func (h *Handler) serveStreamServe(w http.ResponseWriter, r *http.Request) {
if !h.PermitWrite {
// Write permission required because we modify the ServeConfig.
http.Error(w, "serve stream denied", http.StatusForbidden)
return
}
if r.Method != "POST" {
http.Error(w, "POST required", http.StatusMethodNotAllowed)
return
}
var req ipn.ServeStreamRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
writeErrorJSON(w, fmt.Errorf("decoding HostPort: %w", err))
return
}
w.Header().Set("Content-Type", "application/json")
if err := h.b.StreamServe(r.Context(), w, req); err != nil {
writeErrorJSON(w, fmt.Errorf("streaming serve: %w", err))
return
}
w.WriteHeader(http.StatusOK)
}
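As a rough client-side illustration (my sketch, not part of this change), a local API consumer could POST an ipn.ServeStreamRequest to this handler and then decode the newline-delimited ipn.FunnelRequestLog entries from the response body as they arrive; the local API path and transport setup are assumptions and are omitted here.
package main // sketch: decoding the newline-delimited FunnelRequestLog stream written by StreamServe
import (
	"bufio"
	"encoding/json"
	"fmt"
	"io"
	"os"

	"tailscale.com/ipn"
)
// readFunnelLogs decodes one ipn.FunnelRequestLog per line until the
// stream ends, printing a short summary of each connection.
func readFunnelLogs(body io.Reader) error {
	sc := bufio.NewScanner(body)
	for sc.Scan() {
		var entry ipn.FunnelRequestLog
		if err := json.Unmarshal(sc.Bytes(), &entry); err != nil {
			return fmt.Errorf("decoding log line: %w", err)
		}
		fmt.Printf("%s %v node=%q user=%q\n",
			entry.Time.Format("15:04:05"), entry.SrcAddr, entry.NodeName, entry.UserLoginName)
	}
	return sc.Err()
}
func main() {
	// Stand-in for the response body of the stream-serve request.
	if err := readFunnelLogs(os.Stdin); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}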
func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) {
if !h.PermitRead {
http.Error(w, "IP forwarding check access denied", http.StatusForbidden)
@@ -1045,7 +1056,7 @@ func (h *Handler) serveLogout(w http.ResponseWriter, r *http.Request) {
http.Error(w, "want POST", 400)
return
}
err := h.b.Logout(r.Context())
err := h.b.LogoutSync(r.Context())
if err == nil {
w.WriteHeader(http.StatusNoContent)
return

View File

@@ -196,10 +196,6 @@ type Prefs struct {
// and CLI.
ProfileName string `json:",omitempty"`
// AutoUpdate sets the auto-update preferences for the node agent. See
// AutoUpdatePrefs docs for more details.
AutoUpdate AutoUpdatePrefs
// The Persist field is named 'Config' in the file for backward
// compatibility with earlier versions.
// TODO(apenwarr): We should move this out of here, it's not a pref.
@@ -208,18 +204,6 @@ type Prefs struct {
Persist *persist.Persist `json:"Config"`
}
// AutoUpdatePrefs are the auto update settings for the node agent.
type AutoUpdatePrefs struct {
// Check specifies whether background checks for updates are enabled. When
// enabled, tailscaled will periodically check for available updates and
// notify the user about them.
Check bool
// Apply specifies whether background auto-updates are enabled. When
// enabled, tailscaled will apply available updates in the background.
// Check must also be set when Apply is set.
Apply bool
}
// MaskedPrefs is a Prefs with an associated bitmask of which fields are set.
type MaskedPrefs struct {
Prefs
@@ -245,7 +229,6 @@ type MaskedPrefs struct {
NetfilterModeSet bool `json:",omitempty"`
OperatorUserSet bool `json:",omitempty"`
ProfileNameSet bool `json:",omitempty"`
AutoUpdateSet bool `json:",omitempty"`
}
// ApplyEdits mutates p, assigning fields from m.Prefs for each MaskedPrefs
@@ -301,12 +284,6 @@ func (m *MaskedPrefs) Pretty() string {
if v.Type().Elem().Kind() == reflect.String {
return "%s=%q"
}
case reflect.Struct:
return "%s=%+v"
case reflect.Pointer:
if v.Type().Elem().Kind() == reflect.Struct {
return "%s=%+v"
}
}
return "%s=%v"
}
@@ -383,7 +360,6 @@ func (p *Prefs) pretty(goos string) string {
if p.OperatorUser != "" {
fmt.Fprintf(&sb, "op=%q ", p.OperatorUser)
}
sb.WriteString(p.AutoUpdate.Pretty())
if p.Persist != nil {
sb.WriteString(p.Persist.Pretty())
} else {
@@ -438,18 +414,7 @@ func (p *Prefs) Equals(p2 *Prefs) bool {
compareIPNets(p.AdvertiseRoutes, p2.AdvertiseRoutes) &&
compareStrings(p.AdvertiseTags, p2.AdvertiseTags) &&
p.Persist.Equals(p2.Persist) &&
p.ProfileName == p2.ProfileName &&
p.AutoUpdate == p2.AutoUpdate
}
func (au AutoUpdatePrefs) Pretty() string {
if au.Apply {
return "update=on "
}
if au.Check {
return "update=check "
}
return "update=off "
p.ProfileName == p2.ProfileName
}
func compareIPNets(a, b []netip.Prefix) bool {
@@ -494,10 +459,6 @@ func NewPrefs() *Prefs {
CorpDNS: true,
WantRunning: false,
NetfilterMode: preftype.NetfilterOn,
AutoUpdate: AutoUpdatePrefs{
Check: true,
Apply: false,
},
}
}

View File

@@ -56,7 +56,6 @@ func TestPrefsEqual(t *testing.T) {
"NetfilterMode",
"OperatorUser",
"ProfileName",
"AutoUpdate",
"Persist",
}
if have := fieldsOf(reflect.TypeOf(Prefs{})); !reflect.DeepEqual(have, prefsHandles) {
@@ -289,21 +288,6 @@ func TestPrefsEqual(t *testing.T) {
&Prefs{ProfileName: "home"},
false,
},
{
&Prefs{AutoUpdate: AutoUpdatePrefs{Check: true, Apply: false}},
&Prefs{AutoUpdate: AutoUpdatePrefs{Check: false, Apply: false}},
false,
},
{
&Prefs{AutoUpdate: AutoUpdatePrefs{Check: true, Apply: true}},
&Prefs{AutoUpdate: AutoUpdatePrefs{Check: true, Apply: false}},
false,
},
{
&Prefs{AutoUpdate: AutoUpdatePrefs{Check: true, Apply: false}},
&Prefs{AutoUpdate: AutoUpdatePrefs{Check: true, Apply: false}},
true,
},
}
for i, tt := range tests {
got := tt.a.Equals(tt.b)
@@ -388,22 +372,22 @@ func TestPrefsPretty(t *testing.T) {
{
Prefs{},
"linux",
"Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist=nil}",
"Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off Persist=nil}",
},
{
Prefs{},
"windows",
"Prefs{ra=false mesh=false dns=false want=false update=off Persist=nil}",
"Prefs{ra=false mesh=false dns=false want=false Persist=nil}",
},
{
Prefs{ShieldsUp: true},
"windows",
"Prefs{ra=false mesh=false dns=false want=false shields=true update=off Persist=nil}",
"Prefs{ra=false mesh=false dns=false want=false shields=true Persist=nil}",
},
{
Prefs{AllowSingleHosts: true},
"windows",
"Prefs{ra=false dns=false want=false update=off Persist=nil}",
"Prefs{ra=false dns=false want=false Persist=nil}",
},
{
Prefs{
@@ -411,7 +395,7 @@ func TestPrefsPretty(t *testing.T) {
AllowSingleHosts: true,
},
"windows",
"Prefs{ra=false dns=false want=false notepad=true update=off Persist=nil}",
"Prefs{ra=false dns=false want=false notepad=true Persist=nil}",
},
{
Prefs{
@@ -420,7 +404,7 @@ func TestPrefsPretty(t *testing.T) {
ForceDaemon: true, // server mode
},
"windows",
"Prefs{ra=false dns=false want=true server=true update=off Persist=nil}",
"Prefs{ra=false dns=false want=true server=true Persist=nil}",
},
{
Prefs{
@@ -430,14 +414,14 @@ func TestPrefsPretty(t *testing.T) {
AdvertiseTags: []string{"tag:foo", "tag:bar"},
},
"darwin",
`Prefs{ra=false dns=false want=true tags=tag:foo,tag:bar url="http://localhost:1234" update=off Persist=nil}`,
`Prefs{ra=false dns=false want=true tags=tag:foo,tag:bar url="http://localhost:1234" Persist=nil}`,
},
{
Prefs{
Persist: &persist.Persist{},
},
"linux",
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist{lm=, o=, n= u=""}}`,
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off Persist{lm=, o=, n= u=""}}`,
},
{
Prefs{
@@ -446,21 +430,21 @@ func TestPrefsPretty(t *testing.T) {
},
},
"linux",
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist{lm=, o=, n=[B1VKl] u=""}}`,
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off Persist{lm=, o=, n=[B1VKl] u=""}}`,
},
{
Prefs{
ExitNodeIP: netip.MustParseAddr("1.2.3.4"),
},
"linux",
`Prefs{ra=false mesh=false dns=false want=false exit=1.2.3.4 lan=false routes=[] nf=off update=off Persist=nil}`,
`Prefs{ra=false mesh=false dns=false want=false exit=1.2.3.4 lan=false routes=[] nf=off Persist=nil}`,
},
{
Prefs{
ExitNodeID: tailcfg.StableNodeID("myNodeABC"),
},
"linux",
`Prefs{ra=false mesh=false dns=false want=false exit=myNodeABC lan=false routes=[] nf=off update=off Persist=nil}`,
`Prefs{ra=false mesh=false dns=false want=false exit=myNodeABC lan=false routes=[] nf=off Persist=nil}`,
},
{
Prefs{
@@ -468,41 +452,21 @@ func TestPrefsPretty(t *testing.T) {
ExitNodeAllowLANAccess: true,
},
"linux",
`Prefs{ra=false mesh=false dns=false want=false exit=myNodeABC lan=true routes=[] nf=off update=off Persist=nil}`,
`Prefs{ra=false mesh=false dns=false want=false exit=myNodeABC lan=true routes=[] nf=off Persist=nil}`,
},
{
Prefs{
ExitNodeAllowLANAccess: true,
},
"linux",
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist=nil}`,
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off Persist=nil}`,
},
{
Prefs{
Hostname: "foo",
},
"linux",
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off host="foo" update=off Persist=nil}`,
},
{
Prefs{
AutoUpdate: AutoUpdatePrefs{
Check: true,
Apply: false,
},
},
"linux",
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=check Persist=nil}`,
},
{
Prefs{
AutoUpdate: AutoUpdatePrefs{
Check: true,
Apply: true,
},
},
"linux",
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=on Persist=nil}`,
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off host="foo" Persist=nil}`,
},
}
for i, tt := range tests {

View File

@@ -12,6 +12,7 @@ import (
"slices"
"strconv"
"strings"
"time"
"tailscale.com/tailcfg"
)
@@ -36,14 +37,6 @@ type ServeConfig struct {
// AllowFunnel is the set of SNI:port values for which funnel
// traffic is allowed, from trusted ingress peers.
AllowFunnel map[HostPort]bool `json:",omitempty"`
// Foreground is a map of an IPN Bus session ID to an alternate foreground
// serve config that's valid for the life of that WatchIPNBus session ID.
// This allows the config to specify ephemeral configs that are
// used in the CLI's foreground mode to ensure ungraceful shutdowns
// of either the client or the LocalBackend do not expose ports
// that users are not aware of.
Foreground map[string]*ServeConfig `json:",omitempty"`
}
// HostPort is an SNI name and port number, joined by a colon.
@@ -85,6 +78,46 @@ type FunnelConn struct {
Src netip.AddrPort
}
// ServeStreamRequest defines the JSON request body
// for the serve stream endpoint
type ServeStreamRequest struct {
// HostPort is the DNS and port of the tailscale
// URL.
HostPort HostPort `json:",omitempty"`
// Source is the user's serve source
// as defined in the `tailscale serve`
// command such as http://127.0.0.1:3000
Source string `json:",omitempty"`
// MountPoint is the path prefix for
// the given HostPort.
MountPoint string `json:",omitempty"`
// Funnel indicates whether the request
// is a serve request or a funnel one.
Funnel bool `json:",omitempty"`
}
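For reference, a hedged sketch (not from the diff) of the JSON body such a request marshals to; the hostname and backend address are invented.
package main // sketch: the JSON wire form of an ipn.ServeStreamRequest; values are examples only
import (
	"encoding/json"
	"fmt"

	"tailscale.com/ipn"
)
func main() {
	req := ipn.ServeStreamRequest{
		HostPort:   "example.ts.net:443",
		Source:     "http://127.0.0.1:3000",
		MountPoint: "/",
		Funnel:     true,
	}
	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
	// Prints roughly:
	// {
	//   "HostPort": "example.ts.net:443",
	//   "Source": "http://127.0.0.1:3000",
	//   "MountPoint": "/",
	//   "Funnel": true
	// }
}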
// FunnelRequestLog is the JSON type written out to io.Writers
// watching funnel connections via ipnlocal.StreamServe.
//
// This structure is in development and subject to change.
type FunnelRequestLog struct {
Time time.Time `json:",omitempty"` // time of request forwarding
// SrcAddr is the address that initiated the Funnel request.
SrcAddr netip.AddrPort `json:",omitempty"`
// The following fields are only populated if the connection
// initiated from another node on the client's tailnet.
NodeName string `json:",omitempty"` // src node MagicDNS name
NodeTags []string `json:",omitempty"` // src node tags
UserLoginName string `json:",omitempty"` // src node's owner login (if not tagged)
UserDisplayName string `json:",omitempty"` // src node's owner name (if not tagged)
}
// WebServerConfig describes a web server's configuration.
type WebServerConfig struct {
Handlers map[string]*HTTPHandler // mountPoint => handler
@@ -299,102 +332,3 @@ func CheckFunnelPort(wantedPort uint16, nodeAttrs []string) error {
}
return deny(portsStr)
}
// RangeOverTCPs ranges over both background and foreground TCPs.
// If the returned bool from the given f is false, then this function stops
// iterating immediately and does not check other foreground configs.
func (v ServeConfigView) RangeOverTCPs(f func(port uint16, _ TCPPortHandlerView) bool) {
parentCont := true
v.TCP().Range(func(k uint16, v TCPPortHandlerView) (cont bool) {
parentCont = f(k, v)
return parentCont
})
v.Foreground().Range(func(k string, v ServeConfigView) (cont bool) {
if !parentCont {
return false
}
v.TCP().Range(func(k uint16, v TCPPortHandlerView) (cont bool) {
parentCont = f(k, v)
return parentCont
})
return parentCont
})
}
// RangeOverWebs ranges over both background and foreground Webs.
// If the returned bool from the given f is false, then this function stops
// iterating immediately and does not check other foreground configs.
func (v ServeConfigView) RangeOverWebs(f func(_ HostPort, conf WebServerConfigView) bool) {
parentCont := true
v.Web().Range(func(k HostPort, v WebServerConfigView) (cont bool) {
parentCont = f(k, v)
return parentCont
})
v.Foreground().Range(func(k string, v ServeConfigView) (cont bool) {
if !parentCont {
return false
}
v.Web().Range(func(k HostPort, v WebServerConfigView) (cont bool) {
parentCont = f(k, v)
return parentCont
})
return parentCont
})
}
// FindTCP returns the first TCP handler that matches the given port. It
// prefers a foreground match first, followed by a background search if none
// exists.
func (v ServeConfigView) FindTCP(port uint16) (res TCPPortHandlerView, ok bool) {
v.Foreground().Range(func(_ string, v ServeConfigView) (cont bool) {
res, ok = v.TCP().GetOk(port)
return !ok
})
if ok {
return res, ok
}
return v.TCP().GetOk(port)
}
// FindWeb returns the first web config that matches the given HostPort. It
// prefers a foreground match first, followed by a background search if none
// exists.
func (v ServeConfigView) FindWeb(hp HostPort) (res WebServerConfigView, ok bool) {
v.Foreground().Range(func(_ string, v ServeConfigView) (cont bool) {
res, ok = v.Web().GetOk(hp)
return !ok
})
if ok {
return res, ok
}
return v.Web().GetOk(hp)
}
// HasAllowFunnel returns whether this config has at least one AllowFunnel
// set in the background or foreground configs.
func (v ServeConfigView) HasAllowFunnel() bool {
return v.AllowFunnel().Len() > 0 || func() bool {
var exists bool
v.Foreground().Range(func(k string, v ServeConfigView) (cont bool) {
exists = v.AllowFunnel().Len() > 0
return !exists
})
return exists
}()
}
// HasFunnelForTarget reports whether target exists in either the background AllowFunnel
// or any of the foreground configs.
func (v ServeConfigView) HasFunnelForTarget(target HostPort) bool {
if v.AllowFunnel().Get(target) {
return true
}
var exists bool
v.Foreground().Range(func(_ string, v ServeConfigView) (cont bool) {
if exists = v.AllowFunnel().Get(target); exists {
return false
}
return true
})
return exists
}

View File

@@ -19,7 +19,6 @@ import (
// Store is an ipn.StateStore that uses a Kubernetes Secret for persistence.
type Store struct {
client *kube.Client
canPatch bool
secretName string
}
@@ -29,13 +28,8 @@ func New(_ logger.Logf, secretName string) (*Store, error) {
if err != nil {
return nil, err
}
canPatch, err := c.CheckSecretPermissions(context.Background(), secretName)
if err != nil {
return nil, err
}
return &Store{
client: c,
canPatch: canPatch,
secretName: secretName,
}, nil
}
@@ -99,19 +93,6 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error {
}
return err
}
if s.canPatch {
m := []kube.JSONPatch{
{
Op: "add",
Path: "/data/" + sanitizeKey(id),
Value: bs,
},
}
if err := s.client.JSONPatchSecret(ctx, s.secretName, m); err != nil {
return err
}
return nil
}
secret.Data[sanitizeKey(id)] = bs
if err := s.client.UpdateSecret(ctx, secret); err != nil {
return err

Some files were not shown because too many files have changed in this diff.