Compare commits
70 Commits
v1.66.0
...
jonathan/s
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5be738b118 | ||
|
|
01847e0123 | ||
|
|
42cfbf427c | ||
|
|
bcb55fdeb6 | ||
|
|
c2a4719e9e | ||
|
|
36d0ac6f8e | ||
|
|
0a5bd63d32 | ||
|
|
1ec0273473 | ||
|
|
f227083539 | ||
|
|
7e357e1636 | ||
|
|
0380cbc90d | ||
|
|
32120932a5 | ||
|
|
776a05223b | ||
|
|
1ea100e2e5 | ||
|
|
2d2b62c400 | ||
|
|
909a292a8d | ||
|
|
0acb61fbf8 | ||
|
|
dd77111462 | ||
|
|
08a9551a73 | ||
|
|
f1d10c12ac | ||
|
|
5ad0dad15e | ||
|
|
d0d33f257f | ||
|
|
8e4a29433f | ||
|
|
87ee559b6f | ||
|
|
9a64c06a20 | ||
|
|
4214e5f71b | ||
|
|
538c2e8f7c | ||
|
|
3c9be07214 | ||
|
|
72f0f53ed0 | ||
|
|
9351eec3e1 | ||
|
|
c9179bc261 | ||
|
|
6db1219185 | ||
|
|
4f4f317174 | ||
|
|
964282d34f | ||
|
|
1384c24e41 | ||
|
|
47b3476eb7 | ||
|
|
c56e0c4934 | ||
|
|
adb7a86559 | ||
|
|
8d1249550a | ||
|
|
6831a29f8b | ||
|
|
e5f67f90a2 | ||
|
|
59848fe14b | ||
|
|
87f00d76c4 | ||
|
|
76c30e014d | ||
|
|
8feb4ff5d2 | ||
|
|
359ef61263 | ||
|
|
89947606b2 | ||
|
|
b094e8c925 | ||
|
|
e3dec086e6 | ||
|
|
7f83f9fc83 | ||
|
|
6877d44965 | ||
|
|
1f51bb6891 | ||
|
|
60266be298 | ||
|
|
c6d42b1093 | ||
|
|
7ef2f72135 | ||
|
|
8aa5c3534d | ||
|
|
7b3e30f391 | ||
|
|
79b2d425cf | ||
|
|
fc1ae97e10 | ||
|
|
486a423716 | ||
|
|
7209c4f91e | ||
|
|
d86d1e7601 | ||
|
|
e070af7414 | ||
|
|
5708fc0639 | ||
|
|
25e32cc3ae | ||
|
|
21abb7f402 | ||
|
|
ac638f32c0 | ||
|
|
b5dbf155b1 | ||
|
|
8f7f9ac17e | ||
|
|
7901925ad3 |
5
Makefile
5
Makefile
@@ -115,10 +115,7 @@ sshintegrationtest: ## Run the SSH integration tests in various Docker container
|
||||
echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \
|
||||
echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \
|
||||
echo "Testing on ubuntu:mantic" && docker build --build-arg="BASE=ubuntu:mantic" -t ssh-ubuntu-mantic ssh/tailssh/testcontainers && \
|
||||
echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \
|
||||
echo "Testing on fedora:38" && docker build --build-arg="BASE=dokken/fedora-38" -t ssh-fedora-38 ssh/tailssh/testcontainers && \
|
||||
echo "Testing on fedora:39" && docker build --build-arg="BASE=dokken/fedora-39" -t ssh-fedora-39 ssh/tailssh/testcontainers && \
|
||||
echo "Testing on fedora:40" && docker build --build-arg="BASE=dokken/fedora-40" -t ssh-fedora-40 ssh/tailssh/testcontainers
|
||||
echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers
|
||||
|
||||
help: ## Show this help
|
||||
@echo "\nSpecify a command. The choices are:\n"
|
||||
|
||||
@@ -1 +1 @@
|
||||
1.65.0
|
||||
1.67.0
|
||||
|
||||
@@ -778,6 +778,17 @@ func (lc *LocalClient) SetDNS(ctx context.Context, name, value string) error {
|
||||
//
|
||||
// The ctx is only used for the duration of the call, not the lifetime of the net.Conn.
|
||||
func (lc *LocalClient) DialTCP(ctx context.Context, host string, port uint16) (net.Conn, error) {
|
||||
return lc.UserDial(ctx, "tcp", host, port)
|
||||
}
|
||||
|
||||
// UserDial connects to the host's port via Tailscale for the given network.
|
||||
//
|
||||
// The host may be a base DNS name (resolved from the netmap inside tailscaled),
|
||||
// a FQDN, or an IP address.
|
||||
//
|
||||
// The ctx is only used for the duration of the call, not the lifetime of the
|
||||
// net.Conn.
|
||||
func (lc *LocalClient) UserDial(ctx context.Context, network, host string, port uint16) (net.Conn, error) {
|
||||
connCh := make(chan net.Conn, 1)
|
||||
trace := httptrace.ClientTrace{
|
||||
GotConn: func(info httptrace.GotConnInfo) {
|
||||
@@ -790,10 +801,11 @@ func (lc *LocalClient) DialTCP(ctx context.Context, host string, port uint16) (n
|
||||
return nil, err
|
||||
}
|
||||
req.Header = http.Header{
|
||||
"Upgrade": []string{"ts-dial"},
|
||||
"Connection": []string{"upgrade"},
|
||||
"Dial-Host": []string{host},
|
||||
"Dial-Port": []string{fmt.Sprint(port)},
|
||||
"Upgrade": []string{"ts-dial"},
|
||||
"Connection": []string{"upgrade"},
|
||||
"Dial-Host": []string{host},
|
||||
"Dial-Port": []string{fmt.Sprint(port)},
|
||||
"Dial-Network": []string{network},
|
||||
}
|
||||
res, err := lc.DoLocalRequest(req)
|
||||
if err != nil {
|
||||
|
||||
@@ -35,6 +35,7 @@ func TestDeps(t *testing.T) {
|
||||
BadDeps: map[string]string{
|
||||
// Make sure we don't again accidentally bring in a dependency on
|
||||
// drive or its transitive dependencies
|
||||
"testing": "do not use testing package in production code",
|
||||
"tailscale.com/drive/driveimpl": "https://github.com/tailscale/tailscale/pull/10631",
|
||||
"github.com/studio-b12/gowebdav": "https://github.com/tailscale/tailscale/pull/10631",
|
||||
},
|
||||
|
||||
@@ -653,6 +653,9 @@ func (up *Updater) updateAlpineLike() (err error) {
|
||||
return fmt.Errorf(`failed to parse latest version from "apk info tailscale": %w`, err)
|
||||
}
|
||||
if !up.confirm(ver) {
|
||||
if err := checkOutdatedAlpineRepo(up.Logf, ver, up.Track); err != nil {
|
||||
up.Logf("failed to check whether Alpine release is outdated: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -690,6 +693,37 @@ func parseAlpinePackageVersion(out []byte) (string, error) {
|
||||
return "", errors.New("tailscale version not found in output")
|
||||
}
|
||||
|
||||
var apkRepoVersionRE = regexp.MustCompile(`v[0-9]+\.[0-9]+`)
|
||||
|
||||
func checkOutdatedAlpineRepo(logf logger.Logf, apkVer, track string) error {
|
||||
latest, err := LatestTailscaleVersion(track)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if latest == apkVer {
|
||||
// Actually on latest release.
|
||||
return nil
|
||||
}
|
||||
f, err := os.Open("/etc/apk/repositories")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
// Read the first repo line. Typically, there are multiple repos that all
|
||||
// contain the same version in the path, like:
|
||||
// https://dl-cdn.alpinelinux.org/alpine/v3.20/main
|
||||
// https://dl-cdn.alpinelinux.org/alpine/v3.20/community
|
||||
s := bufio.NewScanner(f)
|
||||
if !s.Scan() {
|
||||
return s.Err()
|
||||
}
|
||||
alpineVer := apkRepoVersionRE.FindString(s.Text())
|
||||
if alpineVer != "" {
|
||||
logf("The latest Tailscale release for Linux is %q, but your apk repository only provides %q.\nYour Alpine version is %q, you may need to upgrade the system to get the latest Tailscale version: https://wiki.alpinelinux.org/wiki/Upgrading_Alpine", latest, apkVer, alpineVer)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (up *Updater) updateMacSys() error {
|
||||
return errors.New("NOTREACHED: On MacSys builds, `tailscale update` is handled in Swift to launch the GUI updater")
|
||||
}
|
||||
|
||||
@@ -138,9 +138,9 @@ func initKubeClient(root string) {
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating kube client: %v", err)
|
||||
}
|
||||
if root != "/" {
|
||||
// If we are running in a test, we need to set the URL to the
|
||||
// httptest server.
|
||||
if (root != "/") || os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" {
|
||||
// Derive the API server address from the environment variables
|
||||
// Used to set http server in tests, or optionally enabled by flag
|
||||
kc.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,8 +52,10 @@
|
||||
// ${TS_CERT_DOMAIN}, it will be replaced with the value of the available FQDN.
|
||||
// It cannot be used in conjunction with TS_DEST_IP. The file is watched for changes,
|
||||
// and will be re-applied when it changes.
|
||||
// - EXPERIMENTAL_TS_CONFIGFILE_PATH: if specified, a path to tailscaled
|
||||
// config. If this is set, TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY,
|
||||
// - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a
|
||||
// directory that containers tailscaled config in file. The config file needs to be
|
||||
// named cap-<current-tailscaled-cap>.hujson. If this is set, TS_HOSTNAME,
|
||||
// TS_EXTRA_ARGS, TS_AUTHKEY,
|
||||
// TS_ROUTES, TS_ACCEPT_DNS env vars must not be set. If this is set,
|
||||
// containerboot only runs `tailscaled --config <path-to-this-configfile>`
|
||||
// and not `tailscale up` or `tailscale set`.
|
||||
@@ -92,6 +94,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
@@ -107,6 +110,7 @@ import (
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/conffile"
|
||||
kubeutils "tailscale.com/k8s-operator"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/ptr"
|
||||
@@ -145,7 +149,7 @@ func main() {
|
||||
Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"),
|
||||
AuthOnce: defaultBool("TS_AUTH_ONCE", false),
|
||||
Root: defaultEnv("TS_TEST_ONLY_ROOT", "/"),
|
||||
TailscaledConfigFilePath: defaultEnv("EXPERIMENTAL_TS_CONFIGFILE_PATH", ""),
|
||||
TailscaledConfigFilePath: tailscaledConfigFilePath(),
|
||||
AllowProxyingClusterTrafficViaIngress: defaultBool("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", false),
|
||||
PodIP: defaultEnv("POD_IP", ""),
|
||||
}
|
||||
@@ -957,16 +961,23 @@ func installIngressForwardingRule(ctx context.Context, dstStr string, tsIPs []ne
|
||||
return err
|
||||
}
|
||||
var local netip.Addr
|
||||
proxyHasIPv4Address := false
|
||||
for _, pfx := range tsIPs {
|
||||
if !pfx.IsSingleIP() {
|
||||
continue
|
||||
}
|
||||
if pfx.Addr().Is4() {
|
||||
proxyHasIPv4Address = true
|
||||
}
|
||||
if pfx.Addr().Is4() != dst.Is4() {
|
||||
continue
|
||||
}
|
||||
local = pfx.Addr()
|
||||
break
|
||||
}
|
||||
if proxyHasIPv4Address && dst.Is6() {
|
||||
log.Printf("Warning: proxy backend ClusterIP is an IPv6 address and the proxy has a IPv4 tailnet address. You might need to disable IPv4 address allocation for the proxy for forwarding to work. See https://github.com/tailscale/tailscale/issues/12156")
|
||||
}
|
||||
if !local.IsValid() {
|
||||
return fmt.Errorf("no tailscale IP matching family of %s found in %v", dstStr, tsIPs)
|
||||
}
|
||||
@@ -1097,6 +1108,13 @@ type settings struct {
|
||||
|
||||
func (s *settings) validate() error {
|
||||
if s.TailscaledConfigFilePath != "" {
|
||||
dir, file := path.Split(s.TailscaledConfigFilePath)
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return fmt.Errorf("error validating whether directory with tailscaled config file %s exists: %w", dir, err)
|
||||
}
|
||||
if _, err := os.Stat(s.TailscaledConfigFilePath); err != nil {
|
||||
return fmt.Errorf("error validating whether tailscaled config directory %q contains tailscaled config for current capability version %q: %w. If this is a Tailscale Kubernetes operator proxy, please ensure that the version of the operator is not older than the version of the proxy", dir, file, err)
|
||||
}
|
||||
if _, err := conffile.Load(s.TailscaledConfigFilePath); err != nil {
|
||||
return fmt.Errorf("error validating tailscaled configfile contents: %w", err)
|
||||
}
|
||||
@@ -1120,7 +1138,7 @@ func (s *settings) validate() error {
|
||||
return errors.New("Both TS_TAILNET_TARGET_IP and TS_TAILNET_FQDN cannot be set")
|
||||
}
|
||||
if s.TailscaledConfigFilePath != "" && (s.AcceptDNS != nil || s.AuthKey != "" || s.Routes != nil || s.ExtraArgs != "" || s.Hostname != "") {
|
||||
return errors.New("EXPERIMENTAL_TS_CONFIGFILE_PATH cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS.")
|
||||
return errors.New("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS.")
|
||||
}
|
||||
if s.AllowProxyingClusterTrafficViaIngress && s.UserspaceMode {
|
||||
return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is not supported in userspace mode")
|
||||
@@ -1252,3 +1270,42 @@ func isTwoStepConfigAlwaysAuth(cfg *settings) bool {
|
||||
func isOneStepConfig(cfg *settings) bool {
|
||||
return cfg.TailscaledConfigFilePath != ""
|
||||
}
|
||||
|
||||
// tailscaledConfigFilePath returns the path to the tailscaled config file that
|
||||
// should be used for the current capability version. It is determined by the
|
||||
// TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR environment variable and looks for a
|
||||
// file named cap-<capability_version>.hujson in the directory. It searches for
|
||||
// the highest capability version that is less than or equal to the current
|
||||
// capability version.
|
||||
func tailscaledConfigFilePath() string {
|
||||
dir := os.Getenv("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR")
|
||||
if dir == "" {
|
||||
return ""
|
||||
}
|
||||
fe, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
log.Fatalf("error reading tailscaled config directory %q: %v", dir, err)
|
||||
}
|
||||
maxCompatVer := tailcfg.CapabilityVersion(-1)
|
||||
for _, e := range fe {
|
||||
// We don't check if type if file as in most cases this will
|
||||
// come from a mounted kube Secret, where the directory contents
|
||||
// will be various symlinks.
|
||||
if e.Type().IsDir() {
|
||||
continue
|
||||
}
|
||||
cv, err := kubeutils.CapVerFromFileName(e.Name())
|
||||
if err != nil {
|
||||
log.Printf("skipping file %q in tailscaled config directory %q: %v", e.Name(), dir, err)
|
||||
continue
|
||||
}
|
||||
if cv > maxCompatVer && cv <= tailcfg.CurrentCapabilityVersion {
|
||||
maxCompatVer = cv
|
||||
}
|
||||
}
|
||||
if maxCompatVer == -1 {
|
||||
log.Fatalf("no tailscaled config file found in %q for current capability version %q", dir, tailcfg.CurrentCapabilityVersion)
|
||||
}
|
||||
log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion)
|
||||
return path.Join(dir, kubeutils.TailscaledConfigFileNameForCap(maxCompatVer))
|
||||
}
|
||||
|
||||
@@ -65,7 +65,7 @@ func TestContainerBoot(t *testing.T) {
|
||||
"dev/net",
|
||||
"proc/sys/net/ipv4",
|
||||
"proc/sys/net/ipv6/conf/all",
|
||||
"etc",
|
||||
"etc/tailscaled",
|
||||
}
|
||||
for _, path := range dirs {
|
||||
if err := os.MkdirAll(filepath.Join(d, path), 0700); err != nil {
|
||||
@@ -80,7 +80,7 @@ func TestContainerBoot(t *testing.T) {
|
||||
"dev/net/tun": []byte(""),
|
||||
"proc/sys/net/ipv4/ip_forward": []byte("0"),
|
||||
"proc/sys/net/ipv6/conf/all/forwarding": []byte("0"),
|
||||
"etc/tailscaled": tailscaledConfBytes,
|
||||
"etc/tailscaled/cap-95.hujson": tailscaledConfBytes,
|
||||
}
|
||||
resetFiles := func() {
|
||||
for path, content := range files {
|
||||
@@ -638,14 +638,14 @@ func TestContainerBoot(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "experimental tailscaled configfile",
|
||||
Name: "experimental tailscaled config path",
|
||||
Env: map[string]string{
|
||||
"EXPERIMENTAL_TS_CONFIGFILE_PATH": filepath.Join(d, "etc/tailscaled"),
|
||||
"TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(d, "etc/tailscaled/"),
|
||||
},
|
||||
Phases: []phase{
|
||||
{
|
||||
WantCmds: []string{
|
||||
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled",
|
||||
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson",
|
||||
},
|
||||
}, {
|
||||
Notify: runningNotify,
|
||||
|
||||
@@ -5,35 +5,45 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"expvar"
|
||||
"log"
|
||||
"math/rand/v2"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/slicesx"
|
||||
)
|
||||
|
||||
const refreshTimeout = time.Minute
|
||||
|
||||
type dnsEntryMap map[string][]net.IP
|
||||
type dnsEntryMap struct {
|
||||
IPs map[string][]net.IP
|
||||
Percent map[string]float64 // "foo.com" => 0.5 for 50%
|
||||
}
|
||||
|
||||
var (
|
||||
dnsCache syncs.AtomicValue[dnsEntryMap]
|
||||
dnsCache atomic.Pointer[dnsEntryMap]
|
||||
dnsCacheBytes syncs.AtomicValue[[]byte] // of JSON
|
||||
unpublishedDNSCache syncs.AtomicValue[dnsEntryMap]
|
||||
unpublishedDNSCache atomic.Pointer[dnsEntryMap]
|
||||
bootstrapLookupMap syncs.Map[string, bool]
|
||||
)
|
||||
|
||||
var (
|
||||
bootstrapDNSRequests = expvar.NewInt("counter_bootstrap_dns_requests")
|
||||
publishedDNSHits = expvar.NewInt("counter_bootstrap_dns_published_hits")
|
||||
publishedDNSMisses = expvar.NewInt("counter_bootstrap_dns_published_misses")
|
||||
unpublishedDNSHits = expvar.NewInt("counter_bootstrap_dns_unpublished_hits")
|
||||
unpublishedDNSMisses = expvar.NewInt("counter_bootstrap_dns_unpublished_misses")
|
||||
bootstrapDNSRequests = expvar.NewInt("counter_bootstrap_dns_requests")
|
||||
publishedDNSHits = expvar.NewInt("counter_bootstrap_dns_published_hits")
|
||||
publishedDNSMisses = expvar.NewInt("counter_bootstrap_dns_published_misses")
|
||||
unpublishedDNSHits = expvar.NewInt("counter_bootstrap_dns_unpublished_hits")
|
||||
unpublishedDNSMisses = expvar.NewInt("counter_bootstrap_dns_unpublished_misses")
|
||||
unpublishedDNSPercentMisses = expvar.NewInt("counter_bootstrap_dns_unpublished_percent_misses")
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -59,15 +69,13 @@ func refreshBootstrapDNS() {
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), refreshTimeout)
|
||||
defer cancel()
|
||||
dnsEntries := resolveList(ctx, strings.Split(*bootstrapDNS, ","))
|
||||
dnsEntries := resolveList(ctx, *bootstrapDNS)
|
||||
// Randomize the order of the IPs for each name to avoid the client biasing
|
||||
// to IPv6
|
||||
for k := range dnsEntries {
|
||||
ips := dnsEntries[k]
|
||||
slicesx.Shuffle(ips)
|
||||
dnsEntries[k] = ips
|
||||
for _, vv := range dnsEntries.IPs {
|
||||
slicesx.Shuffle(vv)
|
||||
}
|
||||
j, err := json.MarshalIndent(dnsEntries, "", "\t")
|
||||
j, err := json.MarshalIndent(dnsEntries.IPs, "", "\t")
|
||||
if err != nil {
|
||||
// leave the old values in place
|
||||
return
|
||||
@@ -81,27 +89,50 @@ func refreshUnpublishedDNS() {
|
||||
if *unpublishedDNS == "" {
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), refreshTimeout)
|
||||
defer cancel()
|
||||
|
||||
dnsEntries := resolveList(ctx, strings.Split(*unpublishedDNS, ","))
|
||||
dnsEntries := resolveList(ctx, *unpublishedDNS)
|
||||
unpublishedDNSCache.Store(dnsEntries)
|
||||
}
|
||||
|
||||
func resolveList(ctx context.Context, names []string) dnsEntryMap {
|
||||
dnsEntries := make(dnsEntryMap)
|
||||
// resolveList takes a comma-separated list of DNS names to resolve.
|
||||
//
|
||||
// If an entry contains a slash, it's two DNS names: the first is the one to
|
||||
// resolve and the second is that of a TXT recording containing the rollout
|
||||
// percentage in range "0".."100". If the TXT record doesn't exist or is
|
||||
// malformed, the percentage is 0. If the TXT record is not provided (there's no
|
||||
// slash), then the percentage is 100.
|
||||
func resolveList(ctx context.Context, list string) *dnsEntryMap {
|
||||
ents := strings.Split(list, ",")
|
||||
|
||||
ret := &dnsEntryMap{}
|
||||
|
||||
var r net.Resolver
|
||||
for _, name := range names {
|
||||
for _, ent := range ents {
|
||||
name, txtName, _ := strings.Cut(ent, "/")
|
||||
addrs, err := r.LookupIP(ctx, "ip", name)
|
||||
if err != nil {
|
||||
log.Printf("bootstrap DNS lookup %q: %v", name, err)
|
||||
continue
|
||||
}
|
||||
dnsEntries[name] = addrs
|
||||
mak.Set(&ret.IPs, name, addrs)
|
||||
|
||||
if txtName == "" {
|
||||
mak.Set(&ret.Percent, name, 1.0)
|
||||
continue
|
||||
}
|
||||
vals, err := r.LookupTXT(ctx, txtName)
|
||||
if err != nil {
|
||||
log.Printf("bootstrap DNS lookup %q: %v", txtName, err)
|
||||
continue
|
||||
}
|
||||
for _, v := range vals {
|
||||
if v, err := strconv.Atoi(v); err == nil && v >= 0 && v <= 100 {
|
||||
mak.Set(&ret.Percent, name, float64(v)/100)
|
||||
}
|
||||
}
|
||||
}
|
||||
return dnsEntries
|
||||
return ret
|
||||
}
|
||||
|
||||
func handleBootstrapDNS(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -115,22 +146,36 @@ func handleBootstrapDNS(w http.ResponseWriter, r *http.Request) {
|
||||
// Try answering a query from our hidden map first
|
||||
if q := r.URL.Query().Get("q"); q != "" {
|
||||
bootstrapLookupMap.Store(q, true)
|
||||
if ips, ok := unpublishedDNSCache.Load()[q]; ok && len(ips) > 0 {
|
||||
if bootstrapLookupMap.Len() > 500 { // defensive
|
||||
bootstrapLookupMap.Clear()
|
||||
}
|
||||
if m := unpublishedDNSCache.Load(); m != nil && len(m.IPs[q]) > 0 {
|
||||
unpublishedDNSHits.Add(1)
|
||||
|
||||
// Only return the specific query, not everything.
|
||||
m := dnsEntryMap{q: ips}
|
||||
j, err := json.MarshalIndent(m, "", "\t")
|
||||
if err == nil {
|
||||
w.Write(j)
|
||||
return
|
||||
percent := m.Percent[q]
|
||||
if remoteAddrMatchesPercent(r.RemoteAddr, percent) {
|
||||
// Only return the specific query, not everything.
|
||||
m := map[string][]net.IP{q: m.IPs[q]}
|
||||
j, err := json.MarshalIndent(m, "", "\t")
|
||||
if err == nil {
|
||||
w.Write(j)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
unpublishedDNSPercentMisses.Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
// If we have a "q" query for a name in the published cache
|
||||
// list, then track whether that's a hit/miss.
|
||||
if m, ok := dnsCache.Load()[q]; ok {
|
||||
if len(m) > 0 {
|
||||
m := dnsCache.Load()
|
||||
var inPub bool
|
||||
var ips []net.IP
|
||||
if m != nil {
|
||||
ips, inPub = m.IPs[q]
|
||||
}
|
||||
if inPub {
|
||||
if len(ips) > 0 {
|
||||
publishedDNSHits.Add(1)
|
||||
} else {
|
||||
publishedDNSMisses.Add(1)
|
||||
@@ -146,3 +191,29 @@ func handleBootstrapDNS(w http.ResponseWriter, r *http.Request) {
|
||||
j := dnsCacheBytes.Load()
|
||||
w.Write(j)
|
||||
}
|
||||
|
||||
// percent is [0.0, 1.0].
|
||||
func remoteAddrMatchesPercent(remoteAddr string, percent float64) bool {
|
||||
if percent == 0 {
|
||||
return false
|
||||
}
|
||||
if percent == 1 {
|
||||
return true
|
||||
}
|
||||
reqIPStr, _, err := net.SplitHostPort(remoteAddr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
reqIP, err := netip.ParseAddr(reqIPStr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if reqIP.IsLoopback() {
|
||||
// For local testing.
|
||||
return rand.Float64() < 0.5
|
||||
}
|
||||
reqIP16 := reqIP.As16()
|
||||
rndSrc := rand.NewPCG(binary.LittleEndian.Uint64(reqIP16[:8]), binary.LittleEndian.Uint64(reqIP16[8:]))
|
||||
rnd := rand.New(rndSrc)
|
||||
return percent > rnd.Float64()
|
||||
}
|
||||
|
||||
@@ -4,10 +4,13 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
@@ -38,7 +41,7 @@ func (b *bitbucketResponseWriter) Write(p []byte) (int, error) { return len(p),
|
||||
|
||||
func (b *bitbucketResponseWriter) WriteHeader(statusCode int) {}
|
||||
|
||||
func getBootstrapDNS(t *testing.T, q string) dnsEntryMap {
|
||||
func getBootstrapDNS(t *testing.T, q string) map[string][]net.IP {
|
||||
t.Helper()
|
||||
req, _ := http.NewRequest("GET", "https://localhost/bootstrap-dns?q="+url.QueryEscape(q), nil)
|
||||
w := httptest.NewRecorder()
|
||||
@@ -48,11 +51,12 @@ func getBootstrapDNS(t *testing.T, q string) dnsEntryMap {
|
||||
if res.StatusCode != 200 {
|
||||
t.Fatalf("got status=%d; want %d", res.StatusCode, 200)
|
||||
}
|
||||
var ips dnsEntryMap
|
||||
if err := json.NewDecoder(res.Body).Decode(&ips); err != nil {
|
||||
t.Fatalf("error decoding response body: %v", err)
|
||||
var m map[string][]net.IP
|
||||
var buf bytes.Buffer
|
||||
if err := json.NewDecoder(io.TeeReader(res.Body, &buf)).Decode(&m); err != nil {
|
||||
t.Fatalf("error decoding response body %q: %v", buf.Bytes(), err)
|
||||
}
|
||||
return ips
|
||||
return m
|
||||
}
|
||||
|
||||
func TestUnpublishedDNS(t *testing.T) {
|
||||
@@ -107,15 +111,21 @@ func resetMetrics() {
|
||||
// Verify that we don't count an empty list in the unpublishedDNSCache as a
|
||||
// cache hit in our metrics.
|
||||
func TestUnpublishedDNSEmptyList(t *testing.T) {
|
||||
pub := dnsEntryMap{
|
||||
"tailscale.com": {net.IPv4(10, 10, 10, 10)},
|
||||
pub := &dnsEntryMap{
|
||||
IPs: map[string][]net.IP{"tailscale.com": {net.IPv4(10, 10, 10, 10)}},
|
||||
}
|
||||
dnsCache.Store(pub)
|
||||
dnsCacheBytes.Store([]byte(`{"tailscale.com":["10.10.10.10"]}`))
|
||||
|
||||
unpublishedDNSCache.Store(dnsEntryMap{
|
||||
"log.tailscale.io": {},
|
||||
"controlplane.tailscale.com": {net.IPv4(1, 2, 3, 4)},
|
||||
unpublishedDNSCache.Store(&dnsEntryMap{
|
||||
IPs: map[string][]net.IP{
|
||||
"log.tailscale.io": {},
|
||||
"controlplane.tailscale.com": {net.IPv4(1, 2, 3, 4)},
|
||||
},
|
||||
Percent: map[string]float64{
|
||||
"log.tailscale.io": 1.0,
|
||||
"controlplane.tailscale.com": 1.0,
|
||||
},
|
||||
})
|
||||
|
||||
t.Run("CacheMiss", func(t *testing.T) {
|
||||
@@ -125,8 +135,8 @@ func TestUnpublishedDNSEmptyList(t *testing.T) {
|
||||
ips := getBootstrapDNS(t, q)
|
||||
|
||||
// Expected our public map to be returned on a cache miss
|
||||
if !reflect.DeepEqual(ips, pub) {
|
||||
t.Errorf("got ips=%+v; want %+v", ips, pub)
|
||||
if !reflect.DeepEqual(ips, pub.IPs) {
|
||||
t.Errorf("got ips=%+v; want %+v", ips, pub.IPs)
|
||||
}
|
||||
if v := unpublishedDNSHits.Value(); v != 0 {
|
||||
t.Errorf("got hits=%d; want 0", v)
|
||||
@@ -141,7 +151,7 @@ func TestUnpublishedDNSEmptyList(t *testing.T) {
|
||||
t.Run("CacheHit", func(t *testing.T) {
|
||||
resetMetrics()
|
||||
ips := getBootstrapDNS(t, "controlplane.tailscale.com")
|
||||
want := dnsEntryMap{"controlplane.tailscale.com": {net.IPv4(1, 2, 3, 4)}}
|
||||
want := map[string][]net.IP{"controlplane.tailscale.com": {net.IPv4(1, 2, 3, 4)}}
|
||||
if !reflect.DeepEqual(ips, want) {
|
||||
t.Errorf("got ips=%+v; want %+v", ips, want)
|
||||
}
|
||||
@@ -166,3 +176,54 @@ func TestLookupMetric(t *testing.T) {
|
||||
t.Errorf("bootstrapLookupMap.Len() want=5, got %v", bootstrapLookupMap.Len())
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteAddrMatchesPercent(t *testing.T) {
|
||||
tests := []struct {
|
||||
remoteAddr string
|
||||
percent float64
|
||||
want bool
|
||||
}{
|
||||
// 0% and 100%.
|
||||
{"10.0.0.1:1234", 0.0, false},
|
||||
{"10.0.0.1:1234", 1.0, true},
|
||||
|
||||
// Invalid IP.
|
||||
{"", 1.0, true},
|
||||
{"", 0.0, false},
|
||||
{"", 0.5, false},
|
||||
|
||||
// Small manual sample at 50%. The func uses a deterministic PRNG seed.
|
||||
{"1.2.3.4:567", 0.5, true},
|
||||
{"1.2.3.5:567", 0.5, true},
|
||||
{"1.2.3.6:567", 0.5, false},
|
||||
{"1.2.3.7:567", 0.5, true},
|
||||
{"1.2.3.8:567", 0.5, false},
|
||||
{"1.2.3.9:567", 0.5, true},
|
||||
{"1.2.3.10:567", 0.5, true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got := remoteAddrMatchesPercent(tt.remoteAddr, tt.percent)
|
||||
if got != tt.want {
|
||||
t.Errorf("remoteAddrMatchesPercent(%q, %v) = %v; want %v", tt.remoteAddr, tt.percent, got, tt.want)
|
||||
}
|
||||
}
|
||||
|
||||
var match, all int
|
||||
const wantPercent = 0.5
|
||||
for a := range 256 {
|
||||
for b := range 256 {
|
||||
all++
|
||||
if remoteAddrMatchesPercent(
|
||||
netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, byte(a), byte(b)}), 12345).String(),
|
||||
wantPercent) {
|
||||
match++
|
||||
}
|
||||
}
|
||||
}
|
||||
gotPercent := float64(match) / float64(all)
|
||||
const tolerance = 0.005
|
||||
t.Logf("got percent %v (goal %v)", gotPercent, wantPercent)
|
||||
if gotPercent < wantPercent-tolerance || gotPercent > wantPercent+tolerance {
|
||||
t.Errorf("got %v; want %v ± %v", gotPercent, wantPercent, tolerance)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -235,7 +235,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
||||
encoding/pem from crypto/tls+
|
||||
errors from bufio+
|
||||
expvar from github.com/prometheus/client_golang/prometheus+
|
||||
flag from tailscale.com/cmd/derper+
|
||||
flag from tailscale.com/cmd/derper
|
||||
fmt from compress/flate+
|
||||
go/token from google.golang.org/protobuf/internal/strs
|
||||
hash from crypto+
|
||||
@@ -253,7 +253,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
||||
math/big from crypto/dsa+
|
||||
math/bits from compress/flate+
|
||||
math/rand from github.com/mdlayher/netlink+
|
||||
math/rand/v2 from tailscale.com/util/fastuuid
|
||||
math/rand/v2 from tailscale.com/util/fastuuid+
|
||||
mime from github.com/prometheus/common/expfmt+
|
||||
mime/multipart from net/http
|
||||
mime/quotedprintable from mime/multipart
|
||||
@@ -277,7 +277,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
||||
runtime/debug from github.com/prometheus/client_golang/prometheus+
|
||||
runtime/metrics from github.com/prometheus/client_golang/prometheus+
|
||||
runtime/pprof from net/http/pprof
|
||||
runtime/trace from net/http/pprof+
|
||||
runtime/trace from net/http/pprof
|
||||
slices from tailscale.com/ipn/ipnstate+
|
||||
sort from compress/flate+
|
||||
strconv from compress/flate+
|
||||
@@ -285,7 +285,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
||||
sync from compress/flate+
|
||||
sync/atomic from context+
|
||||
syscall from crypto/rand+
|
||||
testing from tailscale.com/util/syspolicy
|
||||
text/tabwriter from runtime/pprof
|
||||
time from compress/gzip+
|
||||
unicode from bytes+
|
||||
|
||||
@@ -55,7 +55,7 @@ var (
|
||||
meshPSKFile = flag.String("mesh-psk-file", defaultMeshPSKFile(), "if non-empty, path to file containing the mesh pre-shared key file. It should contain some hex string; whitespace is trimmed.")
|
||||
meshWith = flag.String("mesh-with", "", "optional comma-separated list of hostnames to mesh with; the server's own hostname can be in the list")
|
||||
bootstrapDNS = flag.String("bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns")
|
||||
unpublishedDNS = flag.String("unpublished-bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns and not publish in the list")
|
||||
unpublishedDNS = flag.String("unpublished-bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns and not publish in the list. If an entry contains a slash, the second part names a DNS record to poll for its TXT record with a `0` to `100` value for rollout percentage.")
|
||||
verifyClients = flag.Bool("verify-clients", false, "verify clients to this DERP server through a local tailscaled instance.")
|
||||
verifyClientURL = flag.String("verify-client-url", "", "if non-empty, an admission controller URL for permitting client connections; see tailcfg.DERPAdmitClientRequest")
|
||||
verifyFailOpen = flag.Bool("verify-client-url-fail-open", true, "whether we fail open if --verify-client-url is unreachable")
|
||||
|
||||
@@ -99,6 +99,7 @@ func TestNoContent(t *testing.T) {
|
||||
func TestDeps(t *testing.T) {
|
||||
deptest.DepChecker{
|
||||
BadDeps: map[string]string{
|
||||
"testing": "do not use testing package in production code",
|
||||
"gvisor.dev/gvisor/pkg/buffer": "https://github.com/tailscale/tailscale/issues/9756",
|
||||
"gvisor.dev/gvisor/pkg/cpuid": "https://github.com/tailscale/tailscale/issues/9756",
|
||||
"gvisor.dev/gvisor/pkg/tcpip": "https://github.com/tailscale/tailscale/issues/9756",
|
||||
|
||||
@@ -51,6 +51,10 @@ operatorConfig:
|
||||
# proxies created by the operator.
|
||||
# https://tailscale.com/kb/1236/kubernetes-operator/#cluster-ingress
|
||||
# https://tailscale.com/kb/1236/kubernetes-operator/#cluster-egress
|
||||
# Note that this section contains only a few global configuration options and
|
||||
# will not be updated with more configuration options in the future.
|
||||
# If you need more configuration options, take a look at ProxyClass:
|
||||
# https://tailscale.com/kb/1236/kubernetes-operator#cluster-resource-customization-using-proxyclass-custom-resource
|
||||
proxyConfig:
|
||||
image:
|
||||
repo: tailscale/tailscale
|
||||
|
||||
@@ -41,6 +41,12 @@ const (
|
||||
|
||||
messageNameserverCreationFailed = "Failed creating nameserver resources: %v"
|
||||
messageMultipleDNSConfigsPresent = "Multiple DNSConfig resources found in cluster. Please ensure no more than one is present."
|
||||
|
||||
defaultNameserverImageRepo = "tailscale/k8s-nameserver"
|
||||
// TODO (irbekrm): once we start publishing nameserver images for stable
|
||||
// track, replace 'unstable' here with the version of this operator
|
||||
// instance.
|
||||
defaultNameserverImageTag = "unstable"
|
||||
)
|
||||
|
||||
// NameserverReconciler knows how to create nameserver resources in cluster in
|
||||
@@ -163,11 +169,13 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa
|
||||
ownerRefs: []metav1.OwnerReference{*metav1.NewControllerRef(tsDNSCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig"))},
|
||||
namespace: a.tsNamespace,
|
||||
labels: labels,
|
||||
imageRepo: defaultNameserverImageRepo,
|
||||
imageTag: defaultNameserverImageTag,
|
||||
}
|
||||
if tsDNSCfg.Spec.Nameserver.Image.Repo != "" {
|
||||
if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Repo != "" {
|
||||
dCfg.imageRepo = tsDNSCfg.Spec.Nameserver.Image.Repo
|
||||
}
|
||||
if tsDNSCfg.Spec.Nameserver.Image.Tag != "" {
|
||||
if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Tag != "" {
|
||||
dCfg.imageTag = tsDNSCfg.Spec.Nameserver.Image.Tag
|
||||
}
|
||||
for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} {
|
||||
|
||||
@@ -115,4 +115,13 @@ func TestNameserverReconciler(t *testing.T) {
|
||||
Data: map[string]string{"records.json": string(bs)},
|
||||
}
|
||||
expectEqual(t, fc, wantCm, nil)
|
||||
|
||||
// Verify that if dnsconfig.spec.nameserver.image.{repo,tag} are unset,
|
||||
// the nameserver image defaults to tailscale/k8s-nameserver:unstable.
|
||||
mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) {
|
||||
dnsCfg.Spec.Nameserver.Image = nil
|
||||
})
|
||||
expectReconciled(t, nr, "", "test")
|
||||
wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable"
|
||||
expectEqual(t, fc, wantsDeploy, nil)
|
||||
}
|
||||
|
||||
@@ -45,12 +45,12 @@ import (
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
// Generate static manifests for deploying Tailscale operator on Kubernetes from the operator's Helm chart.
|
||||
//go:generate go run tailscale.com/cmd/k8s-operator/generate staticmanifests
|
||||
|
||||
// Generate Connector and ProxyClass CustomResourceDefinition yamls from their Go types.
|
||||
//go:generate go run sigs.k8s.io/controller-tools/cmd/controller-gen crd schemapatch:manifests=./deploy/crds output:dir=./deploy/crds paths=../../k8s-operator/apis/...
|
||||
|
||||
// Generate static manifests for deploying Tailscale operator on Kubernetes from the operator's Helm chart.
|
||||
//go:generate go run tailscale.com/cmd/k8s-operator/generate staticmanifests
|
||||
|
||||
// Generate CRD docs from the yamls
|
||||
//go:generate go run fybrik.io/crdoc --resources=./deploy/crds --output=../../k8s-operator/api.md
|
||||
|
||||
|
||||
@@ -1182,7 +1182,7 @@ func TestTailscaledConfigfileHash(t *testing.T) {
|
||||
parentType: "svc",
|
||||
hostname: "default-test",
|
||||
clusterTargetIP: "10.20.30.40",
|
||||
confFileHash: "705e5ffd0bd5326237efdf542c850a65a54101284d5daa30775420fcc64d89c1",
|
||||
confFileHash: "e09bededa0379920141cbd0b0dbdf9b8b66545877f9e8397423f5ce3e1ba439e",
|
||||
}
|
||||
expectEqual(t, fc, expectedSTS(t, fc, o), nil)
|
||||
|
||||
@@ -1192,7 +1192,7 @@ func TestTailscaledConfigfileHash(t *testing.T) {
|
||||
mak.Set(&svc.Annotations, AnnotationHostname, "another-test")
|
||||
})
|
||||
o.hostname = "another-test"
|
||||
o.confFileHash = "1a087f887825d2b75d3673c7c2b0131f8ec1f0b1cb761d33e236dd28350dfe23"
|
||||
o.confFileHash = "5d754cf55463135ee34aa9821f2fd8483b53eb0570c3740c84a086304f427684"
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
expectEqual(t, fc, expectedSTS(t, fc, o), nil)
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
"sigs.k8s.io/yaml"
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/ipn"
|
||||
kubeutils "tailscale.com/k8s-operator"
|
||||
tsoperator "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/net/netutil"
|
||||
@@ -92,10 +93,6 @@ const (
|
||||
podAnnotationLastSetTailnetTargetFQDN = "tailscale.com/operator-last-set-ts-tailnet-target-fqdn"
|
||||
// podAnnotationLastSetConfigFileHash is sha256 hash of the current tailscaled configuration contents.
|
||||
podAnnotationLastSetConfigFileHash = "tailscale.com/operator-last-set-config-file-hash"
|
||||
|
||||
// tailscaledConfigKey is the name of the key in proxy Secret Data that
|
||||
// holds the tailscaled config contents.
|
||||
tailscaledConfigKey = "tailscaled"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -174,11 +171,11 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga
|
||||
return nil, fmt.Errorf("failed to reconcile headless service: %w", err)
|
||||
}
|
||||
|
||||
secretName, tsConfigHash, err := a.createOrGetSecret(ctx, logger, sts, hsvc)
|
||||
secretName, tsConfigHash, configs, err := a.createOrGetSecret(ctx, logger, sts, hsvc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create or get API key secret: %w", err)
|
||||
}
|
||||
_, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash)
|
||||
_, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash, configs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to reconcile statefulset: %w", err)
|
||||
}
|
||||
@@ -291,7 +288,7 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l
|
||||
return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec })
|
||||
}
|
||||
|
||||
func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (string, string, error) {
|
||||
func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaleConfigs, _ error) {
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
// Hardcode a -0 suffix so that in future, if we support
|
||||
@@ -307,25 +304,23 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
|
||||
logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName())
|
||||
orig = secret.DeepCopy()
|
||||
} else if !apierrors.IsNotFound(err) {
|
||||
return "", "", err
|
||||
return "", "", nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
authKey, hash string
|
||||
)
|
||||
var authKey string
|
||||
if orig == nil {
|
||||
// Initially it contains only tailscaled config, but when the
|
||||
// proxy starts, it will also store there the state, certs and
|
||||
// ACME account key.
|
||||
sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return "", "", nil, err
|
||||
}
|
||||
if sts != nil {
|
||||
// StatefulSet exists, so we have already created the secret.
|
||||
// If the secret is missing, they should delete the StatefulSet.
|
||||
logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName())
|
||||
return "", "", nil
|
||||
return "", "", nil, nil
|
||||
}
|
||||
// Create API Key secret which is going to be used by the statefulset
|
||||
// to authenticate with Tailscale.
|
||||
@@ -336,45 +331,58 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
|
||||
}
|
||||
authKey, err = a.newAuthKey(ctx, tags)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return "", "", nil, err
|
||||
}
|
||||
}
|
||||
confFileBytes, h, err := tailscaledConfig(stsC, authKey, orig)
|
||||
configs, err := tailscaledConfig(stsC, authKey, orig)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("error creating tailscaled config: %w", err)
|
||||
return "", "", nil, fmt.Errorf("error creating tailscaled config: %w", err)
|
||||
}
|
||||
hash, err = tailscaledConfigHash(configs)
|
||||
if err != nil {
|
||||
return "", "", nil, fmt.Errorf("error calculating hash of tailscaled configs: %w", err)
|
||||
}
|
||||
|
||||
latest := tailcfg.CapabilityVersion(-1)
|
||||
var latestConfig ipn.ConfigVAlpha
|
||||
for key, val := range configs {
|
||||
fn := kubeutils.TailscaledConfigFileNameForCap(key)
|
||||
b, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
return "", "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err)
|
||||
}
|
||||
mak.Set(&secret.StringData, fn, string(b))
|
||||
if key > latest {
|
||||
latest = key
|
||||
latestConfig = val
|
||||
}
|
||||
}
|
||||
hash = h
|
||||
mak.Set(&secret.StringData, tailscaledConfigKey, string(confFileBytes))
|
||||
|
||||
if stsC.ServeConfig != nil {
|
||||
j, err := json.Marshal(stsC.ServeConfig)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return "", "", nil, err
|
||||
}
|
||||
mak.Set(&secret.StringData, "serve-config", string(j))
|
||||
}
|
||||
|
||||
if orig != nil {
|
||||
logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(secret.Data[tailscaledConfigKey]))
|
||||
logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig))
|
||||
if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil {
|
||||
return "", "", err
|
||||
return "", "", nil, err
|
||||
}
|
||||
} else {
|
||||
logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes([]byte(secret.StringData[tailscaledConfigKey])))
|
||||
logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig))
|
||||
if err := a.Create(ctx, secret); err != nil {
|
||||
return "", "", err
|
||||
return "", "", nil, err
|
||||
}
|
||||
}
|
||||
return secret.Name, hash, nil
|
||||
return secret.Name, hash, configs, nil
|
||||
}
|
||||
|
||||
// sanitizeConfigBytes returns ipn.ConfigVAlpha in string form with redacted
|
||||
// auth key.
|
||||
func sanitizeConfigBytes(bs []byte) string {
|
||||
c := &ipn.ConfigVAlpha{}
|
||||
if err := json.Unmarshal(bs, c); err != nil {
|
||||
return "invalid config"
|
||||
}
|
||||
func sanitizeConfigBytes(c ipn.ConfigVAlpha) string {
|
||||
if c.AuthKey != nil {
|
||||
c.AuthKey = ptr.To("**redacted**")
|
||||
}
|
||||
@@ -437,7 +445,7 @@ var proxyYaml []byte
|
||||
//go:embed deploy/manifests/userspace-proxy.yaml
|
||||
var userspaceProxyYaml []byte
|
||||
|
||||
func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string) (*appsv1.StatefulSet, error) {
|
||||
func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string, configs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) (*appsv1.StatefulSet, error) {
|
||||
ss := new(appsv1.StatefulSet)
|
||||
if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding
|
||||
if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil {
|
||||
@@ -493,9 +501,15 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
|
||||
Value: proxySecret,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
// Old tailscaled config key is still used for backwards compatibility.
|
||||
Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH",
|
||||
Value: "/etc/tsconfig/tailscaled",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
// New style is in the form of cap-<capability-version>.hujson.
|
||||
Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR",
|
||||
Value: "/etc/tsconfig",
|
||||
},
|
||||
)
|
||||
if sts.ForwardClusterTrafficViaL7IngressProxy {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
@@ -505,18 +519,16 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
|
||||
}
|
||||
// Configure containeboot to run tailscaled with a configfile read from the state Secret.
|
||||
mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, tsConfigHash)
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{
|
||||
|
||||
configVolume := corev1.Volume{
|
||||
Name: "tailscaledconfig",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: proxySecret,
|
||||
Items: []corev1.KeyToPath{{
|
||||
Key: tailscaledConfigKey,
|
||||
Path: tailscaledConfigKey,
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume)
|
||||
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
|
||||
Name: "tailscaledconfig",
|
||||
ReadOnly: true,
|
||||
@@ -571,10 +583,7 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: proxySecret,
|
||||
Items: []corev1.KeyToPath{{
|
||||
Key: "serve-config",
|
||||
Path: "serve-config",
|
||||
}},
|
||||
Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}},
|
||||
},
|
||||
},
|
||||
})
|
||||
@@ -716,42 +725,82 @@ func enableMetrics(ss *appsv1.StatefulSet, pc *tsapi.ProxyClass) {
|
||||
}
|
||||
}
|
||||
|
||||
func readAuthKey(secret *corev1.Secret, key string) (*string, error) {
|
||||
origConf := &ipn.ConfigVAlpha{}
|
||||
if err := json.Unmarshal([]byte(secret.Data[key]), origConf); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling previous tailscaled config in %q: %w", key, err)
|
||||
}
|
||||
return origConf.AuthKey, nil
|
||||
}
|
||||
|
||||
// tailscaledConfig takes a proxy config, a newly generated auth key if
|
||||
// generated and a Secret with the previous proxy state and auth key and
|
||||
// produces returns tailscaled configuration and a hash of that configuration.
|
||||
func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) ([]byte, string, error) {
|
||||
conf := ipn.ConfigVAlpha{
|
||||
Version: "alpha0",
|
||||
AcceptDNS: "false",
|
||||
AcceptRoutes: "false", // AcceptRoutes defaults to true
|
||||
Locked: "false",
|
||||
Hostname: &stsC.Hostname,
|
||||
// returns tailscaled configuration and a hash of that configuration.
|
||||
//
|
||||
// As of 2024-05-09 it also returns legacy tailscaled config without the
|
||||
// later added NoStatefulFilter field to support proxies older than cap95.
|
||||
// TODO (irbekrm): remove the legacy config once we no longer need to support
|
||||
// versions older than cap94,
|
||||
// https://tailscale.com/kb/1236/kubernetes-operator#operator-and-proxies
|
||||
func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaleConfigs, error) {
|
||||
conf := &ipn.ConfigVAlpha{
|
||||
Version: "alpha0",
|
||||
AcceptDNS: "false",
|
||||
AcceptRoutes: "false", // AcceptRoutes defaults to true
|
||||
Locked: "false",
|
||||
Hostname: &stsC.Hostname,
|
||||
NoStatefulFiltering: "false",
|
||||
}
|
||||
|
||||
// For egress proxies only, we need to ensure that stateful filtering is
|
||||
// not in place so that traffic from cluster can be forwarded via
|
||||
// Tailscale IPs.
|
||||
if stsC.TailnetTargetFQDN != "" || stsC.TailnetTargetIP != "" {
|
||||
conf.NoStatefulFiltering = "true"
|
||||
}
|
||||
if stsC.Connector != nil {
|
||||
routes, err := netutil.CalcAdvertiseRoutes(stsC.Connector.routes, stsC.Connector.isExitNode)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("error calculating routes: %w", err)
|
||||
return nil, fmt.Errorf("error calculating routes: %w", err)
|
||||
}
|
||||
conf.AdvertiseRoutes = routes
|
||||
}
|
||||
if newAuthkey != "" {
|
||||
conf.AuthKey = &newAuthkey
|
||||
} else if oldSecret != nil && len(oldSecret.Data[tailscaledConfigKey]) > 0 { // write to StringData, read from Data as StringData is write-only
|
||||
origConf := &ipn.ConfigVAlpha{}
|
||||
if err := json.Unmarshal([]byte(oldSecret.Data[tailscaledConfigKey]), origConf); err != nil {
|
||||
return nil, "", fmt.Errorf("error unmarshaling previous tailscaled config: %w", err)
|
||||
} else if oldSecret != nil {
|
||||
var err error
|
||||
latest := tailcfg.CapabilityVersion(-1)
|
||||
latestStr := ""
|
||||
for k, data := range oldSecret.Data {
|
||||
// write to StringData, read from Data as StringData is write-only
|
||||
if len(data) == 0 {
|
||||
continue
|
||||
}
|
||||
v, err := kubeutils.CapVerFromFileName(k)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if v > latest {
|
||||
latestStr = k
|
||||
latest = v
|
||||
}
|
||||
}
|
||||
// Allow for configs that don't contain an auth key. Perhaps
|
||||
// users have some mechanisms to delete them. Auth key is
|
||||
// normally not needed after the initial login.
|
||||
if latestStr != "" {
|
||||
conf.AuthKey, err = readAuthKey(oldSecret, latestStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
conf.AuthKey = origConf.AuthKey
|
||||
}
|
||||
confFileBytes, err := json.Marshal(conf)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("error marshaling tailscaled config : %w", err)
|
||||
}
|
||||
hash, err := hashBytes(confFileBytes)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("error calculating config hash: %w", err)
|
||||
}
|
||||
return confFileBytes, hash, nil
|
||||
capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha)
|
||||
capVerConfigs[95] = *conf
|
||||
// legacy config should not contain NoStatefulFiltering field.
|
||||
conf.NoStatefulFiltering.Clear()
|
||||
capVerConfigs[94] = *conf
|
||||
return capVerConfigs, nil
|
||||
}
|
||||
|
||||
// ptrObject is a type constraint for pointer types that implement
|
||||
@@ -761,7 +810,9 @@ type ptrObject[T any] interface {
|
||||
*T
|
||||
}
|
||||
|
||||
// hashBytes produces a hash for the provided bytes that is the same across
|
||||
type tailscaleConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha
|
||||
|
||||
// hashBytes produces a hash for the provided tailscaled config that is the same across
|
||||
// different invocations of this code. We do not use the
|
||||
// tailscale.com/deephash.Hash here because that produces a different hash for
|
||||
// the same value in different tailscale builds. The hash we are producing here
|
||||
@@ -770,10 +821,13 @@ type ptrObject[T any] interface {
|
||||
// thing that changed is operator version (the hash is also exposed to users via
|
||||
// an annotation and might be confusing if it changes without the config having
|
||||
// changed).
|
||||
func hashBytes(b []byte) (string, error) {
|
||||
h := sha256.New()
|
||||
_, err := h.Write(b)
|
||||
func tailscaledConfigHash(c tailscaleConfigs) (string, error) {
|
||||
b, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshalling tailscaled configs: %w", err)
|
||||
}
|
||||
h := sha256.New()
|
||||
if _, err = h.Write(b); err != nil {
|
||||
return "", fmt.Errorf("error calculating hash: %w", err)
|
||||
}
|
||||
return fmt.Sprintf("%x", h.Sum(nil)), nil
|
||||
|
||||
@@ -161,7 +161,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
||||
}
|
||||
if violations := validateService(svc); len(violations) > 0 {
|
||||
msg := fmt.Sprintf("unable to provision proxy resources: invalid Service: %s", strings.Join(violations, ", "))
|
||||
a.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVCICE", msg)
|
||||
a.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg)
|
||||
a.logger.Error(msg)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -67,6 +67,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
|
||||
{Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
|
||||
{Name: "TS_KUBE_SECRET", Value: opts.secretName},
|
||||
{Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled"},
|
||||
{Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"},
|
||||
},
|
||||
SecurityContext: &corev1.SecurityContext{
|
||||
Capabilities: &corev1.Capabilities{
|
||||
@@ -89,12 +90,6 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: opts.secretName,
|
||||
Items: []corev1.KeyToPath{
|
||||
{
|
||||
Key: "tailscaled",
|
||||
Path: "tailscaled",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -144,9 +139,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
|
||||
Name: "TS_SERVE_CONFIG",
|
||||
Value: "/etc/tailscaled/serve-config",
|
||||
})
|
||||
volumes = append(volumes, corev1.Volume{
|
||||
Name: "serve-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Path: "serve-config", Key: "serve-config"}}}},
|
||||
})
|
||||
volumes = append(volumes, corev1.Volume{Name: "serve-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}})
|
||||
tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"})
|
||||
}
|
||||
ss := &appsv1.StatefulSet{
|
||||
@@ -229,6 +222,7 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps
|
||||
{Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
|
||||
{Name: "TS_KUBE_SECRET", Value: opts.secretName},
|
||||
{Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled"},
|
||||
{Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"},
|
||||
{Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"},
|
||||
},
|
||||
ImagePullPolicy: "Always",
|
||||
@@ -243,20 +237,12 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: opts.secretName,
|
||||
Items: []corev1.KeyToPath{
|
||||
{
|
||||
Key: "tailscaled",
|
||||
Path: "tailscaled",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{Name: "serve-config",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName,
|
||||
Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}},
|
||||
},
|
||||
Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}},
|
||||
}
|
||||
ss := &appsv1.StatefulSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -388,7 +374,17 @@ func expectedSecret(t *testing.T, opts configOpts) *corev1.Secret {
|
||||
if err != nil {
|
||||
t.Fatalf("error marshalling tailscaled config")
|
||||
}
|
||||
if opts.tailnetTargetFQDN != "" || opts.tailnetTargetIP != "" {
|
||||
conf.NoStatefulFiltering = "true"
|
||||
} else {
|
||||
conf.NoStatefulFiltering = "false"
|
||||
}
|
||||
bn, err := json.Marshal(conf)
|
||||
if err != nil {
|
||||
t.Fatalf("error marshalling tailscaled config")
|
||||
}
|
||||
mak.Set(&s.StringData, "tailscaled", string(b))
|
||||
mak.Set(&s.StringData, "cap-95.hujson", string(bn))
|
||||
labels := map[string]string{
|
||||
"tailscale.com/managed": "true",
|
||||
"tailscale.com/parent-resource": "test",
|
||||
@@ -463,7 +459,7 @@ func mustUpdateStatus[T any, O ptrObject[T]](t *testing.T, client client.Client,
|
||||
// they are not present in the passed object and use the modify func to remove
|
||||
// them from the cluster object. If no such modifications are needed, you can
|
||||
// pass nil in place of the modify function.
|
||||
func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want O, modify func(O)) {
|
||||
func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want O, modifier func(O)) {
|
||||
t.Helper()
|
||||
got := O(new(T))
|
||||
if err := client.Get(context.Background(), types.NamespacedName{
|
||||
@@ -477,8 +473,8 @@ func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want
|
||||
// so just remove it from both got and want.
|
||||
got.SetResourceVersion("")
|
||||
want.SetResourceVersion("")
|
||||
if modify != nil {
|
||||
modify(got)
|
||||
if modifier != nil {
|
||||
modifier(got)
|
||||
}
|
||||
if diff := cmp.Diff(got, want); diff != "" {
|
||||
t.Fatalf("unexpected object (-got +want):\n%s", diff)
|
||||
|
||||
@@ -28,7 +28,6 @@ import (
|
||||
"tailscale.com/metrics"
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/tsweb"
|
||||
"tailscale.com/types/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -58,8 +57,6 @@ func main() {
|
||||
ts := &tsnet.Server{
|
||||
Dir: *tailscaleDir,
|
||||
Hostname: *hostname,
|
||||
// Make the stdout logs a clean audit log of connections.
|
||||
Logf: logger.Discard,
|
||||
}
|
||||
|
||||
if os.Getenv("TS_AUTHKEY") == "" {
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http/httptest"
|
||||
"net/netip"
|
||||
@@ -99,8 +100,8 @@ func startNode(t *testing.T, ctx context.Context, controlURL, hostname string) (
|
||||
Store: new(mem.Store),
|
||||
Ephemeral: true,
|
||||
}
|
||||
if !*verboseNodes {
|
||||
s.Logf = logger.Discard
|
||||
if *verboseNodes {
|
||||
s.Logf = log.Printf
|
||||
}
|
||||
t.Cleanup(func() { s.Close() })
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ func main() {
|
||||
}
|
||||
host := os.Args[1]
|
||||
|
||||
uaddr, err := net.ResolveUDPAddr("udp", host+":3478")
|
||||
uaddr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, "3478"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/opt"
|
||||
"tailscale.com/types/persist"
|
||||
"tailscale.com/types/preftype"
|
||||
"tailscale.com/version/distro"
|
||||
@@ -176,9 +177,10 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "bare_up_means_up",
|
||||
flags: []string{},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: false,
|
||||
Hostname: "foo",
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: false,
|
||||
Hostname: "foo",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
@@ -186,12 +188,12 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "losing_hostname",
|
||||
flags: []string{"--accept-dns"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: false,
|
||||
Hostname: "foo",
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AllowSingleHosts: true,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: false,
|
||||
Hostname: "foo",
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --accept-dns --hostname=foo",
|
||||
},
|
||||
@@ -199,11 +201,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "hostname_changing_explicitly",
|
||||
flags: []string{"--hostname=bar"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AllowSingleHosts: true,
|
||||
Hostname: "foo",
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
Hostname: "foo",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
@@ -211,11 +213,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "hostname_changing_empty_explicitly",
|
||||
flags: []string{"--hostname="},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AllowSingleHosts: true,
|
||||
Hostname: "foo",
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
Hostname: "foo",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
@@ -231,11 +233,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "implicit_operator_change",
|
||||
flags: []string{"--hostname=foo"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
OperatorUser: "alice",
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
OperatorUser: "alice",
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
curUser: "eve",
|
||||
want: accidentalUpPrefix + " --hostname=foo --operator=alice",
|
||||
@@ -244,11 +246,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "implicit_operator_matches_shell_user",
|
||||
flags: []string{"--hostname=foo"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
OperatorUser: "alice",
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
OperatorUser: "alice",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
curUser: "alice",
|
||||
want: "",
|
||||
@@ -257,15 +259,15 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "error_advertised_routes_exit_node_removed",
|
||||
flags: []string{"--advertise-routes=10.0.42.0/24"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.42.0/24"),
|
||||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --advertise-routes=10.0.42.0/24 --advertise-exit-node",
|
||||
},
|
||||
@@ -273,15 +275,15 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "advertised_routes_exit_node_removed_explicit",
|
||||
flags: []string{"--advertise-routes=10.0.42.0/24", "--advertise-exit-node=false"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.42.0/24"),
|
||||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
@@ -289,15 +291,15 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "advertised_routes_includes_the_0_routes", // but no --advertise-exit-node
|
||||
flags: []string{"--advertise-routes=11.1.43.0/24,0.0.0.0/0,::/0"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.42.0/24"),
|
||||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
@@ -305,10 +307,10 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "advertise_exit_node", // Issue 1859
|
||||
flags: []string{"--advertise-exit-node"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
@@ -316,14 +318,14 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "advertise_exit_node_over_existing_routes",
|
||||
flags: []string{"--advertise-exit-node"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("1.2.0.0/16"),
|
||||
},
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --advertise-exit-node --advertise-routes=1.2.0.0/16",
|
||||
},
|
||||
@@ -331,15 +333,15 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "advertise_exit_node_over_existing_routes_and_exit_node",
|
||||
flags: []string{"--advertise-exit-node"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("::/0"),
|
||||
netip.MustParsePrefix("1.2.0.0/16"),
|
||||
},
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --advertise-exit-node --advertise-routes=1.2.0.0/16",
|
||||
},
|
||||
@@ -347,12 +349,12 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "exit_node_clearing", // Issue 1777
|
||||
flags: []string{"--exit-node="},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
|
||||
ExitNodeID: "fooID",
|
||||
ExitNodeID: "fooID",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
@@ -360,59 +362,59 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "remove_all_implicit",
|
||||
flags: []string{"--force-reauth"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
WantRunning: true,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
RouteAll: true,
|
||||
AllowSingleHosts: false,
|
||||
ExitNodeIP: netip.MustParseAddr("100.64.5.6"),
|
||||
CorpDNS: false,
|
||||
ShieldsUp: true,
|
||||
AdvertiseTags: []string{"tag:foo", "tag:bar"},
|
||||
Hostname: "myhostname",
|
||||
ForceDaemon: true,
|
||||
WantRunning: true,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
RouteAll: true,
|
||||
ExitNodeIP: netip.MustParseAddr("100.64.5.6"),
|
||||
CorpDNS: false,
|
||||
ShieldsUp: true,
|
||||
AdvertiseTags: []string{"tag:foo", "tag:bar"},
|
||||
Hostname: "myhostname",
|
||||
ForceDaemon: true,
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/16"),
|
||||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
NetfilterMode: preftype.NetfilterNoDivert,
|
||||
OperatorUser: "alice",
|
||||
NetfilterMode: preftype.NetfilterNoDivert,
|
||||
OperatorUser: "alice",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
curUser: "eve",
|
||||
want: accidentalUpPrefix + " --force-reauth --accept-dns=false --accept-routes --advertise-exit-node --advertise-routes=10.0.0.0/16 --advertise-tags=tag:foo,tag:bar --exit-node=100.64.5.6 --host-routes=false --hostname=myhostname --netfilter-mode=nodivert --operator=alice --shields-up",
|
||||
want: accidentalUpPrefix + " --force-reauth --accept-dns=false --accept-routes --advertise-exit-node --advertise-routes=10.0.0.0/16 --advertise-tags=tag:foo,tag:bar --exit-node=100.64.5.6 --hostname=myhostname --netfilter-mode=nodivert --operator=alice --shields-up",
|
||||
},
|
||||
{
|
||||
name: "remove_all_implicit_except_hostname",
|
||||
flags: []string{"--hostname=newhostname"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
WantRunning: true,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
RouteAll: true,
|
||||
AllowSingleHosts: false,
|
||||
ExitNodeIP: netip.MustParseAddr("100.64.5.6"),
|
||||
CorpDNS: false,
|
||||
ShieldsUp: true,
|
||||
AdvertiseTags: []string{"tag:foo", "tag:bar"},
|
||||
Hostname: "myhostname",
|
||||
ForceDaemon: true,
|
||||
WantRunning: true,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
RouteAll: true,
|
||||
ExitNodeIP: netip.MustParseAddr("100.64.5.6"),
|
||||
CorpDNS: false,
|
||||
ShieldsUp: true,
|
||||
AdvertiseTags: []string{"tag:foo", "tag:bar"},
|
||||
Hostname: "myhostname",
|
||||
ForceDaemon: true,
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/16"),
|
||||
},
|
||||
NetfilterMode: preftype.NetfilterNoDivert,
|
||||
OperatorUser: "alice",
|
||||
NetfilterMode: preftype.NetfilterNoDivert,
|
||||
OperatorUser: "alice",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
curUser: "eve",
|
||||
want: accidentalUpPrefix + " --hostname=newhostname --accept-dns=false --accept-routes --advertise-routes=10.0.0.0/16 --advertise-tags=tag:foo,tag:bar --exit-node=100.64.5.6 --host-routes=false --netfilter-mode=nodivert --operator=alice --shields-up",
|
||||
want: accidentalUpPrefix + " --hostname=newhostname --accept-dns=false --accept-routes --advertise-routes=10.0.0.0/16 --advertise-tags=tag:foo,tag:bar --exit-node=100.64.5.6 --netfilter-mode=nodivert --operator=alice --shields-up",
|
||||
},
|
||||
{
|
||||
name: "loggedout_is_implicit",
|
||||
flags: []string{"--hostname=foo"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
LoggedOut: true,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
LoggedOut: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: "", // not an error. LoggedOut is implicit.
|
||||
},
|
||||
@@ -422,10 +424,9 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "make_windows_exit_node",
|
||||
flags: []string{"--advertise-exit-node"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
RouteAll: true,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
RouteAll: true,
|
||||
|
||||
// And assume this no-op accidental pre-1.8 value:
|
||||
NoSNAT: true,
|
||||
@@ -437,8 +438,7 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "ignore_netfilter_change_non_linux",
|
||||
flags: []string{"--accept-dns"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
|
||||
NetfilterMode: preftype.NetfilterNoDivert, // we never had this bug, but pretend it got set non-zero on Windows somehow
|
||||
},
|
||||
@@ -449,15 +449,15 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "operator_losing_routes_step1", // https://twitter.com/EXPbits/status/1390418145047887877
|
||||
flags: []string{"--operator=expbits"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("::/0"),
|
||||
netip.MustParsePrefix("1.2.0.0/16"),
|
||||
},
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --operator=expbits --advertise-exit-node --advertise-routes=1.2.0.0/16",
|
||||
},
|
||||
@@ -465,15 +465,15 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "operator_losing_routes_step2", // https://twitter.com/EXPbits/status/1390418145047887877
|
||||
flags: []string{"--operator=expbits", "--advertise-routes=1.2.0.0/16"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("::/0"),
|
||||
netip.MustParsePrefix("1.2.0.0/16"),
|
||||
},
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --advertise-routes=1.2.0.0/16 --operator=expbits --advertise-exit-node",
|
||||
},
|
||||
@@ -481,13 +481,13 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "errors_preserve_explicit_flags",
|
||||
flags: []string{"--reset", "--force-reauth=false", "--authkey=secretrand"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: false,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AllowSingleHosts: true,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: false,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
|
||||
Hostname: "foo",
|
||||
Hostname: "foo",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --auth-key=secretrand --force-reauth=false --reset --hostname=foo",
|
||||
},
|
||||
@@ -495,12 +495,12 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "error_exit_node_omit_with_ip_pref",
|
||||
flags: []string{"--hostname=foo"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
|
||||
ExitNodeIP: netip.MustParseAddr("100.64.5.4"),
|
||||
ExitNodeIP: netip.MustParseAddr("100.64.5.4"),
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --hostname=foo --exit-node=100.64.5.4",
|
||||
},
|
||||
@@ -509,12 +509,12 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
flags: []string{"--hostname=foo"},
|
||||
curExitNodeIP: netip.MustParseAddr("100.64.5.7"),
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
|
||||
ExitNodeID: "some_stable_id",
|
||||
ExitNodeID: "some_stable_id",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --hostname=foo --exit-node=100.64.5.7",
|
||||
},
|
||||
@@ -523,13 +523,13 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
flags: []string{"--hostname=foo"},
|
||||
curExitNodeIP: netip.MustParseAddr("100.2.3.4"),
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
|
||||
ExitNodeAllowLANAccess: true,
|
||||
ExitNodeID: "some_stable_id",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --hostname=foo --exit-node-allow-lan-access --exit-node=100.2.3.4",
|
||||
},
|
||||
@@ -537,10 +537,10 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "ignore_login_server_synonym",
|
||||
flags: []string{"--login-server=https://controlplane.tailscale.com"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: "", // not an error
|
||||
},
|
||||
@@ -548,10 +548,10 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "ignore_login_server_synonym_on_other_change",
|
||||
flags: []string{"--netfilter-mode=off"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: false,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: false,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
want: accidentalUpPrefix + " --netfilter-mode=off --accept-dns=false",
|
||||
},
|
||||
@@ -561,11 +561,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "synology_permit_omit_accept_routes",
|
||||
flags: []string{"--hostname=foo"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
AllowSingleHosts: true,
|
||||
RouteAll: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
RouteAll: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
goos: "linux",
|
||||
distro: distro.Synology,
|
||||
@@ -577,11 +577,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "not_synology_dont_permit_omit_accept_routes",
|
||||
flags: []string{"--hostname=foo"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
AllowSingleHosts: true,
|
||||
RouteAll: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
RouteAll: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
goos: "linux",
|
||||
distro: "", // not Synology
|
||||
@@ -591,11 +591,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) {
|
||||
name: "profile_name_ignored_in_up",
|
||||
flags: []string{"--hostname=foo"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
AllowSingleHosts: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ProfileName: "foo",
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ProfileName: "foo",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
goos: "linux",
|
||||
want: "",
|
||||
@@ -658,10 +658,9 @@ func TestPrefsFromUpArgs(t *testing.T) {
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: true,
|
||||
NoSNAT: false,
|
||||
NoStatefulFiltering: "false",
|
||||
NoStatefulFiltering: "true",
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
CorpDNS: true,
|
||||
AllowSingleHosts: true,
|
||||
AutoUpdate: ipn.AutoUpdatePrefs{
|
||||
Check: true,
|
||||
},
|
||||
@@ -675,10 +674,9 @@ func TestPrefsFromUpArgs(t *testing.T) {
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: true,
|
||||
CorpDNS: true,
|
||||
AllowSingleHosts: true,
|
||||
RouteAll: true,
|
||||
NoSNAT: false,
|
||||
NoStatefulFiltering: "false",
|
||||
NoStatefulFiltering: "true",
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AutoUpdate: ipn.AutoUpdatePrefs{
|
||||
Check: true,
|
||||
@@ -689,15 +687,14 @@ func TestPrefsFromUpArgs(t *testing.T) {
|
||||
name: "advertise_default_route",
|
||||
args: upArgsFromOSArgs("linux", "--advertise-exit-node"),
|
||||
want: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: true,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
WantRunning: true,
|
||||
CorpDNS: true,
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
NoStatefulFiltering: "false",
|
||||
NoStatefulFiltering: "true",
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AutoUpdate: ipn.AutoUpdatePrefs{
|
||||
Check: true,
|
||||
@@ -922,6 +919,9 @@ func TestPrefFlagMapping(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
switch prefName {
|
||||
case "AllowSingleHosts":
|
||||
// Fake pref for downgrade compat. See #12058.
|
||||
continue
|
||||
case "WantRunning", "Persist", "LoggedOut":
|
||||
// All explicitly handled (ignored) by checkForAccidentalSettingReverts.
|
||||
continue
|
||||
@@ -1029,7 +1029,6 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
AdvertiseRoutesSet: true,
|
||||
AdvertiseTagsSet: true,
|
||||
AllowSingleHostsSet: true,
|
||||
AppConnectorSet: true,
|
||||
ControlURLSet: true,
|
||||
CorpDNSSet: true,
|
||||
@@ -1062,11 +1061,11 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
name: "change_login_server",
|
||||
flags: []string{"--login-server=https://localhost:1000"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
env: upCheckEnv{backendState: "Running"},
|
||||
wantSimpleUp: true,
|
||||
@@ -1077,11 +1076,11 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
name: "change_tags",
|
||||
flags: []string{"--advertise-tags=tag:foo"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
env: upCheckEnv{backendState: "Running"},
|
||||
},
|
||||
@@ -1090,11 +1089,11 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
name: "explicit_empty_operator",
|
||||
flags: []string{"--operator="},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
AllowSingleHosts: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
OperatorUser: "somebody",
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
OperatorUser: "somebody",
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
env: upCheckEnv{user: "somebody", backendState: "Running"},
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
@@ -1111,11 +1110,11 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
name: "enable_ssh",
|
||||
flags: []string{"--ssh"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
RunSSHSet: true,
|
||||
@@ -1132,12 +1131,12 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
name: "disable_ssh",
|
||||
flags: []string{"--ssh=false"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
RunSSH: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
CorpDNS: true,
|
||||
RunSSH: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
RunSSHSet: true,
|
||||
@@ -1157,12 +1156,12 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
flags: []string{"--ssh=false"},
|
||||
sshOverTailscale: true,
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
RunSSH: true,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
RunSSH: true,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
RunSSHSet: true,
|
||||
@@ -1181,11 +1180,11 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
flags: []string{"--ssh=true"},
|
||||
sshOverTailscale: true,
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
RunSSHSet: true,
|
||||
@@ -1204,11 +1203,11 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
flags: []string{"--ssh=true", "--accept-risk=lose-ssh"},
|
||||
sshOverTailscale: true,
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
RunSSHSet: true,
|
||||
@@ -1226,12 +1225,12 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
flags: []string{"--ssh=false", "--accept-risk=lose-ssh"},
|
||||
sshOverTailscale: true,
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
RunSSH: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
Persist: &persist.Persist{UserProfile: tailcfg.UserProfile{LoginName: "crawshaw.github"}},
|
||||
CorpDNS: true,
|
||||
RunSSH: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
RunSSHSet: true,
|
||||
@@ -1249,10 +1248,10 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
flags: []string{"--force-reauth"},
|
||||
sshOverTailscale: true,
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
env: upCheckEnv{backendState: "Running"},
|
||||
wantErrSubtr: "aborted, no changes made",
|
||||
@@ -1262,10 +1261,10 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
flags: []string{"--force-reauth", "--accept-risk=lose-ssh"},
|
||||
sshOverTailscale: true,
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: "https://login.tailscale.com",
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
wantJustEditMP: nil,
|
||||
env: upCheckEnv{backendState: "Running"},
|
||||
@@ -1274,10 +1273,10 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
name: "advertise_connector",
|
||||
flags: []string{"--advertise-connector"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
AppConnectorSet: true,
|
||||
@@ -1294,13 +1293,13 @@ func TestUpdatePrefs(t *testing.T) {
|
||||
name: "no_advertise_connector",
|
||||
flags: []string{"--advertise-connector=false"},
|
||||
curPrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
CorpDNS: true,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
AppConnector: ipn.AppConnectorPrefs{
|
||||
Advertise: true,
|
||||
},
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
},
|
||||
wantJustEditMP: &ipn.MaskedPrefs{
|
||||
AppConnectorSet: true,
|
||||
|
||||
@@ -127,13 +127,13 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error {
|
||||
|
||||
printf("\nReport:\n")
|
||||
printf("\t* UDP: %v\n", report.UDP)
|
||||
if report.GlobalV4 != "" {
|
||||
printf("\t* IPv4: yes, %v\n", report.GlobalV4)
|
||||
if report.GlobalV4.IsValid() {
|
||||
printf("\t* IPv4: yes, %s\n", report.GlobalV4)
|
||||
} else {
|
||||
printf("\t* IPv4: (no addr found)\n")
|
||||
}
|
||||
if report.GlobalV6 != "" {
|
||||
printf("\t* IPv6: yes, %v\n", report.GlobalV6)
|
||||
if report.GlobalV6.IsValid() {
|
||||
printf("\t* IPv6: yes, %s\n", report.GlobalV6)
|
||||
} else if report.IPv6 {
|
||||
printf("\t* IPv6: (no addr found)\n")
|
||||
} else if report.OSHasIPv6 {
|
||||
@@ -142,7 +142,6 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error {
|
||||
printf("\t* IPv6: no, unavailable in OS\n")
|
||||
}
|
||||
printf("\t* MappingVariesByDestIP: %v\n", report.MappingVariesByDestIP)
|
||||
printf("\t* HairPinning: %v\n", report.HairPinning)
|
||||
printf("\t* PortMapping: %v\n", portMapping(report))
|
||||
if report.CaptivePortal != "" {
|
||||
printf("\t* CaptivePortal: %v\n", report.CaptivePortal)
|
||||
|
||||
@@ -222,7 +222,8 @@ func runNetworkLockStatus(ctx context.Context, args []string) error {
|
||||
|
||||
if st.Enabled && st.NodeKey != nil && !st.PublicKey.IsZero() {
|
||||
if st.NodeKeySigned {
|
||||
fmt.Println("This node is accessible under tailnet lock.")
|
||||
fmt.Println("This node is accessible under tailnet lock. Node signature:")
|
||||
fmt.Println(st.NodeKeySignature.String())
|
||||
} else {
|
||||
fmt.Println("This node is LOCKED OUT by tailnet-lock, and action is required to establish connectivity.")
|
||||
fmt.Printf("Run the following command on a node with a trusted key:\n\ttailscale lock sign %v %s\n", st.NodeKey, st.PublicKey.CLIString())
|
||||
|
||||
@@ -58,6 +58,9 @@ type setArgsT struct {
|
||||
updateCheck bool
|
||||
updateApply bool
|
||||
postureChecking bool
|
||||
snat bool
|
||||
statefulFiltering bool
|
||||
netfilterMode string
|
||||
}
|
||||
|
||||
func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet {
|
||||
@@ -98,6 +101,10 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet {
|
||||
setf.StringVar(&setArgs.opUser, "operator", "", "Unix username to allow to operate on tailscaled without sudo")
|
||||
}
|
||||
switch goos {
|
||||
case "linux":
|
||||
setf.BoolVar(&setArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes")
|
||||
setf.BoolVar(&setArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)")
|
||||
setf.StringVar(&setArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)")
|
||||
case "windows":
|
||||
setf.BoolVar(&setArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)")
|
||||
}
|
||||
@@ -121,6 +128,9 @@ func runSet(ctx context.Context, args []string) (retErr error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// Note that even though we set the values here regardless of whether the
|
||||
// user passed the flag, the value is only used if the user passed the flag.
|
||||
// See updateMaskedPrefsFromUpOrSetFlag.
|
||||
maskedPrefs := &ipn.MaskedPrefs{
|
||||
Prefs: ipn.Prefs{
|
||||
ProfileName: setArgs.profileName,
|
||||
@@ -132,6 +142,7 @@ func runSet(ctx context.Context, args []string) (retErr error) {
|
||||
RunWebClient: setArgs.runWebClient,
|
||||
Hostname: setArgs.hostname,
|
||||
OperatorUser: setArgs.opUser,
|
||||
NoSNAT: !setArgs.snat,
|
||||
ForceDaemon: setArgs.forceDaemon,
|
||||
AutoUpdate: ipn.AutoUpdatePrefs{
|
||||
Check: setArgs.updateCheck,
|
||||
@@ -140,10 +151,22 @@ func runSet(ctx context.Context, args []string) (retErr error) {
|
||||
AppConnector: ipn.AppConnectorPrefs{
|
||||
Advertise: setArgs.advertiseConnector,
|
||||
},
|
||||
PostureChecking: setArgs.postureChecking,
|
||||
PostureChecking: setArgs.postureChecking,
|
||||
NoStatefulFiltering: opt.NewBool(!setArgs.statefulFiltering),
|
||||
},
|
||||
}
|
||||
|
||||
if effectiveGOOS() == "linux" {
|
||||
nfMode, warning, err := netfilterModeFromFlag(setArgs.netfilterMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if warning != "" {
|
||||
warnf(warning)
|
||||
}
|
||||
maskedPrefs.Prefs.NetfilterMode = nfMode
|
||||
}
|
||||
|
||||
if setArgs.exitNodeIP != "" {
|
||||
if err := maskedPrefs.Prefs.SetExitNodeIP(setArgs.exitNodeIP, st); err != nil {
|
||||
var e ipn.ExitNodeLocalIPError
|
||||
|
||||
@@ -104,7 +104,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet {
|
||||
upf.StringVar(&upArgs.server, "login-server", ipn.DefaultControlURL, "base URL of control server")
|
||||
upf.BoolVar(&upArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes")
|
||||
upf.BoolVar(&upArgs.acceptDNS, "accept-dns", true, "accept DNS configuration from the admin panel")
|
||||
upf.BoolVar(&upArgs.singleRoutes, "host-routes", true, hidden+"install host routes to other Tailscale nodes")
|
||||
upf.Var(notFalseVar{}, "host-routes", hidden+"install host routes to other Tailscale nodes (must be true as of Tailscale 1.67+)")
|
||||
upf.StringVar(&upArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP or base name) for internet traffic, or empty string to not use an exit node")
|
||||
upf.BoolVar(&upArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node")
|
||||
upf.BoolVar(&upArgs.shieldsUp, "shields-up", false, "don't allow incoming connections")
|
||||
@@ -121,7 +121,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet {
|
||||
switch goos {
|
||||
case "linux":
|
||||
upf.BoolVar(&upArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes")
|
||||
upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", true, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)")
|
||||
upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)")
|
||||
upf.StringVar(&upArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)")
|
||||
case "windows":
|
||||
upf.BoolVar(&upArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)")
|
||||
@@ -143,6 +143,18 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet {
|
||||
return upf
|
||||
}
|
||||
|
||||
// notFalseVar is is a flag.Value that can only be "true", if set.
|
||||
type notFalseVar struct{}
|
||||
|
||||
func (notFalseVar) IsBoolFlag() bool { return true }
|
||||
func (notFalseVar) Set(v string) error {
|
||||
if v != "true" {
|
||||
return fmt.Errorf("unsupported value; only 'true' is allowed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (notFalseVar) String() string { return "true" }
|
||||
|
||||
func defaultNetfilterMode() string {
|
||||
if distro.Get() == distro.Synology {
|
||||
return "off"
|
||||
@@ -156,7 +168,6 @@ type upArgsT struct {
|
||||
server string
|
||||
acceptRoutes bool
|
||||
acceptDNS bool
|
||||
singleRoutes bool
|
||||
exitNodeIP string
|
||||
exitNodeAllowLANAccess bool
|
||||
shieldsUp bool
|
||||
@@ -278,7 +289,6 @@ func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goo
|
||||
|
||||
prefs.ExitNodeAllowLANAccess = upArgs.exitNodeAllowLANAccess
|
||||
prefs.CorpDNS = upArgs.acceptDNS
|
||||
prefs.AllowSingleHosts = upArgs.singleRoutes
|
||||
prefs.ShieldsUp = upArgs.shieldsUp
|
||||
prefs.RunSSH = upArgs.runSSH
|
||||
prefs.RunWebClient = upArgs.runWebClient
|
||||
@@ -295,25 +305,42 @@ func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goo
|
||||
|
||||
// Backfills for NoStatefulFiltering occur when loading a profile; just set it explicitly here.
|
||||
prefs.NoStatefulFiltering.Set(!upArgs.statefulFiltering)
|
||||
|
||||
switch upArgs.netfilterMode {
|
||||
case "on":
|
||||
prefs.NetfilterMode = preftype.NetfilterOn
|
||||
case "nodivert":
|
||||
prefs.NetfilterMode = preftype.NetfilterNoDivert
|
||||
warnf("netfilter=nodivert; add iptables calls to ts-* chains manually.")
|
||||
case "off":
|
||||
prefs.NetfilterMode = preftype.NetfilterOff
|
||||
if defaultNetfilterMode() != "off" {
|
||||
warnf("netfilter=off; configure iptables yourself.")
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid value --netfilter-mode=%q", upArgs.netfilterMode)
|
||||
v, warning, err := netfilterModeFromFlag(upArgs.netfilterMode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prefs.NetfilterMode = v
|
||||
if warning != "" {
|
||||
warnf(warning)
|
||||
}
|
||||
}
|
||||
return prefs, nil
|
||||
}
|
||||
|
||||
// netfilterModeFromFlag returns the preftype.NetfilterMode for the provided
|
||||
// flag value. It returns a warning if there is something the user should know
|
||||
// about the value.
|
||||
func netfilterModeFromFlag(v string) (_ preftype.NetfilterMode, warning string, _ error) {
|
||||
switch v {
|
||||
case "on", "nodivert", "off":
|
||||
default:
|
||||
return preftype.NetfilterOn, "", fmt.Errorf("invalid value --netfilter-mode=%q", v)
|
||||
}
|
||||
m, err := preftype.ParseNetfilterMode(v)
|
||||
if err != nil {
|
||||
return preftype.NetfilterOn, "", err
|
||||
}
|
||||
switch m {
|
||||
case preftype.NetfilterNoDivert:
|
||||
warning = "netfilter=nodivert; add iptables calls to ts-* chains manually."
|
||||
case preftype.NetfilterOff:
|
||||
if defaultNetfilterMode() != "off" {
|
||||
warning = "netfilter=off; configure iptables yourself."
|
||||
}
|
||||
}
|
||||
return m, warning, nil
|
||||
}
|
||||
|
||||
// updatePrefs returns how to edit preferences based on the
|
||||
// flag-provided 'prefs' and the currently active 'curPrefs'.
|
||||
//
|
||||
@@ -723,7 +750,6 @@ func init() {
|
||||
addPrefFlagMapping("accept-dns", "CorpDNS")
|
||||
addPrefFlagMapping("accept-routes", "RouteAll")
|
||||
addPrefFlagMapping("advertise-tags", "AdvertiseTags")
|
||||
addPrefFlagMapping("host-routes", "AllowSingleHosts")
|
||||
addPrefFlagMapping("hostname", "Hostname")
|
||||
addPrefFlagMapping("login-server", "ControlURL")
|
||||
addPrefFlagMapping("netfilter-mode", "NetfilterMode")
|
||||
@@ -762,7 +788,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) {
|
||||
// correspond to an ipn.Pref.
|
||||
func preflessFlag(flagName string) bool {
|
||||
switch flagName {
|
||||
case "auth-key", "force-reauth", "reset", "qr", "json", "timeout", "accept-risk":
|
||||
case "auth-key", "force-reauth", "reset", "qr", "json", "timeout", "accept-risk", "host-routes":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@@ -859,11 +885,26 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck
|
||||
// Issue 6811. Ignore on Synology.
|
||||
continue
|
||||
}
|
||||
if flagName == "stateful-filtering" && valCur == true && valNew == false && env.goos == "linux" {
|
||||
// See https://github.com/tailscale/tailscale/issues/12307
|
||||
// Stateful filtering was on by default in tailscale 1.66.0-1.66.3, then off in 1.66.4.
|
||||
// This broke Tailscale installations in containerized
|
||||
// environments that use the default containerboot
|
||||
// configuration that configures tailscale using
|
||||
// 'tailscale up' command, which requires that all
|
||||
// previously set flags are explicitly provided on
|
||||
// subsequent restarts.
|
||||
continue
|
||||
}
|
||||
missing = append(missing, fmtFlagValueArg(flagName, valCur))
|
||||
}
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Some previously provided flags are missing. This run of 'tailscale
|
||||
// up' will error out.
|
||||
|
||||
sort.Strings(missing)
|
||||
|
||||
// Compute the stringification of the explicitly provided args in flagSet
|
||||
@@ -958,8 +999,6 @@ func prefsToFlags(env upCheckEnv, prefs *ipn.Prefs) (flagVal map[string]any) {
|
||||
set(prefs.ControlURL)
|
||||
case "accept-routes":
|
||||
set(prefs.RouteAll)
|
||||
case "host-routes":
|
||||
set(prefs.AllowSingleHosts)
|
||||
case "accept-dns":
|
||||
set(prefs.CorpDNS)
|
||||
case "shields-up":
|
||||
|
||||
@@ -299,7 +299,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
|
||||
regexp from github.com/coreos/go-iptables/iptables+
|
||||
regexp/syntax from regexp
|
||||
runtime/debug from nhooyr.io/websocket/internal/xsync+
|
||||
runtime/trace from testing
|
||||
slices from tailscale.com/client/web+
|
||||
sort from archive/tar+
|
||||
strconv from archive/tar+
|
||||
@@ -307,7 +306,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
|
||||
sync from archive/tar+
|
||||
sync/atomic from context+
|
||||
syscall from archive/tar+
|
||||
testing from tailscale.com/util/syspolicy
|
||||
text/tabwriter from github.com/peterbourgon/ff/v3/ffcli+
|
||||
text/template from html/template
|
||||
text/template/parse from html/template+
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
func TestDeps(t *testing.T) {
|
||||
deptest.DepChecker{
|
||||
BadDeps: map[string]string{
|
||||
"testing": "do not use testing package in production code",
|
||||
"gvisor.dev/gvisor/pkg/buffer": "https://github.com/tailscale/tailscale/issues/9756",
|
||||
"gvisor.dev/gvisor/pkg/cpuid": "https://github.com/tailscale/tailscale/issues/9756",
|
||||
"gvisor.dev/gvisor/pkg/tcpip": "https://github.com/tailscale/tailscale/issues/9756",
|
||||
|
||||
@@ -144,6 +144,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream
|
||||
LD github.com/pkg/sftp from tailscale.com/ssh/tailssh
|
||||
LD github.com/pkg/sftp/internal/encoding/ssh/filexfer from github.com/pkg/sftp
|
||||
D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack
|
||||
L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf+
|
||||
W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient
|
||||
W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket
|
||||
@@ -319,6 +320,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
|
||||
tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+
|
||||
tailscale.com/net/wsconn from tailscale.com/control/controlhttp+
|
||||
tailscale.com/omit from tailscale.com/ipn/conffile
|
||||
tailscale.com/paths from tailscale.com/client/tailscale+
|
||||
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal
|
||||
tailscale.com/posture from tailscale.com/ipn/ipnlocal
|
||||
@@ -439,7 +441,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
golang.org/x/net/http2 from golang.org/x/net/http2/h2c+
|
||||
golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal
|
||||
golang.org/x/net/http2/hpack from golang.org/x/net/http2+
|
||||
golang.org/x/net/icmp from tailscale.com/net/ping
|
||||
golang.org/x/net/icmp from tailscale.com/net/ping+
|
||||
golang.org/x/net/idna from golang.org/x/net/http/httpguts+
|
||||
golang.org/x/net/ipv4 from github.com/miekg/dns+
|
||||
golang.org/x/net/ipv6 from github.com/miekg/dns+
|
||||
@@ -552,7 +554,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
regexp/syntax from regexp
|
||||
runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+
|
||||
runtime/pprof from net/http/pprof+
|
||||
runtime/trace from net/http/pprof+
|
||||
runtime/trace from net/http/pprof
|
||||
slices from tailscale.com/appc+
|
||||
sort from archive/tar+
|
||||
strconv from archive/tar+
|
||||
@@ -560,7 +562,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
sync from archive/tar+
|
||||
sync/atomic from context+
|
||||
syscall from archive/tar+
|
||||
testing from tailscale.com/util/syspolicy
|
||||
text/tabwriter from runtime/pprof
|
||||
text/template from html/template
|
||||
text/template/parse from html/template+
|
||||
|
||||
@@ -118,7 +118,7 @@ var args struct {
|
||||
tunname string
|
||||
|
||||
cleanUp bool
|
||||
confFile string
|
||||
confFile string // empty, file path, or "vm:user-data"
|
||||
debug string
|
||||
port uint16
|
||||
statepath string
|
||||
@@ -169,7 +169,7 @@ func main() {
|
||||
flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket")
|
||||
flag.BoolVar(&printVersion, "version", false, "print version information and exit")
|
||||
flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support")
|
||||
flag.StringVar(&args.confFile, "config", "", "path to config file")
|
||||
flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)")
|
||||
|
||||
if len(os.Args) > 0 && filepath.Base(os.Args[0]) == "tailscale" && beCLI != nil {
|
||||
beCLI()
|
||||
@@ -548,14 +548,25 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID
|
||||
return ok
|
||||
}
|
||||
dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) {
|
||||
// Note: don't just return ns.DialContextTCP or we'll
|
||||
// return an interface containing a nil pointer.
|
||||
// Note: don't just return ns.DialContextTCP or we'll return
|
||||
// *gonet.TCPConn(nil) instead of a nil interface which trips up
|
||||
// callers.
|
||||
tcpConn, err := ns.DialContextTCP(ctx, dst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return tcpConn, nil
|
||||
}
|
||||
dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) {
|
||||
// Note: don't just return ns.DialContextUDP or we'll return
|
||||
// *gonet.UDPConn(nil) instead of a nil interface which trips up
|
||||
// callers.
|
||||
udpConn, err := ns.DialContextUDP(ctx, dst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return udpConn, nil
|
||||
}
|
||||
}
|
||||
if socksListener != nil || httpProxyListener != nil {
|
||||
var addrs []string
|
||||
|
||||
@@ -20,6 +20,7 @@ func TestDeps(t *testing.T) {
|
||||
GOOS: "darwin",
|
||||
GOARCH: "arm64",
|
||||
BadDeps: map[string]string{
|
||||
"testing": "do not use testing package in production code",
|
||||
"gvisor.dev/gvisor/pkg/hostarch": "will crash on non-4K page sizes; see https://github.com/tailscale/tailscale/issues/8658",
|
||||
},
|
||||
}.Check(t)
|
||||
@@ -28,6 +29,7 @@ func TestDeps(t *testing.T) {
|
||||
GOOS: "linux",
|
||||
GOARCH: "arm64",
|
||||
BadDeps: map[string]string{
|
||||
"testing": "do not use testing package in production code",
|
||||
"gvisor.dev/gvisor/pkg/hostarch": "will crash on non-4K page sizes; see https://github.com/tailscale/tailscale/issues/8658",
|
||||
},
|
||||
}.Check(t)
|
||||
|
||||
@@ -298,11 +298,10 @@ func (i *jsIPN) run(jsCallbacks js.Value) {
|
||||
go func() {
|
||||
err := i.lb.Start(ipn.Options{
|
||||
UpdatePrefs: &ipn.Prefs{
|
||||
ControlURL: i.controlURL,
|
||||
RouteAll: false,
|
||||
AllowSingleHosts: true,
|
||||
WantRunning: true,
|
||||
Hostname: i.hostname,
|
||||
ControlURL: i.controlURL,
|
||||
RouteAll: false,
|
||||
WantRunning: true,
|
||||
Hostname: i.hostname,
|
||||
},
|
||||
AuthKey: i.authKey,
|
||||
})
|
||||
|
||||
@@ -40,7 +40,6 @@ import (
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/lazy"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/must"
|
||||
@@ -95,8 +94,8 @@ func main() {
|
||||
ts := &tsnet.Server{
|
||||
Hostname: "idp",
|
||||
}
|
||||
if !*flagVerbose {
|
||||
ts.Logf = logger.Discard
|
||||
if *flagVerbose {
|
||||
ts.Logf = log.Printf
|
||||
}
|
||||
st, err = ts.Up(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -26,9 +26,8 @@ import (
|
||||
|
||||
type LoginGoal struct {
|
||||
_ structs.Incomparable
|
||||
token *tailcfg.Oauth2Token // oauth token to use when logging in
|
||||
flags LoginFlags // flags to use when logging in
|
||||
url string // auth url that needs to be visited
|
||||
flags LoginFlags // flags to use when logging in
|
||||
url string // auth url that needs to be visited
|
||||
}
|
||||
|
||||
var _ Client = (*Auto)(nil)
|
||||
@@ -338,7 +337,7 @@ func (c *Auto) authRoutine() {
|
||||
url, err = c.direct.WaitLoginURL(ctx, goal.url)
|
||||
f = "WaitLoginURL"
|
||||
} else {
|
||||
url, err = c.direct.TryLogin(ctx, goal.token, goal.flags)
|
||||
url, err = c.direct.TryLogin(ctx, goal.flags)
|
||||
f = "TryLogin"
|
||||
}
|
||||
if err != nil {
|
||||
@@ -612,8 +611,8 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Auto) Login(t *tailcfg.Oauth2Token, flags LoginFlags) {
|
||||
c.logf("client.Login(%v, %v)", t != nil, flags)
|
||||
func (c *Auto) Login(flags LoginFlags) {
|
||||
c.logf("client.Login(%v)", flags)
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
@@ -625,7 +624,6 @@ func (c *Auto) Login(t *tailcfg.Oauth2Token, flags LoginFlags) {
|
||||
}
|
||||
c.wantLoggedIn = true
|
||||
c.loginGoal = &LoginGoal{
|
||||
token: t,
|
||||
flags: flags,
|
||||
}
|
||||
c.cancelMapCtxLocked()
|
||||
|
||||
@@ -45,7 +45,7 @@ type Client interface {
|
||||
// LoginFinished flag (on success) or an auth URL (if further
|
||||
// interaction is needed). It merely sets the process in motion,
|
||||
// and doesn't wait for it to complete.
|
||||
Login(*tailcfg.Oauth2Token, LoginFlags)
|
||||
Login(LoginFlags)
|
||||
// Logout starts a synchronous logout process. It doesn't return
|
||||
// until the logout operation has been completed.
|
||||
Logout(context.Context) error
|
||||
|
||||
@@ -401,12 +401,12 @@ func (c *Direct) TryLogout(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Direct) TryLogin(ctx context.Context, t *tailcfg.Oauth2Token, flags LoginFlags) (url string, err error) {
|
||||
func (c *Direct) TryLogin(ctx context.Context, flags LoginFlags) (url string, err error) {
|
||||
if strings.Contains(c.serverURL, "controlplane.tailscale.com") && envknob.Bool("TS_PANIC_IF_HIT_MAIN_CONTROL") {
|
||||
panic(fmt.Sprintf("[unexpected] controlclient: TryLogin called on %s; tainted=%v", c.serverURL, c.panicOnUse))
|
||||
}
|
||||
c.logf("[v1] direct.TryLogin(token=%v, flags=%v)", t != nil, flags)
|
||||
return c.doLoginOrRegen(ctx, loginOpt{Token: t, Flags: flags})
|
||||
c.logf("[v1] direct.TryLogin(flags=%v)", flags)
|
||||
return c.doLoginOrRegen(ctx, loginOpt{Flags: flags})
|
||||
}
|
||||
|
||||
// WaitLoginURL sits in a long poll waiting for the user to authenticate at url.
|
||||
@@ -441,7 +441,6 @@ func (c *Direct) SetExpirySooner(ctx context.Context, expiry time.Time) error {
|
||||
}
|
||||
|
||||
type loginOpt struct {
|
||||
Token *tailcfg.Oauth2Token
|
||||
Flags LoginFlags
|
||||
Regen bool // generate a new nodekey, can be overridden in doLogin
|
||||
URL string
|
||||
@@ -559,7 +558,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new
|
||||
|
||||
var nodeKeySignature tkatype.MarshaledSignature
|
||||
if !oldNodeKey.IsZero() && opt.OldNodeKeySignature != nil {
|
||||
if nodeKeySignature, err = resignNKS(persist.NetworkLockKey, tryingNewKey.Public(), opt.OldNodeKeySignature); err != nil {
|
||||
if nodeKeySignature, err = tka.ResignNKS(persist.NetworkLockKey, tryingNewKey.Public(), opt.OldNodeKeySignature); err != nil {
|
||||
c.logf("Failed re-signing node-key signature: %v", err)
|
||||
}
|
||||
} else if isWrapped {
|
||||
@@ -610,10 +609,9 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new
|
||||
c.logf("RegisterReq: onode=%v node=%v fup=%v nks=%v",
|
||||
request.OldNodeKey.ShortString(),
|
||||
request.NodeKey.ShortString(), opt.URL != "", len(nodeKeySignature) > 0)
|
||||
if opt.Token != nil || authKey != "" {
|
||||
if authKey != "" {
|
||||
request.Auth = &tailcfg.RegisterResponseAuth{
|
||||
Oauth2Token: opt.Token,
|
||||
AuthKey: authKey,
|
||||
AuthKey: authKey,
|
||||
}
|
||||
}
|
||||
err = signRegisterRequest(&request, c.serverURL, c.serverLegacyKey, machinePrivKey.Public())
|
||||
@@ -731,45 +729,6 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new
|
||||
return false, resp.AuthURL, nil, nil
|
||||
}
|
||||
|
||||
// resignNKS re-signs a node-key signature for a new node-key.
|
||||
//
|
||||
// This only matters on network-locked tailnets, because node-key signatures are
|
||||
// how other nodes know that a node-key is authentic. When the node-key is
|
||||
// rotated then the existing signature becomes invalid, so this function is
|
||||
// responsible for generating a new wrapping signature to certify the new node-key.
|
||||
//
|
||||
// The signature itself is a SigRotation signature, which embeds the old signature
|
||||
// and certifies the new node-key as a replacement for the old by signing the new
|
||||
// signature with RotationPubkey (which is the node's own network-lock key).
|
||||
func resignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.MarshaledSignature) (tkatype.MarshaledSignature, error) {
|
||||
var oldSig tka.NodeKeySignature
|
||||
if err := oldSig.Unserialize(oldNKS); err != nil {
|
||||
return nil, fmt.Errorf("decoding NKS: %w", err)
|
||||
}
|
||||
|
||||
nk, err := nodeKey.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshalling node-key: %w", err)
|
||||
}
|
||||
|
||||
if bytes.Equal(nk, oldSig.Pubkey) {
|
||||
// The old signature is valid for the node-key we are using, so just
|
||||
// use it verbatim.
|
||||
return oldNKS, nil
|
||||
}
|
||||
|
||||
newSig := tka.NodeKeySignature{
|
||||
SigKind: tka.SigRotation,
|
||||
Pubkey: nk,
|
||||
Nested: &oldSig,
|
||||
}
|
||||
if newSig.Signature, err = priv.SignNKS(newSig.SigHash()); err != nil {
|
||||
return nil, fmt.Errorf("signing NKS: %w", err)
|
||||
}
|
||||
|
||||
return newSig.Serialize(), nil
|
||||
}
|
||||
|
||||
// newEndpoints acquires c.mu and sets the local port and endpoints and reports
|
||||
// whether they've changed.
|
||||
//
|
||||
|
||||
@@ -329,20 +329,36 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server {
|
||||
s.initMetacert()
|
||||
s.packetsRecvDisco = s.packetsRecvByKind.Get("disco")
|
||||
s.packetsRecvOther = s.packetsRecvByKind.Get("other")
|
||||
s.packetsDroppedReasonCounters = []*expvar.Int{
|
||||
s.packetsDroppedReason.Get("unknown_dest"),
|
||||
s.packetsDroppedReason.Get("unknown_dest_on_fwd"),
|
||||
s.packetsDroppedReason.Get("gone_disconnected"),
|
||||
s.packetsDroppedReason.Get("gone_not_here"),
|
||||
s.packetsDroppedReason.Get("queue_head"),
|
||||
s.packetsDroppedReason.Get("queue_tail"),
|
||||
s.packetsDroppedReason.Get("write_error"),
|
||||
}
|
||||
|
||||
s.packetsDroppedReasonCounters = s.genPacketsDroppedReasonCounters()
|
||||
|
||||
s.packetsDroppedTypeDisco = s.packetsDroppedType.Get("disco")
|
||||
s.packetsDroppedTypeOther = s.packetsDroppedType.Get("other")
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Server) genPacketsDroppedReasonCounters() []*expvar.Int {
|
||||
getMetric := s.packetsDroppedReason.Get
|
||||
ret := []*expvar.Int{
|
||||
dropReasonUnknownDest: getMetric("unknown_dest"),
|
||||
dropReasonUnknownDestOnFwd: getMetric("unknown_dest_on_fwd"),
|
||||
dropReasonGoneDisconnected: getMetric("gone_disconnected"),
|
||||
dropReasonQueueHead: getMetric("queue_head"),
|
||||
dropReasonQueueTail: getMetric("queue_tail"),
|
||||
dropReasonWriteError: getMetric("write_error"),
|
||||
dropReasonDupClient: getMetric("dup_client"),
|
||||
}
|
||||
if len(ret) != int(numDropReasons) {
|
||||
panic("dropReason metrics out of sync")
|
||||
}
|
||||
for i := range numDropReasons {
|
||||
if ret[i] == nil {
|
||||
panic("dropReason metrics out of sync")
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// SetMesh sets the pre-shared key that regional DERP servers used to mesh
|
||||
// amongst themselves.
|
||||
//
|
||||
@@ -776,7 +792,6 @@ func (c *sclient) run(ctx context.Context) error {
|
||||
var grp errgroup.Group
|
||||
sendCtx, cancelSender := context.WithCancel(ctx)
|
||||
grp.Go(func() error { return c.sendLoop(sendCtx) })
|
||||
grp.Go(func() error { return c.statsLoop(sendCtx) })
|
||||
defer func() {
|
||||
cancelSender()
|
||||
if err := grp.Wait(); err != nil && !c.s.isClosed() {
|
||||
@@ -788,6 +803,8 @@ func (c *sclient) run(ctx context.Context) error {
|
||||
}
|
||||
}()
|
||||
|
||||
c.startStatsLoop(sendCtx)
|
||||
|
||||
for {
|
||||
ft, fl, err := readFrameHeader(c.br)
|
||||
c.debugLogf("read frame type %d len %d err %v", ft, fl, err)
|
||||
@@ -1046,6 +1063,7 @@ const (
|
||||
dropReasonQueueTail // destination queue is full, dropped packet at queue tail
|
||||
dropReasonWriteError // OS write() failed
|
||||
dropReasonDupClient // the public key is connected 2+ times (active/active, fighting)
|
||||
numDropReasons // unused; keep last
|
||||
)
|
||||
|
||||
func (s *Server) recordDrop(packetBytes []byte, srcKey, dstKey key.NodePublic, reason dropReason) {
|
||||
|
||||
@@ -7,6 +7,7 @@ package derp
|
||||
|
||||
import "context"
|
||||
|
||||
func (c *sclient) statsLoop(ctx context.Context) error {
|
||||
return nil
|
||||
func (c *sclient) startStatsLoop(ctx context.Context) {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
@@ -12,40 +12,43 @@ import (
|
||||
"tailscale.com/net/tcpinfo"
|
||||
)
|
||||
|
||||
func (c *sclient) statsLoop(ctx context.Context) error {
|
||||
func (c *sclient) startStatsLoop(ctx context.Context) {
|
||||
// Get the RTT initially to verify it's supported.
|
||||
conn := c.tcpConn()
|
||||
if conn == nil {
|
||||
c.s.tcpRtt.Add("non-tcp", 1)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
if _, err := tcpinfo.RTT(conn); err != nil {
|
||||
c.logf("error fetching initial RTT: %v", err)
|
||||
c.s.tcpRtt.Add("error", 1)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
const statsInterval = 10 * time.Second
|
||||
|
||||
ticker, tickerChannel := c.s.clock.NewTicker(statsInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
statsLoop:
|
||||
for {
|
||||
select {
|
||||
case <-tickerChannel:
|
||||
rtt, err := tcpinfo.RTT(conn)
|
||||
if err != nil {
|
||||
continue statsLoop
|
||||
}
|
||||
|
||||
// TODO(andrew): more metrics?
|
||||
c.s.tcpRtt.Add(durationToLabel(rtt), 1)
|
||||
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
// Don't launch a goroutine; use a timer instead.
|
||||
var gatherStats func()
|
||||
gatherStats = func() {
|
||||
// Do nothing if the context is finished.
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Reschedule ourselves when this stats gathering is finished.
|
||||
defer c.s.clock.AfterFunc(statsInterval, gatherStats)
|
||||
|
||||
// Gather TCP RTT information.
|
||||
rtt, err := tcpinfo.RTT(conn)
|
||||
if err == nil {
|
||||
c.s.tcpRtt.Add(durationToLabel(rtt), 1)
|
||||
}
|
||||
|
||||
// TODO(andrew): more metrics?
|
||||
}
|
||||
|
||||
// Kick off the initial timer.
|
||||
c.s.clock.AfterFunc(statsInterval, gatherStats)
|
||||
}
|
||||
|
||||
// tcpConn attempts to get the underlying *net.TCPConn from this client's
|
||||
|
||||
@@ -18,11 +18,12 @@ func _() {
|
||||
_ = x[dropReasonQueueTail-4]
|
||||
_ = x[dropReasonWriteError-5]
|
||||
_ = x[dropReasonDupClient-6]
|
||||
_ = x[numDropReasons-7]
|
||||
}
|
||||
|
||||
const _dropReason_name = "UnknownDestUnknownDestOnFwdGoneDisconnectedQueueHeadQueueTailWriteErrorDupClient"
|
||||
const _dropReason_name = "UnknownDestUnknownDestOnFwdGoneDisconnectedQueueHeadQueueTailWriteErrorDupClientnumDropReasons"
|
||||
|
||||
var _dropReason_index = [...]uint8{0, 11, 27, 43, 52, 61, 71, 80}
|
||||
var _dropReason_index = [...]uint8{0, 11, 27, 43, 52, 61, 71, 80, 94}
|
||||
|
||||
func (i dropReason) String() string {
|
||||
if i < 0 || i >= dropReason(len(_dropReason_index)-1) {
|
||||
|
||||
@@ -29,5 +29,6 @@ spec:
|
||||
- name: TS_ROUTES
|
||||
value: "{{TS_ROUTES}}"
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
|
||||
@@ -81,24 +81,39 @@ type Handler struct {
|
||||
staticRoot string
|
||||
}
|
||||
|
||||
var cacheInvalidatingMethods = map[string]bool{
|
||||
"PUT": true,
|
||||
"POST": true,
|
||||
"COPY": true,
|
||||
"MKCOL": true,
|
||||
"MOVE": true,
|
||||
"PROPPATCH": true,
|
||||
"DELETE": true,
|
||||
}
|
||||
|
||||
// ServeHTTP implements http.Handler.
|
||||
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == "PROPFIND" {
|
||||
h.handlePROPFIND(w, r)
|
||||
pathComponents := shared.CleanAndSplit(r.URL.Path)
|
||||
mpl := h.maxPathLength(r)
|
||||
|
||||
switch r.Method {
|
||||
case "PROPFIND":
|
||||
h.handlePROPFIND(w, r, pathComponents, mpl)
|
||||
return
|
||||
case "LOCK":
|
||||
h.handleLOCK(w, r, pathComponents, mpl)
|
||||
return
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
// If the user is performing a modification (e.g. PUT, MKDIR, etc),
|
||||
_, shouldInvalidate := cacheInvalidatingMethods[r.Method]
|
||||
if shouldInvalidate {
|
||||
// If the user is performing a modification (e.g. PUT, MKDIR, etc.),
|
||||
// we need to invalidate the StatCache to make sure we're not knowingly
|
||||
// showing stale stats.
|
||||
// TODO(oxtoacart): maybe be more selective about invalidating cache
|
||||
// TODO(oxtoacart): maybe only invalidate specific paths
|
||||
h.StatCache.invalidate()
|
||||
}
|
||||
|
||||
mpl := h.maxPathLength(r)
|
||||
pathComponents := shared.CleanAndSplit(r.URL.Path)
|
||||
|
||||
if len(pathComponents) >= mpl {
|
||||
h.delegate(mpl, pathComponents[mpl-1:], w, r)
|
||||
return
|
||||
@@ -130,6 +145,8 @@ func (h *Handler) handle(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// delegate sends the request to the Child WebDAV server.
|
||||
func (h *Handler) delegate(mpl int, pathComponents []string, w http.ResponseWriter, r *http.Request) {
|
||||
rewriteIfHeader(r, pathComponents, mpl)
|
||||
|
||||
dest := r.Header.Get("Destination")
|
||||
if dest != "" {
|
||||
// Rewrite destination header
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package compositedav
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"regexp"
|
||||
|
||||
"tailscale.com/drive/driveimpl/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
hrefRegex = regexp.MustCompile(`(?s)<D:href>/?([^<]*)/?</D:href>`)
|
||||
)
|
||||
|
||||
func (h *Handler) handlePROPFIND(w http.ResponseWriter, r *http.Request) {
|
||||
pathComponents := shared.CleanAndSplit(r.URL.Path)
|
||||
mpl := h.maxPathLength(r)
|
||||
if !shared.IsRoot(r.URL.Path) && len(pathComponents)+getDepth(r) > mpl {
|
||||
// Delegate to a Child.
|
||||
depth := getDepth(r)
|
||||
|
||||
status, result := h.StatCache.getOr(r.URL.Path, depth, func() (int, []byte) {
|
||||
// Use a buffering ResponseWriter so that we can manipulate the result.
|
||||
// The only thing we use from the original ResponseWriter is Header().
|
||||
bw := &bufferingResponseWriter{ResponseWriter: w}
|
||||
|
||||
mpl := h.maxPathLength(r)
|
||||
h.delegate(mpl, pathComponents[mpl-1:], bw, r)
|
||||
|
||||
// Fixup paths to add the requested path as a prefix.
|
||||
pathPrefix := shared.Join(pathComponents[0:mpl]...)
|
||||
b := hrefRegex.ReplaceAll(bw.buf.Bytes(), []byte(fmt.Sprintf("<D:href>%s/$1</D:href>", pathPrefix)))
|
||||
|
||||
return bw.status, b
|
||||
})
|
||||
|
||||
w.Header().Del("Content-Length")
|
||||
w.WriteHeader(status)
|
||||
if result != nil {
|
||||
w.Write(result)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
h.handle(w, r)
|
||||
}
|
||||
|
||||
func getDepth(r *http.Request) int {
|
||||
switch r.Header.Get("Depth") {
|
||||
case "0":
|
||||
return 0
|
||||
case "1":
|
||||
return 1
|
||||
case "infinity":
|
||||
return math.MaxInt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type bufferingResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
status int
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
func (bw *bufferingResponseWriter) WriteHeader(statusCode int) {
|
||||
bw.status = statusCode
|
||||
}
|
||||
|
||||
func (bw *bufferingResponseWriter) Write(p []byte) (int, error) {
|
||||
return bw.buf.Write(p)
|
||||
}
|
||||
122
drive/driveimpl/compositedav/rewriting.go
Normal file
122
drive/driveimpl/compositedav/rewriting.go
Normal file
@@ -0,0 +1,122 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package compositedav
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/drive/driveimpl/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
responseHrefRegex = regexp.MustCompile(`(?s)(<D:(response|lockroot)>)<D:href>/?([^<]*)/?</D:href>`)
|
||||
ifHrefRegex = regexp.MustCompile(`^<(https?://[^/]+)?([^>]+)>`)
|
||||
)
|
||||
|
||||
func (h *Handler) handlePROPFIND(w http.ResponseWriter, r *http.Request, pathComponents []string, mpl int) {
|
||||
if shouldDelegateToChild(r, pathComponents, mpl) {
|
||||
// Delegate to a Child.
|
||||
depth := getDepth(r)
|
||||
|
||||
status, result := h.StatCache.getOr(r.URL.Path, depth, func() (int, []byte) {
|
||||
return h.delegateRewriting(w, r, pathComponents, mpl)
|
||||
})
|
||||
|
||||
respondRewritten(w, status, result)
|
||||
return
|
||||
}
|
||||
|
||||
h.handle(w, r)
|
||||
}
|
||||
|
||||
func (h *Handler) handleLOCK(w http.ResponseWriter, r *http.Request, pathComponents []string, mpl int) {
|
||||
if shouldDelegateToChild(r, pathComponents, mpl) {
|
||||
// Delegate to a Child.
|
||||
status, result := h.delegateRewriting(w, r, pathComponents, mpl)
|
||||
respondRewritten(w, status, result)
|
||||
return
|
||||
}
|
||||
|
||||
http.Error(w, "locking of top level directories is not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
|
||||
// shouldDelegateToChild decides whether a request should be delegated to a
|
||||
// child filesystem, as opposed to being handled by this filesystem. It checks
|
||||
// the depth of the requested path, and if it's deeper than the portion of the
|
||||
// tree that's handled by the parent, returns true.
|
||||
func shouldDelegateToChild(r *http.Request, pathComponents []string, mpl int) bool {
|
||||
return !shared.IsRoot(r.URL.Path) && len(pathComponents)+getDepth(r) > mpl
|
||||
}
|
||||
|
||||
func (h *Handler) delegateRewriting(w http.ResponseWriter, r *http.Request, pathComponents []string, mpl int) (int, []byte) {
|
||||
// Use a buffering ResponseWriter so that we can manipulate the result.
|
||||
// The only thing we use from the original ResponseWriter is Header().
|
||||
bw := &bufferingResponseWriter{ResponseWriter: w}
|
||||
|
||||
h.delegate(mpl, pathComponents[mpl-1:], bw, r)
|
||||
|
||||
// Fixup paths to add the requested path as a prefix, escaped for inclusion in XML.
|
||||
pp := shared.EscapeForXML(shared.Join(pathComponents[0:mpl]...))
|
||||
b := responseHrefRegex.ReplaceAll(bw.buf.Bytes(), []byte(fmt.Sprintf("$1<D:href>%s/$3</D:href>", pp)))
|
||||
return bw.status, b
|
||||
}
|
||||
|
||||
func respondRewritten(w http.ResponseWriter, status int, result []byte) {
|
||||
w.Header().Del("Content-Length")
|
||||
w.WriteHeader(status)
|
||||
if result != nil {
|
||||
w.Write(result)
|
||||
}
|
||||
}
|
||||
|
||||
func getDepth(r *http.Request) int {
|
||||
switch r.Header.Get("Depth") {
|
||||
case "0":
|
||||
return 0
|
||||
case "1":
|
||||
return 1
|
||||
case "infinity":
|
||||
return math.MaxInt16 // a really large number, but not infinity (avoids wrapping when we do arithmetic with this)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type bufferingResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
status int
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
func (bw *bufferingResponseWriter) WriteHeader(statusCode int) {
|
||||
bw.status = statusCode
|
||||
}
|
||||
|
||||
func (bw *bufferingResponseWriter) Write(p []byte) (int, error) {
|
||||
return bw.buf.Write(p)
|
||||
}
|
||||
|
||||
// rewriteIfHeader rewrites URLs in the If header by removing the host and the
|
||||
// portion of the path that corresponds to this composite filesystem. This way,
|
||||
// when we delegate requests to child filesystems, the If header will reference
|
||||
// a path that makes sense on those filesystems.
|
||||
//
|
||||
// See http://www.webdav.org/specs/rfc4918.html#HEADER_If
|
||||
func rewriteIfHeader(r *http.Request, pathComponents []string, mpl int) {
|
||||
ih := r.Header.Get("If")
|
||||
if ih == "" {
|
||||
return
|
||||
}
|
||||
matches := ifHrefRegex.FindStringSubmatch(ih)
|
||||
if len(matches) == 3 {
|
||||
pp := shared.JoinEscaped(pathComponents[0:mpl]...)
|
||||
p := strings.Replace(shared.JoinEscaped(pathComponents...), pp, "", 1)
|
||||
nih := ifHrefRegex.ReplaceAllString(ih, fmt.Sprintf("<%s>", p))
|
||||
r.Header.Set("If", nih)
|
||||
}
|
||||
}
|
||||
@@ -4,11 +4,19 @@
|
||||
package compositedav
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"log"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jellydator/ttlcache/v3"
|
||||
"tailscale.com/drive/driveimpl/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
notFound = newCacheEntry(http.StatusNotFound, nil)
|
||||
)
|
||||
|
||||
// StatCache provides a cache for directory listings and file metadata.
|
||||
@@ -18,12 +26,38 @@ import (
|
||||
// This is similar to the DirectoryCacheLifetime setting of Windows' built-in
|
||||
// SMB client, see
|
||||
// https://learn.microsoft.com/en-us/previous-versions/windows/it-pro/windows-7/ff686200(v=ws.10)
|
||||
//
|
||||
// StatCache is built specifically to cache the results of PROPFIND requests,
|
||||
// which come back as MultiStatus XML responses. Typical clients will issue two
|
||||
// kinds of PROPFIND:
|
||||
//
|
||||
// The first kind of PROPFIND is a directory listing performed to depth 1. At
|
||||
// this depth, the resulting XML will contain stats for the requested folder as
|
||||
// well as for all children of that folder.
|
||||
//
|
||||
// The second kind of PROPFIND is a file listing performed to depth 0. At this
|
||||
// depth, the resulting XML will contain stats only for the requested file.
|
||||
//
|
||||
// In order to avoid round-trips, when a PROPFIND at depth 0 is attempted, and
|
||||
// the requested file is not in the cache, StatCache will check to see if the
|
||||
// parent folder of that file is cached. If so, StatCache infers the correct
|
||||
// MultiStatus for the file according to the following logic:
|
||||
//
|
||||
// 1. If the parent folder is NotFound (404), treat the file itself as NotFound
|
||||
// 2. If the parent folder's XML doesn't contain the file, treat it as
|
||||
// NotFound.
|
||||
// 3. If the parent folder's XML contains the file, build a MultiStatus for the
|
||||
// file based on the parent's XML.
|
||||
//
|
||||
// To avoid inconsistencies from the perspective of the client, any operations
|
||||
// that modify the filesystem (e.g. PUT, MKDIR, etc.) should call invalidate()
|
||||
// to invalidate the cache.
|
||||
type StatCache struct {
|
||||
TTL time.Duration
|
||||
|
||||
// mu guards the below values.
|
||||
mu sync.Mutex
|
||||
cachesByDepthAndPath map[int]*ttlcache.Cache[string, []byte]
|
||||
cachesByDepthAndPath map[int]*ttlcache.Cache[string, *cacheEntry]
|
||||
}
|
||||
|
||||
// getOr checks the cache for the named value at the given depth. If a cached
|
||||
@@ -32,25 +66,57 @@ type StatCache struct {
|
||||
// status and value. If the function returned http.StatusMultiStatus, getOr
|
||||
// caches the resulting value at the given name and depth before returning.
|
||||
func (c *StatCache) getOr(name string, depth int, or func() (int, []byte)) (int, []byte) {
|
||||
cached := c.get(name, depth)
|
||||
if cached != nil {
|
||||
return http.StatusMultiStatus, cached
|
||||
ce := c.get(name, depth)
|
||||
if ce == nil {
|
||||
// Not cached, fetch value.
|
||||
status, raw := or()
|
||||
ce = newCacheEntry(status, raw)
|
||||
if status == http.StatusMultiStatus || status == http.StatusNotFound {
|
||||
// Got a legit status, cache value
|
||||
c.set(name, depth, ce)
|
||||
}
|
||||
}
|
||||
status, next := or()
|
||||
if c != nil && status == http.StatusMultiStatus && next != nil {
|
||||
c.set(name, depth, next)
|
||||
}
|
||||
return status, next
|
||||
return ce.Status, ce.Raw
|
||||
}
|
||||
|
||||
func (c *StatCache) get(name string, depth int) []byte {
|
||||
// get retrieves the entry for the named file at the given depth. If no entry
|
||||
// is found, and depth == 0, get will check to see if the parent path of name
|
||||
// is present in the cache at depth 1. If so, it will infer that the child does
|
||||
// not exist and return notFound (404).
|
||||
func (c *StatCache) get(name string, depth int) *cacheEntry {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
name = shared.Normalize(name)
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
ce := c.tryGetLocked(name, depth)
|
||||
if ce != nil {
|
||||
// Cache hit.
|
||||
return ce
|
||||
}
|
||||
|
||||
if depth > 0 {
|
||||
// Cache miss.
|
||||
return nil
|
||||
}
|
||||
|
||||
// At depth 0, if child's parent is in the cache, and the child isn't
|
||||
// cached, we can infer that the child is notFound.
|
||||
p := c.tryGetLocked(shared.Parent(name), 1)
|
||||
if p != nil {
|
||||
return notFound
|
||||
}
|
||||
|
||||
// No parent in cache, cache miss.
|
||||
return nil
|
||||
}
|
||||
|
||||
// tryGetLocked requires that c.mu be held.
|
||||
func (c *StatCache) tryGetLocked(name string, depth int) *cacheEntry {
|
||||
if c.cachesByDepthAndPath == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -65,28 +131,80 @@ func (c *StatCache) get(name string, depth int) []byte {
|
||||
return item.Value()
|
||||
}
|
||||
|
||||
func (c *StatCache) set(name string, depth int, value []byte) {
|
||||
// set stores the given cacheEntry in the cache at the given name and depth. If
|
||||
// the depth is 1, set also populates depth 0 entries in the cache for the bare
|
||||
// name. If status is StatusMultiStatus, set will parse the PROPFIND result and
|
||||
// store depth 0 entries for all children. If parsing the result fails, nothing
|
||||
// is cached.
|
||||
func (c *StatCache) set(name string, depth int, ce *cacheEntry) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
|
||||
name = shared.Normalize(name)
|
||||
|
||||
var self *cacheEntry
|
||||
var children map[string]*cacheEntry
|
||||
if depth == 1 {
|
||||
switch ce.Status {
|
||||
case http.StatusNotFound:
|
||||
// Record notFound as the self entry.
|
||||
self = ce
|
||||
case http.StatusMultiStatus:
|
||||
// Parse the raw MultiStatus and extract specific responses
|
||||
// corresponding to the self entry (e.g. the directory, but at depth 0)
|
||||
// and children (e.g. files within the directory) so that subsequent
|
||||
// requests for these can be satisfied from the cache.
|
||||
var ms multiStatus
|
||||
err := xml.Unmarshal(ce.Raw, &ms)
|
||||
if err != nil {
|
||||
// unparseable MultiStatus response, don't cache
|
||||
log.Printf("statcache.set error: %s", err)
|
||||
return
|
||||
}
|
||||
children = make(map[string]*cacheEntry, len(ms.Responses)-1)
|
||||
for i := 0; i < len(ms.Responses); i++ {
|
||||
response := ms.Responses[i]
|
||||
name := shared.Normalize(response.Href)
|
||||
raw := marshalMultiStatus(response)
|
||||
entry := newCacheEntry(ce.Status, raw)
|
||||
if i == 0 {
|
||||
self = entry
|
||||
} else {
|
||||
children[name] = entry
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.setLocked(name, depth, ce)
|
||||
if self != nil {
|
||||
c.setLocked(name, 0, self)
|
||||
}
|
||||
for childName, child := range children {
|
||||
c.setLocked(childName, 0, child)
|
||||
}
|
||||
}
|
||||
|
||||
// setLocked requires that c.mu be held.
|
||||
func (c *StatCache) setLocked(name string, depth int, ce *cacheEntry) {
|
||||
if c.cachesByDepthAndPath == nil {
|
||||
c.cachesByDepthAndPath = make(map[int]*ttlcache.Cache[string, []byte])
|
||||
c.cachesByDepthAndPath = make(map[int]*ttlcache.Cache[string, *cacheEntry])
|
||||
}
|
||||
cache := c.cachesByDepthAndPath[depth]
|
||||
if cache == nil {
|
||||
cache = ttlcache.New(
|
||||
ttlcache.WithTTL[string, []byte](c.TTL),
|
||||
ttlcache.WithTTL[string, *cacheEntry](c.TTL),
|
||||
)
|
||||
go cache.Start()
|
||||
c.cachesByDepthAndPath[depth] = cache
|
||||
}
|
||||
cache.Set(name, value, ttlcache.DefaultTTL)
|
||||
cache.Set(name, ce, ttlcache.DefaultTTL)
|
||||
}
|
||||
|
||||
// invalidate invalidates the entire cache.
|
||||
func (c *StatCache) invalidate() {
|
||||
if c == nil {
|
||||
return
|
||||
@@ -108,3 +226,54 @@ func (c *StatCache) stop() {
|
||||
cache.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
type cacheEntry struct {
|
||||
Status int
|
||||
Raw []byte
|
||||
}
|
||||
|
||||
func newCacheEntry(status int, raw []byte) *cacheEntry {
|
||||
return &cacheEntry{Status: status, Raw: raw}
|
||||
}
|
||||
|
||||
type propStat struct {
|
||||
InnerXML []byte `xml:",innerxml"`
|
||||
}
|
||||
|
||||
type response struct {
|
||||
XMLName xml.Name `xml:"response"`
|
||||
Href string `xml:"href"`
|
||||
PropStats []*propStat `xml:"propstat"`
|
||||
}
|
||||
|
||||
type multiStatus struct {
|
||||
XMLName xml.Name `xml:"multistatus"`
|
||||
Responses []*response `xml:"response"`
|
||||
}
|
||||
|
||||
// marshalMultiStatus performs custom marshalling of a MultiStatus to preserve
|
||||
// the original formatting, namespacing, etc. Doing this with Go's XML encoder
|
||||
// is somewhere between difficult and impossible, which is why we use this more
|
||||
// manual approach.
|
||||
func marshalMultiStatus(response *response) []byte {
|
||||
// TODO(percy): maybe pool these buffers
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(multistatusTemplateStart)
|
||||
buf.WriteString(response.Href)
|
||||
buf.WriteString(hrefEnd)
|
||||
for _, propStat := range response.PropStats {
|
||||
buf.WriteString(propstatStart)
|
||||
buf.Write(propStat.InnerXML)
|
||||
buf.WriteString(propstatEnd)
|
||||
}
|
||||
buf.WriteString(multistatusTemplateEnd)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
const (
|
||||
multistatusTemplateStart = `<?xml version="1.0" encoding="UTF-8"?><D:multistatus xmlns:D="DAV:"><D:response><D:href>`
|
||||
hrefEnd = `</D:href>`
|
||||
propstatStart = `<D:propstat>`
|
||||
propstatEnd = `</D:propstat>`
|
||||
multistatusTemplateEnd = `</D:response></D:multistatus>`
|
||||
)
|
||||
|
||||
@@ -4,17 +4,65 @@
|
||||
package compositedav
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"tailscale.com/tstest"
|
||||
)
|
||||
|
||||
var (
|
||||
val = []byte("1")
|
||||
file = "file"
|
||||
)
|
||||
var parentPath = "/parent"
|
||||
|
||||
var childPath = "/parent/child.txt"
|
||||
|
||||
var parentResponse = `<D:response>
|
||||
<D:href>/parent/</D:href>
|
||||
<D:propstat>
|
||||
<D:prop>
|
||||
<D:getlastmodified>Mon, 29 Apr 2024 19:52:23 GMT</D:getlastmodified>
|
||||
<D:creationdate>Fri, 19 Apr 2024 04:13:34 GMT</D:creationdate>
|
||||
<D:resourcetype>
|
||||
<D:collection xmlns:D="DAV:" />
|
||||
</D:resourcetype>
|
||||
</D:prop>
|
||||
<D:status>HTTP/1.1 200 OK</D:status>
|
||||
</D:propstat>
|
||||
</D:response>`
|
||||
|
||||
var childResponse = `
|
||||
<D:response>
|
||||
<D:href>/parent/child.txt</D:href>
|
||||
<D:propstat>
|
||||
<D:prop>
|
||||
<D:getlastmodified>Mon, 29 Apr 2024 19:52:23 GMT</D:getlastmodified>
|
||||
<D:creationdate>Fri, 19 Apr 2024 04:13:34 GMT</D:creationdate>
|
||||
<D:resourcetype>
|
||||
<D:collection xmlns:D="DAV:" />
|
||||
</D:resourcetype>
|
||||
</D:prop>
|
||||
<D:status>HTTP/1.1 200 OK</D:status>
|
||||
</D:propstat>
|
||||
</D:response>`
|
||||
|
||||
var fullParent = []byte(
|
||||
strings.ReplaceAll(
|
||||
fmt.Sprintf(`<?xml version="1.0" encoding="UTF-8"?><D:multistatus xmlns:D="DAV:">%s%s</D:multistatus>`, parentResponse, childResponse),
|
||||
"\n", ""))
|
||||
|
||||
var partialParent = []byte(
|
||||
strings.ReplaceAll(
|
||||
fmt.Sprintf(`<?xml version="1.0" encoding="UTF-8"?><D:multistatus xmlns:D="DAV:">%s</D:multistatus>`, parentResponse),
|
||||
"\n", ""))
|
||||
|
||||
var fullChild = []byte(
|
||||
strings.ReplaceAll(
|
||||
fmt.Sprintf(`<?xml version="1.0" encoding="UTF-8"?><D:multistatus xmlns:D="DAV:">%s</D:multistatus>`, childResponse),
|
||||
"\n", ""))
|
||||
|
||||
func TestStatCacheNoTimeout(t *testing.T) {
|
||||
// Make sure we don't leak goroutines
|
||||
@@ -24,22 +72,23 @@ func TestStatCacheNoTimeout(t *testing.T) {
|
||||
defer c.stop()
|
||||
|
||||
// check get before set
|
||||
fetched := c.get(file, 1)
|
||||
fetched := c.get(childPath, 0)
|
||||
if fetched != nil {
|
||||
t.Errorf("got %q, want nil", fetched)
|
||||
t.Errorf("got %v, want nil", fetched)
|
||||
}
|
||||
|
||||
// set new stat
|
||||
c.set(file, 1, val)
|
||||
fetched = c.get(file, 1)
|
||||
if !bytes.Equal(fetched, val) {
|
||||
t.Errorf("got %q, want %q", fetched, val)
|
||||
ce := newCacheEntry(http.StatusMultiStatus, fullChild)
|
||||
c.set(childPath, 0, ce)
|
||||
fetched = c.get(childPath, 0)
|
||||
if diff := cmp.Diff(fetched, ce); diff != "" {
|
||||
t.Errorf("should have gotten cached value; (-got+want):%v", diff)
|
||||
}
|
||||
|
||||
// fetch stat again, should still be cached
|
||||
fetched = c.get(file, 1)
|
||||
if !bytes.Equal(fetched, val) {
|
||||
t.Errorf("got %q, want %q", fetched, val)
|
||||
fetched = c.get(childPath, 0)
|
||||
if diff := cmp.Diff(fetched, ce); diff != "" {
|
||||
t.Errorf("should still have gotten cached value; (-got+want):%v", diff)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,25 +100,114 @@ func TestStatCacheTimeout(t *testing.T) {
|
||||
defer c.stop()
|
||||
|
||||
// set new stat
|
||||
c.set(file, 1, val)
|
||||
fetched := c.get(file, 1)
|
||||
if !bytes.Equal(fetched, val) {
|
||||
t.Errorf("got %q, want %q", fetched, val)
|
||||
ce := newCacheEntry(http.StatusMultiStatus, fullChild)
|
||||
c.set(childPath, 0, ce)
|
||||
fetched := c.get(childPath, 0)
|
||||
if diff := cmp.Diff(fetched, ce); diff != "" {
|
||||
t.Errorf("should have gotten cached value; (-got+want):%v", diff)
|
||||
}
|
||||
|
||||
// wait for cache to expire and refetch stat, should be empty now
|
||||
time.Sleep(c.TTL * 2)
|
||||
|
||||
fetched = c.get(file, 1)
|
||||
fetched = c.get(childPath, 0)
|
||||
if fetched != nil {
|
||||
t.Errorf("invalidate should have cleared cached value")
|
||||
t.Errorf("cached value should have expired")
|
||||
}
|
||||
|
||||
c.set(file, 1, val)
|
||||
c.set(childPath, 0, ce)
|
||||
// invalidate the cache and make sure nothing is returned
|
||||
c.invalidate()
|
||||
fetched = c.get(file, 1)
|
||||
fetched = c.get(childPath, 0)
|
||||
if fetched != nil {
|
||||
t.Errorf("invalidate should have cleared cached value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParentChildRelationship(t *testing.T) {
|
||||
// Make sure we don't leak goroutines
|
||||
tstest.ResourceCheck(t)
|
||||
|
||||
c := &StatCache{TTL: 24 * time.Hour} // don't expire
|
||||
defer c.stop()
|
||||
|
||||
missingParentPath := "/missingparent"
|
||||
unparseableParentPath := "/unparseable"
|
||||
|
||||
c.set(parentPath, 1, newCacheEntry(http.StatusMultiStatus, fullParent))
|
||||
c.set(missingParentPath, 1, newCacheEntry(http.StatusNotFound, nil))
|
||||
c.set(unparseableParentPath, 1, newCacheEntry(http.StatusMultiStatus, []byte("<this will not parse")))
|
||||
|
||||
tests := []struct {
|
||||
path string
|
||||
depth int
|
||||
want *cacheEntry
|
||||
}{
|
||||
{
|
||||
path: parentPath,
|
||||
depth: 1,
|
||||
want: newCacheEntry(http.StatusMultiStatus, fullParent),
|
||||
},
|
||||
{
|
||||
path: parentPath,
|
||||
depth: 0,
|
||||
want: newCacheEntry(http.StatusMultiStatus, partialParent),
|
||||
},
|
||||
{
|
||||
path: childPath,
|
||||
depth: 0,
|
||||
want: newCacheEntry(http.StatusMultiStatus, fullChild),
|
||||
},
|
||||
{
|
||||
path: path.Join(parentPath, "nonexistent.txt"),
|
||||
depth: 0,
|
||||
want: notFound,
|
||||
},
|
||||
{
|
||||
path: missingParentPath,
|
||||
depth: 1,
|
||||
want: notFound,
|
||||
},
|
||||
{
|
||||
path: missingParentPath,
|
||||
depth: 0,
|
||||
want: notFound,
|
||||
},
|
||||
{
|
||||
path: path.Join(missingParentPath, "filename.txt"),
|
||||
depth: 0,
|
||||
want: notFound,
|
||||
},
|
||||
{
|
||||
path: unparseableParentPath,
|
||||
depth: 1,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
path: unparseableParentPath,
|
||||
depth: 0,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
path: path.Join(unparseableParentPath, "filename.txt"),
|
||||
depth: 0,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
path: "/unknown",
|
||||
depth: 1,
|
||||
want: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(fmt.Sprintf("%d%s", test.depth, test.path), func(t *testing.T) {
|
||||
got := c.get(test.path, test.depth)
|
||||
if diff := cmp.Diff(got, test.want); diff != "" {
|
||||
t.Errorf("unexpected cached value; (-got+want):%v", diff)
|
||||
log.Printf("want\n%s", test.want.Raw)
|
||||
log.Printf("got\n%s", got.Raw)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,8 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -30,14 +32,29 @@ import (
|
||||
const (
|
||||
domain = `test$%domain.com`
|
||||
|
||||
remote1 = `rem ote$%1`
|
||||
remote2 = `_rem ote$%2`
|
||||
share11 = `sha re$%11`
|
||||
share12 = `_sha re$%12`
|
||||
file111 = `fi le$%111.txt`
|
||||
remote1 = `rem ote$%<>1`
|
||||
remote2 = `_rem ote$%<>2`
|
||||
share11 = `sha re$%<>11`
|
||||
share12 = `_sha re$%<>12`
|
||||
file112 = `file112.txt`
|
||||
)
|
||||
|
||||
var (
|
||||
file111 = `fi le$%<>111.txt`
|
||||
)
|
||||
|
||||
func init() {
|
||||
if runtime.GOOS == "windows" {
|
||||
// file with less than and greater than doesn't work on Windows
|
||||
file111 = `fi le$%111.txt`
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
lockRootRegex = regexp.MustCompile(`<D:lockroot><D:href>/?([^<]*)/?</D:href>`)
|
||||
lockTokenRegex = regexp.MustCompile(`<D:locktoken><D:href>([0-9]+)/?</D:href>`)
|
||||
)
|
||||
|
||||
func init() {
|
||||
// set AllowShareAs() to false so that we don't try to use sub-processes
|
||||
// for access files on disk.
|
||||
@@ -145,6 +162,206 @@ func TestSecretTokenAuth(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLOCK(t *testing.T) {
|
||||
s := newSystem(t)
|
||||
|
||||
s.addRemote(remote1)
|
||||
s.addShare(remote1, share11, drive.PermissionReadWrite)
|
||||
s.writeFile("writing file to read/write remote should succeed", remote1, share11, file111, "hello world", true)
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{DisableKeepAlives: true},
|
||||
}
|
||||
|
||||
u := fmt.Sprintf("http://%s/%s/%s/%s/%s",
|
||||
s.local.l.Addr(),
|
||||
url.PathEscape(domain),
|
||||
url.PathEscape(remote1),
|
||||
url.PathEscape(share11),
|
||||
url.PathEscape(file111))
|
||||
|
||||
// First acquire a lock with a short timeout
|
||||
req, err := http.NewRequest("LOCK", u, strings.NewReader(lockBody))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req.Header.Set("Depth", "infinity")
|
||||
req.Header.Set("Timeout", "Second-1")
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
t.Fatalf("expected LOCK to succeed, but got status %d", resp.StatusCode)
|
||||
}
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
submatches := lockRootRegex.FindStringSubmatch(string(body))
|
||||
if len(submatches) != 2 {
|
||||
t.Fatal("failed to find lockroot")
|
||||
}
|
||||
want := shared.EscapeForXML(pathTo(remote1, share11, file111))
|
||||
got := submatches[1]
|
||||
if got != want {
|
||||
t.Fatalf("want lockroot %q, got %q", want, got)
|
||||
}
|
||||
|
||||
submatches = lockTokenRegex.FindStringSubmatch(string(body))
|
||||
if len(submatches) != 2 {
|
||||
t.Fatal("failed to find locktoken")
|
||||
}
|
||||
lockToken := submatches[1]
|
||||
ifHeader := fmt.Sprintf("<%s> (<%s>)", u, lockToken)
|
||||
|
||||
// Then refresh the lock with a longer timeout
|
||||
req, err = http.NewRequest("LOCK", u, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req.Header.Set("Depth", "infinity")
|
||||
req.Header.Set("Timeout", "Second-600")
|
||||
req.Header.Set("If", ifHeader)
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
t.Fatalf("expected LOCK refresh to succeed, but got status %d", resp.StatusCode)
|
||||
}
|
||||
body, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
submatches = lockRootRegex.FindStringSubmatch(string(body))
|
||||
if len(submatches) != 2 {
|
||||
t.Fatal("failed to find lockroot after refresh")
|
||||
}
|
||||
want = shared.EscapeForXML(pathTo(remote1, share11, file111))
|
||||
got = submatches[1]
|
||||
if got != want {
|
||||
t.Fatalf("want lockroot after refresh %q, got %q", want, got)
|
||||
}
|
||||
|
||||
submatches = lockTokenRegex.FindStringSubmatch(string(body))
|
||||
if len(submatches) != 2 {
|
||||
t.Fatal("failed to find locktoken after refresh")
|
||||
}
|
||||
if submatches[1] != lockToken {
|
||||
t.Fatalf("on refresh, lock token changed from %q to %q", lockToken, submatches[1])
|
||||
}
|
||||
|
||||
// Then wait past the original timeout, then try to delete without the lock
|
||||
// (should fail)
|
||||
time.Sleep(1 * time.Second)
|
||||
req, err = http.NewRequest("DELETE", u, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 423 {
|
||||
t.Fatalf("deleting without lock token should fail with 423, but got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Then delete with the lock (should succeed)
|
||||
req, err = http.NewRequest("DELETE", u, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
req.Header.Set("If", ifHeader)
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 204 {
|
||||
t.Fatalf("deleting with lock token should have succeeded with 204, but got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUNLOCK(t *testing.T) {
|
||||
s := newSystem(t)
|
||||
|
||||
s.addRemote(remote1)
|
||||
s.addShare(remote1, share11, drive.PermissionReadWrite)
|
||||
s.writeFile("writing file to read/write remote should succeed", remote1, share11, file111, "hello world", true)
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{DisableKeepAlives: true},
|
||||
}
|
||||
|
||||
u := fmt.Sprintf("http://%s/%s/%s/%s/%s",
|
||||
s.local.l.Addr(),
|
||||
url.PathEscape(domain),
|
||||
url.PathEscape(remote1),
|
||||
url.PathEscape(share11),
|
||||
url.PathEscape(file111))
|
||||
|
||||
// Acquire a lock
|
||||
req, err := http.NewRequest("LOCK", u, strings.NewReader(lockBody))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req.Header.Set("Depth", "infinity")
|
||||
req.Header.Set("Timeout", "Second-600")
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
t.Fatalf("expected LOCK to succeed, but got status %d", resp.StatusCode)
|
||||
}
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
submatches := lockTokenRegex.FindStringSubmatch(string(body))
|
||||
if len(submatches) != 2 {
|
||||
t.Fatal("failed to find locktoken")
|
||||
}
|
||||
lockToken := submatches[1]
|
||||
|
||||
// Release the lock
|
||||
req, err = http.NewRequest("UNLOCK", u, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req.Header.Set("Lock-Token", fmt.Sprintf("<%s>", lockToken))
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 204 {
|
||||
t.Fatalf("expected UNLOCK to succeed with a 204, but got status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Then delete without the lock (should succeed)
|
||||
req, err = http.NewRequest("DELETE", u, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 204 {
|
||||
t.Fatalf("deleting without lock should have succeeded with 204, but got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
type local struct {
|
||||
l net.Listener
|
||||
fs *FileSystemForLocal
|
||||
@@ -486,3 +703,9 @@ func (a *noopAuthenticator) Clone() gowebdav.Authenticator {
|
||||
func (a *noopAuthenticator) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
const lockBody = `<?xml version="1.0" encoding="utf-8" ?>
|
||||
<D:lockinfo xmlns:D='DAV:'>
|
||||
<D:lockscope><D:exclusive/></D:lockscope>
|
||||
<D:locktype><D:write/></D:locktype>
|
||||
</D:lockinfo>`
|
||||
|
||||
@@ -151,6 +151,9 @@ func (s *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
// WebDAV's locking code compares the lock resources with the request's
|
||||
// host header, set this to empty to avoid mismatches.
|
||||
r.Host = ""
|
||||
h.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,17 @@ func CleanAndSplit(p string) []string {
|
||||
return strings.Split(strings.Trim(path.Clean(p), sepStringAndDot), sepString)
|
||||
}
|
||||
|
||||
// Normalize normalizes the given path (e.g. dropping trailing slashes).
|
||||
func Normalize(p string) string {
|
||||
return Join(CleanAndSplit(p)...)
|
||||
}
|
||||
|
||||
// Parent extracts the parent of the given path.
|
||||
func Parent(p string) string {
|
||||
parts := CleanAndSplit(p)
|
||||
return Join(parts[:len(parts)-1]...)
|
||||
}
|
||||
|
||||
// Join behaves like path.Join() but also includes a leading slash.
|
||||
func Join(parts ...string) string {
|
||||
fullParts := make([]string, 0, len(parts))
|
||||
|
||||
16
drive/driveimpl/shared/xml.go
Normal file
16
drive/driveimpl/shared/xml.go
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package shared
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
)
|
||||
|
||||
// EscapeForXML escapes the given string for use in XML text.
|
||||
func EscapeForXML(s string) string {
|
||||
result := bytes.NewBuffer(nil)
|
||||
xml.Escape(result, []byte(s))
|
||||
return result.String()
|
||||
}
|
||||
4
go.mod
4
go.mod
@@ -14,6 +14,7 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.0
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7
|
||||
github.com/bramvdbogaerde/go-scp v1.4.0
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
||||
github.com/creack/pty v1.1.21
|
||||
@@ -37,7 +38,7 @@ require (
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/google/go-containerregistry v0.18.0
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806
|
||||
github.com/google/uuid v1.5.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/goreleaser/nfpm/v2 v2.33.1
|
||||
github.com/hdevalence/ed25519consensus v0.2.0
|
||||
github.com/iancoleman/strcase v0.3.0
|
||||
@@ -60,6 +61,7 @@ require (
|
||||
github.com/peterbourgon/ff/v3 v3.4.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pkg/sftp v1.13.6
|
||||
github.com/prometheus-community/pro-bing v0.4.0
|
||||
github.com/prometheus/client_golang v1.18.0
|
||||
github.com/prometheus/common v0.46.0
|
||||
github.com/safchain/ethtool v0.3.0
|
||||
|
||||
8
go.sum
8
go.sum
@@ -177,6 +177,8 @@ github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ
|
||||
github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
|
||||
github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU=
|
||||
github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo=
|
||||
github.com/bramvdbogaerde/go-scp v1.4.0 h1:jKMwpwCbcX1KyvDbm/PDJuXcMuNVlLGi0Q0reuzjyKY=
|
||||
github.com/bramvdbogaerde/go-scp v1.4.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
|
||||
github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8=
|
||||
github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s=
|
||||
github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ=
|
||||
@@ -468,8 +470,8 @@ github.com/google/rpmpack v0.5.0 h1:L16KZ3QvkFGpYhmp23iQip+mx1X39foEsqszjMNBm8A=
|
||||
github.com/google/rpmpack v0.5.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
|
||||
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
@@ -731,6 +733,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/polyfloyd/go-errorlint v1.4.1 h1:r8ru5FhXSn34YU1GJDOuoJv2LdsQkPmK325EOpPMJlM=
|
||||
github.com/polyfloyd/go-errorlint v1.4.1/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8=
|
||||
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
|
||||
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
|
||||
@@ -32,7 +32,8 @@ type ConfigVAlpha struct {
|
||||
AdvertiseRoutes []netip.Prefix `json:",omitempty"`
|
||||
DisableSNAT opt.Bool `json:",omitempty"`
|
||||
|
||||
NetfilterMode *string `json:",omitempty"` // "on", "off", "nodivert"
|
||||
NetfilterMode *string `json:",omitempty"` // "on", "off", "nodivert"
|
||||
NoStatefulFiltering opt.Bool `json:",omitempty"`
|
||||
|
||||
PostureChecking opt.Bool `json:",omitempty"`
|
||||
RunSSHServer opt.Bool `json:",omitempty"` // Tailscale SSH
|
||||
@@ -50,6 +51,7 @@ func (c *ConfigVAlpha) ToPrefs() (MaskedPrefs, error) {
|
||||
if c == nil {
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
mp.WantRunning = !c.Enabled.EqualBool(false)
|
||||
mp.WantRunningSet = mp.WantRunning || c.Enabled != ""
|
||||
if c.ServerURL != nil {
|
||||
@@ -98,6 +100,11 @@ func (c *ConfigVAlpha) ToPrefs() (MaskedPrefs, error) {
|
||||
mp.NoSNAT = c.DisableSNAT.EqualBool(true)
|
||||
mp.NoSNAT = true
|
||||
}
|
||||
if c.NoStatefulFiltering != "" {
|
||||
mp.NoStatefulFiltering = c.NoStatefulFiltering
|
||||
mp.NoStatefulFilteringSet = true
|
||||
}
|
||||
|
||||
if c.NetfilterMode != nil {
|
||||
m, err := preftype.ParseNetfilterMode(*c.NetfilterMode)
|
||||
if err != nil {
|
||||
|
||||
59
ipn/conffile/cloudconf.go
Normal file
59
ipn/conffile/cloudconf.go
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package conffile
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/omit"
|
||||
)
|
||||
|
||||
func getEC2MetadataToken() (string, error) {
|
||||
if omit.AWS {
|
||||
return "", omit.Err
|
||||
}
|
||||
req, _ := http.NewRequest("PUT", "http://169.254.169.254/latest/api/token", nil)
|
||||
req.Header.Add("X-aws-ec2-metadata-token-ttl-seconds", "300")
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get metadata token: %w", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
return "", fmt.Errorf("failed to get metadata token: %v", res.Status)
|
||||
}
|
||||
all, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read metadata token: %w", err)
|
||||
}
|
||||
return strings.TrimSpace(string(all)), nil
|
||||
}
|
||||
|
||||
func readVMUserData() ([]byte, error) {
|
||||
// TODO(bradfitz): support GCP, Azure, Proxmox/cloud-init
|
||||
// (NoCloud/ConfigDrive ISO), etc.
|
||||
|
||||
if omit.AWS {
|
||||
return nil, omit.Err
|
||||
}
|
||||
token, tokErr := getEC2MetadataToken()
|
||||
req, _ := http.NewRequest("GET", "http://169.254.169.254/latest/user-data", nil)
|
||||
req.Header.Add("X-aws-ec2-metadata-token", token)
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
if tokErr != nil {
|
||||
return nil, fmt.Errorf("failed to get VM user data: %v; also failed to get metadata token: %v", res.Status, tokErr)
|
||||
}
|
||||
return nil, errors.New(res.Status)
|
||||
}
|
||||
return io.ReadAll(res.Body)
|
||||
}
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
// Config describes a config file.
|
||||
type Config struct {
|
||||
Path string // disk path of HuJSON
|
||||
Path string // disk path of HuJSON, or VMUserDataPath
|
||||
Raw []byte // raw bytes from disk, in HuJSON form
|
||||
Std []byte // standardized JSON form
|
||||
Version string // "alpha0" for now
|
||||
@@ -35,13 +35,22 @@ func (c *Config) WantRunning() bool {
|
||||
return c != nil && !c.Parsed.Enabled.EqualBool(false)
|
||||
}
|
||||
|
||||
// VMUserDataPath is a sentinel value for Load to use to get the data
|
||||
// from the VM's metadata service's user-data field.
|
||||
const VMUserDataPath = "vm:user-data"
|
||||
|
||||
// Load reads and parses the config file at the provided path on disk.
|
||||
func Load(path string) (*Config, error) {
|
||||
var c Config
|
||||
c.Path = path
|
||||
|
||||
var err error
|
||||
c.Raw, err = os.ReadFile(path)
|
||||
|
||||
switch path {
|
||||
case VMUserDataPath:
|
||||
c.Raw, err = readVMUserData()
|
||||
default:
|
||||
c.Raw, err = os.ReadFile(path)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -40,7 +40,6 @@ func (src *Prefs) Clone() *Prefs {
|
||||
var _PrefsCloneNeedsRegeneration = Prefs(struct {
|
||||
ControlURL string
|
||||
RouteAll bool
|
||||
AllowSingleHosts bool
|
||||
ExitNodeID tailcfg.StableNodeID
|
||||
ExitNodeIP netip.Addr
|
||||
InternalExitNodePrior tailcfg.StableNodeID
|
||||
@@ -67,6 +66,7 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct {
|
||||
PostureChecking bool
|
||||
NetfilterKind string
|
||||
DriveShares []*drive.Share
|
||||
AllowSingleHosts marshalAsTrueInJSON
|
||||
Persist *persist.Persist
|
||||
}{})
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
func TestDeps(t *testing.T) {
|
||||
deptest.DepChecker{
|
||||
BadDeps: map[string]string{
|
||||
"testing": "do not use testing package in production code",
|
||||
"gvisor.dev/gvisor/pkg/buffer": "https://github.com/tailscale/tailscale/issues/9756",
|
||||
"gvisor.dev/gvisor/pkg/cpuid": "https://github.com/tailscale/tailscale/issues/9756",
|
||||
"gvisor.dev/gvisor/pkg/tcpip": "https://github.com/tailscale/tailscale/issues/9756",
|
||||
|
||||
@@ -67,7 +67,6 @@ func (v *PrefsView) UnmarshalJSON(b []byte) error {
|
||||
|
||||
func (v PrefsView) ControlURL() string { return v.ж.ControlURL }
|
||||
func (v PrefsView) RouteAll() bool { return v.ж.RouteAll }
|
||||
func (v PrefsView) AllowSingleHosts() bool { return v.ж.AllowSingleHosts }
|
||||
func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID }
|
||||
func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP }
|
||||
func (v PrefsView) InternalExitNodePrior() tailcfg.StableNodeID { return v.ж.InternalExitNodePrior }
|
||||
@@ -98,13 +97,13 @@ func (v PrefsView) NetfilterKind() string { return v.ж.Netfilte
|
||||
func (v PrefsView) DriveShares() views.SliceView[*drive.Share, drive.ShareView] {
|
||||
return views.SliceOfViews[*drive.Share, drive.ShareView](v.ж.DriveShares)
|
||||
}
|
||||
func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() }
|
||||
func (v PrefsView) AllowSingleHosts() marshalAsTrueInJSON { return v.ж.AllowSingleHosts }
|
||||
func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() }
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
var _PrefsViewNeedsRegeneration = Prefs(struct {
|
||||
ControlURL string
|
||||
RouteAll bool
|
||||
AllowSingleHosts bool
|
||||
ExitNodeID tailcfg.StableNodeID
|
||||
ExitNodeIP netip.Addr
|
||||
InternalExitNodePrior tailcfg.StableNodeID
|
||||
@@ -131,6 +130,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct {
|
||||
PostureChecking bool
|
||||
NetfilterKind string
|
||||
DriveShares []*drive.Share
|
||||
AllowSingleHosts marshalAsTrueInJSON
|
||||
Persist *persist.Persist
|
||||
}{})
|
||||
|
||||
|
||||
@@ -478,17 +478,44 @@ func findCmdTailscale() (string, error) {
|
||||
}
|
||||
|
||||
func tailscaleUpdateCmd(cmdTS string) *exec.Cmd {
|
||||
defaultCmd := exec.Command(cmdTS, "update", "--yes")
|
||||
if runtime.GOOS != "linux" {
|
||||
return exec.Command(cmdTS, "update", "--yes")
|
||||
return defaultCmd
|
||||
}
|
||||
if _, err := exec.LookPath("systemd-run"); err != nil {
|
||||
return exec.Command(cmdTS, "update", "--yes")
|
||||
return defaultCmd
|
||||
}
|
||||
|
||||
// When systemd-run is available, use it to run the update command. This
|
||||
// creates a new temporary unit separate from the tailscaled unit. When
|
||||
// tailscaled is restarted during the update, systemd won't kill this
|
||||
// temporary update unit, which could cause unexpected breakage.
|
||||
return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes")
|
||||
//
|
||||
// We want to use the --wait flag for systemd-run, to block the update
|
||||
// command until completion and collect output. But this flag was added in
|
||||
// systemd 232, so we need to check the version first.
|
||||
//
|
||||
// The output will look like:
|
||||
//
|
||||
// systemd 255 (255.7-1-arch)
|
||||
// +PAM +AUDIT ... other feature flags ...
|
||||
systemdVerOut, err := exec.Command("systemd-run", "--version").Output()
|
||||
if err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
parts := strings.Fields(string(systemdVerOut))
|
||||
if len(parts) < 2 || parts[0] != "systemd" {
|
||||
return defaultCmd
|
||||
}
|
||||
systemdVer, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
if systemdVer < 232 {
|
||||
return exec.Command("systemd-run", "--pipe", "--collect", cmdTS, "update", "--yes")
|
||||
} else {
|
||||
return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes")
|
||||
}
|
||||
}
|
||||
|
||||
func regularFileExists(path string) bool {
|
||||
|
||||
@@ -259,10 +259,8 @@ type LocalBackend struct {
|
||||
endpoints []tailcfg.Endpoint
|
||||
blocked bool
|
||||
keyExpired bool
|
||||
authURL string // cleared on Notify
|
||||
authURLSticky string // not cleared on Notify
|
||||
authURL string // non-empty if not Running
|
||||
authURLTime time.Time // when the authURL was received from the control server
|
||||
interact bool
|
||||
egg bool
|
||||
prevIfState *netmon.State
|
||||
peerAPIServer *peerAPIServer // or nil
|
||||
@@ -785,7 +783,7 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) {
|
||||
s.Version = version.Long()
|
||||
s.TUN = !b.sys.IsNetstack()
|
||||
s.BackendState = b.state.String()
|
||||
s.AuthURL = b.authURLSticky
|
||||
s.AuthURL = b.authURL
|
||||
if prefs := b.pm.CurrentPrefs(); prefs.Valid() && prefs.AutoUpdate().Check {
|
||||
s.ClientVersion = b.lastClientVersion
|
||||
}
|
||||
@@ -1139,7 +1137,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
|
||||
prefsChanged := false
|
||||
prefs := b.pm.CurrentPrefs().AsStruct()
|
||||
netMap := b.netMap
|
||||
interact := b.interact
|
||||
|
||||
if prefs.ControlURL == "" {
|
||||
// Once we get a message from the control plane, set
|
||||
@@ -1158,7 +1155,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
|
||||
}
|
||||
if st.URL != "" {
|
||||
b.authURL = st.URL
|
||||
b.authURLSticky = st.URL
|
||||
b.authURLTime = b.clock.Now()
|
||||
}
|
||||
if (wasBlocked || b.seamlessRenewalEnabled()) && st.LoginFinished() {
|
||||
@@ -1276,9 +1272,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
|
||||
}
|
||||
if st.URL != "" {
|
||||
b.logf("Received auth URL: %.20v...", st.URL)
|
||||
if interact {
|
||||
b.popBrowserAuthNow()
|
||||
}
|
||||
b.popBrowserAuthNow()
|
||||
}
|
||||
b.stateMachine()
|
||||
// This is currently (2020-07-28) necessary; conditionally disabling it is fragile!
|
||||
@@ -1848,7 +1842,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
|
||||
// Without this, the state machine transitions to "NeedsLogin" implying
|
||||
// that user interaction is required, which is not the case and can
|
||||
// regress tsnet.Server restarts.
|
||||
cc.Login(nil, controlclient.LoginDefault)
|
||||
cc.Login(controlclient.LoginDefault)
|
||||
}
|
||||
b.stateMachineLockedOnEntry(unlock)
|
||||
|
||||
@@ -2281,8 +2275,8 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa
|
||||
if mask&ipn.NotifyInitialState != 0 {
|
||||
ini.SessionID = sessionID
|
||||
ini.State = ptr.To(b.state)
|
||||
if b.state == ipn.NeedsLogin && b.authURLSticky != "" {
|
||||
ini.BrowseToURL = ptr.To(b.authURLSticky)
|
||||
if b.state == ipn.NeedsLogin && b.authURL != "" {
|
||||
ini.BrowseToURL = ptr.To(b.authURL)
|
||||
}
|
||||
}
|
||||
if mask&ipn.NotifyInitialPrefs != 0 {
|
||||
@@ -2336,11 +2330,27 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa
|
||||
// TODO(marwan-at-work): streaming background logs?
|
||||
defer b.DeleteForegroundSession(sessionID)
|
||||
|
||||
var lastURLPop string // to dup suppress URL popups
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case n, ok := <-ch:
|
||||
// URLs flow into Notify.BrowseToURL via two means:
|
||||
// 1. From MapResponse.PopBrowserURL, which already says they're dup
|
||||
// suppressed if identical, and that's done by the controlclient,
|
||||
// so this added later adds nothing.
|
||||
//
|
||||
// 2. From the controlclient auth routes, on register. This makes sure
|
||||
// we don't tell clients (mac, windows, android) to pop the same URL
|
||||
// multiple times.
|
||||
if n != nil && n.BrowseToURL != nil {
|
||||
if v := *n.BrowseToURL; v == lastURLPop {
|
||||
n.BrowseToURL = nil
|
||||
} else {
|
||||
lastURLPop = v
|
||||
}
|
||||
}
|
||||
if !ok || !fn(n) {
|
||||
return
|
||||
}
|
||||
@@ -2476,8 +2486,6 @@ func (b *LocalBackend) sendFileNotify() {
|
||||
func (b *LocalBackend) popBrowserAuthNow() {
|
||||
b.mu.Lock()
|
||||
url := b.authURL
|
||||
b.interact = false
|
||||
b.authURL = "" // but NOT clearing authURLSticky
|
||||
expired := b.keyExpired
|
||||
b.mu.Unlock()
|
||||
|
||||
@@ -2805,7 +2813,6 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error {
|
||||
if b.cc == nil {
|
||||
panic("LocalBackend.assertClient: b.cc == nil")
|
||||
}
|
||||
b.interact = true
|
||||
url := b.authURL
|
||||
timeSinceAuthURLCreated := b.clock.Since(b.authURLTime)
|
||||
cc := b.cc
|
||||
@@ -2818,7 +2825,7 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error {
|
||||
if url != "" && timeSinceAuthURLCreated < ((7*24*time.Hour)-(1*time.Hour)) {
|
||||
b.popBrowserAuthNow()
|
||||
} else {
|
||||
cc.Login(nil, b.loginFlags|controlclient.LoginInteractive)
|
||||
cc.Login(b.loginFlags | controlclient.LoginInteractive)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -3332,7 +3339,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce)
|
||||
|
||||
if !oldp.WantRunning() && newp.WantRunning {
|
||||
b.logf("transitioning to running; doing Login...")
|
||||
cc.Login(nil, controlclient.LoginDefault)
|
||||
cc.Login(controlclient.LoginDefault)
|
||||
}
|
||||
|
||||
if oldp.WantRunning() != newp.WantRunning {
|
||||
@@ -3642,9 +3649,6 @@ func (b *LocalBackend) authReconfig() {
|
||||
if prefs.RouteAll() {
|
||||
flags |= netmap.AllowSubnetRoutes
|
||||
}
|
||||
if prefs.AllowSingleHosts() {
|
||||
flags |= netmap.AllowSingleHosts
|
||||
}
|
||||
if hasPAC && disableSubnetsIfPAC {
|
||||
if flags&netmap.AllowSubnetRoutes != 0 {
|
||||
b.logf("authReconfig: have PAC; disabling subnet routes")
|
||||
@@ -4182,18 +4186,7 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC
|
||||
}
|
||||
|
||||
var doStatefulFiltering bool
|
||||
if v, ok := prefs.NoStatefulFiltering().Get(); !ok {
|
||||
// The stateful filtering preference isn't explicitly set; this is
|
||||
// unexpected since we expect it to be set during the profile
|
||||
// backfill, but to be safe let's enable stateful filtering
|
||||
// absent further information.
|
||||
doStatefulFiltering = true
|
||||
b.logf("[unexpected] NoStatefulFiltering preference not set; enabling stateful filtering")
|
||||
} else if v {
|
||||
// The preferences explicitly say "no stateful filtering", so
|
||||
// we don't do it.
|
||||
doStatefulFiltering = false
|
||||
} else {
|
||||
if v, ok := prefs.NoStatefulFiltering().Get(); ok && !v {
|
||||
// The preferences explicitly "do stateful filtering" is turned
|
||||
// off, or to expand the double negative, to do stateful
|
||||
// filtering. Do so.
|
||||
@@ -4347,7 +4340,6 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock
|
||||
authURL := b.authURL
|
||||
if newState == ipn.Running {
|
||||
b.authURL = ""
|
||||
b.authURLSticky = ""
|
||||
b.authURLTime = time.Time{}
|
||||
} else if oldState == ipn.Running {
|
||||
// Transitioning away from running.
|
||||
@@ -4607,7 +4599,6 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client {
|
||||
}
|
||||
|
||||
b.authURL = ""
|
||||
b.authURLSticky = ""
|
||||
|
||||
// When we clear the control client, stop any outstanding netmap expiry
|
||||
// timer; synthesizing a new netmap while we don't have a control
|
||||
@@ -4653,7 +4644,6 @@ func (b *LocalBackend) ResetForClientDisconnect() {
|
||||
}
|
||||
b.keyExpired = false
|
||||
b.authURL = ""
|
||||
b.authURLSticky = ""
|
||||
b.authURLTime = time.Time{}
|
||||
b.activeLogin = ""
|
||||
b.resetDialPlan()
|
||||
@@ -6424,7 +6414,7 @@ func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionRes
|
||||
lastSuggestedExitNode := b.lastSuggestedExitNode
|
||||
b.mu.Unlock()
|
||||
if lastReport == nil || netMap == nil {
|
||||
last, err := suggestLastExitNode(lastSuggestedExitNode)
|
||||
last, err := lastSuggestedExitNode.asAPIType()
|
||||
if err != nil {
|
||||
return response, ErrCannotSuggestExitNode
|
||||
}
|
||||
@@ -6434,7 +6424,7 @@ func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionRes
|
||||
r := rand.New(rand.NewSource(seed))
|
||||
res, err := suggestExitNode(lastReport, netMap, r)
|
||||
if err != nil {
|
||||
last, err := suggestLastExitNode(lastSuggestedExitNode)
|
||||
last, err := lastSuggestedExitNode.asAPIType()
|
||||
if err != nil {
|
||||
return response, ErrCannotSuggestExitNode
|
||||
}
|
||||
@@ -6447,12 +6437,13 @@ func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionRes
|
||||
return res, err
|
||||
}
|
||||
|
||||
// suggestLastExitNode formats a response with the last suggested exit node's ID and name.
|
||||
// asAPIType formats a response with the last suggested exit node's ID and name.
|
||||
// Returns error if there is no id or name.
|
||||
// Used as a fallback before returning a nil response and error.
|
||||
func suggestLastExitNode(lastSuggestedExitNode lastSuggestedExitNode) (res apitype.ExitNodeSuggestionResponse, err error) {
|
||||
if lastSuggestedExitNode.id != "" && lastSuggestedExitNode.name != "" {
|
||||
res.ID = lastSuggestedExitNode.id
|
||||
res.Name = lastSuggestedExitNode.name
|
||||
func (n lastSuggestedExitNode) asAPIType() (res apitype.ExitNodeSuggestionResponse, _ error) {
|
||||
if n.id != "" && n.name != "" {
|
||||
res.ID = n.id
|
||||
res.Name = n.name
|
||||
return res, nil
|
||||
}
|
||||
return res, ErrUnableToSuggestLastExitNode
|
||||
@@ -6462,8 +6453,17 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, r *rand
|
||||
if report.PreferredDERP == 0 {
|
||||
return res, ErrNoPreferredDERP
|
||||
}
|
||||
var allowedCandidates set.Set[string]
|
||||
if allowed, err := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil); err != nil {
|
||||
return res, fmt.Errorf("unable to read %s policy: %w", syspolicy.AllowedSuggestedExitNodes, err)
|
||||
} else if allowed != nil && len(allowed) > 0 {
|
||||
allowedCandidates = set.SetOf(allowed)
|
||||
}
|
||||
candidates := make([]tailcfg.NodeView, 0, len(netMap.Peers))
|
||||
for _, peer := range netMap.Peers {
|
||||
if allowedCandidates != nil && !allowedCandidates.Contains(string(peer.StableID())) {
|
||||
continue
|
||||
}
|
||||
if peer.CapMap().Has(tailcfg.NodeAttrSuggestExitNode) && tsaddr.ContainsExitRoutes(peer.AllowedIPs()) {
|
||||
candidates = append(candidates, peer)
|
||||
}
|
||||
|
||||
@@ -1595,6 +1595,9 @@ type mockSyspolicyHandler struct {
|
||||
// queried by the current test. If the policy is expected but unset, then
|
||||
// use nil, otherwise use a string equal to the policy's desired value.
|
||||
stringPolicies map[syspolicy.Key]*string
|
||||
// stringArrayPolicies is the collection of policies that we expected to see
|
||||
// queries by the current test, that return policy string arrays.
|
||||
stringArrayPolicies map[syspolicy.Key][]string
|
||||
// failUnknownPolicies is set if policies other than those in stringPolicies
|
||||
// (uint64 or bool policies are not supported by mockSyspolicyHandler yet)
|
||||
// should be considered a test failure if they are queried.
|
||||
@@ -1632,6 +1635,12 @@ func (h *mockSyspolicyHandler) ReadStringArray(key string) ([]string, error) {
|
||||
if h.failUnknownPolicies {
|
||||
h.t.Errorf("ReadStringArray(%q) unexpectedly called", key)
|
||||
}
|
||||
if s, ok := h.stringArrayPolicies[syspolicy.Key(key)]; ok {
|
||||
if s == nil {
|
||||
return []string{}, syspolicy.ErrNoSuchKey
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
return nil, syspolicy.ErrNoSuchKey
|
||||
}
|
||||
|
||||
@@ -3438,7 +3447,7 @@ func TestMinLatencyDERPregion(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSuggestLastExitNode(t *testing.T) {
|
||||
func TestLastSuggestedExitNodeAsAPIType(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
lastSuggestedExitNode lastSuggestedExitNode
|
||||
@@ -3460,7 +3469,7 @@ func TestSuggestLastExitNode(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := suggestLastExitNode(tt.lastSuggestedExitNode)
|
||||
got, err := tt.lastSuggestedExitNode.asAPIType()
|
||||
if got != tt.wantRes || err != tt.wantErr {
|
||||
t.Errorf("got %v error %v, want %v error %v", got, err, tt.wantRes, tt.wantErr)
|
||||
}
|
||||
@@ -3472,8 +3481,9 @@ func TestLocalBackendSuggestExitNode(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
lastSuggestedExitNode lastSuggestedExitNode
|
||||
report netcheck.Report
|
||||
report *netcheck.Report
|
||||
netMap netmap.NetworkMap
|
||||
allowedSuggestedExitNodes []string
|
||||
wantID tailcfg.StableNodeID
|
||||
wantName string
|
||||
wantErr error
|
||||
@@ -3482,7 +3492,7 @@ func TestLocalBackendSuggestExitNode(t *testing.T) {
|
||||
{
|
||||
name: "nil netmap, returns last suggested exit node",
|
||||
lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
|
||||
report: netcheck.Report{
|
||||
report: &netcheck.Report{
|
||||
RegionLatency: map[int]time.Duration{
|
||||
1: 0,
|
||||
2: -1,
|
||||
@@ -3518,7 +3528,7 @@ func TestLocalBackendSuggestExitNode(t *testing.T) {
|
||||
{
|
||||
name: "found better derp node, last suggested exit node updates",
|
||||
lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
|
||||
report: netcheck.Report{
|
||||
report: &netcheck.Report{
|
||||
RegionLatency: map[int]time.Duration{
|
||||
1: 10,
|
||||
2: 10,
|
||||
@@ -3574,7 +3584,7 @@ func TestLocalBackendSuggestExitNode(t *testing.T) {
|
||||
{
|
||||
name: "found better mullvad node, last suggested exit node updates",
|
||||
lastSuggestedExitNode: lastSuggestedExitNode{name: "San Jose", id: "3"},
|
||||
report: netcheck.Report{
|
||||
report: &netcheck.Report{
|
||||
RegionLatency: map[int]time.Duration{
|
||||
1: 0,
|
||||
2: 0,
|
||||
@@ -3645,7 +3655,7 @@ func TestLocalBackendSuggestExitNode(t *testing.T) {
|
||||
{
|
||||
name: "ErrNoPreferredDERP, use last suggested exit node",
|
||||
lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
|
||||
report: netcheck.Report{
|
||||
report: &netcheck.Report{
|
||||
RegionLatency: map[int]time.Duration{
|
||||
1: 10,
|
||||
2: 10,
|
||||
@@ -3701,7 +3711,7 @@ func TestLocalBackendSuggestExitNode(t *testing.T) {
|
||||
{
|
||||
name: "ErrNoPreferredDERP, use last suggested exit node",
|
||||
lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
|
||||
report: netcheck.Report{
|
||||
report: &netcheck.Report{
|
||||
RegionLatency: map[int]time.Duration{
|
||||
1: 10,
|
||||
2: 10,
|
||||
@@ -3756,7 +3766,7 @@ func TestLocalBackendSuggestExitNode(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "unable to use last suggested exit node",
|
||||
report: netcheck.Report{
|
||||
report: &netcheck.Report{
|
||||
RegionLatency: map[int]time.Duration{
|
||||
1: 10,
|
||||
2: 10,
|
||||
@@ -3766,13 +3776,141 @@ func TestLocalBackendSuggestExitNode(t *testing.T) {
|
||||
},
|
||||
wantErr: ErrCannotSuggestExitNode,
|
||||
},
|
||||
{
|
||||
name: "only pick from allowed suggested exit nodes",
|
||||
lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
|
||||
report: &netcheck.Report{
|
||||
RegionLatency: map[int]time.Duration{
|
||||
1: 10,
|
||||
2: 10,
|
||||
3: 5,
|
||||
},
|
||||
PreferredDERP: 1,
|
||||
},
|
||||
netMap: netmap.NetworkMap{
|
||||
SelfNode: (&tailcfg.Node{
|
||||
Addresses: []netip.Prefix{
|
||||
netip.MustParsePrefix("100.64.1.1/32"),
|
||||
netip.MustParsePrefix("fe70::1/128"),
|
||||
},
|
||||
}).View(),
|
||||
DERPMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {},
|
||||
2: {},
|
||||
3: {},
|
||||
},
|
||||
},
|
||||
Peers: []tailcfg.NodeView{
|
||||
(&tailcfg.Node{
|
||||
ID: 2,
|
||||
StableID: "test",
|
||||
Name: "test",
|
||||
DERP: "127.3.3.40:1",
|
||||
AllowedIPs: []netip.Prefix{
|
||||
netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
|
||||
tailcfg.NodeAttrSuggestExitNode: {},
|
||||
tailcfg.NodeAttrAutoExitNode: {},
|
||||
}),
|
||||
}).View(),
|
||||
(&tailcfg.Node{
|
||||
ID: 3,
|
||||
StableID: "foo",
|
||||
Name: "foo",
|
||||
DERP: "127.3.3.40:3",
|
||||
AllowedIPs: []netip.Prefix{
|
||||
netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
|
||||
tailcfg.NodeAttrSuggestExitNode: {},
|
||||
tailcfg.NodeAttrAutoExitNode: {},
|
||||
}),
|
||||
}).View(),
|
||||
},
|
||||
},
|
||||
allowedSuggestedExitNodes: []string{"test"},
|
||||
wantID: "test",
|
||||
wantName: "test",
|
||||
wantLastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
|
||||
},
|
||||
{
|
||||
name: "allowed suggested exit nodes not nil but length 0",
|
||||
lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
|
||||
report: &netcheck.Report{
|
||||
RegionLatency: map[int]time.Duration{
|
||||
1: 10,
|
||||
2: 10,
|
||||
3: 5,
|
||||
},
|
||||
PreferredDERP: 1,
|
||||
},
|
||||
netMap: netmap.NetworkMap{
|
||||
SelfNode: (&tailcfg.Node{
|
||||
Addresses: []netip.Prefix{
|
||||
netip.MustParsePrefix("100.64.1.1/32"),
|
||||
netip.MustParsePrefix("fe70::1/128"),
|
||||
},
|
||||
}).View(),
|
||||
DERPMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {},
|
||||
2: {},
|
||||
3: {},
|
||||
},
|
||||
},
|
||||
Peers: []tailcfg.NodeView{
|
||||
(&tailcfg.Node{
|
||||
ID: 2,
|
||||
StableID: "test",
|
||||
Name: "test",
|
||||
DERP: "127.3.3.40:1",
|
||||
AllowedIPs: []netip.Prefix{
|
||||
netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
|
||||
tailcfg.NodeAttrSuggestExitNode: {},
|
||||
tailcfg.NodeAttrAutoExitNode: {},
|
||||
}),
|
||||
}).View(),
|
||||
(&tailcfg.Node{
|
||||
ID: 3,
|
||||
StableID: "foo",
|
||||
Name: "foo",
|
||||
DERP: "127.3.3.40:3",
|
||||
AllowedIPs: []netip.Prefix{
|
||||
netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
|
||||
tailcfg.NodeAttrSuggestExitNode: {},
|
||||
tailcfg.NodeAttrAutoExitNode: {},
|
||||
}),
|
||||
}).View(),
|
||||
},
|
||||
},
|
||||
allowedSuggestedExitNodes: []string{},
|
||||
wantID: "foo",
|
||||
wantName: "foo",
|
||||
wantLastSuggestedExitNode: lastSuggestedExitNode{name: "foo", id: "foo"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
lb := newTestLocalBackend(t)
|
||||
msh := &mockSyspolicyHandler{
|
||||
t: t,
|
||||
stringArrayPolicies: map[syspolicy.Key][]string{
|
||||
syspolicy.AllowedSuggestedExitNodes: nil,
|
||||
},
|
||||
}
|
||||
if len(tt.allowedSuggestedExitNodes) != 0 {
|
||||
msh.stringArrayPolicies[syspolicy.AllowedSuggestedExitNodes] = tt.allowedSuggestedExitNodes
|
||||
}
|
||||
syspolicy.SetHandlerForTest(t, msh)
|
||||
lb.lastSuggestedExitNode = tt.lastSuggestedExitNode
|
||||
lb.netMap = &tt.netMap
|
||||
lb.sys.MagicSock.Get().SetLastNetcheckReport(context.Background(), tt.report)
|
||||
lb.sys.MagicSock.Get().SetLastNetcheckReportForTest(context.Background(), tt.report)
|
||||
got, err := lb.SuggestExitNode()
|
||||
if got.ID != tt.wantID {
|
||||
t.Errorf("ID=%v, want=%v", got.ID, tt.wantID)
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"net/netip"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"tailscale.com/health/healthmsg"
|
||||
@@ -27,10 +28,12 @@ import (
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/types/persist"
|
||||
"tailscale.com/types/tkatype"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
// TODO(tom): RPC retry/backoff was broken and has been removed. Fix?
|
||||
@@ -66,6 +69,7 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {
|
||||
return // TKA not enabled.
|
||||
}
|
||||
|
||||
tracker := rotationTracker{logf: b.logf}
|
||||
var toDelete map[int]bool // peer index => true
|
||||
for i, p := range nm.Peers {
|
||||
if p.UnsignedPeerAPIOnly() {
|
||||
@@ -76,21 +80,32 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {
|
||||
b.logf("Network lock is dropping peer %v(%v) due to missing signature", p.ID(), p.StableID())
|
||||
mak.Set(&toDelete, i, true)
|
||||
} else {
|
||||
if err := b.tka.authority.NodeKeyAuthorized(p.Key(), p.KeySignature().AsSlice()); err != nil {
|
||||
details, err := b.tka.authority.NodeKeyAuthorizedWithDetails(p.Key(), p.KeySignature().AsSlice())
|
||||
if err != nil {
|
||||
b.logf("Network lock is dropping peer %v(%v) due to failed signature check: %v", p.ID(), p.StableID(), err)
|
||||
mak.Set(&toDelete, i, true)
|
||||
continue
|
||||
}
|
||||
if details != nil {
|
||||
// Rotation details are returned when the node key is signed by a valid SigRotation signature.
|
||||
tracker.addRotationDetails(p.Key(), details)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
obsoleteByRotation := tracker.obsoleteKeys()
|
||||
|
||||
// nm.Peers is ordered, so deletion must be order-preserving.
|
||||
if len(toDelete) > 0 {
|
||||
if len(toDelete) > 0 || len(obsoleteByRotation) > 0 {
|
||||
peers := make([]tailcfg.NodeView, 0, len(nm.Peers))
|
||||
filtered := make([]ipnstate.TKAFilteredPeer, 0, len(toDelete))
|
||||
filtered := make([]ipnstate.TKAFilteredPeer, 0, len(toDelete)+len(obsoleteByRotation))
|
||||
for i, p := range nm.Peers {
|
||||
if !toDelete[i] {
|
||||
if !toDelete[i] && !obsoleteByRotation.Contains(p.Key()) {
|
||||
peers = append(peers, p)
|
||||
} else {
|
||||
if obsoleteByRotation.Contains(p.Key()) {
|
||||
b.logf("Network lock is dropping peer %v(%v) due to key rotation", p.ID(), p.StableID())
|
||||
}
|
||||
// Record information about the node we filtered out.
|
||||
fp := ipnstate.TKAFilteredPeer{
|
||||
Name: p.Name(),
|
||||
@@ -122,6 +137,84 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {
|
||||
}
|
||||
}
|
||||
|
||||
// rotationTracker determines the set of node keys that are made obsolete by key
|
||||
// rotation.
|
||||
// - for each SigRotation signature, all previous node keys referenced by the
|
||||
// nested signatures are marked as obsolete.
|
||||
// - if there are multiple SigRotation signatures tracing back to the same
|
||||
// wrapping pubkey (e.g. if a node is cloned with all its keys), we keep
|
||||
// just one of them, marking the others as obsolete.
|
||||
type rotationTracker struct {
|
||||
// obsolete is the set of node keys that are obsolete due to key rotation.
|
||||
// users of rotationTracker should use the obsoleteKeys method for complete results.
|
||||
obsolete set.Set[key.NodePublic]
|
||||
|
||||
// byWrappingKey keeps track of rotation details per wrapping pubkey.
|
||||
byWrappingKey map[string][]sigRotationDetails
|
||||
|
||||
logf logger.Logf
|
||||
}
|
||||
|
||||
// sigRotationDetails holds information about a node key signed by a SigRotation.
|
||||
type sigRotationDetails struct {
|
||||
np key.NodePublic
|
||||
numPrevKeys int
|
||||
}
|
||||
|
||||
// addRotationDetails records the rotation signature details for a node key.
|
||||
func (r *rotationTracker) addRotationDetails(np key.NodePublic, d *tka.RotationDetails) {
|
||||
r.obsolete.Make()
|
||||
r.obsolete.AddSlice(d.PrevNodeKeys)
|
||||
rd := sigRotationDetails{
|
||||
np: np,
|
||||
numPrevKeys: len(d.PrevNodeKeys),
|
||||
}
|
||||
if r.byWrappingKey == nil {
|
||||
r.byWrappingKey = make(map[string][]sigRotationDetails)
|
||||
}
|
||||
wp := string(d.WrappingPubkey)
|
||||
r.byWrappingKey[wp] = append(r.byWrappingKey[wp], rd)
|
||||
}
|
||||
|
||||
// obsoleteKeys returns the set of node keys that are obsolete due to key rotation.
|
||||
func (r *rotationTracker) obsoleteKeys() set.Set[key.NodePublic] {
|
||||
for _, v := range r.byWrappingKey {
|
||||
// If there are multiple rotation signatures with the same wrapping
|
||||
// pubkey, we need to decide which one is the "latest", and keep it.
|
||||
// The signature with the largest number of previous keys is likely to
|
||||
// be the latest, unless it has been marked as obsolete (rotated out) by
|
||||
// another signature (which might happen in the future if we start
|
||||
// compacting long rotated signature chains).
|
||||
slices.SortStableFunc(v, func(a, b sigRotationDetails) int {
|
||||
// Group all obsolete keys after non-obsolete keys.
|
||||
if ao, bo := r.obsolete.Contains(a.np), r.obsolete.Contains(b.np); ao != bo {
|
||||
if ao {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
// Sort by decreasing number of previous keys.
|
||||
return b.numPrevKeys - a.numPrevKeys
|
||||
})
|
||||
// If there are several signatures with the same number of previous
|
||||
// keys, we cannot determine which one is the latest, so all of them are
|
||||
// rejected for safety.
|
||||
if len(v) >= 2 && v[0].numPrevKeys == v[1].numPrevKeys {
|
||||
r.logf("at least two nodes (%s and %s) have equally valid rotation signatures with the same wrapping pubkey, rejecting", v[0].np, v[1].np)
|
||||
for _, rd := range v {
|
||||
r.obsolete.Add(rd.np)
|
||||
}
|
||||
} else {
|
||||
// The first key in v is the one with the longest chain of previous
|
||||
// keys, so it must be the newest one. Mark all older keys as obsolete.
|
||||
for _, rd := range v[1:] {
|
||||
r.obsolete.Add(rd.np)
|
||||
}
|
||||
}
|
||||
}
|
||||
return r.obsolete
|
||||
}
|
||||
|
||||
// tkaSyncIfNeeded examines TKA info reported from the control plane,
|
||||
// performing the steps necessary to synchronize local tka state.
|
||||
//
|
||||
@@ -423,8 +516,12 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus {
|
||||
copy(head[:], h[:])
|
||||
|
||||
var selfAuthorized bool
|
||||
nodeKeySignature := &tka.NodeKeySignature{}
|
||||
if b.netMap != nil {
|
||||
selfAuthorized = b.tka.authority.NodeKeyAuthorized(b.netMap.SelfNode.Key(), b.netMap.SelfNode.KeySignature().AsSlice()) == nil
|
||||
if err := nodeKeySignature.Unserialize(b.netMap.SelfNode.KeySignature().AsSlice()); err != nil {
|
||||
b.logf("failed to decode self node key signature: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
keys := b.tka.authority.Keys()
|
||||
@@ -445,14 +542,15 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus {
|
||||
stateID1, _ := b.tka.authority.StateIDs()
|
||||
|
||||
return &ipnstate.NetworkLockStatus{
|
||||
Enabled: true,
|
||||
Head: &head,
|
||||
PublicKey: nlPriv.Public(),
|
||||
NodeKey: nodeKey,
|
||||
NodeKeySigned: selfAuthorized,
|
||||
TrustedKeys: outKeys,
|
||||
FilteredPeers: filtered,
|
||||
StateID: stateID1,
|
||||
Enabled: true,
|
||||
Head: &head,
|
||||
PublicKey: nlPriv.Public(),
|
||||
NodeKey: nodeKey,
|
||||
NodeKeySigned: selfAuthorized,
|
||||
NodeKeySignature: nodeKeySignature,
|
||||
TrustedKeys: outKeys,
|
||||
FilteredPeers: filtered,
|
||||
StateID: stateID1,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -13,8 +13,11 @@ import (
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
go4mem "go4.org/mem"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"tailscale.com/control/controlclient"
|
||||
"tailscale.com/health"
|
||||
@@ -30,6 +33,7 @@ import (
|
||||
"tailscale.com/types/persist"
|
||||
"tailscale.com/types/tkatype"
|
||||
"tailscale.com/util/must"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
type observerFunc func(controlclient.Status)
|
||||
@@ -563,18 +567,32 @@ func TestTKAFilterNetmap(t *testing.T) {
|
||||
}
|
||||
n4Sig.Signature[3] = 42 // mess up the signature
|
||||
n4Sig.Signature[4] = 42 // mess up the signature
|
||||
n5GoodSig, err := signNodeKey(tailcfg.TKASignInfo{NodePublic: n5.Public()}, nlPriv)
|
||||
|
||||
n5nl := key.NewNLPrivate()
|
||||
n5InitialSig, err := signNodeKey(tailcfg.TKASignInfo{NodePublic: n5.Public(), RotationPubkey: n5nl.Public().Verifier()}, nlPriv)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resign := func(nl key.NLPrivate, currentSig tkatype.MarshaledSignature) (key.NodePrivate, tkatype.MarshaledSignature) {
|
||||
nk := key.NewNode()
|
||||
sig, err := tka.ResignNKS(nl, nk.Public(), currentSig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return nk, sig
|
||||
}
|
||||
|
||||
n5Rotated, n5RotatedSig := resign(n5nl, n5InitialSig.Serialize())
|
||||
|
||||
nm := &netmap.NetworkMap{
|
||||
Peers: nodeViews([]*tailcfg.Node{
|
||||
{ID: 1, Key: n1.Public(), KeySignature: n1GoodSig.Serialize()},
|
||||
{ID: 2, Key: n2.Public(), KeySignature: nil}, // missing sig
|
||||
{ID: 3, Key: n3.Public(), KeySignature: n1GoodSig.Serialize()}, // someone elses sig
|
||||
{ID: 4, Key: n4.Public(), KeySignature: n4Sig.Serialize()}, // messed-up signature
|
||||
{ID: 5, Key: n5.Public(), KeySignature: n5GoodSig.Serialize()},
|
||||
{ID: 2, Key: n2.Public(), KeySignature: nil}, // missing sig
|
||||
{ID: 3, Key: n3.Public(), KeySignature: n1GoodSig.Serialize()}, // someone elses sig
|
||||
{ID: 4, Key: n4.Public(), KeySignature: n4Sig.Serialize()}, // messed-up signature
|
||||
{ID: 50, Key: n5.Public(), KeySignature: n5InitialSig.Serialize()}, // rotated
|
||||
{ID: 51, Key: n5Rotated.Public(), KeySignature: n5RotatedSig},
|
||||
}),
|
||||
}
|
||||
|
||||
@@ -586,12 +604,39 @@ func TestTKAFilterNetmap(t *testing.T) {
|
||||
|
||||
want := nodeViews([]*tailcfg.Node{
|
||||
{ID: 1, Key: n1.Public(), KeySignature: n1GoodSig.Serialize()},
|
||||
{ID: 5, Key: n5.Public(), KeySignature: n5GoodSig.Serialize()},
|
||||
{ID: 51, Key: n5Rotated.Public(), KeySignature: n5RotatedSig},
|
||||
})
|
||||
nodePubComparer := cmp.Comparer(func(x, y key.NodePublic) bool {
|
||||
return x.Raw32() == y.Raw32()
|
||||
})
|
||||
if diff := cmp.Diff(nm.Peers, want, nodePubComparer); diff != "" {
|
||||
if diff := cmp.Diff(want, nm.Peers, nodePubComparer); diff != "" {
|
||||
t.Errorf("filtered netmap differs (-want, +got):\n%s", diff)
|
||||
}
|
||||
|
||||
// Create two more node signatures using the same wrapping key as n5.
|
||||
// Since they have the same rotation chain, both will be filtered out.
|
||||
n7, n7Sig := resign(n5nl, n5RotatedSig)
|
||||
n8, n8Sig := resign(n5nl, n5RotatedSig)
|
||||
|
||||
nm = &netmap.NetworkMap{
|
||||
Peers: nodeViews([]*tailcfg.Node{
|
||||
{ID: 1, Key: n1.Public(), KeySignature: n1GoodSig.Serialize()},
|
||||
{ID: 2, Key: n2.Public(), KeySignature: nil}, // missing sig
|
||||
{ID: 3, Key: n3.Public(), KeySignature: n1GoodSig.Serialize()}, // someone elses sig
|
||||
{ID: 4, Key: n4.Public(), KeySignature: n4Sig.Serialize()}, // messed-up signature
|
||||
{ID: 50, Key: n5.Public(), KeySignature: n5InitialSig.Serialize()}, // rotated
|
||||
{ID: 51, Key: n5Rotated.Public(), KeySignature: n5RotatedSig}, // rotated
|
||||
{ID: 7, Key: n7.Public(), KeySignature: n7Sig}, // same rotation chain as n8
|
||||
{ID: 8, Key: n8.Public(), KeySignature: n8Sig}, // same rotation chain as n7
|
||||
}),
|
||||
}
|
||||
|
||||
b.tkaFilterNetmapLocked(nm)
|
||||
|
||||
want = nodeViews([]*tailcfg.Node{
|
||||
{ID: 1, Key: n1.Public(), KeySignature: n1GoodSig.Serialize()},
|
||||
})
|
||||
if diff := cmp.Diff(want, nm.Peers, nodePubComparer); diff != "" {
|
||||
t.Errorf("filtered netmap differs (-want, +got):\n%s", diff)
|
||||
}
|
||||
}
|
||||
@@ -1130,3 +1175,85 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) {
|
||||
t.Errorf("NetworkLockSubmitRecoveryAUM() failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRotationTracker(t *testing.T) {
|
||||
newNK := func(idx byte) key.NodePublic {
|
||||
// single-byte public key to make it human-readable in tests.
|
||||
raw32 := [32]byte{idx}
|
||||
return key.NodePublicFromRaw32(go4mem.B(raw32[:]))
|
||||
}
|
||||
n1, n2, n3, n4, n5 := newNK(1), newNK(2), newNK(3), newNK(4), newNK(5)
|
||||
|
||||
pk1, pk2, pk3 := []byte{1}, []byte{2}, []byte{3}
|
||||
type addDetails struct {
|
||||
np key.NodePublic
|
||||
details *tka.RotationDetails
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
addDetails []addDetails
|
||||
want set.Set[key.NodePublic]
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "single_prev_key",
|
||||
addDetails: []addDetails{
|
||||
{np: n1, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n2}, WrappingPubkey: pk1}},
|
||||
},
|
||||
want: set.SetOf([]key.NodePublic{n2}),
|
||||
},
|
||||
{
|
||||
name: "several_prev_keys",
|
||||
addDetails: []addDetails{
|
||||
{np: n1, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n2}, WrappingPubkey: pk1}},
|
||||
{np: n3, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n4}, WrappingPubkey: pk2}},
|
||||
{np: n2, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n3, n4}, WrappingPubkey: pk1}},
|
||||
},
|
||||
want: set.SetOf([]key.NodePublic{n2, n3, n4}),
|
||||
},
|
||||
{
|
||||
name: "several_per_pubkey_latest_wins",
|
||||
addDetails: []addDetails{
|
||||
{np: n2, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1}, WrappingPubkey: pk3}},
|
||||
{np: n3, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1, n2}, WrappingPubkey: pk3}},
|
||||
{np: n4, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1, n2, n3}, WrappingPubkey: pk3}},
|
||||
{np: n5, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n4}, WrappingPubkey: pk3}},
|
||||
},
|
||||
want: set.SetOf([]key.NodePublic{n1, n2, n3, n4}),
|
||||
},
|
||||
{
|
||||
name: "several_per_pubkey_same_chain_length_all_rejected",
|
||||
addDetails: []addDetails{
|
||||
{np: n2, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1}, WrappingPubkey: pk3}},
|
||||
{np: n3, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1, n2}, WrappingPubkey: pk3}},
|
||||
{np: n4, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1, n2}, WrappingPubkey: pk3}},
|
||||
{np: n5, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1, n2}, WrappingPubkey: pk3}},
|
||||
},
|
||||
want: set.SetOf([]key.NodePublic{n1, n2, n3, n4, n5}),
|
||||
},
|
||||
{
|
||||
name: "several_per_pubkey_longest_wins",
|
||||
addDetails: []addDetails{
|
||||
{np: n2, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1}, WrappingPubkey: pk3}},
|
||||
{np: n3, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1, n2}, WrappingPubkey: pk3}},
|
||||
{np: n4, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1, n2}, WrappingPubkey: pk3}},
|
||||
{np: n5, details: &tka.RotationDetails{PrevNodeKeys: []key.NodePublic{n1, n2, n3}, WrappingPubkey: pk3}},
|
||||
},
|
||||
want: set.SetOf([]key.NodePublic{n1, n2, n3, n4}),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
r := &rotationTracker{logf: t.Logf}
|
||||
for _, ad := range tt.addDetails {
|
||||
r.addRotationDetails(ad.np, ad.details)
|
||||
}
|
||||
if got := r.obsoleteKeys(); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("rotationTracker.obsoleteKeys() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -354,10 +354,6 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
savedPrefs := ipn.NewPrefs()
|
||||
// NewPrefs sets a default NoStatefulFiltering, but we want to actually see
|
||||
// if the saved state had an empty value. The empty value gets migrated
|
||||
// based on NoSNAT, while a default "false" does not.
|
||||
savedPrefs.NoStatefulFiltering = ""
|
||||
if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil {
|
||||
return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err)
|
||||
}
|
||||
@@ -382,32 +378,6 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error
|
||||
savedPrefs.AutoUpdate.Apply.Clear()
|
||||
}
|
||||
|
||||
// Backfill a missing NoStatefulFiltering field based on the value of
|
||||
// the NoSNAT field; we want to apply stateful filtering in all cases
|
||||
// *except* where the user has disabled SNAT.
|
||||
//
|
||||
// Only backfill if the user hasn't set a value for
|
||||
// NoStatefulFiltering, however.
|
||||
_, haveNoStateful := savedPrefs.NoStatefulFiltering.Get()
|
||||
if !haveNoStateful {
|
||||
if savedPrefs.NoSNAT {
|
||||
pm.logf("backfilling NoStatefulFiltering field to true because NoSNAT is set")
|
||||
|
||||
// No SNAT: no stateful filtering
|
||||
savedPrefs.NoStatefulFiltering.Set(true)
|
||||
} else {
|
||||
pm.logf("backfilling NoStatefulFiltering field to false because NoSNAT is not set")
|
||||
|
||||
// SNAT (default): apply stateful filtering
|
||||
savedPrefs.NoStatefulFiltering.Set(false)
|
||||
}
|
||||
|
||||
// Write back to the preferences store now that we've updated it.
|
||||
if err := pm.writePrefsToStore(key, savedPrefs.View()); err != nil {
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return savedPrefs.View(), nil
|
||||
}
|
||||
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/user"
|
||||
"strconv"
|
||||
@@ -13,14 +12,12 @@ import (
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/opt"
|
||||
"tailscale.com/types/persist"
|
||||
"tailscale.com/util/must"
|
||||
)
|
||||
@@ -604,89 +601,6 @@ func TestProfileManagementWindows(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestProfileBackfillStatefulFiltering(t *testing.T) {
|
||||
envknob.Setenv("TS_DEBUG_PROFILES", "true")
|
||||
|
||||
tests := []struct {
|
||||
noSNAT bool
|
||||
noStateful opt.Bool
|
||||
want bool
|
||||
}{
|
||||
// Default: NoSNAT is false, NoStatefulFiltering is false, so
|
||||
// we want it to stay false.
|
||||
{false, "false", false},
|
||||
|
||||
// NoSNAT being set to true and NoStatefulFiltering being false
|
||||
// should result in NoStatefulFiltering still being false,
|
||||
// since it was explicitly set.
|
||||
{true, "false", false},
|
||||
|
||||
// If NoSNAT is false, and NoStatefulFiltering is unset, we
|
||||
// backfill it to 'false'.
|
||||
{false, "", false},
|
||||
|
||||
// If NoSNAT is true, and NoStatefulFiltering is unset, we
|
||||
// backfill to 'true' to not break users of NoSNAT.
|
||||
//
|
||||
// In other words: if the user is not using SNAT, they almost
|
||||
// certainly also don't want to use stateful filtering.
|
||||
{true, "", true},
|
||||
|
||||
// However, if the user specifies both NoSNAT and stateful
|
||||
// filtering, don't change that.
|
||||
{true, "true", true},
|
||||
{false, "true", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(fmt.Sprintf("noSNAT=%v,noStateful=%q", tt.noSNAT, tt.noStateful), func(t *testing.T) {
|
||||
prefs := ipn.NewPrefs()
|
||||
prefs.Persist = &persist.Persist{
|
||||
NodeID: tailcfg.StableNodeID("node1"),
|
||||
UserProfile: tailcfg.UserProfile{
|
||||
ID: tailcfg.UserID(1),
|
||||
LoginName: "user1@example.com",
|
||||
},
|
||||
}
|
||||
|
||||
prefs.NoSNAT = tt.noSNAT
|
||||
prefs.NoStatefulFiltering = tt.noStateful
|
||||
|
||||
// Make enough of a state store to load the prefs.
|
||||
const profileName = "profile1"
|
||||
bn := must.Get(json.Marshal(map[string]any{
|
||||
string(ipn.CurrentProfileStateKey): []byte(profileName),
|
||||
string(ipn.KnownProfilesStateKey): must.Get(json.Marshal(map[ipn.ProfileID]*ipn.LoginProfile{
|
||||
profileName: {
|
||||
ID: "profile1-id",
|
||||
Key: profileName,
|
||||
},
|
||||
})),
|
||||
profileName: prefs.ToBytes(),
|
||||
}))
|
||||
|
||||
store := new(mem.Store)
|
||||
err := store.LoadFromJSON([]byte(bn))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ht := new(health.Tracker)
|
||||
pm, err := newProfileManagerWithGOOS(store, t.Logf, ht, "linux")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Get the current profile and verify that we backfilled our
|
||||
// StatefulFiltering boolean.
|
||||
pf := pm.CurrentPrefs()
|
||||
if !pf.NoStatefulFiltering().EqualBool(tt.want) {
|
||||
t.Fatalf("got NoStatefulFiltering=%q, want %v", pf.NoStatefulFiltering(), tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDefaultPrefs tests that defaultPrefs is just NewPrefs with
|
||||
// LoggedOut=true (the Prefs we use before connecting to control). We shouldn't
|
||||
// be putting any defaulting there, and instead put all defaults in NewPrefs.
|
||||
|
||||
@@ -198,8 +198,8 @@ func (cc *mockControl) Shutdown() {
|
||||
// Login starts a login process. Note that in this mock, we don't automatically
|
||||
// generate notifications about the progress of the login operation. You have to
|
||||
// call send() as required by the test.
|
||||
func (cc *mockControl) Login(t *tailcfg.Oauth2Token, flags controlclient.LoginFlags) {
|
||||
cc.logf("Login token=%v flags=%v", t, flags)
|
||||
func (cc *mockControl) Login(flags controlclient.LoginFlags) {
|
||||
cc.logf("Login flags=%v", flags)
|
||||
cc.called("Login")
|
||||
newKeys := cc.populateKeys()
|
||||
|
||||
@@ -265,7 +265,7 @@ func (b *LocalBackend) nonInteractiveLoginForStateTest() {
|
||||
cc := b.cc
|
||||
b.mu.Unlock()
|
||||
|
||||
cc.Login(nil, b.loginFlags|controlclient.LoginInteractive)
|
||||
cc.Login(b.loginFlags | controlclient.LoginInteractive)
|
||||
}
|
||||
|
||||
// A very precise test of the sequence of function calls generated by
|
||||
@@ -329,7 +329,7 @@ func TestStateMachine(t *testing.T) {
|
||||
(n.Prefs != nil && n.Prefs.Valid()) ||
|
||||
n.BrowseToURL != nil ||
|
||||
n.LoginFinished != nil {
|
||||
logf("%v\n\n", n)
|
||||
logf("%+v\n\n", n)
|
||||
notifies.put(n)
|
||||
} else {
|
||||
logf("(ignored) %v\n\n", n)
|
||||
@@ -406,7 +406,7 @@ func TestStateMachine(t *testing.T) {
|
||||
// the user needs to visit a login URL.
|
||||
t.Logf("\n\nLogin (url response)")
|
||||
|
||||
notifies.expect(2)
|
||||
notifies.expect(3)
|
||||
b.EditPrefs(&ipn.MaskedPrefs{
|
||||
ControlURLSet: true,
|
||||
Prefs: ipn.Prefs{
|
||||
@@ -421,12 +421,15 @@ func TestStateMachine(t *testing.T) {
|
||||
// ...but backend eats that notification, because the user
|
||||
// didn't explicitly request interactive login yet, and
|
||||
// we're already in NeedsLogin state.
|
||||
nn := notifies.drain(2)
|
||||
nn := notifies.drain(3)
|
||||
|
||||
c.Assert(nn[1].Prefs, qt.IsNotNil)
|
||||
c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue)
|
||||
c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse)
|
||||
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
|
||||
c.Assert(nn[2].BrowseToURL, qt.IsNotNil)
|
||||
c.Assert(url1, qt.Equals, *nn[2].BrowseToURL)
|
||||
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
|
||||
}
|
||||
|
||||
// Now we'll try an interactive login.
|
||||
@@ -434,13 +437,10 @@ func TestStateMachine(t *testing.T) {
|
||||
// ask control to do anything. Instead backend will emit an event
|
||||
// indicating that the UI should browse to the given URL.
|
||||
t.Logf("\n\nLogin (interactive)")
|
||||
notifies.expect(1)
|
||||
notifies.expect(0)
|
||||
b.StartLoginInteractive(context.Background())
|
||||
{
|
||||
nn := notifies.drain(1)
|
||||
cc.assertCalls()
|
||||
c.Assert(nn[0].BrowseToURL, qt.IsNotNil)
|
||||
c.Assert(url1, qt.Equals, *nn[0].BrowseToURL)
|
||||
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
|
||||
}
|
||||
|
||||
@@ -453,9 +453,8 @@ func TestStateMachine(t *testing.T) {
|
||||
notifies.expect(0)
|
||||
b.StartLoginInteractive(context.Background())
|
||||
{
|
||||
notifies.drain(0)
|
||||
// backend asks control for another login sequence
|
||||
cc.assertCalls("Login")
|
||||
cc.assertCalls()
|
||||
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
|
||||
}
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"time"
|
||||
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/types/views"
|
||||
@@ -126,6 +127,9 @@ type NetworkLockStatus struct {
|
||||
// NodeKeySigned is true if our node is authorized by network-lock.
|
||||
NodeKeySigned bool
|
||||
|
||||
// NodeKeySignature is the current signature of this node's key.
|
||||
NodeKeySignature *tka.NodeKeySignature
|
||||
|
||||
// TrustedKeys describes the keys currently trusted to make changes
|
||||
// to network-lock.
|
||||
TrustedKeys []TKAKey
|
||||
|
||||
@@ -6,6 +6,7 @@ package localapi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
@@ -1939,8 +1940,10 @@ func (h *Handler) serveDial(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
network := cmp.Or(r.Header.Get("Dial-Network"), "tcp")
|
||||
|
||||
addr := net.JoinHostPort(hostStr, portStr)
|
||||
outConn, err := h.b.Dialer().UserDial(r.Context(), "tcp", addr)
|
||||
outConn, err := h.b.Dialer().UserDial(r.Context(), network, addr)
|
||||
if err != nil {
|
||||
http.Error(w, "dial failure: "+err.Error(), http.StatusBadGateway)
|
||||
return
|
||||
|
||||
48
ipn/prefs.go
48
ipn/prefs.go
@@ -75,18 +75,6 @@ type Prefs struct {
|
||||
// controlled by ExitNodeID/IP below.
|
||||
RouteAll bool
|
||||
|
||||
// AllowSingleHosts specifies whether to install routes for each
|
||||
// node IP on the tailscale network, in addition to a route for
|
||||
// the whole network.
|
||||
// This corresponds to the "tailscale up --host-routes" value,
|
||||
// which defaults to true.
|
||||
//
|
||||
// TODO(danderson): why do we have this? It dumps a lot of stuff
|
||||
// into the routing table, and a single network route _should_ be
|
||||
// all that we need. But when I turn this off in my tailscaled,
|
||||
// packets stop flowing. What's up with that?
|
||||
AllowSingleHosts bool
|
||||
|
||||
// ExitNodeID and ExitNodeIP specify the node that should be used
|
||||
// as an exit node for internet traffic. At most one of these
|
||||
// should be non-zero.
|
||||
@@ -203,17 +191,16 @@ type Prefs struct {
|
||||
// Linux-only.
|
||||
NoSNAT bool
|
||||
|
||||
// NoStatefulFiltering specifies whether to apply stateful filtering
|
||||
// when advertising routes in AdvertiseRoutes. The default is to apply
|
||||
// NoStatefulFiltering specifies whether to apply stateful filtering when
|
||||
// advertising routes in AdvertiseRoutes. The default is to not apply
|
||||
// stateful filtering.
|
||||
//
|
||||
// To allow inbound connections from advertised routes, both NoSNAT and
|
||||
// NoStatefulFiltering must be true.
|
||||
//
|
||||
// This is an opt.Bool because it was added after NoSNAT, but is backfilled
|
||||
// based on the value of that parameter. We need to treat it as a tristate:
|
||||
// true, false, or unset, and backfill based on that value. See
|
||||
// ipn/ipnlocal for more details on the backfill.
|
||||
// This is an opt.Bool because it was first added after NoSNAT, with a
|
||||
// backfill based on the value of that parameter. The backfill has been
|
||||
// removed since then, but the field remains an opt.Bool.
|
||||
//
|
||||
// Linux-only.
|
||||
NoStatefulFiltering opt.Bool `json:",omitempty"`
|
||||
@@ -252,6 +239,16 @@ type Prefs struct {
|
||||
// by name.
|
||||
DriveShares []*drive.Share
|
||||
|
||||
// AllowSingleHosts was a legacy field that was always true
|
||||
// for the past 4.5 years. It controlled whether Tailscale
|
||||
// peers got /32 or /127 routes for each other.
|
||||
// As of 2024-05-17 we're starting to ignore it, but to let
|
||||
// people still downgrade Tailscale versions and not break
|
||||
// all peer-to-peer networking we still write it to disk (as JSON)
|
||||
// so it can be loaded back by old versions.
|
||||
// TODO(bradfitz): delete this in 2025 sometime. See #12058.
|
||||
AllowSingleHosts marshalAsTrueInJSON
|
||||
|
||||
// The Persist field is named 'Config' in the file for backward
|
||||
// compatibility with earlier versions.
|
||||
// TODO(apenwarr): We should move this out of here, it's not a pref.
|
||||
@@ -282,6 +279,13 @@ func (au1 AutoUpdatePrefs) Equals(au2 AutoUpdatePrefs) bool {
|
||||
ok1 == ok2
|
||||
}
|
||||
|
||||
type marshalAsTrueInJSON struct{}
|
||||
|
||||
var trueJSON = []byte("true")
|
||||
|
||||
func (marshalAsTrueInJSON) MarshalJSON() ([]byte, error) { return trueJSON, nil }
|
||||
func (*marshalAsTrueInJSON) UnmarshalJSON([]byte) error { return nil }
|
||||
|
||||
// AppConnectorPrefs are the app connector settings for the node agent.
|
||||
type AppConnectorPrefs struct {
|
||||
// Advertise specifies whether the app connector subsystem is advertising
|
||||
@@ -299,7 +303,6 @@ type MaskedPrefs struct {
|
||||
|
||||
ControlURLSet bool `json:",omitempty"`
|
||||
RouteAllSet bool `json:",omitempty"`
|
||||
AllowSingleHostsSet bool `json:",omitempty"`
|
||||
ExitNodeIDSet bool `json:",omitempty"`
|
||||
ExitNodeIPSet bool `json:",omitempty"`
|
||||
InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients
|
||||
@@ -484,9 +487,6 @@ func (p *Prefs) pretty(goos string) string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("Prefs{")
|
||||
fmt.Fprintf(&sb, "ra=%v ", p.RouteAll)
|
||||
if !p.AllowSingleHosts {
|
||||
sb.WriteString("mesh=false ")
|
||||
}
|
||||
fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning)
|
||||
if p.RunSSH {
|
||||
sb.WriteString("ssh=true ")
|
||||
@@ -579,7 +579,6 @@ func (p *Prefs) Equals(p2 *Prefs) bool {
|
||||
|
||||
return p.ControlURL == p2.ControlURL &&
|
||||
p.RouteAll == p2.RouteAll &&
|
||||
p.AllowSingleHosts == p2.AllowSingleHosts &&
|
||||
p.ExitNodeID == p2.ExitNodeID &&
|
||||
p.ExitNodeIP == p2.ExitNodeIP &&
|
||||
p.InternalExitNodePrior == p2.InternalExitNodePrior &&
|
||||
@@ -663,11 +662,10 @@ func NewPrefs() *Prefs {
|
||||
ControlURL: "",
|
||||
|
||||
RouteAll: true,
|
||||
AllowSingleHosts: true,
|
||||
CorpDNS: true,
|
||||
WantRunning: false,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(false),
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
AutoUpdate: AutoUpdatePrefs{
|
||||
Check: true,
|
||||
Apply: opt.Bool("unset"),
|
||||
|
||||
@@ -38,7 +38,6 @@ func TestPrefsEqual(t *testing.T) {
|
||||
prefsHandles := []string{
|
||||
"ControlURL",
|
||||
"RouteAll",
|
||||
"AllowSingleHosts",
|
||||
"ExitNodeID",
|
||||
"ExitNodeIP",
|
||||
"InternalExitNodePrior",
|
||||
@@ -65,6 +64,7 @@ func TestPrefsEqual(t *testing.T) {
|
||||
"PostureChecking",
|
||||
"NetfilterKind",
|
||||
"DriveShares",
|
||||
"AllowSingleHosts",
|
||||
"Persist",
|
||||
}
|
||||
if have := fieldsOf(reflect.TypeFor[Prefs]()); !reflect.DeepEqual(have, prefsHandles) {
|
||||
@@ -123,18 +123,6 @@ func TestPrefsEqual(t *testing.T) {
|
||||
&Prefs{RouteAll: true},
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
&Prefs{AllowSingleHosts: true},
|
||||
&Prefs{AllowSingleHosts: false},
|
||||
false,
|
||||
},
|
||||
{
|
||||
&Prefs{AllowSingleHosts: true},
|
||||
&Prefs{AllowSingleHosts: true},
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
&Prefs{ExitNodeID: "n1234"},
|
||||
&Prefs{},
|
||||
@@ -376,7 +364,7 @@ func checkPrefs(t *testing.T, p Prefs) {
|
||||
p2b = new(Prefs)
|
||||
err = PrefsFromBytes(p2.ToBytes(), p2b)
|
||||
if err != nil {
|
||||
t.Fatalf("PrefsFromBytes(p2) failed\n")
|
||||
t.Fatalf("PrefsFromBytes(p2) failed: bytes=%q; err=%v\n", p2.ToBytes(), err)
|
||||
}
|
||||
p2p := p2.Pretty()
|
||||
p2bp := p2b.Pretty()
|
||||
@@ -427,46 +415,43 @@ func TestPrefsPretty(t *testing.T) {
|
||||
{
|
||||
Prefs{},
|
||||
"linux",
|
||||
"Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist=nil}",
|
||||
"Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist=nil}",
|
||||
},
|
||||
{
|
||||
Prefs{},
|
||||
"windows",
|
||||
"Prefs{ra=false mesh=false dns=false want=false update=off Persist=nil}",
|
||||
"Prefs{ra=false dns=false want=false update=off Persist=nil}",
|
||||
},
|
||||
{
|
||||
Prefs{ShieldsUp: true},
|
||||
"windows",
|
||||
"Prefs{ra=false mesh=false dns=false want=false shields=true update=off Persist=nil}",
|
||||
"Prefs{ra=false dns=false want=false shields=true update=off Persist=nil}",
|
||||
},
|
||||
{
|
||||
Prefs{AllowSingleHosts: true},
|
||||
Prefs{},
|
||||
"windows",
|
||||
"Prefs{ra=false dns=false want=false update=off Persist=nil}",
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
NotepadURLs: true,
|
||||
AllowSingleHosts: true,
|
||||
NotepadURLs: true,
|
||||
},
|
||||
"windows",
|
||||
"Prefs{ra=false dns=false want=false notepad=true update=off Persist=nil}",
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
AllowSingleHosts: true,
|
||||
WantRunning: true,
|
||||
ForceDaemon: true, // server mode
|
||||
WantRunning: true,
|
||||
ForceDaemon: true, // server mode
|
||||
},
|
||||
"windows",
|
||||
"Prefs{ra=false dns=false want=true server=true update=off Persist=nil}",
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
AllowSingleHosts: true,
|
||||
WantRunning: true,
|
||||
ControlURL: "http://localhost:1234",
|
||||
AdvertiseTags: []string{"tag:foo", "tag:bar"},
|
||||
WantRunning: true,
|
||||
ControlURL: "http://localhost:1234",
|
||||
AdvertiseTags: []string{"tag:foo", "tag:bar"},
|
||||
},
|
||||
"darwin",
|
||||
`Prefs{ra=false dns=false want=true tags=tag:foo,tag:bar url="http://localhost:1234" update=off Persist=nil}`,
|
||||
@@ -476,7 +461,7 @@ func TestPrefsPretty(t *testing.T) {
|
||||
Persist: &persist.Persist{},
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist{lm=, o=, n= u=""}}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{lm=, o=, n= u=""}}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
@@ -485,21 +470,21 @@ func TestPrefsPretty(t *testing.T) {
|
||||
},
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist{lm=, o=, n=[B1VKl] u=""}}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{lm=, o=, n=[B1VKl] u=""}}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
ExitNodeIP: netip.MustParseAddr("1.2.3.4"),
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false exit=1.2.3.4 lan=false routes=[] nf=off update=off Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false exit=1.2.3.4 lan=false routes=[] nf=off update=off Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
ExitNodeID: tailcfg.StableNodeID("myNodeABC"),
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false exit=myNodeABC lan=false routes=[] nf=off update=off Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false exit=myNodeABC lan=false routes=[] nf=off update=off Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
@@ -507,21 +492,21 @@ func TestPrefsPretty(t *testing.T) {
|
||||
ExitNodeAllowLANAccess: true,
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false exit=myNodeABC lan=true routes=[] nf=off update=off Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false exit=myNodeABC lan=true routes=[] nf=off update=off Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
ExitNodeAllowLANAccess: true,
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
Hostname: "foo",
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off host="foo" update=off Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off host="foo" update=off Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
@@ -531,7 +516,7 @@ func TestPrefsPretty(t *testing.T) {
|
||||
},
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=check Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off update=check Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
@@ -541,7 +526,7 @@ func TestPrefsPretty(t *testing.T) {
|
||||
},
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=on Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off update=on Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
@@ -550,7 +535,7 @@ func TestPrefsPretty(t *testing.T) {
|
||||
},
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off appconnector=advertise Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off update=off appconnector=advertise Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
@@ -559,21 +544,21 @@ func TestPrefsPretty(t *testing.T) {
|
||||
},
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
NetfilterKind: "iptables",
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off netfilterKind=iptables update=off Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off netfilterKind=iptables update=off Persist=nil}`,
|
||||
},
|
||||
{
|
||||
Prefs{
|
||||
NetfilterKind: "",
|
||||
},
|
||||
"linux",
|
||||
`Prefs{ra=false mesh=false dns=false want=false routes=[] nf=off update=off Persist=nil}`,
|
||||
`Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist=nil}`,
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
@@ -633,8 +618,9 @@ func TestMaskedPrefsSetsInternal(t *testing.T) {
|
||||
func TestMaskedPrefsFields(t *testing.T) {
|
||||
have := map[string]bool{}
|
||||
for _, f := range fieldsOf(reflect.TypeFor[Prefs]()) {
|
||||
if f == "Persist" {
|
||||
// This one can't be edited.
|
||||
switch f {
|
||||
case "Persist", "AllowSingleHosts":
|
||||
// These can't be edited.
|
||||
continue
|
||||
}
|
||||
have[f] = true
|
||||
@@ -753,13 +739,12 @@ func TestMaskedPrefsPretty(t *testing.T) {
|
||||
{
|
||||
m: &MaskedPrefs{
|
||||
Prefs: Prefs{
|
||||
Hostname: "bar",
|
||||
OperatorUser: "galaxybrain",
|
||||
AllowSingleHosts: true,
|
||||
RouteAll: false,
|
||||
ExitNodeID: "foo",
|
||||
AdvertiseTags: []string{"tag:foo", "tag:bar"},
|
||||
NetfilterMode: preftype.NetfilterNoDivert,
|
||||
Hostname: "bar",
|
||||
OperatorUser: "galaxybrain",
|
||||
RouteAll: false,
|
||||
ExitNodeID: "foo",
|
||||
AdvertiseTags: []string{"tag:foo", "tag:bar"},
|
||||
NetfilterMode: preftype.NetfilterNoDivert,
|
||||
},
|
||||
RouteAllSet: true,
|
||||
HostnameSet: true,
|
||||
@@ -1064,3 +1049,24 @@ func TestNotifyPrefsJSONRoundtrip(t *testing.T) {
|
||||
t.Fatal("Prefs should not be valid after deserialization")
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that our Prefs type writes out an AllowSingleHosts field so we can
|
||||
// downgrade to older versions that require it.
|
||||
func TestPrefsDowngrade(t *testing.T) {
|
||||
var p Prefs
|
||||
j, err := json.Marshal(p)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
type oldPrefs struct {
|
||||
AllowSingleHosts bool
|
||||
}
|
||||
var op oldPrefs
|
||||
if err := json.Unmarshal(j, &op); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !op.AllowSingleHosts {
|
||||
t.Fatal("AllowSingleHosts should be true")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -626,7 +626,7 @@ func (v ServeConfigView) HasAllowFunnel() bool {
|
||||
}()
|
||||
}
|
||||
|
||||
// FindFunnel reports whether target exists in in either the background AllowFunnel
|
||||
// FindFunnel reports whether target exists in either the background AllowFunnel
|
||||
// or any of the foreground configs.
|
||||
func (v ServeConfigView) HasFunnelForTarget(target HostPort) bool {
|
||||
if v.AllowFunnel().Get(target) {
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -30,6 +31,10 @@ func New(_ logger.Logf, secretName string) (*Store, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" {
|
||||
// Derive the API server address from the environment variables
|
||||
c.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
|
||||
}
|
||||
canPatch, _, err := c.CheckSecretPermissions(context.Background(), secretName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package kube
|
||||
|
||||
const (
|
||||
Alpha1Version = "v1alpha1"
|
||||
|
||||
DNSRecordsCMName = "dnsrecords"
|
||||
DNSRecordsCMKey = "records.json"
|
||||
)
|
||||
|
||||
type Records struct {
|
||||
// Version is the version of this Records configuration. Version is
|
||||
// written by the operator, i.e when it first populates the Records.
|
||||
// k8s-nameserver must verify that it knows how to parse a given
|
||||
// version.
|
||||
Version string `json:"version"`
|
||||
// IP4 contains a mapping of DNS names to IPv4 address(es).
|
||||
IP4 map[string][]string `json:"ip4"`
|
||||
}
|
||||
49
k8s-operator/utils.go
Normal file
49
k8s-operator/utils.go
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package kube
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
const (
|
||||
Alpha1Version = "v1alpha1"
|
||||
|
||||
DNSRecordsCMName = "dnsrecords"
|
||||
DNSRecordsCMKey = "records.json"
|
||||
)
|
||||
|
||||
type Records struct {
|
||||
// Version is the version of this Records configuration. Version is
|
||||
// written by the operator, i.e when it first populates the Records.
|
||||
// k8s-nameserver must verify that it knows how to parse a given
|
||||
// version.
|
||||
Version string `json:"version"`
|
||||
// IP4 contains a mapping of DNS names to IPv4 address(es).
|
||||
IP4 map[string][]string `json:"ip4"`
|
||||
}
|
||||
|
||||
// TailscaledConfigFileNameForCap returns a tailscaled config file name in
|
||||
// format expected by containerboot for the given CapVer.
|
||||
func TailscaledConfigFileNameForCap(cap tailcfg.CapabilityVersion) string {
|
||||
if cap < 95 {
|
||||
return "tailscaled"
|
||||
}
|
||||
return fmt.Sprintf("cap-%v.hujson", cap)
|
||||
}
|
||||
|
||||
// CapVerFromFileName parses the capability version from a tailscaled
|
||||
// config file name previously generated by TailscaledConfigFileNameForCap.
|
||||
func CapVerFromFileName(name string) (tailcfg.CapabilityVersion, error) {
|
||||
if name == "tailscaled" {
|
||||
return 0, nil
|
||||
}
|
||||
var cap tailcfg.CapabilityVersion
|
||||
_, err := fmt.Sscanf(name, "cap-%d.hujson", &cap)
|
||||
return cap, err
|
||||
}
|
||||
@@ -35,7 +35,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][].
|
||||
- [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE))
|
||||
- [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE))
|
||||
- [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE))
|
||||
- [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.5.0/LICENSE))
|
||||
- [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE))
|
||||
- [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE))
|
||||
- [github.com/illarion/gonotify](https://pkg.go.dev/github.com/illarion/gonotify) ([MIT](https://github.com/illarion/gonotify/blob/v1.0.1/LICENSE))
|
||||
- [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE))
|
||||
|
||||
@@ -58,6 +58,7 @@ See also the dependencies in the [Tailscale CLI][].
|
||||
- [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE))
|
||||
- [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md))
|
||||
- [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE))
|
||||
- [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE))
|
||||
- [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE))
|
||||
- [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/7ce1f622c780/LICENSE))
|
||||
- [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE))
|
||||
|
||||
@@ -47,7 +47,7 @@ Some packages may only be included on certain architectures or operating systems
|
||||
- [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE))
|
||||
- [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE))
|
||||
- [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE))
|
||||
- [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.5.0/LICENSE))
|
||||
- [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE))
|
||||
- [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/v1.7.2/LICENSE))
|
||||
- [github.com/gorilla/securecookie](https://pkg.go.dev/github.com/gorilla/securecookie) ([BSD-3-Clause](https://github.com/gorilla/securecookie/blob/v1.1.2/LICENSE))
|
||||
- [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE))
|
||||
@@ -73,6 +73,7 @@ Some packages may only be included on certain architectures or operating systems
|
||||
- [github.com/peterbourgon/ff/v3](https://pkg.go.dev/github.com/peterbourgon/ff/v3) ([Apache-2.0](https://github.com/peterbourgon/ff/blob/v3.4.0/LICENSE))
|
||||
- [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE))
|
||||
- [github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) ([BSD-2-Clause](https://github.com/pkg/sftp/blob/v1.13.6/LICENSE))
|
||||
- [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE))
|
||||
- [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE))
|
||||
- [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE))
|
||||
- [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md))
|
||||
|
||||
@@ -44,9 +44,8 @@ func NewBackoff(name string, logf logger.Logf, maxBackoff time.Duration) *Backof
|
||||
}
|
||||
}
|
||||
|
||||
// Backoff sleeps an increasing amount of time if err is non-nil.
|
||||
// and the context is not a
|
||||
// It resets the backoff schedule once err is nil.
|
||||
// BackOff sleeps an increasing amount of time if err is non-nil while the
|
||||
// context is active. It resets the backoff schedule once err is nil.
|
||||
func (b *Backoff) BackOff(ctx context.Context, err error) {
|
||||
if err == nil {
|
||||
// No error. Reset number of consecutive failures.
|
||||
|
||||
@@ -262,6 +262,18 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig
|
||||
// config is empty, then we need to fallback to SplitDNS mode.
|
||||
ocfg.MatchDomains = cfg.matchDomains()
|
||||
} else {
|
||||
// On iOS only (for now), check if all route names point to resources inside the tailnet.
|
||||
// If so, we can set those names as MatchDomains to enable a split DNS configuration
|
||||
// which will help preserve battery life.
|
||||
// Because on iOS MatchDomains must equal SearchDomains, we cannot do this when
|
||||
// we have any Routes outside the tailnet. Otherwise when app connectors are enabled,
|
||||
// a query for 'work-laptop' might lead to search domain expansion, resolving
|
||||
// as 'work-laptop.aws.com' for example.
|
||||
if runtime.GOOS == "ios" && rcfg.RoutesRequireNoCustomResolvers() {
|
||||
for r := range rcfg.Routes {
|
||||
ocfg.MatchDomains = append(ocfg.MatchDomains, r)
|
||||
}
|
||||
}
|
||||
var defaultRoutes []*dnstype.Resolver
|
||||
for _, ip := range baseCfg.Nameservers {
|
||||
defaultRoutes = append(defaultRoutes, &dnstype.Resolver{Addr: ip.String()})
|
||||
|
||||
@@ -125,8 +125,8 @@ func DoHIPsOfBase(dohBase string) []netip.Addr {
|
||||
return []netip.Addr{
|
||||
controlDv4One,
|
||||
controlDv4Two,
|
||||
controlDv6Gen(nextDNSv6RangeA.Addr(), pathStr),
|
||||
controlDv6Gen(nextDNSv6RangeB.Addr(), pathStr),
|
||||
controlDv6Gen(controlDv6RangeA.Addr(), pathStr),
|
||||
controlDv6Gen(controlDv6RangeB.Addr(), pathStr),
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -121,8 +121,8 @@ func TestDoHIPsOfBase(t *testing.T) {
|
||||
want: ips(
|
||||
"76.76.2.22",
|
||||
"76.76.10.22",
|
||||
"2a07:a8c0:0:6:7b5b:5949:35ad:0",
|
||||
"2a07:a8c1:0:6:7b5b:5949:35ad:0",
|
||||
"2606:1a40:0:6:7b5b:5949:35ad:0",
|
||||
"2606:1a40:1:6:7b5b:5949:35ad:0",
|
||||
),
|
||||
},
|
||||
{
|
||||
@@ -130,8 +130,8 @@ func TestDoHIPsOfBase(t *testing.T) {
|
||||
want: ips(
|
||||
"76.76.2.22",
|
||||
"76.76.10.22",
|
||||
"2a07:a8c0:0:ffff:ffff:ffff:ffff:0",
|
||||
"2a07:a8c1:0:ffff:ffff:ffff:ffff:0",
|
||||
"2606:1a40:0:ffff:ffff:ffff:ffff:0",
|
||||
"2606:1a40:1:ffff:ffff:ffff:ffff:0",
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -175,6 +175,25 @@ func WriteRoutes(w *bufio.Writer, routes map[dnsname.FQDN][]*dnstype.Resolver) {
|
||||
}
|
||||
}
|
||||
|
||||
// RoutesRequireNoCustomResolvers returns true if this resolver.Config only contains routes
|
||||
// that do not specify a set of custom resolver(s), i.e. they can be resolved by the local
|
||||
// upstream DNS resolver.
|
||||
func (c *Config) RoutesRequireNoCustomResolvers() bool {
|
||||
for route, resolvers := range c.Routes {
|
||||
if route.WithoutTrailingDot() == "ts.net" {
|
||||
// Ignore the "ts.net" route here. It always specifies the corp resolvers but
|
||||
// its presence is not an issue, as ts.net will be a search domain.
|
||||
continue
|
||||
}
|
||||
if len(resolvers) != 0 {
|
||||
// Found a route with custom resolvers.
|
||||
return false
|
||||
}
|
||||
}
|
||||
// No routes other than ts.net have specified one or more resolvers.
|
||||
return true
|
||||
}
|
||||
|
||||
// Resolver is a DNS resolver for nodes on the Tailscale network,
|
||||
// associating them with domain names of the form <mynode>.<mydomain>.<root>.
|
||||
// If it is asked to resolve a domain that is not of that form,
|
||||
|
||||
@@ -243,6 +243,43 @@ func mustIP(str string) netip.Addr {
|
||||
return ip
|
||||
}
|
||||
|
||||
func TestRoutesRequireNoCustomResolvers(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
config Config
|
||||
expected bool
|
||||
}{
|
||||
{"noRoutes", Config{Routes: map[dnsname.FQDN][]*dnstype.Resolver{}}, true},
|
||||
{"onlyDefault", Config{Routes: map[dnsname.FQDN][]*dnstype.Resolver{
|
||||
"ts.net.": {
|
||||
{},
|
||||
},
|
||||
}}, true},
|
||||
{"oneOther", Config{Routes: map[dnsname.FQDN][]*dnstype.Resolver{
|
||||
"example.com.": {
|
||||
{},
|
||||
},
|
||||
}}, false},
|
||||
{"defaultAndOneOther", Config{Routes: map[dnsname.FQDN][]*dnstype.Resolver{
|
||||
"ts.net.": {
|
||||
{},
|
||||
},
|
||||
"example.com.": {
|
||||
{},
|
||||
},
|
||||
}}, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := tt.config.RoutesRequireNoCustomResolvers()
|
||||
if result != tt.expected {
|
||||
t.Errorf("result = %v; want %v", result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRDNSNameToIPv4(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"maps"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -63,9 +64,6 @@ const (
|
||||
// icmpProbeTimeout is the maximum amount of time netcheck will spend
|
||||
// probing with ICMP packets.
|
||||
icmpProbeTimeout = 1 * time.Second
|
||||
// hairpinCheckTimeout is the amount of time we wait for a
|
||||
// hairpinned packet to come back.
|
||||
hairpinCheckTimeout = 100 * time.Millisecond
|
||||
// defaultActiveRetransmitTime is the retransmit interval we use
|
||||
// for STUN probes when we're in steady state (not in start-up),
|
||||
// but don't have previous latency information for a DERP
|
||||
@@ -95,11 +93,6 @@ type Report struct {
|
||||
// STUN server you're talking to (on IPv4).
|
||||
MappingVariesByDestIP opt.Bool
|
||||
|
||||
// HairPinning is whether the router supports communicating
|
||||
// between two local devices through the NATted public IP address
|
||||
// (on IPv4).
|
||||
HairPinning opt.Bool
|
||||
|
||||
// UPnP is whether UPnP appears present on the LAN.
|
||||
// Empty means not checked.
|
||||
UPnP opt.Bool
|
||||
@@ -115,8 +108,11 @@ type Report struct {
|
||||
RegionV4Latency map[int]time.Duration // keyed by DERP Region ID
|
||||
RegionV6Latency map[int]time.Duration // keyed by DERP Region ID
|
||||
|
||||
GlobalV4 string // ip:port of global IPv4
|
||||
GlobalV6 string // [ip]:port of global IPv6
|
||||
GlobalV4Counters map[netip.AddrPort]int // number of times the endpoint was observed
|
||||
GlobalV6Counters map[netip.AddrPort]int // number of times the endpoint was observed
|
||||
|
||||
GlobalV4 netip.AddrPort
|
||||
GlobalV6 netip.AddrPort
|
||||
|
||||
// CaptivePortal is set when we think there's a captive portal that is
|
||||
// intercepting HTTP traffic.
|
||||
@@ -125,6 +121,43 @@ type Report struct {
|
||||
// TODO: update Clone when adding new fields
|
||||
}
|
||||
|
||||
// GetGlobalAddrs returns the v4 and v6 global addresses observed during the
|
||||
// netcheck, which includes the best latency endpoint first, followed by any
|
||||
// other endpoints that were observed repeatedly. It excludes singular endpoints
|
||||
// that are likely only the result of a hard NAT.
|
||||
func (r *Report) GetGlobalAddrs() (v4, v6 []netip.AddrPort) {
|
||||
// Always add the best latency entries first.
|
||||
if r.GlobalV4.IsValid() {
|
||||
v4 = append(v4, r.GlobalV4)
|
||||
}
|
||||
if r.GlobalV6.IsValid() {
|
||||
v6 = append(v6, r.GlobalV6)
|
||||
}
|
||||
// Add any other entries for which we have multiple observations.
|
||||
// This covers a case of bad NATs that start to provide new mappings for new
|
||||
// STUN sessions mid-expiration, even while a live mapping for the best
|
||||
// latency endpoint still exists. This has been observed on some Palo Alto
|
||||
// Networks firewalls, wherein new traffic to the old endpoint will not
|
||||
// succeed, but new traffic to the newly discovered endpoints does succeed.
|
||||
for ipp, count := range r.GlobalV4Counters {
|
||||
if ipp == r.GlobalV4 {
|
||||
continue
|
||||
}
|
||||
if count > 1 {
|
||||
v4 = append(v4, ipp)
|
||||
}
|
||||
}
|
||||
for ipp, count := range r.GlobalV6Counters {
|
||||
if ipp == r.GlobalV6 {
|
||||
continue
|
||||
}
|
||||
if count > 1 {
|
||||
v6 = append(v6, ipp)
|
||||
}
|
||||
}
|
||||
return v4, v6
|
||||
}
|
||||
|
||||
// AnyPortMappingChecked reports whether any of UPnP, PMP, or PCP are non-empty.
|
||||
func (r *Report) AnyPortMappingChecked() bool {
|
||||
return r.UPnP != "" || r.PMP != "" || r.PCP != ""
|
||||
@@ -138,6 +171,8 @@ func (r *Report) Clone() *Report {
|
||||
r2.RegionLatency = cloneDurationMap(r2.RegionLatency)
|
||||
r2.RegionV4Latency = cloneDurationMap(r2.RegionV4Latency)
|
||||
r2.RegionV6Latency = cloneDurationMap(r2.RegionV6Latency)
|
||||
r2.GlobalV4Counters = maps.Clone(r2.GlobalV4Counters)
|
||||
r2.GlobalV6Counters = maps.Clone(r2.GlobalV6Counters)
|
||||
return &r2
|
||||
}
|
||||
|
||||
@@ -243,23 +278,6 @@ func (c *Client) vlogf(format string, a ...any) {
|
||||
}
|
||||
}
|
||||
|
||||
// handleHairSTUN reports whether pkt (from src) was our magic hairpin
|
||||
// probe packet that we sent to ourselves.
|
||||
func (c *Client) handleHairSTUNLocked(pkt []byte, src netip.AddrPort) bool {
|
||||
rs := c.curState
|
||||
if rs == nil {
|
||||
return false
|
||||
}
|
||||
if tx, err := stun.ParseBindingRequest(pkt); err == nil && tx == rs.hairTX {
|
||||
select {
|
||||
case rs.gotHairSTUN <- src:
|
||||
default:
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// MakeNextReportFull forces the next GetReport call to be a full
|
||||
// (non-incremental) probe of all DERP regions.
|
||||
func (c *Client) MakeNextReportFull() {
|
||||
@@ -282,10 +300,6 @@ func (c *Client) ReceiveSTUNPacket(pkt []byte, src netip.AddrPort) {
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
if c.handleHairSTUNLocked(pkt, src) {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
rs := c.curState
|
||||
c.mu.Unlock()
|
||||
|
||||
@@ -296,6 +310,8 @@ func (c *Client) ReceiveSTUNPacket(pkt []byte, src netip.AddrPort) {
|
||||
tx, addrPort, err := stun.ParseResponse(pkt)
|
||||
if err != nil {
|
||||
if _, err := stun.ParseBindingRequest(pkt); err == nil {
|
||||
// We no longer send hairpin checks, but perhaps we might catch a
|
||||
// stray from earlier versions.
|
||||
// This was probably our own netcheck hairpin
|
||||
// check probe coming in late. Ignore.
|
||||
return
|
||||
@@ -521,20 +537,15 @@ type reportState struct {
|
||||
c *Client
|
||||
start time.Time
|
||||
opts *GetReportOpts
|
||||
hairTX stun.TxID
|
||||
gotHairSTUN chan netip.AddrPort
|
||||
hairTimeout chan struct{} // closed on timeout
|
||||
pc4Hair nettype.PacketConn
|
||||
incremental bool // doing a lite, follow-up netcheck
|
||||
stopProbeCh chan struct{}
|
||||
waitPortMap sync.WaitGroup
|
||||
|
||||
mu sync.Mutex
|
||||
sentHairCheck bool
|
||||
report *Report // to be returned by GetReport
|
||||
inFlight map[stun.TxID]func(netip.AddrPort) // called without c.mu held
|
||||
gotEP4 string
|
||||
timers []*time.Timer
|
||||
mu sync.Mutex
|
||||
report *Report // to be returned by GetReport
|
||||
inFlight map[stun.TxID]func(netip.AddrPort) // called without c.mu held
|
||||
gotEP4 netip.AddrPort
|
||||
timers []*time.Timer
|
||||
}
|
||||
|
||||
func (rs *reportState) anyUDP() bool {
|
||||
@@ -584,50 +595,6 @@ func (rs *reportState) probeWouldHelp(probe probe, node *tailcfg.DERPNode) bool
|
||||
return false
|
||||
}
|
||||
|
||||
func (rs *reportState) startHairCheckLocked(dst netip.AddrPort) {
|
||||
if rs.sentHairCheck || rs.incremental {
|
||||
return
|
||||
}
|
||||
rs.sentHairCheck = true
|
||||
rs.pc4Hair.WriteToUDPAddrPort(stun.Request(rs.hairTX), dst)
|
||||
rs.c.vlogf("sent haircheck to %v", dst)
|
||||
time.AfterFunc(hairpinCheckTimeout, func() { close(rs.hairTimeout) })
|
||||
}
|
||||
|
||||
func (rs *reportState) waitHairCheck(ctx context.Context) {
|
||||
rs.mu.Lock()
|
||||
defer rs.mu.Unlock()
|
||||
ret := rs.report
|
||||
if rs.incremental {
|
||||
if rs.c.last != nil {
|
||||
ret.HairPinning = rs.c.last.HairPinning
|
||||
}
|
||||
return
|
||||
}
|
||||
if !rs.sentHairCheck {
|
||||
return
|
||||
}
|
||||
|
||||
// First, check whether we have a value before we check for timeouts.
|
||||
select {
|
||||
case <-rs.gotHairSTUN:
|
||||
ret.HairPinning.Set(true)
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Now, wait for a response or a timeout.
|
||||
select {
|
||||
case <-rs.gotHairSTUN:
|
||||
ret.HairPinning.Set(true)
|
||||
case <-rs.hairTimeout:
|
||||
rs.c.vlogf("hairCheck timeout")
|
||||
ret.HairPinning.Set(false)
|
||||
case <-ctx.Done():
|
||||
rs.c.vlogf("hairCheck context timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *reportState) stopTimers() {
|
||||
rs.mu.Lock()
|
||||
defer rs.mu.Unlock()
|
||||
@@ -640,11 +607,6 @@ func (rs *reportState) stopTimers() {
|
||||
// is non-zero (for all but HTTPS replies), it's recorded as our UDP
|
||||
// IP:port.
|
||||
func (rs *reportState) addNodeLatency(node *tailcfg.DERPNode, ipp netip.AddrPort, d time.Duration) {
|
||||
var ipPortStr string
|
||||
if ipp != (netip.AddrPort{}) {
|
||||
ipPortStr = net.JoinHostPort(ipp.Addr().String(), fmt.Sprint(ipp.Port()))
|
||||
}
|
||||
|
||||
rs.mu.Lock()
|
||||
defer rs.mu.Unlock()
|
||||
ret := rs.report
|
||||
@@ -670,18 +632,19 @@ func (rs *reportState) addNodeLatency(node *tailcfg.DERPNode, ipp netip.AddrPort
|
||||
case ipp.Addr().Is6():
|
||||
updateLatency(ret.RegionV6Latency, node.RegionID, d)
|
||||
ret.IPv6 = true
|
||||
ret.GlobalV6 = ipPortStr
|
||||
ret.GlobalV6 = ipp
|
||||
mak.Set(&ret.GlobalV6Counters, ipp, ret.GlobalV6Counters[ipp]+1)
|
||||
// TODO: track MappingVariesByDestIP for IPv6
|
||||
// too? Would be sad if so, but who knows.
|
||||
case ipp.Addr().Is4():
|
||||
updateLatency(ret.RegionV4Latency, node.RegionID, d)
|
||||
ret.IPv4 = true
|
||||
if rs.gotEP4 == "" {
|
||||
rs.gotEP4 = ipPortStr
|
||||
ret.GlobalV4 = ipPortStr
|
||||
rs.startHairCheckLocked(ipp)
|
||||
mak.Set(&ret.GlobalV4Counters, ipp, ret.GlobalV4Counters[ipp]+1)
|
||||
if !rs.gotEP4.IsValid() {
|
||||
rs.gotEP4 = ipp
|
||||
ret.GlobalV4 = ipp
|
||||
} else {
|
||||
if rs.gotEP4 != ipPortStr {
|
||||
if rs.gotEP4 != ipp {
|
||||
ret.MappingVariesByDestIP.Set(true)
|
||||
} else if ret.MappingVariesByDestIP == "" {
|
||||
ret.MappingVariesByDestIP.Set(false)
|
||||
@@ -793,9 +756,6 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
|
||||
opts: opts,
|
||||
report: newReport(),
|
||||
inFlight: map[stun.TxID]func(netip.AddrPort){},
|
||||
hairTX: stun.NewTxID(), // random payload
|
||||
gotHairSTUN: make(chan netip.AddrPort, 1),
|
||||
hairTimeout: make(chan struct{}),
|
||||
stopProbeCh: make(chan struct{}, 1),
|
||||
}
|
||||
c.curState = rs
|
||||
@@ -853,34 +813,11 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
|
||||
v6udp.Close()
|
||||
}
|
||||
|
||||
// Create a UDP4 socket used for sending to our discovered IPv4 address.
|
||||
rs.pc4Hair, err = nettype.MakePacketListenerWithNetIP(netns.Listener(c.logf, c.NetMon)).ListenPacket(ctx, "udp4", ":0")
|
||||
if err != nil {
|
||||
c.logf("udp4: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
defer rs.pc4Hair.Close()
|
||||
|
||||
if !c.SkipExternalNetwork && c.PortMapper != nil {
|
||||
rs.waitPortMap.Add(1)
|
||||
go rs.probePortMapServices()
|
||||
}
|
||||
|
||||
// At least the Apple Airport Extreme doesn't allow hairpin
|
||||
// sends from a private socket until it's seen traffic from
|
||||
// that src IP:port to something else out on the internet.
|
||||
//
|
||||
// See https://github.com/tailscale/tailscale/issues/188#issuecomment-600728643
|
||||
//
|
||||
// And it seems that even sending to a likely-filtered RFC 5737
|
||||
// documentation-only IPv4 range is enough to set up the mapping.
|
||||
// So do that for now. In the future we might want to classify networks
|
||||
// that do and don't require this separately. But for now help it.
|
||||
const documentationIP = "203.0.113.1"
|
||||
rs.pc4Hair.WriteToUDPAddrPort(
|
||||
[]byte("tailscale netcheck; see https://github.com/tailscale/tailscale/issues/188"),
|
||||
netip.AddrPortFrom(netip.MustParseAddr(documentationIP), 12345))
|
||||
|
||||
plan := makeProbePlan(dm, ifState, last)
|
||||
|
||||
// If we're doing a full probe, also check for a captive portal. We
|
||||
@@ -958,8 +895,6 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
|
||||
captivePortalStop()
|
||||
}
|
||||
|
||||
rs.waitHairCheck(ctx)
|
||||
c.vlogf("hairCheck done")
|
||||
if !c.SkipExternalNetwork && c.PortMapper != nil {
|
||||
rs.waitPortMap.Wait()
|
||||
c.vlogf("portMap done")
|
||||
@@ -1328,17 +1263,16 @@ func (c *Client) logConciseReport(r *Report, dm *tailcfg.DERPMap) {
|
||||
fmt.Fprintf(w, " v6os=%v", r.OSHasIPv6)
|
||||
}
|
||||
fmt.Fprintf(w, " mapvarydest=%v", r.MappingVariesByDestIP)
|
||||
fmt.Fprintf(w, " hair=%v", r.HairPinning)
|
||||
if r.AnyPortMappingChecked() {
|
||||
fmt.Fprintf(w, " portmap=%v%v%v", conciseOptBool(r.UPnP, "U"), conciseOptBool(r.PMP, "M"), conciseOptBool(r.PCP, "C"))
|
||||
} else {
|
||||
fmt.Fprintf(w, " portmap=?")
|
||||
}
|
||||
if r.GlobalV4 != "" {
|
||||
fmt.Fprintf(w, " v4a=%v", r.GlobalV4)
|
||||
if r.GlobalV4.IsValid() {
|
||||
fmt.Fprintf(w, " v4a=%s", r.GlobalV4)
|
||||
}
|
||||
if r.GlobalV6 != "" {
|
||||
fmt.Fprintf(w, " v6a=%v", r.GlobalV6)
|
||||
if r.GlobalV6.IsValid() {
|
||||
fmt.Fprintf(w, " v6a=%s", r.GlobalV6)
|
||||
}
|
||||
if r.CaptivePortal != "" {
|
||||
fmt.Fprintf(w, " captiveportal=%v", r.CaptivePortal)
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -19,142 +20,12 @@ import (
|
||||
"time"
|
||||
|
||||
"tailscale.com/net/netmon"
|
||||
"tailscale.com/net/stun"
|
||||
"tailscale.com/net/stun/stuntest"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstest/nettest"
|
||||
)
|
||||
|
||||
func TestHairpinSTUN(t *testing.T) {
|
||||
tx := stun.NewTxID()
|
||||
c := &Client{
|
||||
curState: &reportState{
|
||||
hairTX: tx,
|
||||
gotHairSTUN: make(chan netip.AddrPort, 1),
|
||||
},
|
||||
}
|
||||
req := stun.Request(tx)
|
||||
if !stun.Is(req) {
|
||||
t.Fatal("expected STUN message")
|
||||
}
|
||||
if !c.handleHairSTUNLocked(req, netip.AddrPort{}) {
|
||||
t.Fatal("expected true")
|
||||
}
|
||||
select {
|
||||
case <-c.curState.gotHairSTUN:
|
||||
default:
|
||||
t.Fatal("expected value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHairpinWait(t *testing.T) {
|
||||
makeClient := func(t *testing.T) (*Client, *reportState) {
|
||||
tx := stun.NewTxID()
|
||||
c := &Client{}
|
||||
req := stun.Request(tx)
|
||||
if !stun.Is(req) {
|
||||
t.Fatal("expected STUN message")
|
||||
}
|
||||
|
||||
var err error
|
||||
rs := &reportState{
|
||||
c: c,
|
||||
hairTX: tx,
|
||||
gotHairSTUN: make(chan netip.AddrPort, 1),
|
||||
hairTimeout: make(chan struct{}),
|
||||
report: newReport(),
|
||||
}
|
||||
rs.pc4Hair, err = net.ListenUDP("udp4", &net.UDPAddr{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
Port: 0,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c.curState = rs
|
||||
return c, rs
|
||||
}
|
||||
|
||||
ll, err := net.ListenPacket("udp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer ll.Close()
|
||||
dstAddr := netip.MustParseAddrPort(ll.LocalAddr().String())
|
||||
|
||||
t.Run("Success", func(t *testing.T) {
|
||||
c, rs := makeClient(t)
|
||||
req := stun.Request(rs.hairTX)
|
||||
|
||||
// Start a hairpin check to ourselves.
|
||||
rs.startHairCheckLocked(dstAddr)
|
||||
|
||||
// Fake receiving the stun check from ourselves after some period of time.
|
||||
src := netip.MustParseAddrPort(rs.pc4Hair.LocalAddr().String())
|
||||
c.handleHairSTUNLocked(req, src)
|
||||
|
||||
rs.waitHairCheck(context.Background())
|
||||
|
||||
// Verify that we set HairPinning
|
||||
if got := rs.report.HairPinning; !got.EqualBool(true) {
|
||||
t.Errorf("wanted HairPinning=true, got %v", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("LateReply", func(t *testing.T) {
|
||||
c, rs := makeClient(t)
|
||||
req := stun.Request(rs.hairTX)
|
||||
|
||||
// Start a hairpin check to ourselves.
|
||||
rs.startHairCheckLocked(dstAddr)
|
||||
|
||||
// Wait until we've timed out, to mimic the race in #1795.
|
||||
<-rs.hairTimeout
|
||||
|
||||
// Fake receiving the stun check from ourselves after some period of time.
|
||||
src := netip.MustParseAddrPort(rs.pc4Hair.LocalAddr().String())
|
||||
c.handleHairSTUNLocked(req, src)
|
||||
|
||||
// Wait for a hairpin response
|
||||
rs.waitHairCheck(context.Background())
|
||||
|
||||
// Verify that we set HairPinning
|
||||
if got := rs.report.HairPinning; !got.EqualBool(true) {
|
||||
t.Errorf("wanted HairPinning=true, got %v", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Timeout", func(t *testing.T) {
|
||||
_, rs := makeClient(t)
|
||||
|
||||
// Start a hairpin check to ourselves.
|
||||
rs.startHairCheckLocked(dstAddr)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), hairpinCheckTimeout*50)
|
||||
defer cancel()
|
||||
|
||||
// Wait in the background
|
||||
waitDone := make(chan struct{})
|
||||
go func() {
|
||||
rs.waitHairCheck(ctx)
|
||||
close(waitDone)
|
||||
}()
|
||||
|
||||
// If we do nothing, then we time out; confirm that we set
|
||||
// HairPinning to false in this case.
|
||||
select {
|
||||
case <-waitDone:
|
||||
if got := rs.report.HairPinning; !got.EqualBool(false) {
|
||||
t.Errorf("wanted HairPinning=false, got %v", got)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatalf("timed out waiting for hairpin channel")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func newTestClient(t testing.TB) *Client {
|
||||
c := &Client{
|
||||
NetMon: netmon.NewStatic(),
|
||||
@@ -189,12 +60,49 @@ func TestBasic(t *testing.T) {
|
||||
if _, ok := r.RegionLatency[1]; !ok {
|
||||
t.Errorf("expected key 1 in DERPLatency; got %+v", r.RegionLatency)
|
||||
}
|
||||
if r.GlobalV4 == "" {
|
||||
if !r.GlobalV4.IsValid() {
|
||||
t.Error("expected GlobalV4 set")
|
||||
}
|
||||
if r.PreferredDERP != 1 {
|
||||
t.Errorf("PreferredDERP = %v; want 1", r.PreferredDERP)
|
||||
}
|
||||
v4Addrs, _ := r.GetGlobalAddrs()
|
||||
if len(v4Addrs) != 1 {
|
||||
t.Error("expected one global IPv4 address")
|
||||
}
|
||||
if got, want := v4Addrs[0], r.GlobalV4; got != want {
|
||||
t.Errorf("got %v; want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiGlobalAddressMapping(t *testing.T) {
|
||||
c := &Client{
|
||||
Logf: t.Logf,
|
||||
}
|
||||
|
||||
rs := &reportState{
|
||||
c: c,
|
||||
start: time.Now(),
|
||||
report: newReport(),
|
||||
}
|
||||
derpNode := &tailcfg.DERPNode{}
|
||||
port1 := netip.MustParseAddrPort("127.0.0.1:1234")
|
||||
port2 := netip.MustParseAddrPort("127.0.0.1:2345")
|
||||
port3 := netip.MustParseAddrPort("127.0.0.1:3456")
|
||||
// First report for port1
|
||||
rs.addNodeLatency(derpNode, port1, 10*time.Millisecond)
|
||||
// Singular report for port2
|
||||
rs.addNodeLatency(derpNode, port2, 11*time.Millisecond)
|
||||
// Duplicate reports for port3
|
||||
rs.addNodeLatency(derpNode, port3, 12*time.Millisecond)
|
||||
rs.addNodeLatency(derpNode, port3, 13*time.Millisecond)
|
||||
|
||||
r := rs.report
|
||||
v4Addrs, _ := r.GetGlobalAddrs()
|
||||
wantV4Addrs := []netip.AddrPort{port1, port3}
|
||||
if !slices.Equal(v4Addrs, wantV4Addrs) {
|
||||
t.Errorf("got global addresses: %v, want %v", v4Addrs, wantV4Addrs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWorksWhenUDPBlocked(t *testing.T) {
|
||||
@@ -745,12 +653,12 @@ func TestLogConciseReport(t *testing.T) {
|
||||
{
|
||||
name: "no_udp",
|
||||
r: &Report{},
|
||||
want: "udp=false v4=false icmpv4=false v6=false mapvarydest= hair= portmap=? derp=0",
|
||||
want: "udp=false v4=false icmpv4=false v6=false mapvarydest= portmap=? derp=0",
|
||||
},
|
||||
{
|
||||
name: "no_udp_icmp",
|
||||
r: &Report{ICMPv4: true, IPv4: true},
|
||||
want: "udp=false icmpv4=true v6=false mapvarydest= hair= portmap=? derp=0",
|
||||
want: "udp=false icmpv4=true v6=false mapvarydest= portmap=? derp=0",
|
||||
},
|
||||
{
|
||||
name: "ipv4_one_region",
|
||||
@@ -765,7 +673,7 @@ func TestLogConciseReport(t *testing.T) {
|
||||
1: 10 * ms,
|
||||
},
|
||||
},
|
||||
want: "udp=true v6=false mapvarydest= hair= portmap=? derp=1 derpdist=1v4:10ms",
|
||||
want: "udp=true v6=false mapvarydest= portmap=? derp=1 derpdist=1v4:10ms",
|
||||
},
|
||||
{
|
||||
name: "ipv4_all_region",
|
||||
@@ -784,7 +692,7 @@ func TestLogConciseReport(t *testing.T) {
|
||||
3: 30 * ms,
|
||||
},
|
||||
},
|
||||
want: "udp=true v6=false mapvarydest= hair= portmap=? derp=1 derpdist=1v4:10ms,2v4:20ms,3v4:30ms",
|
||||
want: "udp=true v6=false mapvarydest= portmap=? derp=1 derpdist=1v4:10ms,2v4:20ms,3v4:30ms",
|
||||
},
|
||||
{
|
||||
name: "ipboth_all_region",
|
||||
@@ -809,7 +717,7 @@ func TestLogConciseReport(t *testing.T) {
|
||||
3: 30 * ms,
|
||||
},
|
||||
},
|
||||
want: "udp=true v6=true mapvarydest= hair= portmap=? derp=1 derpdist=1v4:10ms,1v6:10ms,2v4:20ms,2v6:20ms,3v4:30ms,3v6:30ms",
|
||||
want: "udp=true v6=true mapvarydest= portmap=? derp=1 derpdist=1v4:10ms,1v6:10ms,2v4:20ms,2v6:20ms,3v4:30ms,3v6:30ms",
|
||||
},
|
||||
{
|
||||
name: "portmap_all",
|
||||
@@ -819,7 +727,7 @@ func TestLogConciseReport(t *testing.T) {
|
||||
PMP: "true",
|
||||
PCP: "true",
|
||||
},
|
||||
want: "udp=true v4=false v6=false mapvarydest= hair= portmap=UMC derp=0",
|
||||
want: "udp=true v4=false v6=false mapvarydest= portmap=UMC derp=0",
|
||||
},
|
||||
{
|
||||
name: "portmap_some",
|
||||
@@ -829,7 +737,7 @@ func TestLogConciseReport(t *testing.T) {
|
||||
PMP: "false",
|
||||
PCP: "true",
|
||||
},
|
||||
want: "udp=true v4=false v6=false mapvarydest= hair= portmap=UC derp=0",
|
||||
want: "udp=true v4=false v6=false mapvarydest= portmap=UC derp=0",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -59,6 +59,10 @@ type Dialer struct {
|
||||
// If nil, it's not used.
|
||||
NetstackDialTCP func(context.Context, netip.AddrPort) (net.Conn, error)
|
||||
|
||||
// NetstackDialUDP dials the provided IPPort using netstack.
|
||||
// If nil, it's not used.
|
||||
NetstackDialUDP func(context.Context, netip.AddrPort) (net.Conn, error)
|
||||
|
||||
peerClientOnce sync.Once
|
||||
peerClient *http.Client
|
||||
|
||||
@@ -403,9 +407,12 @@ func (d *Dialer) UserDial(ctx context.Context, network, addr string) (net.Conn,
|
||||
return nil, err
|
||||
}
|
||||
if d.UseNetstackForIP != nil && d.UseNetstackForIP(ipp.Addr()) {
|
||||
if d.NetstackDialTCP == nil {
|
||||
if d.NetstackDialTCP == nil || d.NetstackDialUDP == nil {
|
||||
return nil, errors.New("Dialer not initialized correctly")
|
||||
}
|
||||
if strings.HasPrefix(network, "udp") {
|
||||
return d.NetstackDialUDP(ctx, ipp)
|
||||
}
|
||||
return d.NetstackDialTCP(ctx, ipp)
|
||||
}
|
||||
|
||||
|
||||
@@ -915,8 +915,6 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) {
|
||||
for _, data := range res.data {
|
||||
p.Decode(data[res.dataOffset:])
|
||||
|
||||
pc.snat(p)
|
||||
|
||||
if m := t.destIPActivity.Load(); m != nil {
|
||||
if fn := m[p.Dst.Addr()]; fn != nil {
|
||||
fn()
|
||||
@@ -932,6 +930,10 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure to do SNAT after filtering, so that any flow tracking in
|
||||
// the filter sees the original source address. See #12133.
|
||||
pc.snat(p)
|
||||
n := copy(buffs[buffsPos][offset:], p.Buffer())
|
||||
if n != len(data)-res.dataOffset {
|
||||
panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset))
|
||||
|
||||
9
omit/aws_def.go
Normal file
9
omit/aws_def.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_aws
|
||||
|
||||
package omit
|
||||
|
||||
// AWS is whether AWS support should be omitted from the build.
|
||||
const AWS = false
|
||||
9
omit/aws_omit.go
Normal file
9
omit/aws_omit.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ts_omit_aws
|
||||
|
||||
package omit
|
||||
|
||||
// AWS is whether AWS support should be omitted from the build.
|
||||
const AWS = true
|
||||
12
omit/omit.go
Normal file
12
omit/omit.go
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package omit provides consts to access Tailscale ts_omit_FOO build tags.
|
||||
// They're often more convenient to eliminate some away locally with a const
|
||||
// rather than using build tags.
|
||||
package omit
|
||||
|
||||
import "errors"
|
||||
|
||||
// Err is an error that can be returned by functions in this package.
|
||||
var Err = errors.New("feature not linked into binary per ts_omit build tag")
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"tailscale.com/derp"
|
||||
"tailscale.com/derp/derphttp"
|
||||
"tailscale.com/net/netmon"
|
||||
"tailscale.com/net/stun"
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/tailcfg"
|
||||
@@ -544,7 +545,7 @@ func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isPr
|
||||
return !strings.Contains(s, "derphttp.Client.Connect: connecting to")
|
||||
})
|
||||
priv := key.NewNode()
|
||||
dc := derphttp.NewRegionClient(priv, l, nil /* no netMon */, func() *tailcfg.DERPRegion {
|
||||
dc := derphttp.NewRegionClient(priv, l, netmon.NewStatic(), func() *tailcfg.DERPRegion {
|
||||
rid := n.RegionID
|
||||
return &tailcfg.DERPRegion{
|
||||
RegionID: rid,
|
||||
|
||||
891
publicapi/device.md
Normal file
891
publicapi/device.md
Normal file
@@ -0,0 +1,891 @@
|
||||
# Device
|
||||
|
||||
A Tailscale device (sometimes referred to as _node_ or _machine_), is any computer or mobile device that joins a tailnet.
|
||||
|
||||
Each device has a unique ID (`nodeId` in the JSON below) that is used to identify the device in API calls.
|
||||
This ID can be found by going to the [**Machines**](https://login.tailscale.com/admin/machines) page in the admin console,
|
||||
selecting the relevant device, then finding the ID in the Machine Details section.
|
||||
You can also [list all devices in the tailnet](#list-tailnet-devices) to get their `nodeId` values.
|
||||
|
||||
(A device's numeric `id` value can also be used in API calls, but `nodeId` is preferred.)
|
||||
|
||||
### Attributes
|
||||
|
||||
```jsonc
|
||||
{
|
||||
// addresses (array of strings) is a list of Tailscale IP
|
||||
// addresses for the device, including both IPv4 (formatted as 100.x.y.z)
|
||||
// and IPv6 (formatted as fd7a:115c:a1e0:a:b:c:d:e) addresses.
|
||||
"addresses": ["100.87.74.78", "fd7a:115c:a1e0:ac82:4843:ca90:697d:c36e"],
|
||||
|
||||
// id (string) is the legacy identifier for a device; you
|
||||
// can supply this value wherever {deviceId} is indicated in the
|
||||
// endpoint. Note that although "id" is still accepted, "nodeId" is
|
||||
// preferred.
|
||||
"id": "393735751060",
|
||||
|
||||
// nodeID (string) is the preferred identifier for a device;
|
||||
// supply this value wherever {deviceId} is indicated in the endpoint.
|
||||
"nodeId": "n5SUKe8CNTRL",
|
||||
|
||||
// user (string) is the user who registered the node. For untagged nodes,
|
||||
// this user is the device owner.
|
||||
"user": "amelie@example.com",
|
||||
|
||||
// name (string) is the MagicDNS name of the device.
|
||||
// Learn more about MagicDNS at https://tailscale.com/kb/1081/.
|
||||
"name": "pangolin.tailfe8c.ts.net",
|
||||
|
||||
// hostname (string) is the machine name in the admin console
|
||||
// Learn more about machine names at https://tailscale.com/kb/1098/.
|
||||
"hostname": "pangolin",
|
||||
|
||||
// clientVersion (string) is the version of the Tailscale client
|
||||
// software; this is empty for external devices.
|
||||
"clientVersion": "",
|
||||
|
||||
// updateAvailable (boolean) is 'true' if a Tailscale client version
|
||||
// upgrade is available. This value is empty for external devices.
|
||||
"updateAvailable": false,
|
||||
|
||||
// os (string) is the operating system that the device is running.
|
||||
"os": "linux",
|
||||
|
||||
// created (string) is the date on which the device was added
|
||||
// to the tailnet; this is empty for external devices.
|
||||
"created": "2022-12-01T05:23:30Z",
|
||||
|
||||
// lastSeen (string) is when device was last active on the tailnet.
|
||||
"lastSeen": "2022-12-01T05:23:30Z",
|
||||
|
||||
// keyExpiryDisabled (boolean) is 'true' if the keys for the device
|
||||
// will not expire. Learn more at https://tailscale.com/kb/1028/.
|
||||
"keyExpiryDisabled": true,
|
||||
|
||||
// expires (string) is the expiration date of the device's auth key.
|
||||
// Learn more about key expiry at https://tailscale.com/kb/1028/.
|
||||
"expires": "2023-05-30T04:44:05Z",
|
||||
|
||||
// authorized (boolean) is 'true' if the device has been
|
||||
// authorized to join the tailnet; otherwise, 'false'. Learn
|
||||
// more about device authorization at https://tailscale.com/kb/1099/.
|
||||
"authorized": true,
|
||||
|
||||
// isExternal (boolean) if 'true', indicates that a device is not
|
||||
// a member of the tailnet, but is shared in to the tailnet;
|
||||
// if 'false', the device is a member of the tailnet.
|
||||
// Learn more about node sharing at https://tailscale.com/kb/1084/.
|
||||
"isExternal": true,
|
||||
|
||||
// machineKey (string) is for internal use and is not required for
|
||||
// any API operations. This value is empty for external devices.
|
||||
"machineKey": "",
|
||||
|
||||
// nodeKey (string) is mostly for internal use, required for select
|
||||
// operations, such as adding a node to a locked tailnet.
|
||||
// Learn about tailnet locks at https://tailscale.com/kb/1226/.
|
||||
"nodeKey": "nodekey:01234567890abcdef",
|
||||
|
||||
// blocksIncomingConnections (boolean) is 'true' if the device is not
|
||||
// allowed to accept any connections over Tailscale, including pings.
|
||||
// Learn more in the "Allow incoming connections"
|
||||
// section of https://tailscale.com/kb/1072/.
|
||||
"blocksIncomingConnections": false,
|
||||
|
||||
// enabledRoutes (array of strings) are the subnet routes for this
|
||||
// device that have been approved by the tailnet admin.
|
||||
// Learn more about subnet routes at https://tailscale.com/kb/1019/.
|
||||
"enabledRoutes": ["10.0.0.0/16", "192.168.1.0/24"],
|
||||
|
||||
// advertisedRoutes (array of strings) are the subnets this device
|
||||
// intends to expose.
|
||||
// Learn more about subnet routes at https://tailscale.com/kb/1019/.
|
||||
"advertisedRoutes": ["10.0.0.0/16", "192.168.1.0/24"],
|
||||
|
||||
// clientConnectivity provides a report on the device's current physical
|
||||
// network conditions.
|
||||
"clientConnectivity": {
|
||||
// endpoints (array of strings) Client's magicsock UDP IP:port
|
||||
// endpoints (IPv4 or IPv6)
|
||||
"endpoints": ["199.9.14.201:59128", "192.68.0.21:59128"],
|
||||
|
||||
// mappingVariesByDestIP (boolean) is 'true' if the host's NAT mappings
|
||||
// vary based on the destination IP.
|
||||
"mappingVariesByDestIP": false,
|
||||
|
||||
// latency (JSON object) lists DERP server locations and their current
|
||||
// latency; "preferred" is 'true' for the node's preferred DERP
|
||||
// server for incoming traffic.
|
||||
"latency": {
|
||||
"Dallas": {
|
||||
"latencyMs": 60.463043
|
||||
},
|
||||
"New York City": {
|
||||
"preferred": true,
|
||||
"latencyMs": 31.323811
|
||||
}
|
||||
},
|
||||
|
||||
// clientSupports (JSON object) identifies features supported by the client.
|
||||
"clientSupports": {
|
||||
// hairpinning (boolean) is 'true' if your router can route connections
|
||||
// from endpoints on your LAN back to your LAN using those endpoints’
|
||||
// globally-mapped IPv4 addresses/ports
|
||||
"hairPinning": false,
|
||||
|
||||
// ipv6 (boolean) is 'true' if the device OS supports IPv6,
|
||||
// regardless of whether IPv6 internet connectivity is available.
|
||||
"ipv6": false,
|
||||
|
||||
// pcp (boolean) is 'true' if PCP port-mapping service exists on
|
||||
// your router.
|
||||
"pcp": false,
|
||||
|
||||
// pmp (boolean) is 'true' if NAT-PMP port-mapping service exists
|
||||
// on your router.
|
||||
"pmp": false,
|
||||
|
||||
// udp (boolean) is 'true' if UDP traffic is enabled on the
|
||||
// current network; if 'false', Tailscale may be unable to make
|
||||
// direct connections, and will rely on our DERP servers.
|
||||
"udp": true,
|
||||
|
||||
// upnp (boolean) is 'true' if UPnP port-mapping service exists
|
||||
// on your router.
|
||||
"upnp": false
|
||||
}
|
||||
},
|
||||
|
||||
// tags (array of strings) let you assign an identity to a device that
|
||||
// is separate from human users, and use it as part of an ACL to restrict
|
||||
// access. Once a device is tagged, the tag is the owner of that device.
|
||||
// A single node can have multiple tags assigned. This value is empty for
|
||||
// external devices.
|
||||
// Learn more about tags at https://tailscale.com/kb/1068/.
|
||||
"tags": ["tag:golink"],
|
||||
|
||||
// tailnetLockError (string) indicates an issue with the tailnet lock
|
||||
// node-key signature on this device.
|
||||
// This field is only populated when tailnet lock is enabled.
|
||||
"tailnetLockError": "",
|
||||
|
||||
// tailnetLockKey (string) is the node's tailnet lock key. Every node
|
||||
// generates a tailnet lock key (so the value will be present) even if
|
||||
// tailnet lock is not enabled.
|
||||
// Learn more about tailnet lock at https://tailscale.com/kb/1226/.
|
||||
"tailnetLockKey": "",
|
||||
|
||||
// postureIdentity contains extra identifiers from the device when the tailnet
|
||||
// it is connected to has device posture identification collection enabled.
|
||||
// If the device has not opted-in to posture identification collection, this
|
||||
// will contain {"disabled": true}.
|
||||
// Learn more about posture identity at https://tailscale.com/kb/1326/device-identity
|
||||
"postureIdentity": {
|
||||
"serialNumbers": ["CP74LFQJXM"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
# APIs
|
||||
|
||||
**[Device](#device)**
|
||||
|
||||
- Get a device: [`GET /api/v2/device/{deviceid}`](#get-device)
|
||||
- Delete a device: [`DELETE /api/v2/device/{deviceID}`](#delete-device)
|
||||
- Expire device key: [`POST /api/v2/device/{deviceID}/expire`](#expire-device-key)
|
||||
- [**Routes**](#routes)
|
||||
- Get device routes: [`GET /api/v2/device/{deviceID}/routes`](#get-device-routes)
|
||||
- Set device routes: [`POST /api/v2/device/{deviceID}/routes`](#set-device-routes)
|
||||
- [**Authorize**](#authorize)
|
||||
- Authorize a device: [`POST /api/v2/device/{deviceID}/authorized`](#authorize-device)
|
||||
- [**Tags**](#tags)
|
||||
- Update tags: [`POST /api/v2/device/{deviceID}/tags`](#update-device-tags)
|
||||
- [**Keys**](#keys)
|
||||
- Update device key: [`POST /api/v2/device/{deviceID}/key`](#update-device-key)
|
||||
- [**IP Addresses**](#ip-addresses)
|
||||
- Set device IPv4 address: [`POST /api/v2/device/{deviceID}/ip`](#set-device-ipv4-address)
|
||||
- [**Device posture attributes**](#device-posture-attributes)
|
||||
- Get device posture attributes: [`GET /api/v2/device/{deviceID}/attributes`](#get-device-posture-attributes)
|
||||
- Set custom device posture attributes: [`POST /api/v2/device/{deviceID}/attributes/{attributeKey}`](#set-device-posture-attributes)
|
||||
- Delete custom device posture attributes: [`DELETE /api/v2/device/{deviceID}/attributes/{attributeKey}`](#delete-custom-device-posture-attributes)
|
||||
- [**Device invites**](#invites-to-a-device)
|
||||
- List device invites: [`GET /api/v2/device/{deviceID}/device-invites`](#list-device-invites)
|
||||
- Create device invites: [`POST /api/v2/device/{deviceID}/device-invites`](#create-device-invites)
|
||||
|
||||
### Subnet routes
|
||||
|
||||
Devices within a tailnet can be set up as subnet routers.
|
||||
A subnet router acts as a gateway, relaying traffic from your Tailscale network onto your physical subnet.
|
||||
Setting up subnet routers exposes routes to other devices in the tailnet.
|
||||
Learn more about [subnet routers](https://tailscale.com/kb/1019).
|
||||
|
||||
A device can act as a subnet router if its subnet routes are both advertised and enabled.
|
||||
This is a two-step process, but the steps can occur in any order:
|
||||
|
||||
- The device that intends to act as a subnet router exposes its routes by **advertising** them.
|
||||
This is done in the Tailscale command-line interface.
|
||||
- The tailnet admin must approve the routes by **enabling** them.
|
||||
This is done in the [**Machines**](https://login.tailscale.com/admin/machines) page of the Tailscale admin console
|
||||
or [via the API](#set-device-routes).
|
||||
|
||||
If a device has advertised routes, they are not exposed to traffic until they are enabled by the tailnet admin.
|
||||
Conversely, if a tailnet admin pre-approves certain routes by enabling them, they are not available for routing until the device in question has advertised them.
|
||||
|
||||
The API exposes two methods for dealing with subnet routes:
|
||||
|
||||
- Get routes: [`GET /api/v2/device/{deviceID}/routes`](#get-device-routes) to fetch lists of advertised and enabled routes for a device
|
||||
- Set routes: [`POST /api/v2/device/{deviceID}/routes`](#set-device-routes) to set enabled routes for a device
|
||||
|
||||
## Get device
|
||||
|
||||
```http
|
||||
GET /api/v2/device/{deviceid}
|
||||
```
|
||||
|
||||
Retrieve the details for the specified device.
|
||||
This returns a JSON `device` object listing device attributes.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceid` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
#### `fields` (optional in query string)
|
||||
|
||||
Controls whether the response returns **all** object fields or only a predefined subset of fields.
|
||||
Currently, there are two supported options:
|
||||
|
||||
- **`all`:** return all object fields in the response
|
||||
- **`default`:** return all object fields **except**:
|
||||
- `enabledRoutes`
|
||||
- `advertisedRoutes`
|
||||
- `clientConnectivity` (which contains the following fields: `mappingVariesByDestIP`, `derp`, `endpoints`, `latency`, and `clientSupports`)
|
||||
- `postureIdentity`
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl "https://api.tailscale.com/api/v2/device/12345?fields=all" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"addresses":[
|
||||
"100.71.74.78",
|
||||
"fd7a:115c:a1e0:ac82:4843:ca90:697d:c36e"
|
||||
],
|
||||
"id":"12345",
|
||||
|
||||
// Additional fields as documented in device "Attributes" section above
|
||||
}
|
||||
{
|
||||
"addresses":[
|
||||
"100.74.66.78",
|
||||
"fd7a:115c:a1e0:ac82:4843:ca90:697d:c36f"
|
||||
],
|
||||
"id":"67890",
|
||||
|
||||
// Additional fields as documented in device "Attributes" section above
|
||||
}
|
||||
```
|
||||
|
||||
## Delete device
|
||||
|
||||
```http
|
||||
DELETE /api/v2/device/{deviceID}
|
||||
```
|
||||
|
||||
Deletes the supplied device from its tailnet.
|
||||
The device must belong to the user's tailnet.
|
||||
Deleting shared/external devices is not supported.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceid` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl -X DELETE 'https://api.tailscale.com/api/v2/device/12345' \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
If successful, the response should be empty:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200 OK
|
||||
```
|
||||
|
||||
If the device is not owned by your tailnet:
|
||||
|
||||
```http
|
||||
HTTP/1.1 501 Not Implemented
|
||||
...
|
||||
{"message":"cannot delete devices outside of your tailnet"}
|
||||
```
|
||||
|
||||
## Expire a device's key
|
||||
|
||||
```http
|
||||
POST /api/v2/device/{deviceID}/expire
|
||||
```
|
||||
|
||||
Mark a device's node key as expired.
|
||||
This will require the device to re-authenticate in order to connect to the tailnet.
|
||||
The device must belong to the requesting user's tailnet.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceid` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl -X POST 'https://api.tailscale.com/api/v2/device/12345/expire' \
|
||||
-u "tskey-api-xxxxx:" \
|
||||
-H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
If successful, the response should be empty:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200 OK
|
||||
```
|
||||
|
||||
## Routes
|
||||
|
||||
## Get device routes
|
||||
|
||||
```http
|
||||
GET /api/v2/device/{deviceID}/routes
|
||||
```
|
||||
|
||||
Retrieve the list of [subnet routes](#subnet-routes) that a device is advertising, as well as those that are enabled for it:
|
||||
|
||||
- **Enabled routes:** The subnet routes for this device that have been approved by the tailnet admin.
|
||||
- **Advertised routes:** The subnets this device intends to expose.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceid` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl "https://api.tailscale.com/api/v2/device/11055/routes" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
Returns the enabled and advertised subnet routes for a device.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"advertisedRoutes": ["10.0.0.0/16", "192.168.1.0/24"],
|
||||
"enabledRoutes": []
|
||||
}
|
||||
```
|
||||
|
||||
## Set device routes
|
||||
|
||||
```http
|
||||
POST /api/v2/device/{deviceID}/routes
|
||||
```
|
||||
|
||||
Sets a device's enabled [subnet routes](#subnet-routes) by replacing the existing list of subnet routes with the supplied parameters.
|
||||
Advertised routes cannot be set through the API, since they must be set directly on the device.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceid` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
#### `routes` (required in `POST` body)
|
||||
|
||||
The new list of enabled subnet routes.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"routes": ["10.0.0.0/16", "192.168.1.0/24"]
|
||||
}
|
||||
```
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl "https://api.tailscale.com/api/v2/device/11055/routes" \
|
||||
-u "tskey-api-xxxxx:" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data-binary '{"routes": ["10.0.0.0/16", "192.168.1.0/24"]}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
Returns the enabled and advertised subnet routes for a device.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"advertisedRoutes": ["10.0.0.0/16", "192.168.1.0/24"],
|
||||
"enabledRoutes": ["10.0.0.0/16", "192.168.1.0/24"]
|
||||
}
|
||||
```
|
||||
|
||||
## Authorize
|
||||
|
||||
## Authorize device
|
||||
|
||||
```http
|
||||
POST /api/v2/device/{deviceID}/authorized
|
||||
```
|
||||
|
||||
Authorize a device.
|
||||
This call marks a device as authorized or revokes its authorization for tailnets where device authorization is required, according to the `authorized` field in the payload.
|
||||
|
||||
This returns a successful 2xx response with an empty JSON object in the response body.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceid` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
#### `authorized` (required in `POST` body)
|
||||
|
||||
Specify whether the device is authorized. False to deauthorize an authorized device, and true to authorize a new device or to re-authorize a previously deauthorized device.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"authorized": true
|
||||
}
|
||||
```
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl "https://api.tailscale.com/api/v2/device/11055/authorized" \
|
||||
-u "tskey-api-xxxxx:" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data-binary '{"authorized": true}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is currently an empty JSON object.
|
||||
|
||||
## Tags
|
||||
|
||||
## Update device tags
|
||||
|
||||
```http
|
||||
POST /api/v2/device/{deviceID}/tags
|
||||
```
|
||||
|
||||
Update the tags set on a device.
|
||||
Tags let you assign an identity to a device that is separate from human users, and use that identity as part of an ACL to restrict access.
|
||||
Tags are similar to role accounts, but more flexible.
|
||||
|
||||
Tags are created in the tailnet policy file by defining the tag and an owner of the tag.
|
||||
Once a device is tagged, the tag is the owner of that device.
|
||||
A single node can have multiple tags assigned.
|
||||
|
||||
Consult the policy file for your tailnet in the [admin console](https://login.tailscale.com/admin/acls) for the list of tags that have been created for your tailnet.
|
||||
Learn more about [tags](https://tailscale.com/kb/1068/).
|
||||
|
||||
This returns a 2xx code if successful, with an empty JSON object in the response body.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceid` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
#### `tags` (required in `POST` body)
|
||||
|
||||
The new list of tags for the device.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"tags": ["tag:foo", "tag:bar"]
|
||||
}
|
||||
```
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl "https://api.tailscale.com/api/v2/device/11055/tags" \
|
||||
-u "tskey-api-xxxxx:" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data-binary '{"tags": ["tag:foo", "tag:bar"]}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is currently an empty JSON object.
|
||||
|
||||
If the tags supplied in the `POST` call do not exist in the tailnet policy file, the response is '400 Bad Request':
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"message": "requested tags [tag:madeup tag:wrongexample] are invalid or not permitted"
|
||||
}
|
||||
```
|
||||
|
||||
## Keys
|
||||
|
||||
## Update device key
|
||||
|
||||
```http
|
||||
POST /api/v2/device/{deviceID}/key
|
||||
```
|
||||
|
||||
Update properties of the device key.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceid` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
#### `keyExpiryDisabled` (optional in `POST` body)
|
||||
|
||||
Disable or enable the expiry of the device's node key.
|
||||
|
||||
When a device is added to a tailnet, its key expiry is set according to the tailnet's [key expiry](https://tailscale.com/kb/1028/) setting.
|
||||
If the key is not refreshed and expires, the device can no longer communicate with other devices in the tailnet.
|
||||
|
||||
Set `"keyExpiryDisabled": true` to disable key expiry for the device and allow it to rejoin the tailnet (for example to access an accidentally expired device).
|
||||
You can then call this method again with `"keyExpiryDisabled": false` to re-enable expiry.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"keyExpiryDisabled": true
|
||||
}
|
||||
```
|
||||
|
||||
- If `true`, disable the device's key expiry.
|
||||
The original key expiry time is still maintained.
|
||||
Upon re-enabling, the key will expire at that original time.
|
||||
- If `false`, enable the device's key expiry.
|
||||
Sets the key to expire at the original expiry time prior to disabling.
|
||||
The key may already have expired. In that case, the device must be re-authenticated.
|
||||
- Empty value will not change the key expiry.
|
||||
|
||||
This returns a 2xx code on success, with an empty JSON object in the response body.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl "https://api.tailscale.com/api/v2/device/11055/key" \
|
||||
-u "tskey-api-xxxxx:" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data-binary '{"keyExpiryDisabled": true}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is currently an empty JSON object.
|
||||
|
||||
## IP Addresses
|
||||
|
||||
## Set device IPv4 address
|
||||
|
||||
```http
|
||||
POST /api/v2/device/{deviceID}/ip
|
||||
```
|
||||
|
||||
Set the Tailscale IPv4 address of the device.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceid` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
#### `ipv4` (optional in `POST` body)
|
||||
|
||||
Provide a new IPv4 address for the device.
|
||||
|
||||
When a device is added to a tailnet, its Tailscale IPv4 address is set at random either from the CGNAT range, or a subset of the CGNAT range specified by an [ip pool](https://tailscale.com/kb/1304/ip-pool).
|
||||
This endpoint can be used to replace the existing IPv4 address with a specific value.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"ipv4": "100.80.0.1"
|
||||
}
|
||||
```
|
||||
|
||||
This action will break any existing connections to this machine.
|
||||
You will need to reconnect to this machine using the new IP address.
|
||||
You may also need to flush your DNS cache.
|
||||
|
||||
This returns a 2xx code on success, with an empty JSON object in the response body.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl "https://api.tailscale.com/api/v2/device/11055/ip" \
|
||||
-u "tskey-api-xxxxx:" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data-binary '{"ipv4": "100.80.0.1"}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is currently an empty JSON object.
|
||||
|
||||
## Device posture attributes
|
||||
|
||||
## Get device posture attributes
|
||||
|
||||
The posture attributes API endpoints can be called with OAuth access tokens with
|
||||
an `acl` or `devices` [scope](https://tailscale.com/kb/1215/oauth-clients#scopes), or personal access tokens belonging to
|
||||
[user roles](https://tailscale.com/kb/1138/user-roles) Owners, Admins, Network Admins, or IT Admins.
|
||||
|
||||
```
|
||||
GET /api/v2/device/{deviceID}/attributes
|
||||
```
|
||||
|
||||
Retrieve all posture attributes for the specified device. This returns a JSON object of all the key-value pairs of posture attributes for the device.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceID` (required in URL path)
|
||||
|
||||
The ID of the device to fetch posture attributes for.
|
||||
|
||||
### Request example
|
||||
|
||||
```
|
||||
curl "https://api.tailscale.com/api/v2/device/11055/attributes" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 200 on success. The response body is a JSON object containing all the posture attributes assigned to the node. Attribute values can be strings, numbers or booleans.
|
||||
|
||||
```json
|
||||
{
|
||||
"attributes": {
|
||||
"custom:myScore": 87,
|
||||
"custom:diskEncryption": true,
|
||||
"custom:myAttribute": "my_value",
|
||||
"node:os": "linux",
|
||||
"node:osVersion": "5.19.0-42-generic",
|
||||
"node:tsReleaseTrack": "stable",
|
||||
"node:tsVersion": "1.40.0",
|
||||
"node:tsAutoUpdate": false
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Set custom device posture attributes
|
||||
|
||||
```
|
||||
POST /api/v2/device/{deviceID}/attributes/{attributeKey}
|
||||
```
|
||||
|
||||
Create or update a custom posture attribute on the specified device. User-managed attributes must be in the `custom` namespace, which is indicated by prefixing the attribute key with `custom:`.
|
||||
|
||||
Custom device posture attributes are available for the Personal and Enterprise plans.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceID` (required in URL path)
|
||||
|
||||
The ID of the device on which to set the custom posture attribute.
|
||||
|
||||
#### `attributeKey` (required in URL path)
|
||||
|
||||
The name of the posture attribute to set. This must be prefixed with `custom:`.
|
||||
|
||||
Keys have a maximum length of 50 characters including the namespace, and can only contain letters, numbers, underscores, and a delimiting colon.
|
||||
|
||||
Keys are case-sensitive. Keys must be unique, but are checked for uniqueness in a case-insensitive manner. For example, `custom:MyAttribute` and `custom:myattribute` cannot both be set within a single tailnet.
|
||||
|
||||
All values for a given key need to be of the same type, which is determined when the first value is written for a given key. For example, `custom:myattribute` cannot have a numeric value (`87`) for one node and a string value (`"78"`) for another node within the same tailnet.
|
||||
|
||||
#### `value` (required in `POST` body)
|
||||
|
||||
```json
|
||||
{
|
||||
"value": "foo"
|
||||
}
|
||||
```
|
||||
|
||||
A value can be either a string, number or boolean.
|
||||
|
||||
A string value can have a maximum length of 50 characters, and can only contain letters, numbers, underscores, and periods.
|
||||
|
||||
A number value is an integer and must be a JSON safe number (up to 2^53 - 1).
|
||||
|
||||
### Request example
|
||||
|
||||
```
|
||||
curl "https://api.tailscale.com/api/v2/device/11055/attributes/custom:my_attribute" \
|
||||
-u "tskey-api-xxxxx:" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data-binary '{"value": "my_value"}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is currently an empty JSON object.
|
||||
|
||||
## Delete custom device posture attributes
|
||||
|
||||
```
|
||||
DELETE /api/v2/device/{deviceID}/attributes/{attributeKey}
|
||||
```
|
||||
|
||||
Delete a posture attribute from the specified device. This is only applicable to user-managed posture attributes in the `custom` namespace, which is indicated by prefixing the attribute key with `custom:`.
|
||||
|
||||
<PricingPlanNote feature="Custom device posture attributes" verb="are" plan="the Personal and Enterprise plans" />
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceID` (required in URL path)
|
||||
|
||||
The ID of the device from which to delete the posture attribute.
|
||||
|
||||
#### `attributeKey` (required in URL path)
|
||||
|
||||
The name of the posture attribute to delete. This must be prefixed with `custom:`.
|
||||
|
||||
Keys have a maximum length of 50 characters including the namespace, and can only contain letters, numbers, underscores, and a delimiting colon.
|
||||
|
||||
### Request example
|
||||
|
||||
```
|
||||
curl -X DELETE "https://api.tailscale.com/api/v2/device/11055/attributes/custom:my_attribute" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is currently an empty JSON object.
|
||||
|
||||
## Invites to a device
|
||||
|
||||
The device sharing invite methods let you create and list [invites to share a device](https://tailscale.com/kb/1084/sharing).
|
||||
|
||||
## List device invites
|
||||
|
||||
```http
|
||||
GET /api/v2/device/{deviceID}/device-invites
|
||||
```
|
||||
|
||||
List all share invites for a device.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceID` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl -X GET "https://api.tailscale.com/api/v2/device/11055/device-invites" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```jsonc
|
||||
[
|
||||
{
|
||||
"id": "12345",
|
||||
"created": "2024-05-08T20:19:51.777861756Z",
|
||||
"tailnetId": 59954,
|
||||
"deviceId": 11055,
|
||||
"sharerId": 22011,
|
||||
"allowExitNode": true,
|
||||
"email": "user@example.com",
|
||||
"lastEmailSentAt": "2024-05-08T20:19:51.777861756Z",
|
||||
"inviteUrl": "https://login.tailscale.com/admin/invite/<code>",
|
||||
"accepted": false
|
||||
},
|
||||
{
|
||||
"id": "12346",
|
||||
"created": "2024-04-03T21:38:49.333829261Z",
|
||||
"tailnetId": 59954,
|
||||
"deviceId": 11055,
|
||||
"sharerId": 22012,
|
||||
"inviteUrl": "https://login.tailscale.com/admin/invite/<code>",
|
||||
"accepted": true,
|
||||
"acceptedBy": {
|
||||
"id": 33223,
|
||||
"loginName": "someone@example.com",
|
||||
"profilePicUrl": ""
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Create device invites
|
||||
|
||||
```http
|
||||
POST /api/v2/device/{deviceID}/device-invites
|
||||
```
|
||||
|
||||
Create new share invites for a device.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceID` (required in URL path)
|
||||
|
||||
The ID of the device.
|
||||
|
||||
#### List of invite requests (required in `POST` body)
|
||||
|
||||
Each invite request is an object with the following optional fields:
|
||||
|
||||
- **`multiUse`:** (Optional) Specify whether the invite can be accepted more than once. When set to `true`, it results in an invite that can be accepted up to 1,000 times.
|
||||
- **`allowExitNode`:** (Optional) Specify whether the invited user can use the device as an exit node when it advertises as one.
|
||||
- **`email`:** (Optional) Specify the email to send the created invite. If not set, the endpoint generates and returns an invite URL (but doesn't send it out).
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl -X POST "https://api.tailscale.com/api/v2/device/11055/device-invites" \
|
||||
-u "tskey-api-xxxxx:" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data-binary '[{"multiUse": true, "allowExitNode": true, "email":"user@example.com"}]'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```jsonc
|
||||
[
|
||||
{
|
||||
"id": "12347",
|
||||
"created": "2024-05-08T20:29:45.842358533Z",
|
||||
"tailnetId": 59954,
|
||||
"deviceId": 11055,
|
||||
"sharerId": 22012,
|
||||
"multiUse": true,
|
||||
"allowExitNode": true,
|
||||
"email": "user@example.com",
|
||||
"lastEmailSentAt": "2024-05-08T20:29:45.842358533Z",
|
||||
"inviteUrl": "https://login.tailscale.com/admin/invite/<code>",
|
||||
"accepted": false
|
||||
}
|
||||
]
|
||||
```
|
||||
221
publicapi/deviceinvites.md
Normal file
221
publicapi/deviceinvites.md
Normal file
@@ -0,0 +1,221 @@
|
||||
# Device invites
|
||||
|
||||
A device invite is an invitation that shares a device with an external user (a user not in the device's tailnet).
|
||||
|
||||
Each device invite has a unique ID that is used to identify the invite in API calls.
|
||||
You can find all device invite IDs for a particular device by [listing all device invites for a device](#list-device-invites).
|
||||
|
||||
### Attributes
|
||||
|
||||
```jsonc
|
||||
{
|
||||
  // id (string) is the unique identifier for the invite.
|
||||
// Supply this value wherever {deviceInviteId} is indicated in the endpoint.
|
||||
"id": "12346",
|
||||
|
||||
// created is the creation time of the invite.
|
||||
"created": "2024-04-03T21:38:49.333829261Z",
|
||||
|
||||
// tailnetId is the ID of the tailnet to which the shared device belongs.
|
||||
"tailnetId": 59954,
|
||||
|
||||
// deviceId is the ID of the device being shared.
|
||||
"deviceId": 11055,
|
||||
|
||||
// sharerId is the ID of the user who created the share invite.
|
||||
"sharerId": 22012,
|
||||
|
||||
// multiUse specifies whether this device invite can be accepted more than
|
||||
// once.
|
||||
"multiUse": false,
|
||||
|
||||
// allowExitNode specifies whether the invited user is able to use the
|
||||
// device as an exit node when the device is advertising as one.
|
||||
"allowExitNode": true,
|
||||
|
||||
// email is the email to which the invite was sent.
|
||||
// If empty, the invite was not emailed to anyone, but the inviteUrl can be
|
||||
// shared manually.
|
||||
"email": "user@example.com",
|
||||
|
||||
// lastEmailSentAt is the last time the invite was attempted to be sent to
|
||||
// Email. Only ever set if Email is not empty.
|
||||
"lastEmailSentAt": "2024-04-03T21:38:49.333829261Z",
|
||||
|
||||
// inviteUrl is the link to accept the invite.
|
||||
// Anyone with this link can accept the invite.
|
||||
// It is not restricted to the person to which the invite was emailed.
|
||||
"inviteUrl": "https://login.tailscale.com/admin/invite/<code>",
|
||||
|
||||
// accepted is true when share invite has been accepted.
|
||||
"accepted": true,
|
||||
|
||||
// acceptedBy is set when the invite has been accepted.
|
||||
// It holds information about the user who accepted the share invite.
|
||||
"acceptedBy": {
|
||||
// id is the ID of the user who accepted the share invite.
|
||||
"id": 33223,
|
||||
|
||||
// loginName is the login name of the user who accepted the share invite.
|
||||
"loginName": "someone@example.com",
|
||||
|
||||
// profilePicUrl is optionally the profile pic URL for the user who accepted
|
||||
// the share invite.
|
||||
"profilePicUrl": ""
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
# API
|
||||
|
||||
**[Device invites](#device-invites)**
|
||||
|
||||
- Get device invite: [`GET /api/v2/device-invites/{deviceInviteId}`](#get-device-invite)
|
||||
- Delete device invite: [`DELETE /api/v2/device-invites/{deviceInviteId}`](#delete-device-invite)
|
||||
- Resend device invite (by email): [`POST /api/v2/device-invites/{deviceInviteId}/resend`](#resend-device-invite)
|
||||
- Accept device invite: [`POST /api/v2/device-invites/-/accept`](#accept-device-invite)
|
||||
|
||||
## Get device invite
|
||||
|
||||
```http
|
||||
GET /api/v2/device-invites/{deviceInviteId}
|
||||
```
|
||||
|
||||
Retrieve the specified device invite.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceInviteId` (required in URL path)
|
||||
|
||||
The ID of the device share invite.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl "https://api.tailscale.com/api/v2/device-invites/12346" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"id": "12346",
|
||||
"created": "2024-04-03T21:38:49.333829261Z",
|
||||
"tailnetId": 59954,
|
||||
"deviceId": 11055,
|
||||
"sharerId": 22012,
|
||||
"multiUse": true,
|
||||
"allowExitNode": true,
|
||||
"email": "user@example.com",
|
||||
"lastEmailSentAt": "2024-04-03T21:38:49.333829261Z",
|
||||
"inviteUrl": "https://login.tailscale.com/admin/invite/<code>",
|
||||
"accepted": false
|
||||
}
|
||||
```
|
||||
|
||||
## Delete device invite
|
||||
|
||||
```http
|
||||
DELETE /api/v2/device-invites/{deviceInviteId}
|
||||
```
|
||||
|
||||
Delete the specified device invite.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceInviteId` (required in URL path)
|
||||
|
||||
The ID of the device share invite.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl -X DELETE "https://api.tailscale.com/api/v2/device-invites/12346" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is an empty JSON object.
|
||||
|
||||
## Resend device invite
|
||||
|
||||
```http
|
||||
POST /api/v2/device-invites/{deviceInviteId}/resend
|
||||
```
|
||||
|
||||
Resend the specified device invite by email. You can only use this if the specified invite was originally created with an email specified. Refer to [creating device invites for a device](#create-device-invites).
|
||||
|
||||
Note: Invite resends are rate limited to one per minute.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `deviceInviteId` (required in URL path)
|
||||
|
||||
The ID of the device share invite.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl -X POST "https://api.tailscale.com/api/v2/device-invites/12346/resend" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is an empty JSON object.
|
||||
|
||||
## Accept device invite
|
||||
|
||||
```http
|
||||
POST /api/v2/device-invites/-/accept
|
||||
```
|
||||
|
||||
Accept the specified device invite, sharing the device into the accepting user's tailnet.
|
||||
See [creating device invites for a device](#create-device-invites).
|
||||
|
||||
Note that the invite can be supplied as either the full invite URL or just its code component.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `invite` (required in `POST` body)
|
||||
|
||||
The URL of the invite (in the form "https://login.tailscale.com/admin/invite/{code}") or the "{code}" component of the URL.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl -X POST "https://api.tailscale.com/api/v2/device-invites/-/accept" \
|
||||
-u "tskey-api-xxxxx:" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data-binary '[{"invite": "https://login.tailscale.com/admin/invite/xxxxxx"}]'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"device": {
|
||||
"id": "11055",
|
||||
"os": "iOS",
|
||||
"name": "my-phone",
|
||||
"fqdn": "my-phone.something.ts.net",
|
||||
"ipv4": "100.x.y.z",
|
||||
"ipv6": "fd7a:115c:x::y:z",
|
||||
"includeExitNode": false
|
||||
},
|
||||
"sharer": {
|
||||
"id": "22012",
|
||||
"displayName": "Some User",
|
||||
"loginName": "someuser@example.com",
|
||||
"profilePicURL": ""
|
||||
},
|
||||
"acceptedBy": {
|
||||
"id": "33233",
|
||||
"displayName": "Another User",
|
||||
"loginName": "anotheruser@exmaple2.com",
|
||||
"profilePicURL": ""
|
||||
}
|
||||
}
|
||||
```
|
||||
118
publicapi/readme.md
Normal file
118
publicapi/readme.md
Normal file
@@ -0,0 +1,118 @@
|
||||
# Tailscale API
|
||||
|
||||
The Tailscale API is a (mostly) RESTful API. Typically, both `POST` bodies and responses are JSON-encoded.
|
||||
|
||||
## Base URL
|
||||
|
||||
The base URL for the Tailscale API is `https://api.tailscale.com/api/v2/`.
|
||||
|
||||
Examples in this document may abbreviate this to `/api/v2/`.
|
||||
|
||||
## Authentication
|
||||
|
||||
Requests to the Tailscale API are authenticated with an API access token (sometimes called an API key).
|
||||
Access tokens can be supplied as the username portion of HTTP Basic authentication (leave the password blank) or as an OAuth Bearer token:
|
||||
|
||||
```sh
|
||||
# passing token with basic auth
|
||||
curl -u "tskey-api-xxxxx:" https://api.tailscale.com/api/v2/...
|
||||
|
||||
# passing token as bearer token
|
||||
curl -H "Authorization: Bearer tskey-api-xxxxx" https://api.tailscale.com/api/v2/...
|
||||
```
|
||||
|
||||
Access tokens for individual users can be created and managed from the [**Keys**](https://login.tailscale.com/admin/settings/keys) page of the admin console.
|
||||
These tokens will have the same permissions as the owning user, and can be set to expire in 1 to 90 days.
|
||||
Access tokens are identifiable by the prefix `tskey-api-`.
|
||||
|
||||
Alternatively, an OAuth client can be used to create short-lived access tokens with scoped permission.
|
||||
OAuth clients don't expire, and can therefore be used to provide ongoing access to the API, creating access tokens as needed.
|
||||
OAuth clients and the access tokens they create are not tied to an individual Tailscale user.
|
||||
OAuth client secrets are identifiable by the prefix `tskey-client-`.
|
||||
Learn more about [OAuth clients](https://tailscale.com/kb/1215/).
|
||||
|
||||
## Errors
|
||||
|
||||
The Tailscale API returns status codes consistent with [standard HTTP conventions](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status).
|
||||
In addition to the status code, errors may include additional information in the response body:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"message": "additional error information"
|
||||
}
|
||||
```
|
||||
|
||||
## Pagination
|
||||
|
||||
The Tailscale API does not currently support pagination. All results are returned at once.
|
||||
|
||||
# APIs
|
||||
|
||||
**[Device](./device.md#device)**
|
||||
|
||||
- Get a device: [`GET /api/v2/device/{deviceid}`](./device.md#get-device)
|
||||
- Delete a device: [`DELETE /api/v2/device/{deviceID}`](./device.md#delete-device)
|
||||
- Expire device key: [`POST /api/v2/device/{deviceID}/expire`](./device.md#expire-a-devices-key)
|
||||
- [**Routes**](./device.md#routes)
|
||||
- Get device routes: [`GET /api/v2/device/{deviceID}/routes`](./device.md#get-device-routes)
|
||||
- Set device routes: [`POST /api/v2/device/{deviceID}/routes`](./device.md#set-device-routes)
|
||||
- [**Authorize**](./device.md#authorize)
|
||||
- Authorize a device: [`POST /api/v2/device/{deviceID}/authorized`](./device.md#authorize-device)
|
||||
- [**Tags**](./device.md#tags)
|
||||
- Update tags: [`POST /api/v2/device/{deviceID}/tags`](./device.md#update-device-tags)
|
||||
- [**Keys**](./device.md#keys)
|
||||
- Update device key: [`POST /api/v2/device/{deviceID}/key`](./device.md#update-device-key)
|
||||
- [**IP Addresses**](./device.md#ip-addresses)
|
||||
- Set device IPv4 address: [`POST /api/v2/device/{deviceID}/ip`](./device.md#set-device-ipv4-address)
|
||||
- [**Device posture attributes**](./device.md#device-posture-attributes)
|
||||
- Get device posture attributes: [`GET /api/v2/device/{deviceID}/attributes`](./device.md#get-device-posture-attributes)
|
||||
  - Set custom device posture attributes: [`POST /api/v2/device/{deviceID}/attributes/{attributeKey}`](./device.md#set-custom-device-posture-attributes)
|
||||
- Delete custom device posture attributes: [`DELETE /api/v2/device/{deviceID}/attributes/{attributeKey}`](./device.md#delete-custom-device-posture-attributes)
|
||||
- [**Device invites**](./device.md#invites-to-a-device)
|
||||
- List device invites: [`GET /api/v2/device/{deviceID}/device-invites`](./device.md#list-device-invites)
|
||||
- Create device invites: [`POST /api/v2/device/{deviceID}/device-invites`](./device.md#create-device-invites)
|
||||
|
||||
**[Tailnet](./tailnet.md#tailnet)**
|
||||
|
||||
- [**Policy File**](./tailnet.md#policy-file)
|
||||
- Get policy file: [`GET /api/v2/tailnet/{tailnet}/acl`](./tailnet.md#get-policy-file)
|
||||
- Update policy file: [`POST /api/v2/tailnet/{tailnet}/acl`](./tailnet.md#update-policy-file)
|
||||
- Preview rule matches: [`POST /api/v2/tailnet/{tailnet}/acl/preview`](./tailnet.md#preview-policy-file-rule-matches)
|
||||
- Validate and test policy file: [`POST /api/v2/tailnet/{tailnet}/acl/validate`](./tailnet.md#validate-and-test-policy-file)
|
||||
- [**Devices**](./tailnet.md#devices)
|
||||
- List tailnet devices: [`GET /api/v2/tailnet/{tailnet}/devices`](./tailnet.md#list-tailnet-devices)
|
||||
- [**Keys**](./tailnet.md#tailnet-keys)
|
||||
- List tailnet keys: [`GET /api/v2/tailnet/{tailnet}/keys`](./tailnet.md#list-tailnet-keys)
|
||||
- Create an auth key: [`POST /api/v2/tailnet/{tailnet}/keys`](./tailnet.md#create-auth-key)
|
||||
- Get a key: [`GET /api/v2/tailnet/{tailnet}/keys/{keyid}`](./tailnet.md#get-key)
|
||||
- Delete a key: [`DELETE /api/v2/tailnet/{tailnet}/keys/{keyid}`](./tailnet.md#delete-key)
|
||||
- [**DNS**](./tailnet.md#dns)
|
||||
- [**Nameservers**](./tailnet.md#nameservers)
|
||||
- Get nameservers: [`GET /api/v2/tailnet/{tailnet}/dns/nameservers`](./tailnet.md#get-nameservers)
|
||||
- Set nameservers: [`POST /api/v2/tailnet/{tailnet}/dns/nameservers`](./tailnet.md#set-nameservers)
|
||||
- [**Preferences**](./tailnet.md#preferences)
|
||||
- Get DNS preferences: [`GET /api/v2/tailnet/{tailnet}/dns/preferences`](./tailnet.md#get-dns-preferences)
|
||||
- Set DNS preferences: [`POST /api/v2/tailnet/{tailnet}/dns/preferences`](./tailnet.md#set-dns-preferences)
|
||||
- [**Search Paths**](./tailnet.md#search-paths)
|
||||
- Get search paths: [`GET /api/v2/tailnet/{tailnet}/dns/searchpaths`](./tailnet.md#get-search-paths)
|
||||
- Set search paths: [`POST /api/v2/tailnet/{tailnet}/dns/searchpaths`](./tailnet.md#set-search-paths)
|
||||
- [**Split DNS**](./tailnet.md#split-dns)
|
||||
- Get split DNS: [`GET /api/v2/tailnet/{tailnet}/dns/split-dns`](./tailnet.md#get-split-dns)
|
||||
- Update split DNS: [`PATCH /api/v2/tailnet/{tailnet}/dns/split-dns`](./tailnet.md#update-split-dns)
|
||||
- Set split DNS: [`PUT /api/v2/tailnet/{tailnet}/dns/split-dns`](./tailnet.md#set-split-dns)
|
||||
- [**User invites**](./tailnet.md#tailnet-user-invites)
|
||||
- List user invites: [`GET /api/v2/tailnet/{tailnet}/user-invites`](./tailnet.md#list-user-invites)
|
||||
- Create user invites: [`POST /api/v2/tailnet/{tailnet}/user-invites`](./tailnet.md#create-user-invites)
|
||||
|
||||
**[User invites](./userinvites.md#user-invites)**
|
||||
|
||||
- Get user invite: [`GET /api/v2/user-invites/{userInviteId}`](./userinvites.md#get-user-invite)
|
||||
- Delete user invite: [`DELETE /api/v2/user-invites/{userInviteId}`](./userinvites.md#delete-user-invite)
|
||||
- Resend user invite (by email): [`POST /api/v2/user-invites/{userInviteId}/resend`](./userinvites.md#resend-user-invite)
|
||||
|
||||
**[Device invites](./deviceinvites.md#device-invites)**
|
||||
|
||||
- Get device invite: [`GET /api/v2/device-invites/{deviceInviteId}`](./deviceinvites.md#get-device-invite)
|
||||
- Delete device invite: [`DELETE /api/v2/device-invites/{deviceInviteId}`](./deviceinvites.md#delete-device-invite)
|
||||
- Resend device invite (by email): [`POST /api/v2/device-invites/{deviceInviteId}/resend`](./deviceinvites.md#resend-device-invite)
|
||||
- Accept device invite: [`POST /api/v2/device-invites/-/accept`](./deviceinvites.md#accept-device-invite)
|
||||
1389
publicapi/tailnet.md
Normal file
1389
publicapi/tailnet.md
Normal file
File diff suppressed because it is too large
Load Diff
144
publicapi/userinvites.md
Normal file
144
publicapi/userinvites.md
Normal file
@@ -0,0 +1,144 @@
|
||||
# User invites
|
||||
|
||||
A user invite is an active invitation that lets a user join a tailnet with a pre-assigned [user role](https://tailscale.com/kb/1138/user-roles).
|
||||
|
||||
Each user invite has a unique ID that is used to identify the invite in API calls.
|
||||
You can find all user invite IDs for a particular tailnet by [listing user invites](#list-user-invites).
|
||||
|
||||
## Attributes
|
||||
|
||||
```jsonc
|
||||
{
|
||||
// id (string) is the unique identifier for the invite.
|
||||
// Supply this value wherever {userInviteId} is indicated in the endpoint.
|
||||
"id": "12346",
|
||||
|
||||
// role is the tailnet user role to assign to the invited user upon accepting
|
||||
// the invite. Value options are "member", "admin", "it-admin", "network-admin",
|
||||
// "billing-admin", and "auditor".
|
||||
"role": "admin",
|
||||
|
||||
// tailnetId is the ID of the tailnet to which the user was invited.
|
||||
"tailnetId": 59954,
|
||||
|
||||
// inviterId is the ID of the user who created the invite.
|
||||
"inviterId": 22012,
|
||||
|
||||
// email is the email to which the invite was sent.
|
||||
// If empty, the invite was not emailed to anyone, but the inviteUrl can be
|
||||
// shared manually.
|
||||
"email": "user@example.com",
|
||||
|
||||
// lastEmailSentAt is the last time the invite was attempted to be sent to
|
||||
// `email`. Only ever set if `email` is not empty.
|
||||
"lastEmailSentAt": "2024-04-03T21:38:49.333829261Z",
|
||||
|
||||
// inviteUrl is included when `email` is not part of the tailnet's domain,
|
||||
// or when `email` is empty. It is the link to accept the invite.
|
||||
//
|
||||
// When included, anyone with this link can accept the invite.
|
||||
// It is not restricted to the person to whom the invite was emailed.
|
||||
//
|
||||
// When `email` is part of the tailnet's domain (has the same @domain.com
|
||||
// suffix as the tailnet), the user can join the tailnet automatically by
|
||||
// logging in with their domain email at https://login.tailscale.com/start.
|
||||
// They'll be assigned the specified `role` upon signing in for the first
|
||||
// time.
|
||||
"inviteUrl": "https://login.tailscale.com/admin/invite/<code>"
|
||||
}
|
||||
```
|
||||
|
||||
# API
|
||||
|
||||
**[User invites](#user-invites)**
|
||||
|
||||
- Get user invite: [`GET /api/v2/user-invites/{userInviteId}`](#get-user-invite)
|
||||
- Delete user invite: [`DELETE /api/v2/user-invites/{userInviteId}`](#delete-user-invite)
|
||||
- Resend user invite (by email): [`POST /api/v2/user-invites/{userInviteId}/resend`](#resend-user-invite)
|
||||
|
||||
## Get user invite
|
||||
|
||||
```http
|
||||
GET /api/v2/user-invites/{userInviteId}
|
||||
```
|
||||
|
||||
Retrieve the specified user invite.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `userInviteId` (required in URL path)
|
||||
|
||||
The ID of the user invite.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl "https://api.tailscale.com/api/v2/user-invites/29214" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"id": "29214",
|
||||
"role": "admin",
|
||||
"tailnetId": 12345,
|
||||
"inviterId": 34567,
|
||||
"email": "user@example.com",
|
||||
"lastEmailSentAt": "2024-05-09T16:23:26.91778771Z",
|
||||
"inviteUrl": "https://login.tailscale.com/uinv/<code>"
|
||||
}
|
||||
```
|
||||
|
||||
## Delete user invite
|
||||
|
||||
```http
|
||||
DELETE /api/v2/user-invites/{userInviteId}
|
||||
```
|
||||
|
||||
Delete the specified user invite.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `userInviteId` (required in URL path)
|
||||
|
||||
The ID of the user invite.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl -X DELETE "https://api.tailscale.com/api/v2/user-invites/29214" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is an empty JSON object.
|
||||
|
||||
## Resend user invite
|
||||
|
||||
```http
|
||||
POST /api/v2/user-invites/{userInviteId}/resend
|
||||
```
|
||||
|
||||
Resend the specified user invite by email. You can only use this if the specified invite was originally created with an email specified. Refer to [creating user invites for a tailnet](./tailnet.md#create-user-invites).
|
||||
|
||||
Note: Invite resends are rate limited to one per minute.
|
||||
|
||||
### Parameters
|
||||
|
||||
#### `userInviteId` (required in URL path)
|
||||
|
||||
The ID of the user invite.
|
||||
|
||||
### Request example
|
||||
|
||||
```sh
|
||||
curl -X POST "https://api.tailscale.com/api/v2/user-invites/29214/resend" \
|
||||
-u "tskey-api-xxxxx:"
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
The response is 2xx on success. The response body is an empty JSON object.
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user