Compare commits
37 Commits
bradfitz/s
...
Xe/gitops-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2716667c9d | ||
|
|
affa25097c | ||
|
|
227c6b2a53 | ||
|
|
90ccba6730 | ||
|
|
f7a36dfeb1 | ||
|
|
9514ed33d2 | ||
|
|
1d33157ab9 | ||
|
|
3e06b9ea7a | ||
|
|
480fd6c797 | ||
|
|
41e60dae80 | ||
|
|
43f3a969ca | ||
|
|
d8cb5aae17 | ||
|
|
b763a12331 | ||
|
|
de2dcda2e0 | ||
|
|
b7f1fe7b0d | ||
|
|
9bd3b5b89c | ||
|
|
cfdb862673 | ||
|
|
6f5096fa61 | ||
|
|
2a22ea3e83 | ||
|
|
4d0461f721 | ||
|
|
393a229de9 | ||
|
|
165c8f898e | ||
|
|
2491fe1afe | ||
|
|
c1cb3efbba | ||
|
|
4c0feba38e | ||
|
|
3c892d106c | ||
|
|
bd4b27753e | ||
|
|
469c30c33b | ||
|
|
c6648db333 | ||
|
|
9fcda1f0a0 | ||
|
|
0d52674a84 | ||
|
|
931f18b575 | ||
|
|
4f1374ec9e | ||
|
|
af412e8874 | ||
|
|
004f0ca3e0 | ||
|
|
16c85d0dc5 | ||
|
|
7fb6781bda |
4
.github/workflows/cross-wasm.yml
vendored
4
.github/workflows/cross-wasm.yml
vendored
@@ -25,11 +25,11 @@ jobs:
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Wasm build CLI and client modules
|
||||
- name: Wasm client build
|
||||
env:
|
||||
GOOS: js
|
||||
GOARCH: wasm
|
||||
run: go build ./cmd/tailscale/cli ./ipn/... ./net/... ./safesocket ./types/... ./wgengine/...
|
||||
run: go build ./cmd/tsconnect/wasm
|
||||
|
||||
- uses: k0kubun/action-slack@v2.0.0
|
||||
with:
|
||||
|
||||
@@ -72,3 +72,4 @@ FROM alpine:3.16
|
||||
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables
|
||||
|
||||
COPY --from=build-env /go/bin/* /usr/local/bin/
|
||||
COPY --from=build-env /go/src/tailscale/docs/k8s/run.sh /usr/local/bin/
|
||||
|
||||
@@ -1 +1 @@
|
||||
1.27.0
|
||||
1.29.0
|
||||
|
||||
2
api.md
2
api.md
@@ -1120,7 +1120,7 @@ Replaces the list of searchpaths with the list supplied by the user and returns
|
||||
`searchPaths` - A list of searchpaths in JSON.
|
||||
```
|
||||
{
|
||||
"searchPaths: ["user1.example.com", "user2.example.com"]
|
||||
"searchPaths": ["user1.example.com", "user2.example.com"]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"expvar"
|
||||
"log"
|
||||
"net/http"
|
||||
@@ -45,8 +44,8 @@ func addWebSocketSupport(s *derp.Server, base http.Handler) http.Handler {
|
||||
return
|
||||
}
|
||||
counterWebSocketAccepts.Add(1)
|
||||
wc := websocket.NetConn(context.Background(), c, websocket.MessageBinary)
|
||||
wc := websocket.NetConn(r.Context(), c, websocket.MessageBinary)
|
||||
brw := bufio.NewReadWriter(bufio.NewReader(wc), bufio.NewWriter(wc))
|
||||
s.Accept(wc, brw, r.RemoteAddr)
|
||||
s.Accept(r.Context(), wc, brw, r.RemoteAddr)
|
||||
})
|
||||
}
|
||||
|
||||
1
cmd/gitops-pusher/.gitignore
vendored
Normal file
1
cmd/gitops-pusher/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
version-cache.json
|
||||
67
cmd/gitops-pusher/cache.go
Normal file
67
cmd/gitops-pusher/cache.go
Normal file
@@ -0,0 +1,67 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Cache contains cached information about the last time this tool was run.
|
||||
//
|
||||
// This is serialized to a JSON file that should NOT be checked into git.
|
||||
// It should be managed with either CI cache tools or stored locally somehow. The
|
||||
// exact mechanism is irrelevant as long as it is consistent.
|
||||
//
|
||||
// This allows gitops-pusher to detect external ACL changes. I'm not sure what to
|
||||
// call this problem, so I've been calling it the "three version problem" in my
|
||||
// notes. The basic problem is that at any given time we only have two versions
|
||||
// of the ACL file at any given point. In order to check if there has been
|
||||
// tampering of the ACL files in the admin panel, we need to have a _third_ version
|
||||
// to compare against.
|
||||
//
|
||||
// In this case I am not storing the old ACL entirely (though that could be a
|
||||
// reasonable thing to add in the future), but only its sha256sum. This allows
|
||||
// us to detect if the shasum in control matches the shasum we expect, and if that
|
||||
// expectation fails, then we can react accordingly.
|
||||
type Cache struct {
|
||||
PrevETag string // Stores the previous ETag of the ACL to allow
|
||||
}
|
||||
|
||||
// Save persists the cache to a given file.
|
||||
func (c *Cache) Save(fname string) error {
|
||||
os.Remove(fname)
|
||||
fout, err := os.Create(fname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fout.Close()
|
||||
|
||||
return json.NewEncoder(fout).Encode(c)
|
||||
}
|
||||
|
||||
// LoadCache loads the cache from a given file.
|
||||
func LoadCache(fname string) (*Cache, error) {
|
||||
var result Cache
|
||||
|
||||
fin, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer fin.Close()
|
||||
|
||||
err = json.NewDecoder(fin).Decode(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// Shuck removes the first and last character of a string, analogous to
|
||||
// shucking off the husk of an ear of corn.
|
||||
func Shuck(s string) string {
|
||||
return s[1 : len(s)-1]
|
||||
}
|
||||
@@ -13,24 +13,140 @@ import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/peterbourgon/ff/v3/ffcli"
|
||||
"github.com/tailscale/hujson"
|
||||
)
|
||||
|
||||
var (
|
||||
policyFname = flag.String("policy-file", "./policy.hujson", "filename for policy file")
|
||||
timeout = flag.Duration("timeout", 5*time.Minute, "timeout for the entire CI run")
|
||||
rootFlagSet = flag.NewFlagSet("gitops-pusher", flag.ExitOnError)
|
||||
policyFname = rootFlagSet.String("policy-file", "./policy.hujson", "filename for policy file")
|
||||
cacheFname = rootFlagSet.String("cache-file", "./version-cache.json", "filename for the previous known version hash")
|
||||
timeout = rootFlagSet.Duration("timeout", 5*time.Minute, "timeout for the entire CI run")
|
||||
githubSyntax = rootFlagSet.Bool("github-syntax", true, "use GitHub Action error syntax (https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-error-message)")
|
||||
|
||||
modifiedExternallyFailure = make(chan struct{}, 1)
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
|
||||
defer cancel()
|
||||
func modifiedExternallyError() {
|
||||
if *githubSyntax {
|
||||
fmt.Printf("::error file=%s,line=1,col=1,title=Policy File Modified Externally::The policy file was modified externally in the admin console.\n", *policyFname)
|
||||
} else {
|
||||
fmt.Printf("The policy file was modified externally in the admin console.\n")
|
||||
}
|
||||
modifiedExternallyFailure <- struct{}{}
|
||||
}
|
||||
|
||||
func apply(cache *Cache, tailnet, apiKey string) func(context.Context, []string) error {
|
||||
return func(ctx context.Context, args []string) error {
|
||||
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
localEtag, err := sumFile(*policyFname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cache.PrevETag == "" {
|
||||
log.Println("no previous etag found, assuming local file is correct and recording that")
|
||||
cache.PrevETag = localEtag
|
||||
}
|
||||
|
||||
log.Printf("control: %s", controlEtag)
|
||||
log.Printf("local: %s", localEtag)
|
||||
log.Printf("cache: %s", cache.PrevETag)
|
||||
|
||||
if cache.PrevETag != controlEtag {
|
||||
modifiedExternallyError()
|
||||
}
|
||||
|
||||
if controlEtag == localEtag {
|
||||
cache.PrevETag = localEtag
|
||||
log.Println("no update needed, doing nothing")
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := applyNewACL(ctx, tailnet, apiKey, *policyFname, controlEtag); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cache.PrevETag = localEtag
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func test(cache *Cache, tailnet, apiKey string) func(context.Context, []string) error {
|
||||
return func(ctx context.Context, args []string) error {
|
||||
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
localEtag, err := sumFile(*policyFname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cache.PrevETag == "" {
|
||||
log.Println("no previous etag found, assuming local file is correct and recording that")
|
||||
cache.PrevETag = localEtag
|
||||
}
|
||||
|
||||
log.Printf("control: %s", controlEtag)
|
||||
log.Printf("local: %s", localEtag)
|
||||
log.Printf("cache: %s", cache.PrevETag)
|
||||
|
||||
if cache.PrevETag != controlEtag {
|
||||
modifiedExternallyError()
|
||||
}
|
||||
|
||||
if controlEtag == localEtag {
|
||||
log.Println("no updates found, doing nothing")
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := testNewACLs(ctx, tailnet, apiKey, *policyFname); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func getChecksums(cache *Cache, tailnet, apiKey string) func(context.Context, []string) error {
|
||||
return func(ctx context.Context, args []string) error {
|
||||
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
localEtag, err := sumFile(*policyFname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cache.PrevETag == "" {
|
||||
log.Println("no previous etag found, assuming local file is correct and recording that")
|
||||
cache.PrevETag = Shuck(localEtag)
|
||||
}
|
||||
|
||||
log.Printf("control: %s", controlEtag)
|
||||
log.Printf("local: %s", localEtag)
|
||||
log.Printf("cache: %s", cache.PrevETag)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
tailnet, ok := os.LookupEnv("TS_TAILNET")
|
||||
if !ok {
|
||||
log.Fatal("set envvar TS_TAILNET to your tailnet's name")
|
||||
@@ -39,72 +155,82 @@ func main() {
|
||||
if !ok {
|
||||
log.Fatal("set envvar TS_API_KEY to your Tailscale API key")
|
||||
}
|
||||
|
||||
switch flag.Arg(0) {
|
||||
case "apply":
|
||||
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
cache, err := LoadCache(*cacheFname)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
cache = &Cache{}
|
||||
} else {
|
||||
log.Fatalf("error loading cache: %v", err)
|
||||
}
|
||||
}
|
||||
defer cache.Save(*cacheFname)
|
||||
|
||||
localEtag, err := sumFile(*policyFname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
applyCmd := &ffcli.Command{
|
||||
Name: "apply",
|
||||
ShortUsage: "gitops-pusher [options] apply",
|
||||
ShortHelp: "Pushes changes to CONTROL",
|
||||
LongHelp: `Pushes changes to CONTROL`,
|
||||
Exec: apply(cache, tailnet, apiKey),
|
||||
}
|
||||
|
||||
log.Printf("control: %s", controlEtag)
|
||||
log.Printf("local: %s", localEtag)
|
||||
testCmd := &ffcli.Command{
|
||||
Name: "test",
|
||||
ShortUsage: "gitops-pusher [options] test",
|
||||
ShortHelp: "Tests ACL changes",
|
||||
LongHelp: "Tests ACL changes",
|
||||
Exec: test(cache, tailnet, apiKey),
|
||||
}
|
||||
|
||||
if controlEtag == localEtag {
|
||||
log.Println("no update needed, doing nothing")
|
||||
os.Exit(0)
|
||||
}
|
||||
cksumCmd := &ffcli.Command{
|
||||
Name: "checksum",
|
||||
ShortUsage: "Shows checksums of ACL files",
|
||||
ShortHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison",
|
||||
LongHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison",
|
||||
Exec: getChecksums(cache, tailnet, apiKey),
|
||||
}
|
||||
|
||||
if err := applyNewACL(ctx, tailnet, apiKey, *policyFname, controlEtag); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
root := &ffcli.Command{
|
||||
ShortUsage: "gitops-pusher [options] <command>",
|
||||
ShortHelp: "Push Tailscale ACLs to CONTROL using a GitOps workflow",
|
||||
Subcommands: []*ffcli.Command{applyCmd, cksumCmd, testCmd},
|
||||
FlagSet: rootFlagSet,
|
||||
}
|
||||
|
||||
case "test":
|
||||
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := root.Parse(os.Args[1:]); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
localEtag, err := sumFile(*policyFname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
|
||||
defer cancel()
|
||||
|
||||
log.Printf("control: %s", controlEtag)
|
||||
log.Printf("local: %s", localEtag)
|
||||
if err := root.Run(ctx); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if controlEtag == localEtag {
|
||||
log.Println("no updates found, doing nothing")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if err := testNewACLs(ctx, tailnet, apiKey, *policyFname); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
default:
|
||||
log.Fatalf("usage: %s [options] <test|apply>", os.Args[0])
|
||||
if len(modifiedExternallyFailure) != 0 {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func sumFile(fname string) (string, error) {
|
||||
fin, err := os.Open(fname)
|
||||
data, err := os.ReadFile(fname)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
formatted, err := hujson.Format(data)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer fin.Close()
|
||||
|
||||
h := sha256.New()
|
||||
_, err = io.Copy(h, fin)
|
||||
_, err = h.Write(formatted)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return fmt.Sprintf("\"%x\"", h.Sum(nil)), nil
|
||||
return fmt.Sprintf("%x", h.Sum(nil)), nil
|
||||
}
|
||||
|
||||
func applyNewACL(ctx context.Context, tailnet, apiKey, policyFname, oldEtag string) error {
|
||||
@@ -121,7 +247,7 @@ func applyNewACL(ctx context.Context, tailnet, apiKey, policyFname, oldEtag stri
|
||||
|
||||
req.SetBasicAuth(apiKey, "")
|
||||
req.Header.Set("Content-Type", "application/hujson")
|
||||
req.Header.Set("If-Match", oldEtag)
|
||||
req.Header.Set("If-Match", `"`+oldEtag+`"`)
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
@@ -177,13 +303,15 @@ func testNewACLs(ctx context.Context, tailnet, apiKey, policyFname string) error
|
||||
return err
|
||||
}
|
||||
|
||||
if len(ate.Data) != 0 {
|
||||
if len(ate.Message) != 0 || len(ate.Data) != 0 {
|
||||
return ate
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var lineColMessageSplit = regexp.MustCompile(`line ([0-9]+), column ([0-9]+): (.*)$`)
|
||||
|
||||
type ACLTestError struct {
|
||||
Message string `json:"message"`
|
||||
Data []ACLTestErrorDetail `json:"data"`
|
||||
@@ -192,7 +320,17 @@ type ACLTestError struct {
|
||||
func (ate ACLTestError) Error() string {
|
||||
var sb strings.Builder
|
||||
|
||||
fmt.Fprintln(&sb, ate.Message)
|
||||
if *githubSyntax && lineColMessageSplit.MatchString(ate.Message) {
|
||||
sp := lineColMessageSplit.FindStringSubmatch(ate.Message)
|
||||
|
||||
line := sp[1]
|
||||
col := sp[2]
|
||||
msg := sp[3]
|
||||
|
||||
fmt.Fprintf(&sb, "::error file=%s,line=%s,col=%s::%s", *policyFname, line, col, msg)
|
||||
} else {
|
||||
fmt.Fprintln(&sb, ate.Message)
|
||||
}
|
||||
fmt.Fprintln(&sb)
|
||||
|
||||
for _, data := range ate.Data {
|
||||
@@ -231,5 +369,5 @@ func getACLETag(ctx context.Context, tailnet, apiKey string) (string, error) {
|
||||
return "", fmt.Errorf("wanted HTTP status code %d but got %d", want, got)
|
||||
}
|
||||
|
||||
return resp.Header.Get("ETag"), nil
|
||||
return Shuck(resp.Header.Get("ETag")), nil
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
|
||||
"github.com/peterbourgon/ff/v3/ffcli"
|
||||
"tailscale.com/atomicfile"
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
@@ -46,7 +45,7 @@ func runCert(ctx context.Context, args []string) error {
|
||||
if certArgs.serve {
|
||||
s := &http.Server{
|
||||
TLSConfig: &tls.Config{
|
||||
GetCertificate: tailscale.GetCertificate,
|
||||
GetCertificate: localClient.GetCertificate,
|
||||
},
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.TLS != nil && !strings.Contains(r.Host, ".") && r.Method == "GET" {
|
||||
@@ -90,7 +89,7 @@ func runCert(ctx context.Context, args []string) error {
|
||||
certArgs.certFile = domain + ".crt"
|
||||
certArgs.keyFile = domain + ".key"
|
||||
}
|
||||
certPEM, keyPEM, err := tailscale.CertPair(ctx, domain)
|
||||
certPEM, keyPEM, err := localClient.CertPair(ctx, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -126,8 +126,10 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error {
|
||||
printf("\t* IPv6: yes, %v\n", report.GlobalV6)
|
||||
} else if report.IPv6 {
|
||||
printf("\t* IPv6: (no addr found)\n")
|
||||
} else if report.OSHasIPv6 {
|
||||
printf("\t* IPv6: no, but OS has support\n")
|
||||
} else {
|
||||
printf("\t* IPv6: no\n")
|
||||
printf("\t* IPv6: no, unavailable in OS\n")
|
||||
}
|
||||
printf("\t* MappingVariesByDestIP: %v\n", report.MappingVariesByDestIP)
|
||||
printf("\t* HairPinning: %v\n", report.HairPinning)
|
||||
|
||||
@@ -208,12 +208,20 @@ func qnapAuthn(r *http.Request) (string, *qnapAuthResponse, error) {
|
||||
return "", nil, err
|
||||
}
|
||||
token, err := r.Cookie("qtoken")
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
if err == nil {
|
||||
return qnapAuthnQtoken(r, user.Value, token.Value)
|
||||
}
|
||||
sid, err := r.Cookie("NAS_SID")
|
||||
if err == nil {
|
||||
return qnapAuthnSid(r, user.Value, sid.Value)
|
||||
}
|
||||
return "", nil, fmt.Errorf("not authenticated by any mechanism")
|
||||
}
|
||||
|
||||
func qnapAuthnQtoken(r *http.Request, user, token string) (string, *qnapAuthResponse, error) {
|
||||
query := url.Values{
|
||||
"qtoken": []string{token.Value},
|
||||
"user": []string{user.Value},
|
||||
"qtoken": []string{token},
|
||||
"user": []string{user},
|
||||
}
|
||||
u := url.URL{
|
||||
Scheme: r.URL.Scheme,
|
||||
@@ -221,7 +229,26 @@ func qnapAuthn(r *http.Request) (string, *qnapAuthResponse, error) {
|
||||
Path: "/cgi-bin/authLogin.cgi",
|
||||
RawQuery: query.Encode(),
|
||||
}
|
||||
resp, err := http.Get(u.String())
|
||||
|
||||
return qnapAuthnFinish(user, u.String())
|
||||
}
|
||||
|
||||
func qnapAuthnSid(r *http.Request, user, sid string) (string, *qnapAuthResponse, error) {
|
||||
query := url.Values{
|
||||
"sid": []string{sid},
|
||||
}
|
||||
u := url.URL{
|
||||
Scheme: r.URL.Scheme,
|
||||
Host: r.URL.Host,
|
||||
Path: "/cgi-bin/authLogin.cgi",
|
||||
RawQuery: query.Encode(),
|
||||
}
|
||||
|
||||
return qnapAuthnFinish(user, u.String())
|
||||
}
|
||||
|
||||
func qnapAuthnFinish(user, url string) (string, *qnapAuthResponse, error) {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
@@ -237,7 +264,7 @@ func qnapAuthn(r *http.Request) (string, *qnapAuthResponse, error) {
|
||||
if authResp.AuthPassed == 0 {
|
||||
return "", nil, fmt.Errorf("not authenticated")
|
||||
}
|
||||
return user.Value, authResp, nil
|
||||
return user, authResp, nil
|
||||
}
|
||||
|
||||
func synoAuthn() (string, error) {
|
||||
|
||||
@@ -129,7 +129,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
💣 golang.zx2c4.com/wireguard/tun from golang.zx2c4.com/wireguard/device+
|
||||
W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/cmd/tailscaled+
|
||||
gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/tcpip+
|
||||
💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip/stack
|
||||
gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/bufferv2
|
||||
💣 gvisor.dev/gvisor/pkg/bufferv2 from gvisor.dev/gvisor/pkg/tcpip+
|
||||
gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/refs+
|
||||
💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+
|
||||
gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log
|
||||
@@ -143,7 +144,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/linewriter+
|
||||
gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/header+
|
||||
gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack
|
||||
💣 gvisor.dev/gvisor/pkg/tcpip/buffer from gvisor.dev/gvisor/pkg/tcpip/header+
|
||||
gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack+
|
||||
gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+
|
||||
gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+
|
||||
@@ -152,6 +152,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
|
||||
gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+
|
||||
gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+
|
||||
gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+
|
||||
gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+
|
||||
gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/net/tstun+
|
||||
gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack
|
||||
gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+
|
||||
|
||||
@@ -404,7 +404,6 @@ func run() error {
|
||||
// want to keep running.
|
||||
signal.Ignore(syscall.SIGPIPE)
|
||||
go func() {
|
||||
defer dialer.Close()
|
||||
select {
|
||||
case s := <-interrupt:
|
||||
logf("tailscaled got signal %v; shutting down", s)
|
||||
@@ -437,6 +436,7 @@ func run() error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("safesocket.Listen: %v", err)
|
||||
}
|
||||
defer dialer.Close()
|
||||
|
||||
err = srv.Run(ctx, ln)
|
||||
// Cancelation is not an error: it is the only way to stop ipnserver.
|
||||
@@ -515,7 +515,7 @@ func tryEngine(logf logger.Logf, linkMon *monitor.Mon, dialer *tsdial.Dialer, na
|
||||
} else {
|
||||
dev, devName, err := tstun.New(logf, name)
|
||||
if err != nil {
|
||||
tstun.Diagnose(logf, name)
|
||||
tstun.Diagnose(logf, name, err)
|
||||
return nil, false, fmt.Errorf("tstun.New(%q): %w", name, err)
|
||||
}
|
||||
conf.Tun = dev
|
||||
|
||||
4
cmd/tsconnect/.gitignore
vendored
Normal file
4
cmd/tsconnect/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
src/wasm_exec.js
|
||||
src/main.wasm
|
||||
node_modules/
|
||||
dist/
|
||||
190
cmd/tsconnect/build.go
Normal file
190
cmd/tsconnect/build.go
Normal file
@@ -0,0 +1,190 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/andybalholm/brotli"
|
||||
esbuild "github.com/evanw/esbuild/pkg/api"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
func runBuild() {
|
||||
buildOptions, err := commonSetup(prodMode)
|
||||
if err != nil {
|
||||
log.Fatalf("Cannot setup: %v", err)
|
||||
}
|
||||
|
||||
if err := cleanDist(); err != nil {
|
||||
log.Fatalf("Cannot clean %s: %v", *distDir, err)
|
||||
}
|
||||
|
||||
buildOptions.Write = true
|
||||
buildOptions.MinifyWhitespace = true
|
||||
buildOptions.MinifyIdentifiers = true
|
||||
buildOptions.MinifySyntax = true
|
||||
|
||||
buildOptions.EntryNames = "[dir]/[name]-[hash]"
|
||||
buildOptions.AssetNames = "[name]-[hash]"
|
||||
buildOptions.Metafile = true
|
||||
|
||||
log.Printf("Running esbuild...\n")
|
||||
result := esbuild.Build(*buildOptions)
|
||||
if len(result.Errors) > 0 {
|
||||
log.Printf("ESBuild Error:\n")
|
||||
for _, e := range result.Errors {
|
||||
log.Printf("%v", e)
|
||||
}
|
||||
log.Fatal("Build failed")
|
||||
}
|
||||
if len(result.Warnings) > 0 {
|
||||
log.Printf("ESBuild Warnings:\n")
|
||||
for _, w := range result.Warnings {
|
||||
log.Printf("%v", w)
|
||||
}
|
||||
}
|
||||
|
||||
// Preserve build metadata so we can extract hashed file names for serving.
|
||||
metadataBytes, err := fixEsbuildMetadataPaths(result.Metafile)
|
||||
if err != nil {
|
||||
log.Fatalf("Cannot fix esbuild metadata paths: %v", err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(*distDir, "/esbuild-metadata.json"), metadataBytes, 0666); err != nil {
|
||||
log.Fatalf("Cannot write metadata: %v", err)
|
||||
}
|
||||
|
||||
if er := precompressDist(); err != nil {
|
||||
log.Fatalf("Cannot precompress resources: %v", er)
|
||||
}
|
||||
}
|
||||
|
||||
// fixEsbuildMetadataPaths re-keys the esbuild metadata file to use paths
|
||||
// relative to the dist directory (it normally uses paths relative to the cwd,
|
||||
// which are akward if we're running with a different cwd at serving time).
|
||||
func fixEsbuildMetadataPaths(metadataStr string) ([]byte, error) {
|
||||
var metadata EsbuildMetadata
|
||||
if err := json.Unmarshal([]byte(metadataStr), &metadata); err != nil {
|
||||
return nil, fmt.Errorf("Cannot parse metadata: %w", err)
|
||||
}
|
||||
distAbsPath, err := filepath.Abs(*distDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot get absolute path from %s: %w", *distDir, err)
|
||||
}
|
||||
for outputPath, output := range metadata.Outputs {
|
||||
outputAbsPath, err := filepath.Abs(outputPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot get absolute path from %s: %w", outputPath, err)
|
||||
}
|
||||
outputRelPath, err := filepath.Rel(distAbsPath, outputAbsPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot get relative path from %s: %w", outputRelPath, err)
|
||||
}
|
||||
delete(metadata.Outputs, outputPath)
|
||||
metadata.Outputs[outputRelPath] = output
|
||||
}
|
||||
return json.Marshal(metadata)
|
||||
}
|
||||
|
||||
// cleanDist removes files from the dist build directory, except the placeholder
|
||||
// one that we keep to make sure Git still creates the directory.
|
||||
func cleanDist() error {
|
||||
log.Printf("Cleaning %s...\n", *distDir)
|
||||
files, err := os.ReadDir(*distDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return os.MkdirAll(*distDir, 0755)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if file.Name() != "placeholder" {
|
||||
if err := os.Remove(filepath.Join(*distDir, file.Name())); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func precompressDist() error {
|
||||
log.Printf("Pre-compressing files in %s/...\n", *distDir)
|
||||
var eg errgroup.Group
|
||||
err := fs.WalkDir(os.DirFS(*distDir), ".", func(p string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
if !compressibleExtensions[filepath.Ext(p)] {
|
||||
return nil
|
||||
}
|
||||
p = path.Join(*distDir, p)
|
||||
log.Printf("Pre-compressing %v\n", p)
|
||||
|
||||
eg.Go(func() error {
|
||||
return precompress(p)
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
var compressibleExtensions = map[string]bool{
|
||||
".js": true,
|
||||
".css": true,
|
||||
".wasm": true,
|
||||
}
|
||||
|
||||
func precompress(path string) error {
|
||||
contents, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fi, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writeCompressed(contents, func(w io.Writer) (io.WriteCloser, error) {
|
||||
return gzip.NewWriterLevel(w, gzip.BestCompression)
|
||||
}, path+".gz", fi.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeCompressed(contents, func(w io.Writer) (io.WriteCloser, error) {
|
||||
return brotli.NewWriterLevel(w, brotli.BestCompression), nil
|
||||
}, path+".br", fi.Mode())
|
||||
}
|
||||
|
||||
func writeCompressed(contents []byte, compressedWriterCreator func(io.Writer) (io.WriteCloser, error), outputPath string, outputMode fs.FileMode) error {
|
||||
var buf bytes.Buffer
|
||||
compressedWriter, err := compressedWriterCreator(&buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := compressedWriter.Write(contents); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := compressedWriter.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(outputPath, buf.Bytes(), outputMode)
|
||||
}
|
||||
114
cmd/tsconnect/common.go
Normal file
114
cmd/tsconnect/common.go
Normal file
@@ -0,0 +1,114 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
|
||||
esbuild "github.com/evanw/esbuild/pkg/api"
|
||||
)
|
||||
|
||||
const (
|
||||
devMode = true
|
||||
prodMode = false
|
||||
)
|
||||
|
||||
// commonSetup performs setup that is common to both dev and build modes.
|
||||
func commonSetup(dev bool) (*esbuild.BuildOptions, error) {
|
||||
// Change cwd to to where this file lives -- that's where all inputs for
|
||||
// esbuild and other build steps live.
|
||||
if _, filename, _, ok := runtime.Caller(0); ok {
|
||||
if err := os.Chdir(path.Dir(filename)); err != nil {
|
||||
return nil, fmt.Errorf("Cannot change cwd: %w", err)
|
||||
}
|
||||
}
|
||||
if err := buildDeps(dev); err != nil {
|
||||
return nil, fmt.Errorf("Cannot build deps: %w", err)
|
||||
}
|
||||
|
||||
return &esbuild.BuildOptions{
|
||||
EntryPoints: []string{"src/index.js", "src/index.css"},
|
||||
Loader: map[string]esbuild.Loader{".wasm": esbuild.LoaderFile},
|
||||
Outdir: *distDir,
|
||||
Bundle: true,
|
||||
Sourcemap: esbuild.SourceMapLinked,
|
||||
LogLevel: esbuild.LogLevelInfo,
|
||||
Define: map[string]string{"DEBUG": strconv.FormatBool(dev)},
|
||||
Target: esbuild.ES2017,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// buildDeps builds the static assets that are needed for the server (except for
|
||||
// JS/CSS bundling, which is handled by esbuild).
|
||||
func buildDeps(dev bool) error {
|
||||
if err := copyWasmExec(); err != nil {
|
||||
return fmt.Errorf("Cannot copy wasm_exec.js: %w", err)
|
||||
}
|
||||
if err := buildWasm(dev); err != nil {
|
||||
return fmt.Errorf("Cannot build main.wasm: %w", err)
|
||||
}
|
||||
if err := installJSDeps(); err != nil {
|
||||
return fmt.Errorf("Cannot install JS deps: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyWasmExec grabs the current wasm_exec.js runtime helper library from the
|
||||
// Go toolchain.
|
||||
func copyWasmExec() error {
|
||||
log.Printf("Copying wasm_exec.js...\n")
|
||||
wasmExecSrcPath := filepath.Join(runtime.GOROOT(), "misc", "wasm", "wasm_exec.js")
|
||||
wasmExecDstPath := filepath.Join("src", "wasm_exec.js")
|
||||
contents, err := os.ReadFile(wasmExecSrcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(wasmExecDstPath, contents, 0600)
|
||||
}
|
||||
|
||||
// buildWasm builds the Tailscale wasm binary with the toolchain's own `go`
// command (forced to GOOS=js/GOARCH=wasm) and places it where the JS can
// load it (src/main.wasm).
func buildWasm(dev bool) error {
	log.Printf("Building wasm...\n")
	args := []string{"build", "-tags", "tailscale_go,osusergo,netgo,nethttpomithttp2,omitidna,omitpemdecrypt"}
	if !dev {
		// Omit long paths and debug symbols in release builds, to reduce the
		// generated WASM binary size.
		args = append(args, "-trimpath", "-ldflags", "-s -w")
	}
	args = append(args, "-o", "src/main.wasm", "./wasm")

	goBin := filepath.Join(runtime.GOROOT(), "bin", "go")
	cmd := exec.Command(goBin, args...)
	cmd.Env = append(os.Environ(), "GOOS=js", "GOARCH=wasm")
	// Surface the build's output directly on our own stdio.
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
|
||||
|
||||
// installJSDeps installs the JavaScript dependencies specified by package.json
// by invoking yarn; yarn's combined output is logged only on failure.
func installJSDeps() error {
	log.Printf("Installing JS deps...\n")
	out, err := exec.Command("yarn").CombinedOutput()
	if err != nil {
		log.Printf("yarn failed: %s", out)
	}
	return err
}
|
||||
|
||||
// EsbuildMetadata is the subset of metadata struct (described by
// https://esbuild.github.io/api/#metafile) that we care about for mapping
// from entry points to hashed file names.
type EsbuildMetadata struct {
	// Outputs maps output file paths (relative to the outdir) to the entry
	// point that produced each one; all other metafile fields are ignored.
	Outputs map[string]struct {
		EntryPoint string `json:"entryPoint,omitempty"`
	} `json:"outputs,omitempty"`
}
|
||||
38
cmd/tsconnect/dev.go
Normal file
38
cmd/tsconnect/dev.go
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
esbuild "github.com/evanw/esbuild/pkg/api"
|
||||
)
|
||||
|
||||
// runDev serves the client in development mode: it performs the common build
// setup and then runs an esbuild dev server on *addr, which re-bundles JS/CSS
// on demand so changes are picked up with a page reload.
func runDev() {
	buildOptions, err := commonSetup(devMode)
	if err != nil {
		log.Fatalf("Cannot setup: %v", err)
	}
	// esbuild's ServeOptions wants host and numeric port separately.
	host, portStr, err := net.SplitHostPort(*addr)
	if err != nil {
		log.Fatalf("Cannot parse addr: %v", err)
	}
	port, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		log.Fatalf("Cannot parse port: %v", err)
	}
	// Servedir "./" lets esbuild also serve index.html and other static
	// files next to this source file.
	result, err := esbuild.Serve(esbuild.ServeOptions{
		Port:     uint16(port),
		Host:     host,
		Servedir: "./",
	}, *buildOptions)
	if err != nil {
		log.Fatalf("Cannot start esbuild server: %v", err)
	}
	log.Printf("Listening on http://%s:%d\n", result.Host, result.Port)
	// Block forever; the esbuild server runs until the process is killed.
	result.Wait()
}
|
||||
2
cmd/tsconnect/dist/placeholder
vendored
Normal file
2
cmd/tsconnect/dist/placeholder
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
This is here to make sure the dist/ directory exists for the go:embed command
|
||||
in serve.go.
|
||||
16
cmd/tsconnect/index.html
Normal file
16
cmd/tsconnect/index.html
Normal file
@@ -0,0 +1,16 @@
|
||||
<!DOCTYPE html>
<!-- Shell page for the Tailscale Connect client. The dist/ asset references
     below are rewritten to content-hashed names in production serve mode
     (see serve.go); dist/index.js bootstraps the WASM client. -->
<html>
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <link rel="stylesheet" type="text/css" href="dist/index.css" />
  </head>
  <body>
    <!-- Header: title plus a #state element updated from notifier.js. -->
    <div id="header">
      <h1>Tailscale Connect</h1>
      <div id="state">Loading…</div>
    </div>
    <!-- Populated by ssh.js with the list of SSH-capable peers. -->
    <div id="peers"></div>
    <script src="dist/index.js"></script>
  </body>
</html>
|
||||
12
cmd/tsconnect/package.json
Normal file
12
cmd/tsconnect/package.json
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"name": "@tailscale/ssh",
|
||||
"version": "0.0.1",
|
||||
"devDependencies": {
|
||||
"qrcode": "^1.5.0",
|
||||
"xterm": "^4.18.0"
|
||||
},
|
||||
"prettier": {
|
||||
"semi": false,
|
||||
"printWidth": 80
|
||||
}
|
||||
}
|
||||
148
cmd/tsconnect/serve.go
Normal file
148
cmd/tsconnect/serve.go
Normal file
@@ -0,0 +1,148 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"tailscale.com/tsweb"
|
||||
)
|
||||
|
||||
//go:embed index.html
|
||||
var embeddedFS embed.FS
|
||||
|
||||
//go:embed dist/*
|
||||
var embeddedDistFS embed.FS
|
||||
|
||||
var serveStartTime = time.Now()
|
||||
|
||||
func runServe() {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
var distFS fs.FS
|
||||
if *distDir == "./dist" {
|
||||
var err error
|
||||
distFS, err = fs.Sub(embeddedDistFS, "dist")
|
||||
if err != nil {
|
||||
log.Fatalf("Could not drop dist/ prefix from embedded FS: %v", err)
|
||||
}
|
||||
} else {
|
||||
distFS = os.DirFS(*distDir)
|
||||
}
|
||||
|
||||
indexBytes, err := generateServeIndex(distFS)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not generate index.html: %v", err)
|
||||
}
|
||||
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
http.ServeContent(w, r, "index.html", serveStartTime, bytes.NewReader(indexBytes))
|
||||
}))
|
||||
mux.Handle("/dist/", http.StripPrefix("/dist/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
handleServeDist(w, r, distFS)
|
||||
})))
|
||||
tsweb.Debugger(mux)
|
||||
|
||||
log.Printf("Listening on %s", *addr)
|
||||
err = http.ListenAndServe(*addr, mux)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func generateServeIndex(distFS fs.FS) ([]byte, error) {
|
||||
log.Printf("Generating index.html...\n")
|
||||
rawIndexBytes, err := embeddedFS.ReadFile("index.html")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not read index.html: %w", err)
|
||||
}
|
||||
|
||||
esbuildMetadataFile, err := distFS.Open("esbuild-metadata.json")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not open esbuild-metadata.json: %w", err)
|
||||
}
|
||||
defer esbuildMetadataFile.Close()
|
||||
esbuildMetadataBytes, err := ioutil.ReadAll(esbuildMetadataFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not read esbuild-metadata.json: %w", err)
|
||||
}
|
||||
var esbuildMetadata EsbuildMetadata
|
||||
if err := json.Unmarshal(esbuildMetadataBytes, &esbuildMetadata); err != nil {
|
||||
return nil, fmt.Errorf("Could not parse esbuild-metadata.json: %w", err)
|
||||
}
|
||||
entryPointsToHashedDistPaths := make(map[string]string)
|
||||
for outputPath, output := range esbuildMetadata.Outputs {
|
||||
if output.EntryPoint != "" {
|
||||
entryPointsToHashedDistPaths[output.EntryPoint] = path.Join("dist", outputPath)
|
||||
}
|
||||
}
|
||||
|
||||
indexBytes := rawIndexBytes
|
||||
for entryPointPath, defaultDistPath := range entryPointsToDefaultDistPaths {
|
||||
hashedDistPath := entryPointsToHashedDistPaths[entryPointPath]
|
||||
if hashedDistPath != "" {
|
||||
indexBytes = bytes.ReplaceAll(indexBytes, []byte(defaultDistPath), []byte(hashedDistPath))
|
||||
}
|
||||
}
|
||||
|
||||
return indexBytes, nil
|
||||
}
|
||||
|
||||
// entryPointsToDefaultDistPaths maps esbuild entry points to the asset paths
// that index.html references by default; generateServeIndex replaces these
// with the hashed output paths from the esbuild metafile.
var entryPointsToDefaultDistPaths = map[string]string{
	"src/index.css": "dist/index.css",
	"src/index.js":  "dist/index.js",
}
|
||||
|
||||
func handleServeDist(w http.ResponseWriter, r *http.Request, distFS fs.FS) {
|
||||
path := r.URL.Path
|
||||
var f fs.File
|
||||
// Prefer pre-compressed versions generated during the build step.
|
||||
if tsweb.AcceptsEncoding(r, "br") {
|
||||
if brotliFile, err := distFS.Open(path + ".br"); err == nil {
|
||||
f = brotliFile
|
||||
w.Header().Set("Content-Encoding", "br")
|
||||
}
|
||||
}
|
||||
if f == nil && tsweb.AcceptsEncoding(r, "gzip") {
|
||||
if gzipFile, err := distFS.Open(path + ".gz"); err == nil {
|
||||
f = gzipFile
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
}
|
||||
}
|
||||
|
||||
if f == nil {
|
||||
if rawFile, err := distFS.Open(path); err == nil {
|
||||
f = rawFile
|
||||
} else {
|
||||
http.Error(w, err.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// fs.File does not claim to implement Seeker, but in practice it does.
|
||||
fSeeker, ok := f.(io.ReadSeeker)
|
||||
if !ok {
|
||||
http.Error(w, "Not seekable", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Aggressively cache static assets, since we cache-bust our assets with
|
||||
// hashed filenames.
|
||||
w.Header().Set("Cache-Control", "public, max-age=31535996")
|
||||
w.Header().Set("Vary", "Accept-Encoding")
|
||||
|
||||
http.ServeContent(w, r, path, serveStartTime, fSeeker)
|
||||
}
|
||||
91
cmd/tsconnect/src/index.css
Normal file
91
cmd/tsconnect/src/index.css
Normal file
@@ -0,0 +1,91 @@
|
||||
/* Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved. */
/* Use of this source code is governed by a BSD-style */
/* license that can be found in the LICENSE file. */

/* Styles for the Tailscale Connect shell page (see index.html). */

@import "xterm/css/xterm.css";

html {
  background: #fff;
  font-family: ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
}

body {
  margin: 0;
}

button {
  font-family: inherit;
  border: solid 1px #ccc;
  background: #fff;
  color: #000;
  padding: 4px 8px;
  border-radius: 4px;
}

/* Top bar: title, connection state, and (when logged in) logout button. */
#header {
  background: #f7f5f4;
  border-bottom: 1px solid #eeebea;
  padding: 12px;
  display: flex;
  align-items: center;
}

#header h1 {
  margin: 0;
  flex-grow: 1;
}

#header #state {
  padding: 0 8px;
  color: #444342;
}

/* Container for the SSH-capable peer list (populated by ssh.js). */
#peers {
  box-sizing: border-box;
  width: 100%;
  padding: 12px;
}

.login {
  text-align: center;
}

.logout {
  font-weight: bold;
}

.peer {
  display: flex;
  justify-content: space-between;
  padding: 2px;
}

.peer:hover {
  background: #eee;
}

.peer .name {
  font-family: monospace;
}

.peer .ssh {
  background-color: #cbf4c9;
}

.term-container {
  padding: 12px;
}

/* Doubled class raises specificity over xterm's own viewport styles. */
.xterm-viewport.xterm-viewport {
  scrollbar-width: thin;
}
.xterm-viewport::-webkit-scrollbar {
  width: 10px;
}
.xterm-viewport::-webkit-scrollbar-track {
  opacity: 0;
}
.xterm-viewport::-webkit-scrollbar-thumb {
  min-height: 20px;
  background-color: #ffffff20;
}
|
||||
26
cmd/tsconnect/src/index.js
Normal file
26
cmd/tsconnect/src/index.js
Normal file
@@ -0,0 +1,26 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Entry point for the Tailscale Connect client: loads the Go WASM runtime
// shim, streams and instantiates main.wasm, then creates an IPN instance
// wired up to the DOM notification callbacks.

import "./wasm_exec"
import wasmUrl from "./main.wasm"
import { notifyState, notifyNetMap, notifyBrowseToURL } from "./notifier"
import { sessionStateStorage } from "./js-state-store"

// window.Go is defined by wasm_exec.js, imported above.
const go = new window.Go()
WebAssembly.instantiateStreaming(
  fetch(`./dist/${wasmUrl}`),
  go.importObject
).then((result) => {
  go.run(result.instance)
  // newIPN is installed on the global scope by the Go side (wasm_js.go).
  // DEBUG is substituted at bundle time by esbuild (see build setup).
  const ipn = newIPN({
    // Persist IPN state in sessionStorage in development, so that we don't need
    // to re-authorize every time we reload the page.
    stateStorage: DEBUG ? sessionStateStorage : undefined,
  })
  ipn.run({
    notifyState: notifyState.bind(null, ipn),
    notifyNetMap: notifyNetMap.bind(null, ipn),
    notifyBrowseToURL: notifyBrowseToURL.bind(null, ipn),
  })
})
|
||||
16
cmd/tsconnect/src/js-state-store.js
Normal file
16
cmd/tsconnect/src/js-state-store.js
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/**
 * @fileoverview Callbacks used by jsStateStore to persist IPN state.
 */

const stateKey = (id) => `ipn-state-${id}`

/**
 * State storage backed by window.sessionStorage: values survive page reloads
 * in the same tab. Missing keys read back as the empty string.
 */
export const sessionStateStorage = {
  setState(id, value) {
    window.sessionStorage[stateKey(id)] = value
  },
  getState(id) {
    return window.sessionStorage[stateKey(id)] || ""
  },
}
|
||||
71
cmd/tsconnect/src/login.js
Normal file
71
cmd/tsconnect/src/login.js
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/**
 * @fileoverview DOM helpers for the interactive login flow: a login panel
 * showing the auth URL as a link and QR code, and a logout button in the
 * header.
 */

import QRCode from "qrcode"

/**
 * Shows (replacing any previous one) a login panel with a link to url, plus
 * a QR code rendering of the same URL for logging in from another device.
 */
export async function showLoginURL(url) {
  if (loginNode) {
    loginNode.remove()
  }
  loginNode = document.createElement("div")
  loginNode.className = "login"
  const linkNode = document.createElement("a")
  linkNode.href = url
  linkNode.target = "_blank"
  loginNode.appendChild(linkNode)

  try {
    const dataURL = await QRCode.toDataURL(url, { width: 512 })
    const imageNode = document.createElement("img")
    imageNode.src = dataURL
    imageNode.width = 256
    imageNode.height = 256
    imageNode.border = "0"
    linkNode.appendChild(imageNode)
  } catch (err) {
    // QR generation is best-effort; the plain link still works without it.
    console.error("Could not generate QR code:", err)
  }

  linkNode.appendChild(document.createElement("br"))
  linkNode.appendChild(document.createTextNode(url))

  document.body.appendChild(loginNode)
}

/** Removes the login panel, if present. */
export function hideLoginURL() {
  if (!loginNode) {
    return
  }
  loginNode.remove()
  loginNode = undefined
}

// Currently displayed login panel (undefined when hidden).
let loginNode

/**
 * Shows (replacing any previous one) a logout button in the header;
 * clicking it triggers ipn.logout(). The listener is one-shot ({once:true}).
 */
export function showLogoutButton(ipn) {
  if (logoutButtonNode) {
    logoutButtonNode.remove()
  }
  logoutButtonNode = document.createElement("button")
  logoutButtonNode.className = "logout"
  logoutButtonNode.textContent = "Logout"
  logoutButtonNode.addEventListener(
    "click",
    () => {
      ipn.logout()
    },
    { once: true }
  )
  document.getElementById("header").appendChild(logoutButtonNode)
}

/** Removes the logout button, if present. */
export function hideLogoutButton() {
  if (!logoutButtonNode) {
    return
  }
  logoutButtonNode.remove()
  logoutButtonNode = undefined
}

// Currently displayed logout button (undefined when hidden).
let logoutButtonNode
|
||||
75
cmd/tsconnect/src/notifier.js
Normal file
75
cmd/tsconnect/src/notifier.js
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

import {
  showLoginURL,
  hideLoginURL,
  showLogoutButton,
  hideLogoutButton,
} from "./login"
import { showSSHPeers, hideSSHPeers } from "./ssh"

/**
 * @fileoverview Notification callback functions (bridged from ipn.Notify)
 */

/** Mirrors values from ipn/backend.go */
const State = {
  NoState: 0,
  InUseOtherUser: 1,
  NeedsLogin: 2,
  NeedsMachineAuth: 3,
  Stopped: 4,
  Starting: 5,
  Running: 6,
}

/**
 * Updates the header state label and shows/hides the login and logout UI to
 * match the backend state. NeedsLogin additionally kicks off ipn.login().
 */
export function notifyState(ipn, state) {
  let stateLabel
  switch (state) {
    case State.NoState:
      stateLabel = "Initializing…"
      break
    case State.InUseOtherUser:
      stateLabel = "In-use by another user"
      break
    case State.NeedsLogin:
      stateLabel = "Needs Login"
      hideLogoutButton()
      hideSSHPeers()
      ipn.login()
      break
    case State.NeedsMachineAuth:
      stateLabel = "Needs authorization"
      break
    case State.Stopped:
      stateLabel = "Stopped"
      hideLogoutButton()
      hideSSHPeers()
      break
    case State.Starting:
      stateLabel = "Starting…"
      break
    case State.Running:
      stateLabel = "Running"
      hideLoginURL()
      showLogoutButton(ipn)
      break
  }
  // Unknown states fall through with stateLabel undefined; show nothing.
  const stateNode = document.getElementById("state")
  stateNode.textContent = stateLabel ?? ""
}

/**
 * Receives the JSON-serialized netmap from the Go side and refreshes the
 * SSH peer list.
 */
export function notifyNetMap(ipn, netMapStr) {
  const netMap = JSON.parse(netMapStr)
  if (DEBUG) {
    console.log("Received net map: " + JSON.stringify(netMap, null, 2))
  }

  showSSHPeers(netMap.peers, ipn)
}

/** Displays the interactive login URL/QR panel for url. */
export function notifyBrowseToURL(ipn, url) {
  showLoginURL(url)
}
|
||||
77
cmd/tsconnect/src/ssh.js
Normal file
77
cmd/tsconnect/src/ssh.js
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/**
 * @fileoverview Renders the SSH peer list and opens xterm.js-based SSH
 * sessions via the WASM side's ipn.ssh bridge.
 */

import { Terminal } from "xterm"

/**
 * Rebuilds the #peers list with one row (name + SSH button) per peer that
 * has Tailscale SSH enabled.
 */
export function showSSHPeers(peers, ipn) {
  const peersNode = document.getElementById("peers")
  peersNode.innerHTML = ""

  const sshPeers = peers.filter((p) => p.tailscaleSSHEnabled)
  if (!sshPeers.length) {
    peersNode.textContent = "No machines have Tailscale SSH installed."
    return
  }

  for (const peer of sshPeers) {
    const peerNode = document.createElement("div")
    peerNode.className = "peer"
    const nameNode = document.createElement("div")
    nameNode.className = "name"
    nameNode.textContent = peer.name
    peerNode.appendChild(nameNode)

    const sshButtonNode = document.createElement("button")
    sshButtonNode.className = "ssh"
    sshButtonNode.addEventListener("click", function () {
      ssh(peer.name, ipn)
    })
    sshButtonNode.textContent = "SSH"
    peerNode.appendChild(sshButtonNode)

    peersNode.appendChild(peerNode)
  }
}

/** Clears the peer list. */
export function hideSSHPeers() {
  const peersNode = document.getElementById("peers")
  peersNode.innerHTML = ""
}

/**
 * Opens a terminal and starts an SSH session to hostname through the WASM
 * IPN bridge; the terminal is torn down when the session ends.
 */
function ssh(hostname, ipn) {
  const termContainerNode = document.createElement("div")
  termContainerNode.className = "term-container"
  document.body.appendChild(termContainerNode)

  const term = new Terminal({
    cursorBlink: true,
  })
  term.open(termContainerNode)

  // Cancel wheel events from scrolling the page if the terminal has scrollback
  termContainerNode.addEventListener("wheel", (e) => {
    if (term.buffer.active.baseY > 0) {
      e.preventDefault()
    }
  })

  // The Go side supplies the input hook asynchronously via the third ipn.ssh
  // argument; buffer terminal input through it once set.
  let onDataHook
  term.onData((e) => {
    onDataHook?.(e)
  })

  term.focus()

  ipn.ssh(
    hostname,
    (input) => term.write(input),
    (hook) => (onDataHook = hook),
    term.rows,
    term.cols,
    () => {
      term.dispose()
      termContainerNode.remove()
    }
  )
}
|
||||
61
cmd/tsconnect/tsconnect.go
Normal file
61
cmd/tsconnect/tsconnect.go
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The tsconnect command builds and serves the static site that is generated for
|
||||
// the Tailscale Connect JS/WASM client. Can be run in 3 modes:
|
||||
// - dev: builds the site and serves it. JS and CSS changes can be picked up
|
||||
// with a reload.
|
||||
// - build: builds the site and writes it to dist/
|
||||
// - serve: serves the site from dist/ (embedded in the binary)
|
||||
package main // import "tailscale.com/cmd/tsconnect"
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var (
	// addr is the host:port the dev and serve modes listen on.
	addr = flag.String("addr", ":9090", "address to listen on")
	// distDir is where build output is written; serve mode reads assets from
	// it (using the embedded copy when left at the "./dist" default).
	distDir = flag.String("distdir", "./dist", "path of directory to place build output in")
)
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
if len(flag.Args()) != 1 {
|
||||
flag.Usage()
|
||||
}
|
||||
|
||||
switch flag.Arg(0) {
|
||||
case "dev":
|
||||
runDev()
|
||||
case "build":
|
||||
runBuild()
|
||||
case "serve":
|
||||
runServe()
|
||||
default:
|
||||
log.Printf("Unknown command: %s", flag.Arg(0))
|
||||
flag.Usage()
|
||||
}
|
||||
}
|
||||
|
||||
// usage prints the subcommand summary and flag defaults to stderr and exits
// with status 2. It is installed as flag.Usage from main. The `[1:]` slices
// strip the leading newline of each raw string literal.
func usage() {
	fmt.Fprintf(os.Stderr, `
usage: tsconnect {dev|build|serve}
`[1:])
	flag.PrintDefaults()
	fmt.Fprintf(os.Stderr, `

tsconnect implements development/build/serving workflows for Tailscale Connect.
It can be invoked with one of three subcommands:

- dev: Run in development mode, allowing JS and CSS changes to be picked up without a rebuilt or restart.
- build: Run in production build mode (generating static assets)
- serve: Run in production serve mode (serving static assets)
`[1:])
	os.Exit(2)
}
|
||||
411
cmd/tsconnect/wasm/wasm_js.go
Normal file
411
cmd/tsconnect/wasm/wasm_js.go
Normal file
@@ -0,0 +1,411 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The wasm package builds a WebAssembly module that provides a subset of
|
||||
// Tailscale APIs to JavaScript.
|
||||
//
|
||||
// When run in the browser, a newIPN(config) function is added to the global JS
|
||||
// namespace. When called it returns an ipn object with the methods
|
||||
// run(callbacks), login(), logout(), and ssh(...).
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strings"
|
||||
"syscall/js"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/control/controlclient"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnlocal"
|
||||
"tailscale.com/ipn/ipnserver"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/net/netns"
|
||||
"tailscale.com/net/tsdial"
|
||||
"tailscale.com/safesocket"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/wgengine"
|
||||
"tailscale.com/wgengine/netstack"
|
||||
"tailscale.com/words"
|
||||
)
|
||||
|
||||
// main registers the newIPN(config) constructor on the JavaScript global
// scope and then blocks forever so the Go runtime (and its registered
// callbacks) stays alive.
func main() {
	js.Global().Set("newIPN", js.FuncOf(func(this js.Value, args []js.Value) interface{} {
		if len(args) != 1 {
			log.Fatal("Usage: newIPN(config)")
			return nil
		}
		return newIPN(args[0])
	}))
	// Keep Go runtime alive, otherwise it will be shut down before newIPN gets
	// called.
	<-make(chan bool)
}
|
||||
|
||||
// newIPN assembles a full userspace Tailscale stack (wgengine + netstack +
// ipnserver) inside the browser and returns the object handed back to
// JavaScript: a map of JS-callable methods run(callbacks), login(), logout(),
// and ssh(hostname, writeFn, readFn, rows, cols, onDone).
//
// jsConfig may carry a "stateStorage" object (see js-state-store.js); when
// absent, IPN state lives in an in-memory store and is lost on page reload.
func newIPN(jsConfig js.Value) map[string]any {
	// The browser has no raw sockets; platform network namespacing is
	// disabled so all traffic flows through netstack below.
	netns.SetEnabled(false)
	var logf logger.Logf = log.Printf

	dialer := new(tsdial.Dialer)
	eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{
		Dialer: dialer,
	})
	if err != nil {
		log.Fatal(err)
	}

	tunDev, magicConn, dnsManager, ok := eng.(wgengine.InternalsGetter).GetInternals()
	if !ok {
		log.Fatalf("%T is not a wgengine.InternalsGetter", eng)
	}
	ns, err := netstack.Create(logf, tunDev, eng, magicConn, dialer, dnsManager)
	if err != nil {
		log.Fatalf("netstack.Create: %v", err)
	}
	ns.ProcessLocalIPs = true
	ns.ProcessSubnets = true
	if err := ns.Start(); err != nil {
		log.Fatalf("failed to start netstack: %v", err)
	}
	// Route every dial through netstack; there is no OS network stack here.
	dialer.UseNetstackForIP = func(ip netaddr.IP) bool {
		return true
	}
	dialer.NetstackDialTCP = func(ctx context.Context, dst netaddr.IPPort) (net.Conn, error) {
		return ns.DialContextTCP(ctx, dst)
	}

	// Choose the state store: the JS-provided storage object if one was
	// configured, otherwise purely in-memory.
	jsStateStorage := jsConfig.Get("stateStorage")
	var store ipn.StateStore
	if jsStateStorage.IsUndefined() {
		store = new(mem.Store)
	} else {
		store = &jsStateStore{jsStateStorage}
	}
	srv, err := ipnserver.New(log.Printf, "some-logid", store, eng, dialer, nil, ipnserver.Options{
		SurviveDisconnects: true,
		LoginFlags:         controlclient.LoginEphemeral,
	})
	if err != nil {
		log.Fatalf("ipnserver.New: %v", err)
	}
	lb := srv.LocalBackend()

	jsIPN := &jsIPN{
		dialer: dialer,
		srv:    srv,
		lb:     lb,
	}

	// Each entry wraps a jsIPN method with argument-count validation so
	// misuse from JS is reported rather than panicking.
	return map[string]any{
		"run": js.FuncOf(func(this js.Value, args []js.Value) interface{} {
			if len(args) != 1 {
				log.Fatal(`Usage: run({
					notifyState(state: int): void,
					notifyNetMap(netMap: object): void,
					notifyBrowseToURL(url: string): void,
				})`)
				return nil
			}
			jsIPN.run(args[0])
			return nil
		}),
		"login": js.FuncOf(func(this js.Value, args []js.Value) interface{} {
			if len(args) != 0 {
				log.Printf("Usage: login()")
				return nil
			}
			jsIPN.login()
			return nil
		}),
		"logout": js.FuncOf(func(this js.Value, args []js.Value) interface{} {
			if len(args) != 0 {
				log.Printf("Usage: logout()")
				return nil
			}
			jsIPN.logout()
			return nil
		}),
		"ssh": js.FuncOf(func(this js.Value, args []js.Value) interface{} {
			if len(args) != 6 {
				log.Printf("Usage: ssh(hostname, writeFn, readFn, rows, cols, onDone)")
				return nil
			}
			// Run in a goroutine: the SSH session blocks until it ends.
			go jsIPN.ssh(
				args[0].String(),
				args[1],
				args[2],
				args[3].Int(),
				args[4].Int(),
				args[5])
			return nil
		}),
	}
}
|
||||
|
||||
// jsIPN bundles the pieces of the in-browser Tailscale stack that the
// JS-facing methods (run/login/logout/ssh) operate on.
type jsIPN struct {
	dialer *tsdial.Dialer        // netstack-backed dialer used for SSH connections
	srv    *ipnserver.Server     // IPN server whose loop run() starts
	lb     *ipnlocal.LocalBackend // backend used for login/logout/state
}
|
||||
|
||||
func (i *jsIPN) run(jsCallbacks js.Value) {
|
||||
notifyState := func(state ipn.State) {
|
||||
jsCallbacks.Call("notifyState", int(state))
|
||||
}
|
||||
notifyState(ipn.NoState)
|
||||
|
||||
i.lb.SetNotifyCallback(func(n ipn.Notify) {
|
||||
log.Printf("NOTIFY: %+v", n)
|
||||
if n.State != nil {
|
||||
notifyState(*n.State)
|
||||
}
|
||||
if nm := n.NetMap; nm != nil {
|
||||
jsNetMap := jsNetMap{
|
||||
Self: jsNetMapSelfNode{
|
||||
jsNetMapNode: jsNetMapNode{
|
||||
Name: nm.Name,
|
||||
Addresses: mapSlice(nm.Addresses, func(a netaddr.IPPrefix) string { return a.IP().String() }),
|
||||
NodeKey: nm.NodeKey.String(),
|
||||
MachineKey: nm.MachineKey.String(),
|
||||
},
|
||||
MachineStatus: int(nm.MachineStatus),
|
||||
},
|
||||
Peers: mapSlice(nm.Peers, func(p *tailcfg.Node) jsNetMapPeerNode {
|
||||
return jsNetMapPeerNode{
|
||||
jsNetMapNode: jsNetMapNode{
|
||||
Name: p.Name,
|
||||
Addresses: mapSlice(p.Addresses, func(a netaddr.IPPrefix) string { return a.IP().String() }),
|
||||
MachineKey: p.Machine.String(),
|
||||
NodeKey: p.Key.String(),
|
||||
},
|
||||
Online: *p.Online,
|
||||
TailscaleSSHEnabled: p.Hostinfo.TailscaleSSHEnabled(),
|
||||
}
|
||||
}),
|
||||
}
|
||||
if jsonNetMap, err := json.Marshal(jsNetMap); err == nil {
|
||||
jsCallbacks.Call("notifyNetMap", string(jsonNetMap))
|
||||
} else {
|
||||
log.Printf("Could not generate JSON netmap: %v", err)
|
||||
}
|
||||
}
|
||||
if n.BrowseToURL != nil {
|
||||
jsCallbacks.Call("notifyBrowseToURL", *n.BrowseToURL)
|
||||
}
|
||||
})
|
||||
|
||||
go func() {
|
||||
err := i.lb.Start(ipn.Options{
|
||||
StateKey: "wasm",
|
||||
UpdatePrefs: &ipn.Prefs{
|
||||
ControlURL: ipn.DefaultControlURL,
|
||||
RouteAll: false,
|
||||
AllowSingleHosts: true,
|
||||
WantRunning: true,
|
||||
Hostname: generateHostname(),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("Start error: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
ln, _, err := safesocket.Listen("", 0)
|
||||
if err != nil {
|
||||
log.Fatalf("safesocket.Listen: %v", err)
|
||||
}
|
||||
|
||||
err = i.srv.Run(context.Background(), ln)
|
||||
log.Fatalf("ipnserver.Run exited: %v", err)
|
||||
}()
|
||||
}
|
||||
|
||||
// login kicks off an interactive login in the background; progress surfaces
// through the notify callback (State/BrowseToURL notifications).
func (i *jsIPN) login() {
	go i.lb.StartLoginInteractive()
}
|
||||
|
||||
// logout asks the backend to log out asynchronously.
// NOTE(review): when the backend is in NoState this logs a warning but still
// issues the Logout call -- confirm whether an early return was intended.
func (i *jsIPN) logout() {
	if i.lb.State() == ipn.NoState {
		log.Printf("Backend not running")
	}
	go i.lb.Logout()
}
|
||||
|
||||
// ssh dials host's SSH port over the tailnet and runs an interactive shell,
// bridging terminal I/O to JavaScript: output strings go to writeFn,
// setReadFn is handed a function that JS calls with user input, rows/cols
// size the requested pty, and onDone is always invoked (via defer) when the
// session ends for any reason. Errors are reported to the terminal itself.
func (i *jsIPN) ssh(host string, writeFn js.Value, setReadFn js.Value, rows, cols int, onDone js.Value) {
	defer onDone.Invoke()

	write := func(s string) {
		writeFn.Invoke(s)
	}
	writeError := func(label string, err error) {
		write(fmt.Sprintf("%s Error: %v\r\n", label, err))
	}

	// The 5s timeout bounds the dial only; the established session itself
	// is not subject to it.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	c, err := i.dialer.UserDial(ctx, "tcp", net.JoinHostPort(host, "22"))
	if err != nil {
		writeError("Dial", err)
		return
	}
	defer c.Close()

	// NOTE(review): host key verification is disabled
	// (InsecureIgnoreHostKey); the connection relies on the tailnet for
	// endpoint authenticity.
	config := &ssh.ClientConfig{
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}

	sshConn, _, _, err := ssh.NewClientConn(c, host, config)
	if err != nil {
		writeError("SSH Connection", err)
		return
	}
	defer sshConn.Close()
	write("SSH Connected\r\n")

	sshClient := ssh.NewClient(sshConn, nil, nil)
	defer sshClient.Close()

	session, err := sshClient.NewSession()
	if err != nil {
		writeError("SSH Session", err)
		return
	}
	write("Session Established\r\n")
	defer session.Close()

	stdin, err := session.StdinPipe()
	if err != nil {
		writeError("SSH Stdin", err)
		return
	}

	// Both output streams feed the same terminal via termWriter.
	session.Stdout = termWriter{writeFn}
	session.Stderr = termWriter{writeFn}

	// Hand JS a callback that forwards user keystrokes into stdin.
	setReadFn.Invoke(js.FuncOf(func(this js.Value, args []js.Value) interface{} {
		input := args[0].String()
		_, err := stdin.Write([]byte(input))
		if err != nil {
			writeError("Write Input", err)
		}
		return nil
	}))

	err = session.RequestPty("xterm", rows, cols, ssh.TerminalModes{})

	if err != nil {
		writeError("Pseudo Terminal", err)
		return
	}

	err = session.Shell()
	if err != nil {
		writeError("Shell", err)
		return
	}

	// Block until the remote shell exits; errors are shown in the terminal.
	err = session.Wait()
	if err != nil {
		writeError("Exit", err)
		return
	}
}
|
||||
|
||||
type termWriter struct {
|
||||
f js.Value
|
||||
}
|
||||
|
||||
func (w termWriter) Write(p []byte) (n int, err error) {
|
||||
r := bytes.Replace(p, []byte("\n"), []byte("\n\r"), -1)
|
||||
w.f.Invoke(string(r))
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// jsNetMap is the JSON-serializable subset of the netmap forwarded to the
// JS side via the notifyNetMap callback.
type jsNetMap struct {
	Self  jsNetMapSelfNode   `json:"self"`
	Peers []jsNetMapPeerNode `json:"peers"`
}

// jsNetMapNode holds the fields shared between the self node and peers.
type jsNetMapNode struct {
	Name      string   `json:"name"`
	Addresses []string `json:"addresses"`
	// NOTE(review): this field appears redundant -- jsNetMapSelfNode declares
	// its own MachineStatus with the same JSON tag (which, being shallower,
	// wins encoding for the self node), and peers never populate this one, so
	// it always encodes as 0 for them. Confirm before removing.
	MachineStatus int    `json:"machineStatus"`
	MachineKey    string `json:"machineKey"`
	NodeKey       string `json:"nodeKey"`
}

// jsNetMapSelfNode is the JSON shape of this node's own netmap entry.
type jsNetMapSelfNode struct {
	jsNetMapNode
	MachineStatus int `json:"machineStatus"`
}

// jsNetMapPeerNode is the JSON shape of a peer's netmap entry.
type jsNetMapPeerNode struct {
	jsNetMapNode
	Online              bool `json:"online"`
	TailscaleSSHEnabled bool `json:"tailscaleSSHEnabled"`
}
|
||||
|
||||
type jsStateStore struct {
|
||||
jsStateStorage js.Value
|
||||
}
|
||||
|
||||
func (s *jsStateStore) ReadState(id ipn.StateKey) ([]byte, error) {
|
||||
jsValue := s.jsStateStorage.Call("getState", string(id))
|
||||
if jsValue.String() == "" {
|
||||
return nil, ipn.ErrStateNotExist
|
||||
}
|
||||
return hex.DecodeString(jsValue.String())
|
||||
}
|
||||
|
||||
func (s *jsStateStore) WriteState(id ipn.StateKey, bs []byte) error {
|
||||
s.jsStateStorage.Call("setState", string(id), hex.EncodeToString(bs))
|
||||
return nil
|
||||
}
|
||||
|
||||
// mapSlice returns a new slice whose i-th element is f applied to a[i].
// A nil or empty input yields an empty (non-nil) result.
func mapSlice[T any, M any](a []T, f func(T) M) []M {
	out := make([]M, len(a))
	for i := range a {
		out[i] = f(a[i])
	}
	return out
}
|
||||
|
||||
// filterSlice returns the elements of a for which f reports true,
// preserving their original order. The result is a fresh slice; a is
// not modified.
func filterSlice[T any](a []T, f func(T) bool) []T {
	kept := make([]T, 0, len(a))
	for _, v := range a {
		if !f(v) {
			continue
		}
		kept = append(kept, v)
	}
	return kept
}
|
||||
|
||||
func generateHostname() string {
|
||||
tails := words.Tails()
|
||||
scales := words.Scales()
|
||||
if rand.Int()%2 == 0 {
|
||||
// JavaScript
|
||||
tails = filterSlice(tails, func(s string) bool { return strings.HasPrefix(s, "j") })
|
||||
scales = filterSlice(scales, func(s string) bool { return strings.HasPrefix(s, "s") })
|
||||
} else {
|
||||
// WebAssembly
|
||||
tails = filterSlice(tails, func(s string) bool { return strings.HasPrefix(s, "w") })
|
||||
scales = filterSlice(scales, func(s string) bool { return strings.HasPrefix(s, "a") })
|
||||
}
|
||||
|
||||
tail := tails[rand.Intn(len(tails))]
|
||||
scale := scales[rand.Intn(len(scales))]
|
||||
return fmt.Sprintf("%s-%s", tail, scale)
|
||||
}
|
||||
205
cmd/tsconnect/yarn.lock
Normal file
205
cmd/tsconnect/yarn.lock
Normal file
@@ -0,0 +1,205 @@
|
||||
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
|
||||
# yarn lockfile v1
|
||||
|
||||
|
||||
ansi-regex@^5.0.1:
|
||||
version "5.0.1"
|
||||
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304"
|
||||
integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==
|
||||
|
||||
ansi-styles@^4.0.0:
|
||||
version "4.3.0"
|
||||
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937"
|
||||
integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==
|
||||
dependencies:
|
||||
color-convert "^2.0.1"
|
||||
|
||||
camelcase@^5.0.0:
|
||||
version "5.3.1"
|
||||
resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320"
|
||||
integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==
|
||||
|
||||
cliui@^6.0.0:
|
||||
version "6.0.0"
|
||||
resolved "https://registry.yarnpkg.com/cliui/-/cliui-6.0.0.tgz#511d702c0c4e41ca156d7d0e96021f23e13225b1"
|
||||
integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==
|
||||
dependencies:
|
||||
string-width "^4.2.0"
|
||||
strip-ansi "^6.0.0"
|
||||
wrap-ansi "^6.2.0"
|
||||
|
||||
color-convert@^2.0.1:
|
||||
version "2.0.1"
|
||||
resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3"
|
||||
integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==
|
||||
dependencies:
|
||||
color-name "~1.1.4"
|
||||
|
||||
color-name@~1.1.4:
|
||||
version "1.1.4"
|
||||
resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
|
||||
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
|
||||
|
||||
decamelize@^1.2.0:
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
|
||||
integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=
|
||||
|
||||
dijkstrajs@^1.0.1:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/dijkstrajs/-/dijkstrajs-1.0.2.tgz#2e48c0d3b825462afe75ab4ad5e829c8ece36257"
|
||||
integrity sha512-QV6PMaHTCNmKSeP6QoXhVTw9snc9VD8MulTT0Bd99Pacp4SS1cjcrYPgBPmibqKVtMJJfqC6XvOXgPMEEPH/fg==
|
||||
|
||||
emoji-regex@^8.0.0:
|
||||
version "8.0.0"
|
||||
resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37"
|
||||
integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==
|
||||
|
||||
encode-utf8@^1.0.3:
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/encode-utf8/-/encode-utf8-1.0.3.tgz#f30fdd31da07fb596f281beb2f6b027851994cda"
|
||||
integrity sha512-ucAnuBEhUK4boH2HjVYG5Q2mQyPorvv0u/ocS+zhdw0S8AlHYY+GOFhP1Gio5z4icpP2ivFSvhtFjQi8+T9ppw==
|
||||
|
||||
find-up@^4.1.0:
|
||||
version "4.1.0"
|
||||
resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19"
|
||||
integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==
|
||||
dependencies:
|
||||
locate-path "^5.0.0"
|
||||
path-exists "^4.0.0"
|
||||
|
||||
get-caller-file@^2.0.1:
|
||||
version "2.0.5"
|
||||
resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e"
|
||||
integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==
|
||||
|
||||
is-fullwidth-code-point@^3.0.0:
|
||||
version "3.0.0"
|
||||
resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d"
|
||||
integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==
|
||||
|
||||
locate-path@^5.0.0:
|
||||
version "5.0.0"
|
||||
resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0"
|
||||
integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==
|
||||
dependencies:
|
||||
p-locate "^4.1.0"
|
||||
|
||||
p-limit@^2.2.0:
|
||||
version "2.3.0"
|
||||
resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1"
|
||||
integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==
|
||||
dependencies:
|
||||
p-try "^2.0.0"
|
||||
|
||||
p-locate@^4.1.0:
|
||||
version "4.1.0"
|
||||
resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07"
|
||||
integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==
|
||||
dependencies:
|
||||
p-limit "^2.2.0"
|
||||
|
||||
p-try@^2.0.0:
|
||||
version "2.2.0"
|
||||
resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6"
|
||||
integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
|
||||
|
||||
path-exists@^4.0.0:
|
||||
version "4.0.0"
|
||||
resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3"
|
||||
integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==
|
||||
|
||||
pngjs@^5.0.0:
|
||||
version "5.0.0"
|
||||
resolved "https://registry.yarnpkg.com/pngjs/-/pngjs-5.0.0.tgz#e79dd2b215767fd9c04561c01236df960bce7fbb"
|
||||
integrity sha512-40QW5YalBNfQo5yRYmiw7Yz6TKKVr3h6970B2YE+3fQpsWcrbj1PzJgxeJ19DRQjhMbKPIuMY8rFaXc8moolVw==
|
||||
|
||||
qrcode@^1.5.0:
|
||||
version "1.5.0"
|
||||
resolved "https://registry.yarnpkg.com/qrcode/-/qrcode-1.5.0.tgz#95abb8a91fdafd86f8190f2836abbfc500c72d1b"
|
||||
integrity sha512-9MgRpgVc+/+47dFvQeD6U2s0Z92EsKzcHogtum4QB+UNd025WOJSHvn/hjk9xmzj7Stj95CyUAs31mrjxliEsQ==
|
||||
dependencies:
|
||||
dijkstrajs "^1.0.1"
|
||||
encode-utf8 "^1.0.3"
|
||||
pngjs "^5.0.0"
|
||||
yargs "^15.3.1"
|
||||
|
||||
require-directory@^2.1.1:
|
||||
version "2.1.1"
|
||||
resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42"
|
||||
integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I=
|
||||
|
||||
require-main-filename@^2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b"
|
||||
integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==
|
||||
|
||||
set-blocking@^2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
|
||||
integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc=
|
||||
|
||||
string-width@^4.1.0, string-width@^4.2.0:
|
||||
version "4.2.3"
|
||||
resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010"
|
||||
integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
|
||||
dependencies:
|
||||
emoji-regex "^8.0.0"
|
||||
is-fullwidth-code-point "^3.0.0"
|
||||
strip-ansi "^6.0.1"
|
||||
|
||||
strip-ansi@^6.0.0, strip-ansi@^6.0.1:
|
||||
version "6.0.1"
|
||||
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9"
|
||||
integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
|
||||
dependencies:
|
||||
ansi-regex "^5.0.1"
|
||||
|
||||
which-module@^2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a"
|
||||
integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=
|
||||
|
||||
wrap-ansi@^6.2.0:
|
||||
version "6.2.0"
|
||||
resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53"
|
||||
integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==
|
||||
dependencies:
|
||||
ansi-styles "^4.0.0"
|
||||
string-width "^4.1.0"
|
||||
strip-ansi "^6.0.0"
|
||||
|
||||
xterm@^4.18.0:
|
||||
version "4.18.0"
|
||||
resolved "https://registry.yarnpkg.com/xterm/-/xterm-4.18.0.tgz#a1f6ab2c330c3918fb094ae5f4c2562987398ea1"
|
||||
integrity sha512-JQoc1S0dti6SQfI0bK1AZvGnAxH4MVw45ZPFSO6FHTInAiau3Ix77fSxNx3mX4eh9OL4AYa8+4C8f5UvnSfppQ==
|
||||
|
||||
y18n@^4.0.0:
|
||||
version "4.0.3"
|
||||
resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf"
|
||||
integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==
|
||||
|
||||
yargs-parser@^18.1.2:
|
||||
version "18.1.3"
|
||||
resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-18.1.3.tgz#be68c4975c6b2abf469236b0c870362fab09a7b0"
|
||||
integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==
|
||||
dependencies:
|
||||
camelcase "^5.0.0"
|
||||
decamelize "^1.2.0"
|
||||
|
||||
yargs@^15.3.1:
|
||||
version "15.4.1"
|
||||
resolved "https://registry.yarnpkg.com/yargs/-/yargs-15.4.1.tgz#0d87a16de01aee9d8bec2bfbf74f67851730f4f8"
|
||||
integrity sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==
|
||||
dependencies:
|
||||
cliui "^6.0.0"
|
||||
decamelize "^1.2.0"
|
||||
find-up "^4.1.0"
|
||||
get-caller-file "^2.0.1"
|
||||
require-directory "^2.1.1"
|
||||
require-main-filename "^2.0.0"
|
||||
set-blocking "^2.0.0"
|
||||
string-width "^4.2.0"
|
||||
which-module "^2.0.0"
|
||||
y18n "^4.0.0"
|
||||
yargs-parser "^18.1.2"
|
||||
@@ -5,6 +5,7 @@
|
||||
package controlclient
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
|
||||
@@ -165,12 +166,6 @@ func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.Netwo
|
||||
}
|
||||
ms.addUserProfile(peer.User)
|
||||
}
|
||||
if len(resp.DNS) > 0 {
|
||||
nm.DNS.Nameservers = resp.DNS
|
||||
}
|
||||
if len(resp.SearchPaths) > 0 {
|
||||
nm.DNS.Domains = resp.SearchPaths
|
||||
}
|
||||
if Debug.ProxyDNS {
|
||||
nm.DNS.Proxied = true
|
||||
}
|
||||
@@ -244,7 +239,7 @@ func undeltaPeers(mapRes *tailcfg.MapResponse, prev []*tailcfg.Node) {
|
||||
sortNodes(newFull)
|
||||
}
|
||||
|
||||
if len(mapRes.PeerSeenChange) != 0 || len(mapRes.OnlineChange) != 0 {
|
||||
if len(mapRes.PeerSeenChange) != 0 || len(mapRes.OnlineChange) != 0 || len(mapRes.PeersChangedPatch) != 0 {
|
||||
peerByID := make(map[tailcfg.NodeID]*tailcfg.Node, len(newFull))
|
||||
for _, n := range newFull {
|
||||
peerByID[n.ID] = n
|
||||
@@ -265,6 +260,16 @@ func undeltaPeers(mapRes *tailcfg.MapResponse, prev []*tailcfg.Node) {
|
||||
n.Online = &online
|
||||
}
|
||||
}
|
||||
for _, ec := range mapRes.PeersChangedPatch {
|
||||
if n, ok := peerByID[ec.NodeID]; ok {
|
||||
if ec.DERPRegion != 0 {
|
||||
n.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, ec.DERPRegion)
|
||||
}
|
||||
if ec.Endpoints != nil {
|
||||
n.Endpoints = ec.Endpoints
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mapRes.Peers = newFull
|
||||
|
||||
@@ -34,6 +34,16 @@ func TestUndeltaPeers(t *testing.T) {
|
||||
n.LastSeen = &t
|
||||
}
|
||||
}
|
||||
withDERP := func(d string) func(*tailcfg.Node) {
|
||||
return func(n *tailcfg.Node) {
|
||||
n.DERP = d
|
||||
}
|
||||
}
|
||||
withEP := func(ep string) func(*tailcfg.Node) {
|
||||
return func(n *tailcfg.Node) {
|
||||
n.Endpoints = []string{ep}
|
||||
}
|
||||
}
|
||||
n := func(id tailcfg.NodeID, name string, mod ...func(*tailcfg.Node)) *tailcfg.Node {
|
||||
n := &tailcfg.Node{ID: id, Name: name}
|
||||
for _, f := range mod {
|
||||
@@ -137,7 +147,53 @@ func TestUndeltaPeers(t *testing.T) {
|
||||
n(2, "bar", seenAt(time.Unix(123, 0))),
|
||||
),
|
||||
},
|
||||
{
|
||||
name: "ep_change_derp",
|
||||
prev: peers(n(1, "foo", withDERP("127.3.3.40:3"))),
|
||||
mapRes: &tailcfg.MapResponse{
|
||||
PeersChangedPatch: []*tailcfg.PeerChange{{
|
||||
NodeID: 1,
|
||||
DERPRegion: 4,
|
||||
}},
|
||||
},
|
||||
want: peers(n(1, "foo", withDERP("127.3.3.40:4"))),
|
||||
},
|
||||
{
|
||||
name: "ep_change_udp",
|
||||
prev: peers(n(1, "foo", withEP("1.2.3.4:111"))),
|
||||
mapRes: &tailcfg.MapResponse{
|
||||
PeersChangedPatch: []*tailcfg.PeerChange{{
|
||||
NodeID: 1,
|
||||
Endpoints: []string{"1.2.3.4:56"},
|
||||
}},
|
||||
},
|
||||
want: peers(n(1, "foo", withEP("1.2.3.4:56"))),
|
||||
},
|
||||
{
|
||||
name: "ep_change_udp",
|
||||
prev: peers(n(1, "foo", withDERP("127.3.3.40:3"), withEP("1.2.3.4:111"))),
|
||||
mapRes: &tailcfg.MapResponse{
|
||||
PeersChangedPatch: []*tailcfg.PeerChange{{
|
||||
NodeID: 1,
|
||||
Endpoints: []string{"1.2.3.4:56"},
|
||||
}},
|
||||
},
|
||||
want: peers(n(1, "foo", withDERP("127.3.3.40:3"), withEP("1.2.3.4:56"))),
|
||||
},
|
||||
{
|
||||
name: "ep_change_both",
|
||||
prev: peers(n(1, "foo", withDERP("127.3.3.40:3"), withEP("1.2.3.4:111"))),
|
||||
mapRes: &tailcfg.MapResponse{
|
||||
PeersChangedPatch: []*tailcfg.PeerChange{{
|
||||
NodeID: 1,
|
||||
DERPRegion: 2,
|
||||
Endpoints: []string{"1.2.3.4:56"},
|
||||
}},
|
||||
},
|
||||
want: peers(n(1, "foo", withDERP("127.3.3.40:2"), withEP("1.2.3.4:56"))),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if !tt.curTime.IsZero() {
|
||||
|
||||
@@ -156,6 +156,8 @@ func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) {
|
||||
}
|
||||
|
||||
type clientInfo struct {
|
||||
// Version is the DERP protocol version that the client was built with.
|
||||
// See the ProtocolVersion const.
|
||||
Version int `json:"version,omitempty"`
|
||||
|
||||
// MeshKey optionally specifies a pre-shared key used by
|
||||
|
||||
@@ -410,7 +410,7 @@ func (s *Server) IsClientConnectedForTest(k key.NodePublic) bool {
|
||||
// on its own.
|
||||
//
|
||||
// Accept closes nc.
|
||||
func (s *Server) Accept(nc Conn, brw *bufio.ReadWriter, remoteAddr string) {
|
||||
func (s *Server) Accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string) {
|
||||
closed := make(chan struct{})
|
||||
|
||||
s.mu.Lock()
|
||||
@@ -428,7 +428,7 @@ func (s *Server) Accept(nc Conn, brw *bufio.ReadWriter, remoteAddr string) {
|
||||
s.mu.Unlock()
|
||||
}()
|
||||
|
||||
if err := s.accept(nc, brw, remoteAddr, connNum); err != nil && !s.isClosed() {
|
||||
if err := s.accept(ctx, nc, brw, remoteAddr, connNum); err != nil && !s.isClosed() {
|
||||
s.logf("derp: %s: %v", remoteAddr, err)
|
||||
}
|
||||
}
|
||||
@@ -641,7 +641,7 @@ func (s *Server) addWatcher(c *sclient) {
|
||||
go c.requestMeshUpdate()
|
||||
}
|
||||
|
||||
func (s *Server) accept(nc Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error {
|
||||
func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error {
|
||||
br := brw.Reader
|
||||
nc.SetDeadline(time.Now().Add(10 * time.Second))
|
||||
bw := &lazyBufioWriter{w: nc, lbw: brw.Writer}
|
||||
@@ -660,7 +660,7 @@ func (s *Server) accept(nc Conn, brw *bufio.ReadWriter, remoteAddr string, connN
|
||||
// At this point we trust the client so we don't time out.
|
||||
nc.SetDeadline(time.Time{})
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
remoteIPPort, _ := netaddr.ParseIPPort(remoteAddr)
|
||||
|
||||
@@ -85,8 +85,12 @@ func TestSendRecv(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cin.Close()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
brwServer := bufio.NewReadWriter(bufio.NewReader(cin), bufio.NewWriter(cin))
|
||||
go s.Accept(cin, brwServer, fmt.Sprintf("test-client-%d", i))
|
||||
go s.Accept(ctx, cin, brwServer, fmt.Sprintf("test-client-%d", i))
|
||||
|
||||
key := clientPrivateKeys[i]
|
||||
brw := bufio.NewReadWriter(bufio.NewReader(cout), bufio.NewWriter(cout))
|
||||
@@ -231,10 +235,10 @@ func TestSendFreeze(t *testing.T) {
|
||||
// Then cathy stops processing messages.
|
||||
// That should not interfere with alice talking to bob.
|
||||
|
||||
newClient := func(name string, k key.NodePrivate) (c *Client, clientConn nettest.Conn) {
|
||||
newClient := func(ctx context.Context, name string, k key.NodePrivate) (c *Client, clientConn nettest.Conn) {
|
||||
t.Helper()
|
||||
c1, c2 := nettest.NewConn(name, 1024)
|
||||
go s.Accept(c1, bufio.NewReadWriter(bufio.NewReader(c1), bufio.NewWriter(c1)), name)
|
||||
go s.Accept(ctx, c1, bufio.NewReadWriter(bufio.NewReader(c1), bufio.NewWriter(c1)), name)
|
||||
|
||||
brw := bufio.NewReadWriter(bufio.NewReader(c2), bufio.NewWriter(c2))
|
||||
c, err := NewClient(k, c2, brw, t.Logf)
|
||||
@@ -245,14 +249,17 @@ func TestSendFreeze(t *testing.T) {
|
||||
return c, c2
|
||||
}
|
||||
|
||||
ctx, clientCtxCancel := context.WithCancel(context.Background())
|
||||
defer clientCtxCancel()
|
||||
|
||||
aliceKey := key.NewNode()
|
||||
aliceClient, aliceConn := newClient("alice", aliceKey)
|
||||
aliceClient, aliceConn := newClient(ctx, "alice", aliceKey)
|
||||
|
||||
bobKey := key.NewNode()
|
||||
bobClient, bobConn := newClient("bob", bobKey)
|
||||
bobClient, bobConn := newClient(ctx, "bob", bobKey)
|
||||
|
||||
cathyKey := key.NewNode()
|
||||
cathyClient, cathyConn := newClient("cathy", cathyKey)
|
||||
cathyClient, cathyConn := newClient(ctx, "cathy", cathyKey)
|
||||
|
||||
var (
|
||||
aliceCh = make(chan struct{}, 32)
|
||||
@@ -455,7 +462,7 @@ func (ts *testServer) close(t *testing.T) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newTestServer(t *testing.T) *testServer {
|
||||
func newTestServer(t *testing.T, ctx context.Context) *testServer {
|
||||
t.Helper()
|
||||
logf := logger.WithPrefix(t.Logf, "derp-server: ")
|
||||
s := NewServer(key.NewNode(), logf)
|
||||
@@ -475,7 +482,7 @@ func newTestServer(t *testing.T) *testServer {
|
||||
// TODO: register c in ts so Close also closes it?
|
||||
go func(i int) {
|
||||
brwServer := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c))
|
||||
go s.Accept(c, brwServer, fmt.Sprintf("test-client-%d", i))
|
||||
go s.Accept(ctx, c, brwServer, fmt.Sprintf("test-client-%d", i))
|
||||
}(i)
|
||||
}
|
||||
}()
|
||||
@@ -610,7 +617,10 @@ func (c *testClient) close(t *testing.T) {
|
||||
// TestWatch tests the connection watcher mechanism used by regional
|
||||
// DERP nodes to mesh up with each other.
|
||||
func TestWatch(t *testing.T) {
|
||||
ts := newTestServer(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ts := newTestServer(t, ctx)
|
||||
defer ts.close(t)
|
||||
|
||||
w1 := newTestWatcher(t, ts, "w1")
|
||||
@@ -1198,7 +1208,10 @@ func benchmarkSendRecvSize(b *testing.B, packetSize int) {
|
||||
defer connIn.Close()
|
||||
|
||||
brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn))
|
||||
go s.Accept(connIn, brwServer, "test-client")
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
go s.Accept(ctx, connIn, brwServer, "test-client")
|
||||
|
||||
brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut))
|
||||
client, err := NewClient(k, connOut, brw, logger.Discard)
|
||||
@@ -1354,7 +1367,10 @@ func TestClientSendRateLimiting(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestServerRepliesToPing(t *testing.T) {
|
||||
ts := newTestServer(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ts := newTestServer(t, ctx)
|
||||
defer ts.close(t)
|
||||
|
||||
tc := newRegularClient(t, ts, "alice")
|
||||
|
||||
@@ -56,6 +56,6 @@ func Handler(s *derp.Server) http.Handler {
|
||||
pubKey.UntypedHexString())
|
||||
}
|
||||
|
||||
s.Accept(netConn, conn, netConn.RemoteAddr().String())
|
||||
s.Accept(r.Context(), netConn, conn, netConn.RemoteAddr().String())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
|
||||
#! /bin/sh
|
||||
|
||||
set -m # enable job control
|
||||
|
||||
export PATH=$PATH:/tailscale/bin
|
||||
|
||||
TS_AUTH_KEY="${TS_AUTH_KEY:-}"
|
||||
@@ -23,11 +25,11 @@ set -e
|
||||
TAILSCALED_ARGS="--socket=/tmp/tailscaled.sock"
|
||||
|
||||
if [[ ! -z "${KUBERNETES_SERVICE_HOST}" ]]; then
|
||||
TAILSCALED_ARGS="${TAILSCALED_ARGS} --state=kube:${TS_KUBE_SECRET}"
|
||||
TAILSCALED_ARGS="${TAILSCALED_ARGS} --state=kube:${TS_KUBE_SECRET} --statedir=${TS_STATE_DIR:-/tmp}"
|
||||
elif [[ ! -z "${TS_STATE_DIR}" ]]; then
|
||||
TAILSCALED_ARGS="${TAILSCALED_ARGS} --statedir=${TS_STATE_DIR}"
|
||||
else
|
||||
TAILSCALED_ARGS="${TAILSCALED_ARGS} --state=mem:"
|
||||
TAILSCALED_ARGS="${TAILSCALED_ARGS} --state=mem: --statedir=/tmp"
|
||||
fi
|
||||
|
||||
if [[ "${TS_USERSPACE}" == "true" ]]; then
|
||||
@@ -60,7 +62,6 @@ fi
|
||||
|
||||
echo "Starting tailscaled"
|
||||
tailscaled ${TAILSCALED_ARGS} &
|
||||
PID=$!
|
||||
|
||||
UP_ARGS="--accept-dns=${TS_ACCEPT_DNS}"
|
||||
if [[ ! -z "${TS_ROUTES}" ]]; then
|
||||
@@ -81,4 +82,4 @@ if [[ ! -z "${TS_DEST_IP}" ]]; then
|
||||
iptables -t nat -I PREROUTING -d "$(tailscale --socket=/tmp/tailscaled.sock ip -4)" -j DNAT --to-destination "${TS_DEST_IP}"
|
||||
fi
|
||||
|
||||
wait ${PID}
|
||||
fg
|
||||
|
||||
10
go.mod
10
go.mod
@@ -6,6 +6,7 @@ require (
|
||||
filippo.io/mkcert v1.4.3
|
||||
github.com/akutz/memconn v0.1.0
|
||||
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74
|
||||
github.com/andybalholm/brotli v1.0.3
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
|
||||
github.com/aws/aws-sdk-go-v2 v1.11.2
|
||||
github.com/aws/aws-sdk-go-v2/config v1.11.0
|
||||
@@ -16,13 +17,16 @@ require (
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
||||
github.com/creack/pty v1.1.17
|
||||
github.com/dave/jennifer v1.4.1
|
||||
github.com/evanw/esbuild v0.14.39
|
||||
github.com/frankban/quicktest v1.14.0
|
||||
github.com/fxamacker/cbor/v2 v2.4.0
|
||||
github.com/go-ole/go-ole v1.2.6
|
||||
github.com/godbus/dbus/v5 v5.0.6
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
|
||||
github.com/google/go-cmp v0.5.8
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/goreleaser/nfpm v1.10.3
|
||||
github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3
|
||||
github.com/iancoleman/strcase v0.2.0
|
||||
github.com/insomniacslk/dhcp v0.0.0-20211209223715-7d93572ebe8e
|
||||
github.com/jsimonetti/rtnetlink v1.1.2-0.20220408201609-d380b505068b
|
||||
@@ -42,7 +46,7 @@ require (
|
||||
github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41
|
||||
github.com/tailscale/golang-x-crypto v0.0.0-20220428210705-0b941c09a5e1
|
||||
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05
|
||||
github.com/tailscale/hujson v0.0.0-20220506202205-92b4b88a9e17
|
||||
github.com/tailscale/hujson v0.0.0-20220630195928-54599719472f
|
||||
github.com/tailscale/mkctr v0.0.0-20220601142259-c0b937af2e89
|
||||
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85
|
||||
github.com/tcnksm/go-httpstat v0.2.0
|
||||
@@ -59,7 +63,7 @@ require (
|
||||
golang.org/x/tools v0.1.11
|
||||
golang.zx2c4.com/wireguard v0.0.0-20220703234212-c31a7b1ab478
|
||||
golang.zx2c4.com/wireguard/windows v0.4.10
|
||||
gvisor.dev/gvisor v0.0.0-20220407223209-21871174d445
|
||||
gvisor.dev/gvisor v0.0.0-20220721202624-0b2c11c2773c
|
||||
honnef.co/go/tools v0.4.0-0.dev.0.20220404092545-59d7a2877f83
|
||||
inet.af/netaddr v0.0.0-20220617031823-097006376321
|
||||
inet.af/peercred v0.0.0-20210906144145-0893ea02156a
|
||||
@@ -122,7 +126,6 @@ require (
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/fatih/structtag v1.2.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.4.0 // indirect
|
||||
github.com/fzipp/gocyclo v0.3.1 // indirect
|
||||
github.com/gliderlabs/ssh v0.3.3 // indirect
|
||||
github.com/go-critic/go-critic v0.6.1 // indirect
|
||||
@@ -164,7 +167,6 @@ require (
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
|
||||
20
go.sum
20
go.sum
@@ -112,6 +112,7 @@ github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pO
|
||||
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
|
||||
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
|
||||
github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
|
||||
github.com/andybalholm/brotli v1.0.3 h1:fpcw+r1N1h0Poc1F/pHbW40cUm/lMEQslZtCkBQ0UnM=
|
||||
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
@@ -280,6 +281,8 @@ github.com/esimonov/ifshort v1.0.3 h1:JD6x035opqGec5fZ0TLjXeROD2p5H7oLGn8MKfy9HT
|
||||
github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
|
||||
github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
|
||||
github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
|
||||
github.com/evanw/esbuild v0.14.39 h1:1TMZtCXOY4ctAbGY4QT9sjT203I/cQ16vXt2F9rLT58=
|
||||
github.com/evanw/esbuild v0.14.39/go.mod h1:GG+zjdi59yh3ehDn4ZWfPcATxjPDUH53iU4ZJbp7dkY=
|
||||
github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
@@ -304,6 +307,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
|
||||
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
|
||||
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
|
||||
@@ -343,9 +347,11 @@ github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvSc
|
||||
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
|
||||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
|
||||
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
|
||||
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
|
||||
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
|
||||
github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
|
||||
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
|
||||
github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
@@ -380,9 +386,11 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA
|
||||
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
|
||||
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
|
||||
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
|
||||
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
|
||||
github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
|
||||
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
|
||||
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
@@ -839,6 +847,8 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
@@ -1100,8 +1110,8 @@ github.com/tailscale/golang-x-crypto v0.0.0-20220428210705-0b941c09a5e1 h1:vsFV6
|
||||
github.com/tailscale/golang-x-crypto v0.0.0-20220428210705-0b941c09a5e1/go.mod h1:95n9fbUCixVSI4QXLEvdKJjnYK2eUlkTx9+QwLPXFKU=
|
||||
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio=
|
||||
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
|
||||
github.com/tailscale/hujson v0.0.0-20220506202205-92b4b88a9e17 h1:QaQrUggZ7U2lE3HhoPx6bDK7fO385FR7pHRYSPEv70Q=
|
||||
github.com/tailscale/hujson v0.0.0-20220506202205-92b4b88a9e17/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8=
|
||||
github.com/tailscale/hujson v0.0.0-20220630195928-54599719472f h1:n4r/sJ92cBSBHK8n9lR1XLFr0OiTVeGfN5TR+9LaN7E=
|
||||
github.com/tailscale/hujson v0.0.0-20220630195928-54599719472f/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8=
|
||||
github.com/tailscale/mkctr v0.0.0-20220601142259-c0b937af2e89 h1:7xU7AFQE83h0wz/dIMvD0t77g0FxFfZIQjghDQxyG2U=
|
||||
github.com/tailscale/mkctr v0.0.0-20220601142259-c0b937af2e89/go.mod h1:OGMqrTzDqmJkGumUTtOv44Rp3/4xS+QFbE8Rn0AGlaU=
|
||||
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk=
|
||||
@@ -1147,6 +1157,7 @@ github.com/u-root/uio v0.0.0-20210528151154-e40b768296a7/go.mod h1:LpEX5FO/cB+WF
|
||||
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
@@ -1509,6 +1520,7 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -1845,8 +1857,8 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||
gvisor.dev/gvisor v0.0.0-20220407223209-21871174d445 h1:pLNQCtMzh4O6rdhoUeWHuutt4yMft+B9Cgw/bezWchE=
|
||||
gvisor.dev/gvisor v0.0.0-20220407223209-21871174d445/go.mod h1:tWwEcFvJavs154OdjFCw78axNrsDlz4Zh8jvPqwcpGI=
|
||||
gvisor.dev/gvisor v0.0.0-20220721202624-0b2c11c2773c h1:frrINYSQqhraHqy23/dWqdNt7mRlsGJJBwGHvI3Q+/c=
|
||||
gvisor.dev/gvisor v0.0.0-20220721202624-0b2c11c2773c/go.mod h1:TIvkJD0sxe8pIob3p6T8IzxXunlp6yfgktvTNp+DGNM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
@@ -1 +1 @@
|
||||
04d67b90d8cfd6f822664220f79e0e69cacb6b5c
|
||||
149f7d88f11384083d42ccbcdb31693510385ec6
|
||||
|
||||
@@ -342,7 +342,19 @@ func (b *LocalBackend) onHealthChange(sys health.Subsystem, err error) {
|
||||
// can no longer be used after Shutdown returns.
|
||||
func (b *LocalBackend) Shutdown() {
|
||||
b.mu.Lock()
|
||||
if b.shutdownCalled {
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
b.shutdownCalled = true
|
||||
|
||||
if b.loginFlags&controlclient.LoginEphemeral != 0 {
|
||||
b.mu.Unlock()
|
||||
ctx, cancel := context.WithTimeout(b.ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
b.LogoutSync(ctx) // best effort
|
||||
b.mu.Lock()
|
||||
}
|
||||
cc := b.cc
|
||||
if b.sshServer != nil {
|
||||
b.sshServer.Shutdown()
|
||||
|
||||
@@ -117,7 +117,8 @@ func (b *LocalBackend) hostKeyFileOrCreate(keyDir, typ string) ([]byte, error) {
|
||||
func (b *LocalBackend) getSystemSSH_HostKeys() (ret []ssh.Signer, err error) {
|
||||
// TODO(bradfitz): cache this?
|
||||
for _, typ := range keyTypes {
|
||||
hostKey, err := ioutil.ReadFile("/etc/ssh/ssh_host_" + typ + "_key")
|
||||
filename := "/etc/ssh/ssh_host_" + typ + "_key"
|
||||
hostKey, err := ioutil.ReadFile(filename)
|
||||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
@@ -126,7 +127,7 @@ func (b *LocalBackend) getSystemSSH_HostKeys() (ret []ssh.Signer, err error) {
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKey(hostKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error reading private key %s: %w", filename, err)
|
||||
}
|
||||
ret = append(ret, signer)
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -37,6 +38,7 @@ import (
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/version/distro"
|
||||
)
|
||||
|
||||
// Process-wide cache. (A new *Handler is created per connection,
|
||||
@@ -53,6 +55,13 @@ var (
|
||||
|
||||
func (h *Handler) certDir() (string, error) {
|
||||
d := h.b.TailscaleVarRoot()
|
||||
|
||||
// As a workaround for Synology DSM6 not having a "var" directory, use the
|
||||
// app's "etc" directory (on a small partition) to hold certs at least.
|
||||
// See https://github.com/tailscale/tailscale/issues/4060#issuecomment-1186592251
|
||||
if d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 {
|
||||
d = "/var/packages/Tailscale/etc" // base; we append "certs" below
|
||||
}
|
||||
if d == "" {
|
||||
return "", errors.New("no TailscaleVarRoot")
|
||||
}
|
||||
|
||||
@@ -76,6 +76,7 @@ type Report struct {
|
||||
IPv4 bool // an IPv4 STUN round trip completed
|
||||
IPv6CanSend bool // an IPv6 packet was able to be sent
|
||||
IPv4CanSend bool // an IPv4 packet was able to be sent
|
||||
OSHasIPv6 bool // could bind a socket to ::1
|
||||
|
||||
// MappingVariesByDestIP is whether STUN results depend which
|
||||
// STUN server you're talking to (on IPv4).
|
||||
@@ -806,6 +807,14 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap) (_ *Report,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// See if IPv6 works at all, or if it's been hard disabled at the
|
||||
// OS level.
|
||||
v6udp, err := netns.Listener(c.logf).ListenPacket(ctx, "udp6", "[::1]:0")
|
||||
if err == nil {
|
||||
rs.report.OSHasIPv6 = true
|
||||
v6udp.Close()
|
||||
}
|
||||
|
||||
// Create a UDP4 socket used for sending to our discovered IPv4 address.
|
||||
rs.pc4Hair, err = netns.Listener(c.logf).ListenPacket(ctx, "udp4", ":0")
|
||||
if err != nil {
|
||||
|
||||
@@ -111,6 +111,9 @@ func TestWorksWhenUDPBlocked(t *testing.T) {
|
||||
// That's not relevant to this test, so just accept what we're
|
||||
// given.
|
||||
want.IPv4CanSend = r.IPv4CanSend
|
||||
// OS IPv6 test is irrelevant here, accept whatever the current
|
||||
// machine has.
|
||||
want.OSHasIPv6 = r.OSHasIPv6
|
||||
|
||||
if !reflect.DeepEqual(r, want) {
|
||||
t.Errorf("mismatch\n got: %+v\nwant: %+v\n", r, want)
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"golang.org/x/sys/unix"
|
||||
"golang.zx2c4.com/wireguard/tun"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/buffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
|
||||
@@ -290,7 +289,7 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool {
|
||||
}
|
||||
|
||||
func packLayer2UDP(payload []byte, srcMAC, dstMAC net.HardwareAddr, src, dst netaddr.IPPort) []byte {
|
||||
buf := buffer.NewView(header.EthernetMinimumSize + header.UDPMinimumSize + header.IPv4MinimumSize + len(payload))
|
||||
buf := make([]byte, header.EthernetMinimumSize+header.UDPMinimumSize+header.IPv4MinimumSize+len(payload))
|
||||
payloadStart := len(buf) - len(payload)
|
||||
copy(buf[payloadStart:], payload)
|
||||
srcB := src.IP().As4()
|
||||
|
||||
@@ -74,15 +74,18 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) {
|
||||
|
||||
// tunDiagnoseFailure, if non-nil, does OS-specific diagnostics of why
|
||||
// TUN failed to work.
|
||||
var tunDiagnoseFailure func(tunName string, logf logger.Logf)
|
||||
var tunDiagnoseFailure func(tunName string, logf logger.Logf, err error)
|
||||
|
||||
// Diagnose tries to explain a tuntap device creation failure.
|
||||
// It pokes around the system and logs some diagnostic info that might
|
||||
// help debug why tun creation failed. Because device creation has
|
||||
// already failed and the program's about to end, log a lot.
|
||||
func Diagnose(logf logger.Logf, tunName string) {
|
||||
//
|
||||
// The tunName is the name of the tun device that was requested but failed.
|
||||
// The err error is how the tun creation failed.
|
||||
func Diagnose(logf logger.Logf, tunName string, err error) {
|
||||
if tunDiagnoseFailure != nil {
|
||||
tunDiagnoseFailure(tunName, logf)
|
||||
tunDiagnoseFailure(tunName, logf, err)
|
||||
} else {
|
||||
logf("no TUN failure diagnostics for OS %q", runtime.GOOS)
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package tstun
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
@@ -19,7 +20,14 @@ func init() {
|
||||
tunDiagnoseFailure = diagnoseLinuxTUNFailure
|
||||
}
|
||||
|
||||
func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf) {
|
||||
func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf, createErr error) {
|
||||
if errors.Is(createErr, syscall.EBUSY) {
|
||||
logf("TUN device %s is busy; another process probably still has it open (from old version of Tailscale that had a bug)", tunName)
|
||||
logf("To fix, kill the process that has it open. Find with:\n\n$ sudo lsof -n /dev/net/tun\n\n")
|
||||
logf("... and then kill those PID(s)")
|
||||
return
|
||||
}
|
||||
|
||||
var un syscall.Utsname
|
||||
err := syscall.Uname(&un)
|
||||
if err != nil {
|
||||
|
||||
@@ -17,7 +17,7 @@ func init() {
|
||||
tunDiagnoseFailure = diagnoseDarwinTUNFailure
|
||||
}
|
||||
|
||||
func diagnoseDarwinTUNFailure(tunName string, logf logger.Logf) {
|
||||
func diagnoseDarwinTUNFailure(tunName string, logf logger.Logf, err error) {
|
||||
if os.Getuid() != 0 {
|
||||
logf("failed to create TUN device as non-root user; use 'sudo tailscaled', or run under launchd with 'sudo tailscaled install-system-daemon'")
|
||||
}
|
||||
|
||||
@@ -524,9 +524,10 @@ func (t *Wrapper) Read(buf []byte, offset int) (int, error) {
|
||||
|
||||
var n int
|
||||
if res.packet != nil {
|
||||
n = copy(buf[offset:], res.packet.NetworkHeader().View())
|
||||
n += copy(buf[offset+n:], res.packet.TransportHeader().View())
|
||||
n += copy(buf[offset+n:], res.packet.Data().AsRange().AsView())
|
||||
|
||||
n = copy(buf[offset:], res.packet.NetworkHeader().Slice())
|
||||
n += copy(buf[offset+n:], res.packet.TransportHeader().Slice())
|
||||
n += copy(buf[offset+n:], res.packet.Data().AsRange().ToSlice())
|
||||
|
||||
res.packet.DecRef()
|
||||
} else {
|
||||
@@ -715,9 +716,9 @@ func (t *Wrapper) SetFilter(filt *filter.Filter) {
|
||||
func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer) error {
|
||||
buf := make([]byte, PacketStartOffset+pkt.Size())
|
||||
|
||||
n := copy(buf[PacketStartOffset:], pkt.NetworkHeader().View())
|
||||
n += copy(buf[PacketStartOffset+n:], pkt.TransportHeader().View())
|
||||
n += copy(buf[PacketStartOffset+n:], pkt.Data().AsRange().AsView())
|
||||
n := copy(buf[PacketStartOffset:], pkt.NetworkHeader().Slice())
|
||||
n += copy(buf[PacketStartOffset+n:], pkt.TransportHeader().Slice())
|
||||
n += copy(buf[PacketStartOffset+n:], pkt.Data().AsRange().ToSlice())
|
||||
if n != pkt.Size() {
|
||||
panic("unexpected packet size after copy")
|
||||
}
|
||||
|
||||
@@ -86,8 +86,11 @@ func (ss *sshSession) newIncubatorCommand() *exec.Cmd {
|
||||
// TODO(maisem): this doesn't work with sftp
|
||||
return exec.CommandContext(ss.ctx, name, args...)
|
||||
}
|
||||
ss.conn.mu.Lock()
|
||||
lu := ss.conn.localUser
|
||||
ci := ss.conn.info
|
||||
gids := strings.Join(ss.conn.userGroupIDs, ",")
|
||||
ss.conn.mu.Unlock()
|
||||
remoteUser := ci.uprof.LoginName
|
||||
if len(ci.node.Tags) > 0 {
|
||||
remoteUser = strings.Join(ci.node.Tags, ",")
|
||||
@@ -98,7 +101,7 @@ func (ss *sshSession) newIncubatorCommand() *exec.Cmd {
|
||||
"ssh",
|
||||
"--uid=" + lu.Uid,
|
||||
"--gid=" + lu.Gid,
|
||||
"--groups=" + strings.Join(ss.conn.userGroupIDs, ","),
|
||||
"--groups=" + gids,
|
||||
"--local-user=" + lu.Username,
|
||||
"--remote-user=" + remoteUser,
|
||||
"--remote-ip=" + ci.src.IP().String(),
|
||||
@@ -310,15 +313,25 @@ func (ss *sshSession) launchProcess() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go resizeWindow(pty, winCh)
|
||||
ss.stdout = pty // no stderr for a pty
|
||||
|
||||
// We need to be able to close stdin and stdout separately later so make a
|
||||
// dup.
|
||||
ptyDup, err := syscall.Dup(int(pty.Fd()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go resizeWindow(ptyDup /* arbitrary fd */, winCh)
|
||||
|
||||
ss.stdin = pty
|
||||
ss.stdout = os.NewFile(uintptr(ptyDup), pty.Name())
|
||||
ss.stderr = nil // not available for pty
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resizeWindow(f *os.File, winCh <-chan ssh.Window) {
|
||||
func resizeWindow(fd int, winCh <-chan ssh.Window) {
|
||||
for win := range winCh {
|
||||
unix.IoctlSetWinsize(int(f.Fd()), syscall.TIOCSWINSZ, &unix.Winsize{
|
||||
unix.IoctlSetWinsize(fd, syscall.TIOCSWINSZ, &unix.Winsize{
|
||||
Row: uint16(win.Height),
|
||||
Col: uint16(win.Width),
|
||||
})
|
||||
|
||||
@@ -141,6 +141,14 @@ func (srv *server) OnPolicyChange() {
|
||||
srv.mu.Lock()
|
||||
defer srv.mu.Unlock()
|
||||
for c := range srv.activeConns {
|
||||
c.mu.Lock()
|
||||
ci := c.info
|
||||
c.mu.Unlock()
|
||||
if ci == nil {
|
||||
// c.info is nil when the connection hasn't been authenticated yet.
|
||||
// In that case, the connection will be terminated when it is.
|
||||
continue
|
||||
}
|
||||
go c.checkStillValid()
|
||||
}
|
||||
}
|
||||
@@ -152,14 +160,14 @@ type conn struct {
|
||||
|
||||
insecureSkipTailscaleAuth bool // used by tests.
|
||||
|
||||
connID string // ID that's shared with control
|
||||
action0 *tailcfg.SSHAction // first matching action
|
||||
srv *server
|
||||
info *sshConnInfo // set by setInfo
|
||||
connID string // ID that's shared with control
|
||||
action0 *tailcfg.SSHAction // first matching action
|
||||
srv *server
|
||||
|
||||
mu sync.Mutex // protects the following
|
||||
localUser *user.User // set by checkAuth
|
||||
userGroupIDs []string // set by checkAuth
|
||||
|
||||
mu sync.Mutex // protects the following
|
||||
info *sshConnInfo // set by setInfo
|
||||
// idH is the RFC4253 sec8 hash H. It is used to identify the connection,
|
||||
// and is shared among all sessions. It should not be shared outside
|
||||
// process. It is confusingly referred to as SessionID by the gliderlabs/ssh
|
||||
@@ -179,9 +187,13 @@ func (c *conn) logf(format string, args ...any) {
|
||||
// PublicKeyHandler implements ssh.PublicKeyHandler is called by the the
|
||||
// ssh.Server when the client presents a public key.
|
||||
func (c *conn) PublicKeyHandler(ctx ssh.Context, pubKey ssh.PublicKey) error {
|
||||
if c.info == nil {
|
||||
c.mu.Lock()
|
||||
ci := c.info
|
||||
c.mu.Unlock()
|
||||
if ci == nil {
|
||||
return gossh.ErrDenied
|
||||
}
|
||||
|
||||
if err := c.checkAuth(pubKey); err != nil {
|
||||
// TODO(maisem/bradfitz): surface the error here.
|
||||
c.logf("rejecting SSH public key %s: %v", bytes.TrimSpace(gossh.MarshalAuthorizedKey(pubKey)), err)
|
||||
@@ -217,7 +229,7 @@ func (c *conn) NoClientAuthCallback(cm gossh.ConnMetadata) (*gossh.Permissions,
|
||||
func (c *conn) checkAuth(pubKey ssh.PublicKey) error {
|
||||
a, localUser, err := c.evaluatePolicy(pubKey)
|
||||
if err != nil {
|
||||
if pubKey == nil && c.havePubKeyPolicy(c.info) {
|
||||
if pubKey == nil && c.havePubKeyPolicy() {
|
||||
return errPubKeyRequired
|
||||
}
|
||||
return fmt.Errorf("%w: %v", gossh.ErrDenied, err)
|
||||
@@ -236,6 +248,8 @@ func (c *conn) checkAuth(pubKey ssh.PublicKey) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.userGroupIDs = gids
|
||||
c.localUser = lu
|
||||
return nil
|
||||
@@ -276,7 +290,7 @@ func (srv *server) newConn() (*conn, error) {
|
||||
srv.mu.Unlock()
|
||||
c := &conn{srv: srv}
|
||||
now := srv.now()
|
||||
c.connID = fmt.Sprintf("conn-%s-%02x", now.UTC().Format("20060102T150405"), randBytes(5))
|
||||
c.connID = fmt.Sprintf("ssh-conn-%s-%02x", now.UTC().Format("20060102T150405"), randBytes(5))
|
||||
c.Server = &ssh.Server{
|
||||
Version: "Tailscale",
|
||||
Handler: c.handleSessionPostSSHAuth,
|
||||
@@ -329,7 +343,13 @@ func (c *conn) mayForwardLocalPortTo(ctx ssh.Context, destinationHost string, de
|
||||
|
||||
// havePubKeyPolicy reports whether any policy rule may provide access by means
|
||||
// of a ssh.PublicKey.
|
||||
func (c *conn) havePubKeyPolicy(ci *sshConnInfo) bool {
|
||||
func (c *conn) havePubKeyPolicy() bool {
|
||||
c.mu.Lock()
|
||||
ci := c.info
|
||||
c.mu.Unlock()
|
||||
if ci == nil {
|
||||
panic("havePubKeyPolicy called before setInfo")
|
||||
}
|
||||
// Is there any rule that looks like it'd require a public key for this
|
||||
// sshUser?
|
||||
pol, ok := c.sshPolicy()
|
||||
@@ -414,6 +434,8 @@ func (c *conn) setInfo(cm gossh.ConnMetadata) error {
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown Tailscale identity from src %v", ci.src)
|
||||
}
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
ci.node = node
|
||||
ci.uprof = &uprof
|
||||
|
||||
@@ -589,8 +611,10 @@ func (c *conn) handleSessionPostSSHAuth(s ssh.Session) {
|
||||
}
|
||||
|
||||
ss := c.newSSHSession(s)
|
||||
c.mu.Lock()
|
||||
ss.logf("handling new SSH connection from %v (%v) to ssh-user %q", c.info.uprof.LoginName, c.info.src.IP(), c.localUser.Username)
|
||||
ss.logf("access granted to %v as ssh-user %q", c.info.uprof.LoginName, c.localUser.Username)
|
||||
c.mu.Unlock()
|
||||
ss.run()
|
||||
}
|
||||
|
||||
@@ -688,7 +712,10 @@ func (c *conn) resolveTerminalActionLocked(s ssh.Session, cr *contextReader) (ac
|
||||
|
||||
func (c *conn) expandDelegateURL(actionURL string) string {
|
||||
nm := c.srv.lb.NetMap()
|
||||
c.mu.Lock()
|
||||
ci := c.info
|
||||
lu := c.localUser
|
||||
c.mu.Unlock()
|
||||
var dstNodeID string
|
||||
if nm != nil {
|
||||
dstNodeID = fmt.Sprint(int64(nm.SelfNode.ID))
|
||||
@@ -699,7 +726,7 @@ func (c *conn) expandDelegateURL(actionURL string) string {
|
||||
"$DST_NODE_IP", url.QueryEscape(ci.dst.IP().String()),
|
||||
"$DST_NODE_ID", dstNodeID,
|
||||
"$SSH_USER", url.QueryEscape(ci.sshUser),
|
||||
"$LOCAL_USER", url.QueryEscape(c.localUser.Username),
|
||||
"$LOCAL_USER", url.QueryEscape(lu.Username),
|
||||
).Replace(actionURL)
|
||||
}
|
||||
|
||||
@@ -709,10 +736,12 @@ func (c *conn) expandPublicKeyURL(pubKeyURL string) string {
|
||||
}
|
||||
var localPart string
|
||||
var loginName string
|
||||
c.mu.Lock()
|
||||
if c.info.uprof != nil {
|
||||
loginName = c.info.uprof.LoginName
|
||||
localPart, _, _ = strings.Cut(loginName, "@")
|
||||
}
|
||||
c.mu.Unlock()
|
||||
return strings.NewReplacer(
|
||||
"$LOGINNAME_EMAIL", loginName,
|
||||
"$LOGINNAME_LOCALPART", localPart,
|
||||
@@ -732,7 +761,7 @@ type sshSession struct {
|
||||
// initialized by launchProcess:
|
||||
cmd *exec.Cmd
|
||||
stdin io.WriteCloser
|
||||
stdout io.Reader
|
||||
stdout io.ReadCloser
|
||||
stderr io.Reader // nil for pty sessions
|
||||
ptyReq *ssh.Pty // non-nil for pty sessions
|
||||
|
||||
@@ -768,6 +797,8 @@ func (c *conn) isStillValid() bool {
|
||||
if !a.Accept && a.HoldAndDelegate == "" {
|
||||
return false
|
||||
}
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.localUser.Username == localUser
|
||||
}
|
||||
|
||||
@@ -843,6 +874,8 @@ func (ss *sshSession) killProcessOnContextDone() {
|
||||
ss.logf("terminating SSH session from %v: %v", ss.conn.info.src.IP(), err)
|
||||
// We don't need to Process.Wait here, sshSession.run() does
|
||||
// the waiting regardless of termination reason.
|
||||
|
||||
// TODO(maisem): should this be a SIGTERM followed by a SIGKILL?
|
||||
ss.cmd.Process.Kill()
|
||||
})
|
||||
}
|
||||
@@ -942,6 +975,8 @@ func (ss *sshSession) run() {
|
||||
return
|
||||
}
|
||||
ss.conn.startSessionLocked(ss)
|
||||
lu := ss.conn.localUser
|
||||
localUser := lu.Username
|
||||
srv.mu.Unlock()
|
||||
|
||||
defer ss.conn.endSession(ss)
|
||||
@@ -957,8 +992,6 @@ func (ss *sshSession) run() {
|
||||
}
|
||||
|
||||
logf := ss.logf
|
||||
lu := ss.conn.localUser
|
||||
localUser := lu.Username
|
||||
|
||||
if euid := os.Geteuid(); euid != 0 {
|
||||
if lu.Uid != fmt.Sprint(euid) {
|
||||
@@ -1004,20 +1037,23 @@ func (ss *sshSession) run() {
|
||||
go ss.killProcessOnContextDone()
|
||||
|
||||
go func() {
|
||||
_, err := io.Copy(rec.writer("i", ss.stdin), ss)
|
||||
if err != nil {
|
||||
// TODO: don't log in the success case.
|
||||
defer ss.stdin.Close()
|
||||
if _, err := io.Copy(rec.writer("i", ss.stdin), ss); err != nil {
|
||||
logf("stdin copy: %v", err)
|
||||
ss.ctx.CloseWithError(err)
|
||||
} else if ss.ptyReq != nil {
|
||||
const EOT = 4 // https://en.wikipedia.org/wiki/End-of-Transmission_character
|
||||
ss.stdin.Write([]byte{EOT})
|
||||
}
|
||||
ss.stdin.Close()
|
||||
}()
|
||||
go func() {
|
||||
defer ss.stdout.Close()
|
||||
_, err := io.Copy(rec.writer("o", ss), ss.stdout)
|
||||
if err != nil {
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
logf("stdout copy: %v", err)
|
||||
// If we got an error here, it's probably because the client has
|
||||
// disconnected.
|
||||
ss.ctx.CloseWithError(err)
|
||||
} else {
|
||||
ss.CloseWrite()
|
||||
}
|
||||
}()
|
||||
// stderr is nil for ptys.
|
||||
@@ -1029,6 +1065,7 @@ func (ss *sshSession) run() {
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
err = ss.cmd.Wait()
|
||||
// This will either make the SSH Termination goroutine be a no-op,
|
||||
// or itself will be a no-op because the process was killed by the
|
||||
@@ -1036,6 +1073,7 @@ func (ss *sshSession) run() {
|
||||
ss.exitOnce.Do(func() {})
|
||||
|
||||
if err == nil {
|
||||
ss.logf("Session complete")
|
||||
ss.Exit(0)
|
||||
return
|
||||
}
|
||||
@@ -1103,9 +1141,20 @@ var (
|
||||
errRuleExpired = errors.New("rule expired")
|
||||
errPrincipalMatch = errors.New("principal didn't match")
|
||||
errUserMatch = errors.New("user didn't match")
|
||||
errInvalidConn = errors.New("invalid connection state")
|
||||
)
|
||||
|
||||
func (c *conn) matchRule(r *tailcfg.SSHRule, pubKey gossh.PublicKey) (a *tailcfg.SSHAction, localUser string, err error) {
|
||||
if c == nil {
|
||||
return nil, "", errInvalidConn
|
||||
}
|
||||
c.mu.Lock()
|
||||
ci := c.info
|
||||
c.mu.Unlock()
|
||||
if ci == nil {
|
||||
c.logf("invalid connection state")
|
||||
return nil, "", errInvalidConn
|
||||
}
|
||||
if r == nil {
|
||||
return nil, "", errNilRule
|
||||
}
|
||||
@@ -1119,7 +1168,7 @@ func (c *conn) matchRule(r *tailcfg.SSHRule, pubKey gossh.PublicKey) (a *tailcfg
|
||||
// For all but Reject rules, SSHUsers is required.
|
||||
// If SSHUsers is nil or empty, mapLocalUser will return an
|
||||
// empty string anyway.
|
||||
localUser = mapLocalUser(r.SSHUsers, c.info.sshUser)
|
||||
localUser = mapLocalUser(r.SSHUsers, ci.sshUser)
|
||||
if localUser == "" {
|
||||
return nil, "", errUserMatch
|
||||
}
|
||||
@@ -1168,7 +1217,9 @@ func (c *conn) principalMatches(p *tailcfg.SSHPrincipal, pubKey gossh.PublicKey)
|
||||
// that match the Tailscale identity match (Node, NodeIP, UserLogin, Any).
|
||||
// This function does not consider PubKeys.
|
||||
func (c *conn) principalMatchesTailscaleIdentity(p *tailcfg.SSHPrincipal) bool {
|
||||
c.mu.Lock()
|
||||
ci := c.info
|
||||
c.mu.Unlock()
|
||||
if p.Any {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -47,13 +47,26 @@ func TestMatchRule(t *testing.T) {
|
||||
wantErr error
|
||||
wantUser string
|
||||
}{
|
||||
{
|
||||
name: "invalid-conn",
|
||||
rule: &tailcfg.SSHRule{
|
||||
Action: someAction,
|
||||
Principals: []*tailcfg.SSHPrincipal{{Any: true}},
|
||||
SSHUsers: map[string]string{
|
||||
"*": "ubuntu",
|
||||
},
|
||||
},
|
||||
wantErr: errInvalidConn,
|
||||
},
|
||||
{
|
||||
name: "nil-rule",
|
||||
ci: &sshConnInfo{},
|
||||
rule: nil,
|
||||
wantErr: errNilRule,
|
||||
},
|
||||
{
|
||||
name: "nil-action",
|
||||
ci: &sshConnInfo{},
|
||||
rule: &tailcfg.SSHRule{},
|
||||
wantErr: errNilAction,
|
||||
},
|
||||
@@ -180,6 +193,7 @@ func TestMatchRule(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := &conn{
|
||||
info: tt.ci,
|
||||
srv: &server{logf: t.Logf},
|
||||
}
|
||||
got, gotUser, err := c.matchRule(tt.rule, nil)
|
||||
if err != tt.wantErr {
|
||||
|
||||
@@ -67,7 +67,8 @@ type CapabilityVersion int
|
||||
// 30: 2022-03-22: client can request id tokens.
|
||||
// 31: 2022-04-15: PingRequest & PingResponse TSMP & disco support
|
||||
// 32: 2022-04-17: client knows FilterRule.CapMatch
|
||||
const CurrentCapabilityVersion CapabilityVersion = 32
|
||||
// 33: 2022-07-20: added MapResponse.PeersChangedPatch (DERPRegion + Endpoints)
|
||||
const CurrentCapabilityVersion CapabilityVersion = 33
|
||||
|
||||
type StableID string
|
||||
|
||||
@@ -496,10 +497,14 @@ type NetInfo struct {
|
||||
// It reports true even if there's no NAT involved.
|
||||
HairPinning opt.Bool
|
||||
|
||||
// WorkingIPv6 is whether IPv6 works.
|
||||
// WorkingIPv6 is whether the host has IPv6 internet connectivity.
|
||||
WorkingIPv6 opt.Bool
|
||||
|
||||
// WorkingUDP is whether UDP works.
|
||||
// OSHasIPv6 is whether the OS supports IPv6 at all, regardless of
|
||||
// whether IPv6 internet connectivity is available.
|
||||
OSHasIPv6 opt.Bool
|
||||
|
||||
// WorkingUDP is whether the host has UDP internet connectivity.
|
||||
WorkingUDP opt.Bool
|
||||
|
||||
// HavePortMap is whether we have an existing portmap open
|
||||
@@ -590,6 +595,7 @@ func (ni *NetInfo) BasicallyEqual(ni2 *NetInfo) bool {
|
||||
return ni.MappingVariesByDestIP == ni2.MappingVariesByDestIP &&
|
||||
ni.HairPinning == ni2.HairPinning &&
|
||||
ni.WorkingIPv6 == ni2.WorkingIPv6 &&
|
||||
ni.OSHasIPv6 == ni2.OSHasIPv6 &&
|
||||
ni.WorkingUDP == ni2.WorkingUDP &&
|
||||
ni.HavePortMap == ni2.HavePortMap &&
|
||||
ni.UPnP == ni2.UPnP &&
|
||||
@@ -1204,7 +1210,7 @@ type MapResponse struct {
|
||||
// PopBrowserURL, if non-empty, is a URL for the client to
|
||||
// open to complete an action. The client should dup suppress
|
||||
// identical URLs and only open it once for the same URL.
|
||||
PopBrowserURL string
|
||||
PopBrowserURL string `json:",omitempty"`
|
||||
|
||||
// Networking
|
||||
|
||||
@@ -1232,6 +1238,15 @@ type MapResponse struct {
|
||||
// PeersRemoved are the NodeIDs that are no longer in the peer list.
|
||||
PeersRemoved []NodeID `json:",omitempty"`
|
||||
|
||||
// PeersChangedPatch, if non-nil, means that node(s) have changed.
|
||||
// This is a lighter version of the older PeersChanged support that
|
||||
// only supports certain types of updates
|
||||
//
|
||||
// These are applied after Peers* above, but in practice the
|
||||
// control server should only send these on their own, without
|
||||
// the Peers* fields also set.
|
||||
PeersChangedPatch []*PeerChange `json:",omitempty"`
|
||||
|
||||
// PeerSeenChange contains information on how to update peers' LastSeen
|
||||
// times. If the value is false, the peer is gone. If the value is true,
|
||||
// the LastSeen time is now. Absent means unchanged.
|
||||
@@ -1240,14 +1255,6 @@ type MapResponse struct {
|
||||
// OnlineChange changes the value of a Peer Node.Online value.
|
||||
OnlineChange map[NodeID]bool `json:",omitempty"`
|
||||
|
||||
// DNS is the same as DNSConfig.Nameservers.
|
||||
// Only populated if MapRequest.Version < 9.
|
||||
DNS []netaddr.IP `json:",omitempty"`
|
||||
|
||||
// SearchPaths is the old way to specify DNS search domains.
|
||||
// Only populated if MapRequest.Version < 9.
|
||||
SearchPaths []string `json:",omitempty"`
|
||||
|
||||
// DNSConfig contains the DNS settings for the client to use.
|
||||
// A nil value means no change from an earlier non-nil value.
|
||||
DNSConfig *DNSConfig `json:",omitempty"`
|
||||
@@ -1727,3 +1734,27 @@ type TokenResponse struct {
|
||||
// `uid` | user ID, if not tagged
|
||||
IDToken string `json:"id_token"`
|
||||
}
|
||||
|
||||
// PeerChange is an update to a node.
|
||||
type PeerChange struct {
|
||||
// NodeID is the node ID being mutated. If the NodeID is not
|
||||
// known in the current netmap, this update should be
|
||||
// ignored. (But the server will try not to send such useless
|
||||
// updates.)
|
||||
NodeID NodeID
|
||||
|
||||
// DERPRegion, if non-zero, means that NodeID's home DERP
|
||||
// region ID is now this number.
|
||||
DERPRegion int `json:",omitempty"`
|
||||
|
||||
// Endpoints, if non-empty, means that NodeID's UDP Endpoints
|
||||
// have changed to these.
|
||||
Endpoints []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// DerpMagicIP is a fake WireGuard endpoint IP address that means to
|
||||
// use DERP. When used (in the Node.DERP field), the port number of
|
||||
// the WireGuard endpoint is the DERP region ID number to use.
|
||||
//
|
||||
// Mnemonic: 3.3.40 are numbers above the keys D, E, R, P.
|
||||
const DerpMagicIP = "127.3.3.40"
|
||||
|
||||
@@ -154,6 +154,7 @@ var _NetInfoCloneNeedsRegeneration = NetInfo(struct {
|
||||
MappingVariesByDestIP opt.Bool
|
||||
HairPinning opt.Bool
|
||||
WorkingIPv6 opt.Bool
|
||||
OSHasIPv6 opt.Bool
|
||||
WorkingUDP opt.Bool
|
||||
HavePortMap bool
|
||||
UPnP opt.Bool
|
||||
|
||||
@@ -500,6 +500,7 @@ func TestNetInfoFields(t *testing.T) {
|
||||
"MappingVariesByDestIP",
|
||||
"HairPinning",
|
||||
"WorkingIPv6",
|
||||
"OSHasIPv6",
|
||||
"WorkingUDP",
|
||||
"HavePortMap",
|
||||
"UPnP",
|
||||
|
||||
@@ -338,6 +338,7 @@ func (v *NetInfoView) UnmarshalJSON(b []byte) error {
|
||||
func (v NetInfoView) MappingVariesByDestIP() opt.Bool { return v.ж.MappingVariesByDestIP }
|
||||
func (v NetInfoView) HairPinning() opt.Bool { return v.ж.HairPinning }
|
||||
func (v NetInfoView) WorkingIPv6() opt.Bool { return v.ж.WorkingIPv6 }
|
||||
func (v NetInfoView) OSHasIPv6() opt.Bool { return v.ж.OSHasIPv6 }
|
||||
func (v NetInfoView) WorkingUDP() opt.Bool { return v.ж.WorkingUDP }
|
||||
func (v NetInfoView) HavePortMap() bool { return v.ж.HavePortMap }
|
||||
func (v NetInfoView) UPnP() opt.Bool { return v.ж.UPnP }
|
||||
@@ -354,6 +355,7 @@ var _NetInfoViewNeedsRegeneration = NetInfo(struct {
|
||||
MappingVariesByDestIP opt.Bool
|
||||
HairPinning opt.Bool
|
||||
WorkingIPv6 opt.Bool
|
||||
OSHasIPv6 opt.Bool
|
||||
WorkingUDP opt.Bool
|
||||
HavePortMap bool
|
||||
UPnP opt.Bool
|
||||
|
||||
@@ -146,7 +146,7 @@ func (a *AUM) StaticValidate() error {
|
||||
}
|
||||
|
||||
if a.State != nil {
|
||||
if len(a.State.LastAUMHash) != 0 {
|
||||
if a.State.LastAUMHash != nil {
|
||||
return errors.New("checkpoint state cannot specify a parent AUM")
|
||||
}
|
||||
if len(a.State.DisablementSecrets) == 0 {
|
||||
|
||||
365
tka/chaintest_test.go
Normal file
365
tka/chaintest_test.go
Normal file
@@ -0,0 +1,365 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ed25519"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"text/scanner"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
)
|
||||
|
||||
// chaintest_test.go implements test helpers for concisely describing
|
||||
// chains of possibly signed AUMs, to assist in making tests shorter and
|
||||
// easier to read.
|
||||
|
||||
// parsed representation of a named AUM in a test chain.
|
||||
type testchainNode struct {
|
||||
Name string
|
||||
Parent string
|
||||
Uses []scanner.Position
|
||||
|
||||
HashSeed int
|
||||
Template string
|
||||
SignedWith string
|
||||
}
|
||||
|
||||
// testChain represents a constructed web of AUMs for testing purposes.
|
||||
type testChain struct {
|
||||
Nodes map[string]*testchainNode
|
||||
AUMs map[string]AUM
|
||||
AUMHashes map[string]AUMHash
|
||||
|
||||
// Configured by options to NewTestchain()
|
||||
Template map[string]AUM
|
||||
Key map[string]*Key
|
||||
KeyPrivs map[string]ed25519.PrivateKey
|
||||
SignAllKeys []string
|
||||
}
|
||||
|
||||
// newTestchain constructs a web of AUMs based on the provided input and
|
||||
// options.
|
||||
//
|
||||
// Input is expected to be a graph & tweaks, looking like this:
|
||||
//
|
||||
// G1 -> A -> B
|
||||
// | -> C
|
||||
//
|
||||
// which defines AUMs G1, A, B, and C; with G1 having no parent, A having
|
||||
// G1 as a parent, and both B & C having A as a parent.
|
||||
//
|
||||
// Tweaks are specified like this:
|
||||
//
|
||||
// <AUM>.<tweak> = <value>
|
||||
//
|
||||
// for example: G1.hashSeed = 2
|
||||
//
|
||||
// There are 3 available tweaks:
|
||||
// - hashSeed: Set to an integer to tweak the AUM hash of that AUM.
|
||||
// - template: Set to the name of a template provided via optTemplate().
|
||||
// The template is copied and use as the content for that AUM.
|
||||
// - signedWith: Set to the name of a key provided via optKey(). This
|
||||
// key is used to sign that AUM.
|
||||
func newTestchain(t *testing.T, input string, options ...testchainOpt) *testChain {
|
||||
t.Helper()
|
||||
|
||||
var (
|
||||
s scanner.Scanner
|
||||
out = testChain{
|
||||
Nodes: map[string]*testchainNode{},
|
||||
Template: map[string]AUM{},
|
||||
Key: map[string]*Key{},
|
||||
KeyPrivs: map[string]ed25519.PrivateKey{},
|
||||
}
|
||||
)
|
||||
|
||||
// Process any options
|
||||
for _, o := range options {
|
||||
if o.Template != nil {
|
||||
out.Template[o.Name] = *o.Template
|
||||
}
|
||||
if o.Key != nil {
|
||||
out.Key[o.Name] = o.Key
|
||||
out.KeyPrivs[o.Name] = o.Private
|
||||
}
|
||||
if o.SignAllWith {
|
||||
out.SignAllKeys = append(out.SignAllKeys, o.Name)
|
||||
}
|
||||
}
|
||||
|
||||
s.Init(strings.NewReader(input))
|
||||
s.Mode = scanner.ScanIdents | scanner.SkipComments | scanner.ScanComments | scanner.ScanChars | scanner.ScanInts
|
||||
s.Whitespace ^= 1 << '\t' // clear tabs
|
||||
var (
|
||||
lastIdent string
|
||||
lastWasChain bool // if the last token was '->'
|
||||
)
|
||||
for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
|
||||
switch tok {
|
||||
case '\t':
|
||||
t.Fatalf("tabs disallowed, use spaces (seen at %v)", s.Pos())
|
||||
|
||||
case '.': // tweaks, like <ident>.hashSeed = <val>
|
||||
s.Scan()
|
||||
tweak := s.TokenText()
|
||||
if tok := s.Scan(); tok == '=' {
|
||||
s.Scan()
|
||||
switch tweak {
|
||||
case "hashSeed":
|
||||
out.Nodes[lastIdent].HashSeed, _ = strconv.Atoi(s.TokenText())
|
||||
case "template":
|
||||
out.Nodes[lastIdent].Template = s.TokenText()
|
||||
case "signedWith":
|
||||
out.Nodes[lastIdent].SignedWith = s.TokenText()
|
||||
}
|
||||
}
|
||||
|
||||
case scanner.Ident:
|
||||
out.recordPos(s.TokenText(), s.Pos())
|
||||
// If the last token was '->', that means
|
||||
// that the next identifier has a child relationship
|
||||
// with the identifier preceeding '->'.
|
||||
if lastWasChain {
|
||||
out.recordParent(t, s.TokenText(), lastIdent)
|
||||
}
|
||||
lastIdent = s.TokenText()
|
||||
|
||||
case '-': // handle '->'
|
||||
switch s.Peek() {
|
||||
case '>':
|
||||
s.Scan()
|
||||
lastWasChain = true
|
||||
continue
|
||||
}
|
||||
|
||||
case '|': // handle '|'
|
||||
line, col := s.Pos().Line, s.Pos().Column
|
||||
nodeLoop:
|
||||
for _, n := range out.Nodes {
|
||||
for _, p := range n.Uses {
|
||||
// Find the identifier used right here on the line above.
|
||||
if p.Line == line-1 && col <= p.Column && col > p.Column-len(n.Name) {
|
||||
lastIdent = n.Name
|
||||
out.recordPos(n.Name, s.Pos())
|
||||
break nodeLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
lastWasChain = false
|
||||
// t.Logf("tok = %v, %q", tok, s.TokenText())
|
||||
}
|
||||
|
||||
out.buildChain()
|
||||
return &out
|
||||
}
|
||||
|
||||
// called from the parser to record the location of an
|
||||
// identifier (a named AUM).
|
||||
func (c *testChain) recordPos(ident string, pos scanner.Position) {
|
||||
n := c.Nodes[ident]
|
||||
if n == nil {
|
||||
n = &testchainNode{Name: ident}
|
||||
}
|
||||
|
||||
n.Uses = append(n.Uses, pos)
|
||||
c.Nodes[ident] = n
|
||||
}
|
||||
|
||||
// called from the parser to record a parent relationship between
|
||||
// two AUMs.
|
||||
func (c *testChain) recordParent(t *testing.T, child, parent string) {
|
||||
if p := c.Nodes[child].Parent; p != "" && p != parent {
|
||||
t.Fatalf("differing parent specified for %s: %q != %q", child, p, parent)
|
||||
}
|
||||
c.Nodes[child].Parent = parent
|
||||
}
|
||||
|
||||
// called after parsing to build the web of AUM structures.
|
||||
// This method populates c.AUMs and c.AUMHashes.
|
||||
func (c *testChain) buildChain() {
|
||||
pending := make(map[string]*testchainNode, len(c.Nodes))
|
||||
for k, v := range c.Nodes {
|
||||
pending[k] = v
|
||||
}
|
||||
|
||||
// AUMs with a parent need to know their hash, so we
|
||||
// only compute AUMs who's parents have been computed
|
||||
// each iteration. Since at least the genesis AUM
|
||||
// had no parent, theres always a path to completion
|
||||
// in O(n+1) where n is the number of AUMs.
|
||||
c.AUMs = make(map[string]AUM, len(c.Nodes))
|
||||
c.AUMHashes = make(map[string]AUMHash, len(c.Nodes))
|
||||
for i := 0; i < len(c.Nodes)+1; i++ {
|
||||
if len(pending) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
next := make([]*testchainNode, 0, 10)
|
||||
for _, v := range pending {
|
||||
if _, parentPending := pending[v.Parent]; !parentPending {
|
||||
next = append(next, v)
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range next {
|
||||
aum := c.makeAUM(v)
|
||||
h := aum.Hash()
|
||||
|
||||
c.AUMHashes[v.Name] = h
|
||||
c.AUMs[v.Name] = aum
|
||||
delete(pending, v.Name)
|
||||
}
|
||||
}
|
||||
panic("unexpected: incomplete despite len(Nodes)+1 iterations")
|
||||
}
|
||||
|
||||
func (c *testChain) makeAUM(v *testchainNode) AUM {
|
||||
// By default, the AUM used is just a no-op AUM
|
||||
// with a parent hash set (if any).
|
||||
//
|
||||
// If <AUM>.template is set to the same name as in
|
||||
// a provided optTemplate(), the AUM is built
|
||||
// from a copy of that instead.
|
||||
//
|
||||
// If <AUM>.hashSeed = <int> is set, the KeyID is
|
||||
// tweaked to effect tweaking the hash. This is useful
|
||||
// if you want one AUM to have a lower hash than another.
|
||||
aum := AUM{MessageKind: AUMNoOp}
|
||||
if template := v.Template; template != "" {
|
||||
aum = c.Template[template]
|
||||
}
|
||||
if v.Parent != "" {
|
||||
parentHash := c.AUMHashes[v.Parent]
|
||||
aum.PrevAUMHash = parentHash[:]
|
||||
}
|
||||
if seed := v.HashSeed; seed != 0 {
|
||||
aum.KeyID = []byte{byte(seed)}
|
||||
}
|
||||
if err := aum.StaticValidate(); err != nil {
|
||||
// Usually caused by a test writer specifying a template
|
||||
// AUM which is ultimately invalid.
|
||||
panic(fmt.Sprintf("aum %+v failed static validation: %v", aum, err))
|
||||
}
|
||||
|
||||
sigHash := aum.SigHash()
|
||||
for _, key := range c.SignAllKeys {
|
||||
aum.Signatures = append(aum.Signatures, Signature{
|
||||
KeyID: c.Key[key].ID(),
|
||||
Signature: ed25519.Sign(c.KeyPrivs[key], sigHash[:]),
|
||||
})
|
||||
}
|
||||
|
||||
// If the aum was specified as being signed by some key, then
|
||||
// sign it using that key.
|
||||
if key := v.SignedWith; key != "" {
|
||||
aum.Signatures = append(aum.Signatures, Signature{
|
||||
KeyID: c.Key[key].ID(),
|
||||
Signature: ed25519.Sign(c.KeyPrivs[key], sigHash[:]),
|
||||
})
|
||||
}
|
||||
|
||||
return aum
|
||||
}
|
||||
|
||||
// Chonk returns a tailchonk containing all AUMs.
|
||||
func (c *testChain) Chonk() Chonk {
|
||||
var out Mem
|
||||
for _, update := range c.AUMs {
|
||||
if err := out.CommitVerifiedAUMs([]AUM{update}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
return &out
|
||||
}
|
||||
|
||||
// ChonkWith returns a tailchonk containing the named AUMs.
|
||||
func (c *testChain) ChonkWith(names ...string) Chonk {
|
||||
var out Mem
|
||||
for _, name := range names {
|
||||
update := c.AUMs[name]
|
||||
if err := out.CommitVerifiedAUMs([]AUM{update}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
return &out
|
||||
}
|
||||
|
||||
type testchainOpt struct {
|
||||
Name string
|
||||
Template *AUM
|
||||
Key *Key
|
||||
Private ed25519.PrivateKey
|
||||
SignAllWith bool
|
||||
}
|
||||
|
||||
func optTemplate(name string, template AUM) testchainOpt {
|
||||
return testchainOpt{
|
||||
Name: name,
|
||||
Template: &template,
|
||||
}
|
||||
}
|
||||
|
||||
func optKey(name string, key Key, priv ed25519.PrivateKey) testchainOpt {
|
||||
return testchainOpt{
|
||||
Name: name,
|
||||
Key: &key,
|
||||
Private: priv,
|
||||
}
|
||||
}
|
||||
|
||||
func optSignAllUsing(keyName string) testchainOpt {
|
||||
return testchainOpt{
|
||||
Name: keyName,
|
||||
SignAllWith: true,
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTestchain(t *testing.T) {
|
||||
c := newTestchain(t, `
|
||||
genesis -> B -> C
|
||||
| -> D
|
||||
| -> E -> F
|
||||
|
||||
E.hashSeed = 12 // tweak E to have the lowest hash so its chosen
|
||||
F.template = test
|
||||
`, optTemplate("test", AUM{MessageKind: AUMNoOp, KeyID: []byte{10}}))
|
||||
|
||||
want := map[string]*testchainNode{
|
||||
"genesis": &testchainNode{Name: "genesis", Uses: []scanner.Position{{Line: 2, Column: 16}}},
|
||||
"B": &testchainNode{
|
||||
Name: "B",
|
||||
Parent: "genesis",
|
||||
Uses: []scanner.Position{{Line: 2, Column: 21}, {Line: 3, Column: 21}, {Line: 4, Column: 21}},
|
||||
},
|
||||
"C": &testchainNode{Name: "C", Parent: "B", Uses: []scanner.Position{{Line: 2, Column: 26}}},
|
||||
"D": &testchainNode{Name: "D", Parent: "B", Uses: []scanner.Position{{Line: 3, Column: 26}}},
|
||||
"E": &testchainNode{Name: "E", Parent: "B", HashSeed: 12, Uses: []scanner.Position{{Line: 4, Column: 26}, {Line: 6, Column: 10}}},
|
||||
"F": &testchainNode{Name: "F", Parent: "E", Template: "test", Uses: []scanner.Position{{Line: 4, Column: 31}, {Line: 7, Column: 10}}},
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(want, c.Nodes, cmpopts.IgnoreFields(scanner.Position{}, "Offset")); diff != "" {
|
||||
t.Errorf("decoded state differs (-want, +got):\n%s", diff)
|
||||
}
|
||||
if !bytes.Equal(c.AUMs["F"].KeyID, []byte{10}) {
|
||||
t.Errorf("AUM 'F' missing KeyID from template: %v", c.AUMs["F"])
|
||||
}
|
||||
|
||||
// chonk := c.Chonk()
|
||||
// authority, err := Open(chonk)
|
||||
// if err != nil {
|
||||
// t.Errorf("failed to initialize from chonk: %v", err)
|
||||
// }
|
||||
|
||||
// if authority.Head() != c.AUMHashes["F"] {
|
||||
// t.Errorf("head = %X, want %X", authority.Head(), c.AUMHashes["F"])
|
||||
// }
|
||||
}
|
||||
251
tka/sync.go
Normal file
251
tka/sync.go
Normal file
@@ -0,0 +1,251 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
|
||||
// Max iterations searching for any intersection.
|
||||
maxSyncIter = 2000
|
||||
// Max iterations searching for a head intersection.
|
||||
maxSyncHeadIntersectionIter = 400
|
||||
)
|
||||
|
||||
// ErrNoIntersection is returned when a shared AUM could
|
||||
// not be determined when evaluating a remote sync offer.
|
||||
var ErrNoIntersection = errors.New("no intersection")
|
||||
|
||||
// SyncOffer conveys information about the current head & ancestor AUMs,
|
||||
// for the purpose of synchronization with some remote end.
|
||||
//
|
||||
// Ancestors should contain a subset of the ancestors of the chain.
|
||||
// The last entry in that slice is the oldest-known AUM in the chain.
|
||||
type SyncOffer struct {
|
||||
Head AUMHash
|
||||
Ancestors []AUMHash
|
||||
}
|
||||
|
||||
const (
|
||||
// The starting number of AUMs to skip when listing
|
||||
// ancestors in a SyncOffer.
|
||||
ancestorsSkipStart = 4
|
||||
|
||||
// How many bits to advance the skip count when listing
|
||||
// ancestors in a SyncOffer.
|
||||
//
|
||||
// 2 bits, so (4<<2), so after skipping 4 it skips 16.
|
||||
ancestorsSkipShift = 2
|
||||
)
|
||||
|
||||
func (a *Authority) syncOffer() (SyncOffer, error) {
|
||||
oldest := a.oldestAncestor.Hash()
|
||||
|
||||
out := SyncOffer{
|
||||
Head: a.Head(),
|
||||
Ancestors: make([]AUMHash, 0, 6), // 6 chosen arbitrarily.
|
||||
}
|
||||
|
||||
// We send some subset of our ancestors to help the remote
|
||||
// find a more-recent 'head intersection'.
|
||||
// The number of AUMs between each ancestor entry gets
|
||||
// exponentially larger.
|
||||
var (
|
||||
skipAmount uint64 = ancestorsSkipStart
|
||||
curs AUMHash = a.Head()
|
||||
)
|
||||
for i := uint64(0); i < maxSyncHeadIntersectionIter; i++ {
|
||||
if i > 0 && (i%skipAmount) == 0 {
|
||||
out.Ancestors = append(out.Ancestors, curs)
|
||||
skipAmount = skipAmount << ancestorsSkipShift
|
||||
}
|
||||
|
||||
parent, err := a.storage.AUM(curs)
|
||||
if err != nil {
|
||||
if err != os.ErrNotExist {
|
||||
return SyncOffer{}, err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// We add the oldest later on, so don't duplicate.
|
||||
if parent.Hash() == oldest {
|
||||
break
|
||||
}
|
||||
copy(curs[:], parent.PrevAUMHash)
|
||||
}
|
||||
|
||||
out.Ancestors = append(out.Ancestors, oldest)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// SyncOffer returns an abbreviated description of the current AUM
|
||||
// chain, which can be used to synchronize with another (untrusted)
|
||||
// Authority instance.
|
||||
//
|
||||
// The returned SyncOffer structure should be transmitted to the remote
|
||||
// Authority, which should call MissingAUMs() using it to determine
|
||||
// AUMs which need to be transmitted. This list of AUMs from the remote
|
||||
// can then be applied locally with Inform().
|
||||
//
|
||||
// This SyncOffer + AUM exchange should be performed by both ends,
|
||||
// because its possible that either end has AUMs that the other needs
|
||||
// to find out about.
|
||||
func (a *Authority) SyncOffer() (SyncOffer, error) {
|
||||
return a.syncOffer()
|
||||
}
|
||||
|
||||
// intersection describes how to synchronize AUMs with a remote
|
||||
// authority.
|
||||
type intersection struct {
|
||||
// if true, no exchange of AUMs is needed.
|
||||
upToDate bool
|
||||
|
||||
// headIntersection is the latest common AUM on the remote. In other
|
||||
// words, we need to send all AUMs since this one.
|
||||
headIntersection *AUMHash
|
||||
|
||||
// tailIntersection is the oldest common AUM on the remote. In other
|
||||
// words, we diverge with the remote after this AUM, so we both need
|
||||
// to transmit our AUM chain starting here.
|
||||
tailIntersection *AUMHash
|
||||
}
|
||||
|
||||
// computeSyncIntersection determines the common AUMs between a local and
|
||||
// remote SyncOffer. This intersection can be used to synchronize both
|
||||
// sides.
|
||||
func computeSyncIntersection(authority *Authority, localOffer, remoteOffer SyncOffer) (*intersection, error) {
|
||||
// Simple case: up to date.
|
||||
if remoteOffer.Head == localOffer.Head {
|
||||
return &intersection{upToDate: true, headIntersection: &localOffer.Head}, nil
|
||||
}
|
||||
|
||||
// Case: 'head intersection'
|
||||
// If we have the remote's head, its more likely than not that
|
||||
// we have updates that build on that head. To confirm this,
|
||||
// we iterate backwards through our chain to see if the given
|
||||
// head is an ancestor of our current chain.
|
||||
//
|
||||
// In other words:
|
||||
// <Us> A -> B -> C
|
||||
// <Them> A -> B
|
||||
// ∴ their head intersects with our chain, we need to send C
|
||||
var hasRemoteHead bool
|
||||
_, err := authority.storage.AUM(remoteOffer.Head)
|
||||
if err != nil {
|
||||
if err != os.ErrNotExist {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
hasRemoteHead = true
|
||||
}
|
||||
|
||||
if hasRemoteHead {
|
||||
curs := localOffer.Head
|
||||
for i := 0; i < maxSyncHeadIntersectionIter; i++ {
|
||||
parent, err := authority.storage.AUM(curs)
|
||||
if err != nil {
|
||||
if err != os.ErrNotExist {
|
||||
return nil, err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if parent.Hash() == remoteOffer.Head {
|
||||
h := parent.Hash()
|
||||
return &intersection{headIntersection: &h}, nil
|
||||
}
|
||||
|
||||
copy(curs[:], parent.PrevAUMHash)
|
||||
}
|
||||
}
|
||||
|
||||
// Case: 'tail intersection'
|
||||
// So we don't have a clue what the remote's head is, but
|
||||
// if one of the ancestors they gave us is part of our chain,
|
||||
// then theres an intersection, which is a starting point for
|
||||
// the remote to send us AUMs from.
|
||||
//
|
||||
// We iterate the list of ancestors in order because the remote
|
||||
// ordered them such that the newer ones are earlier, so with
|
||||
// a bit of luck we can use an earlier one and hence do less work /
|
||||
// transmit fewer AUMs.
|
||||
for _, a := range remoteOffer.Ancestors {
|
||||
state, err := computeStateAt(authority.storage, maxSyncIter, a)
|
||||
if err != nil {
|
||||
if err != os.ErrNotExist {
|
||||
return nil, fmt.Errorf("computeStateAt: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
end, _, err := fastForward(authority.storage, maxSyncIter, state, func(curs AUM, _ State) bool {
|
||||
return curs.Hash() == localOffer.Head
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// fastForward can terminate before the done condition if there are
|
||||
// no more children left, so we check again before considering this
|
||||
// an intersection.
|
||||
if end.Hash() == localOffer.Head {
|
||||
return &intersection{tailIntersection: &a}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, ErrNoIntersection
|
||||
}
|
||||
|
||||
// MissingAUMs returns AUMs a remote may be missing based on the
|
||||
// remotes' SyncOffer.
|
||||
func (a *Authority) MissingAUMs(remoteOffer SyncOffer) ([]AUM, error) {
|
||||
localOffer, err := a.syncOffer()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("local syncOffer: %v", err)
|
||||
}
|
||||
intersection, err := computeSyncIntersection(a, localOffer, remoteOffer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("intersection: %v", err)
|
||||
}
|
||||
if intersection.upToDate {
|
||||
return nil, nil
|
||||
}
|
||||
out := make([]AUM, 0, 12) // 12 chosen arbitrarily.
|
||||
|
||||
if intersection.headIntersection != nil {
|
||||
state, err := computeStateAt(a.storage, maxSyncIter, *intersection.headIntersection)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, _, err = fastForward(a.storage, maxSyncIter, state, func(curs AUM, _ State) bool {
|
||||
if curs.Hash() != *intersection.headIntersection {
|
||||
out = append(out, curs)
|
||||
}
|
||||
return false
|
||||
})
|
||||
return out, err
|
||||
}
|
||||
|
||||
if intersection.tailIntersection != nil {
|
||||
state, err := computeStateAt(a.storage, maxSyncIter, *intersection.tailIntersection)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, _, err = fastForward(a.storage, maxSyncIter, state, func(curs AUM, _ State) bool {
|
||||
if curs.Hash() != *intersection.tailIntersection {
|
||||
out = append(out, curs)
|
||||
}
|
||||
return false
|
||||
})
|
||||
return out, err
|
||||
}
|
||||
|
||||
panic("unreachable")
|
||||
}
|
||||
373
tka/sync_test.go
Normal file
373
tka/sync_test.go
Normal file
@@ -0,0 +1,373 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
func TestSyncOffer(t *testing.T) {
|
||||
c := newTestchain(t, `
|
||||
A1 -> A2 -> A3 -> A4 -> A5 -> A6 -> A7 -> A8 -> A9 -> A10
|
||||
A10 -> A11 -> A12 -> A13 -> A14 -> A15 -> A16 -> A17 -> A18
|
||||
A18 -> A19 -> A20 -> A21 -> A22 -> A23 -> A24 -> A25
|
||||
`)
|
||||
a, err := Open(c.Chonk())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got, err := a.SyncOffer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// A SyncOffer includes a selection of AUMs going backwards in the tree,
|
||||
// progressively skipping more and more each iteration.
|
||||
want := SyncOffer{
|
||||
Head: c.AUMHashes["A25"],
|
||||
Ancestors: []AUMHash{
|
||||
c.AUMHashes["A"+strconv.Itoa(25-ancestorsSkipStart)],
|
||||
c.AUMHashes["A"+strconv.Itoa(25-ancestorsSkipStart<<ancestorsSkipShift)],
|
||||
c.AUMHashes["A1"],
|
||||
},
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(want, got); diff != "" {
|
||||
t.Errorf("SyncOffer diff (-want, +got):\n%s", diff)
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeSyncIntersection_FastForward(t *testing.T) {
|
||||
// Node 1 has: A1 -> A2
|
||||
// Node 2 has: A1 -> A2 -> A3 -> A4
|
||||
c := newTestchain(t, `
|
||||
A1 -> A2 -> A3 -> A4
|
||||
`)
|
||||
a1H, a2H := c.AUMHashes["A1"], c.AUMHashes["A2"]
|
||||
|
||||
chonk1 := c.ChonkWith("A1", "A2")
|
||||
n1, err := Open(chonk1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
offer1, err := n1.SyncOffer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
chonk2 := c.Chonk() // All AUMs
|
||||
n2, err := Open(chonk2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
offer2, err := n2.SyncOffer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Node 1 only knows about the first two nodes, so the head of n2 is
|
||||
// alien to it.
|
||||
t.Run("n1", func(t *testing.T) {
|
||||
got, err := computeSyncIntersection(n1, offer1, offer2)
|
||||
if err != nil {
|
||||
t.Fatalf("computeSyncIntersection() failed: %v", err)
|
||||
}
|
||||
want := &intersection{
|
||||
tailIntersection: &a1H,
|
||||
}
|
||||
if diff := cmp.Diff(want, got, cmp.AllowUnexported(intersection{})); diff != "" {
|
||||
t.Errorf("intersection diff (-want, +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
// Node 2 knows about the full chain, so it can see that the head of n1
|
||||
// intersects with a subset of its chain (a Head Intersection).
|
||||
t.Run("n2", func(t *testing.T) {
|
||||
got, err := computeSyncIntersection(n2, offer2, offer1)
|
||||
if err != nil {
|
||||
t.Fatalf("computeSyncIntersection() failed: %v", err)
|
||||
}
|
||||
want := &intersection{
|
||||
headIntersection: &a2H,
|
||||
}
|
||||
if diff := cmp.Diff(want, got, cmp.AllowUnexported(intersection{})); diff != "" {
|
||||
t.Errorf("intersection diff (-want, +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeSyncIntersection_ForkSmallDiff(t *testing.T) {
|
||||
// The number of nodes in the chain is longer than ancestorSkipStart,
|
||||
// so that during sync both nodes are able to find a common ancestor
|
||||
// which was later than A1.
|
||||
|
||||
c := newTestchain(t, `
|
||||
A1 -> A2 -> A3 -> A4 -> A5 -> A6 -> A7 -> A8 -> A9 -> A10
|
||||
| -> F1
|
||||
// Make F1 different to A9.
|
||||
// hashSeed is chosen such that the hash is higher than A9.
|
||||
F1.hashSeed = 7
|
||||
`)
|
||||
// Node 1 has: A1 -> A2 -> A3 -> A4 -> A5 -> A6 -> A7 -> A8 -> F1
|
||||
// Node 2 has: A1 -> A2 -> A3 -> A4 -> A5 -> A6 -> A7 -> A8 -> A9 -> A10
|
||||
f1H, a9H := c.AUMHashes["F1"], c.AUMHashes["A9"]
|
||||
|
||||
if bytes.Compare(f1H[:], a9H[:]) < 0 {
|
||||
t.Fatal("failed assert: h(a9) > h(f1H)\nTweak hashSeed till this passes")
|
||||
}
|
||||
|
||||
n1, err := Open(c.ChonkWith("A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "F1"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
offer1, err := n1.SyncOffer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if diff := cmp.Diff(SyncOffer{
|
||||
Head: c.AUMHashes["F1"],
|
||||
Ancestors: []AUMHash{
|
||||
c.AUMHashes["A"+strconv.Itoa(9-ancestorsSkipStart)],
|
||||
c.AUMHashes["A1"],
|
||||
},
|
||||
}, offer1); diff != "" {
|
||||
t.Errorf("offer1 diff (-want, +got):\n%s", diff)
|
||||
}
|
||||
|
||||
n2, err := Open(c.ChonkWith("A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "A9", "A10"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
offer2, err := n2.SyncOffer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if diff := cmp.Diff(SyncOffer{
|
||||
Head: c.AUMHashes["A10"],
|
||||
Ancestors: []AUMHash{
|
||||
c.AUMHashes["A"+strconv.Itoa(10-ancestorsSkipStart)],
|
||||
c.AUMHashes["A1"],
|
||||
},
|
||||
}, offer2); diff != "" {
|
||||
t.Errorf("offer2 diff (-want, +got):\n%s", diff)
|
||||
}
|
||||
|
||||
// Node 1 only knows about the first eight nodes, so the head of n2 is
|
||||
// alien to it.
|
||||
t.Run("n1", func(t *testing.T) {
|
||||
// n2 has 10 nodes, so the first common ancestor should be 10-ancestorsSkipStart
|
||||
wantIntersection := c.AUMHashes["A"+strconv.Itoa(10-ancestorsSkipStart)]
|
||||
|
||||
got, err := computeSyncIntersection(n1, offer1, offer2)
|
||||
if err != nil {
|
||||
t.Fatalf("computeSyncIntersection() failed: %v", err)
|
||||
}
|
||||
want := &intersection{
|
||||
tailIntersection: &wantIntersection,
|
||||
}
|
||||
if diff := cmp.Diff(want, got, cmp.AllowUnexported(intersection{})); diff != "" {
|
||||
t.Errorf("intersection diff (-want, +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
// Node 2 knows about the full chain but doesn't recognize the head.
|
||||
t.Run("n2", func(t *testing.T) {
|
||||
// n1 has 9 nodes, so the first common ancestor should be 9-ancestorsSkipStart
|
||||
wantIntersection := c.AUMHashes["A"+strconv.Itoa(9-ancestorsSkipStart)]
|
||||
|
||||
got, err := computeSyncIntersection(n2, offer2, offer1)
|
||||
if err != nil {
|
||||
t.Fatalf("computeSyncIntersection() failed: %v", err)
|
||||
}
|
||||
want := &intersection{
|
||||
tailIntersection: &wantIntersection,
|
||||
}
|
||||
if diff := cmp.Diff(want, got, cmp.AllowUnexported(intersection{})); diff != "" {
|
||||
t.Errorf("intersection diff (-want, +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestMissingAUMs_FastForward(t *testing.T) {
|
||||
// Node 1 has: A1 -> A2
|
||||
// Node 2 has: A1 -> A2 -> A3 -> A4
|
||||
c := newTestchain(t, `
|
||||
A1 -> A2 -> A3 -> A4
|
||||
A1.hashSeed = 1
|
||||
A2.hashSeed = 2
|
||||
A3.hashSeed = 3
|
||||
A4.hashSeed = 4
|
||||
`)
|
||||
|
||||
chonk1 := c.ChonkWith("A1", "A2")
|
||||
n1, err := Open(chonk1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
offer1, err := n1.SyncOffer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
chonk2 := c.Chonk() // All AUMs
|
||||
n2, err := Open(chonk2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
offer2, err := n2.SyncOffer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Node 1 only knows about the first two nodes, so the head of n2 is
|
||||
// alien to it. As such, it should send history from the newest ancestor,
|
||||
// A1 (if the chain was longer there would be one in the middle).
|
||||
t.Run("n1", func(t *testing.T) {
|
||||
got, err := n1.MissingAUMs(offer2)
|
||||
if err != nil {
|
||||
t.Fatalf("MissingAUMs() failed: %v", err)
|
||||
}
|
||||
|
||||
// Both sides have A1, so the only AUM that n2 might not have is
|
||||
// A2.
|
||||
want := []AUM{c.AUMs["A2"]}
|
||||
if diff := cmp.Diff(want, got); diff != "" {
|
||||
t.Errorf("MissingAUMs diff (-want, +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
// Node 2 knows about the full chain, so it can see that the head of n1
|
||||
// intersects with a subset of its chain (a Head Intersection).
|
||||
t.Run("n2", func(t *testing.T) {
|
||||
got, err := n2.MissingAUMs(offer1)
|
||||
if err != nil {
|
||||
t.Fatalf("MissingAUMs() failed: %v", err)
|
||||
}
|
||||
|
||||
want := []AUM{
|
||||
c.AUMs["A3"],
|
||||
c.AUMs["A4"],
|
||||
}
|
||||
if diff := cmp.Diff(want, got); diff != "" {
|
||||
t.Errorf("MissingAUMs diff (-want, +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestMissingAUMs_Fork exercises MissingAUMs() when the two nodes have
// diverged after a common prefix (a fork at A3): each side should send
// everything it knows after the common ancestor A1.
func TestMissingAUMs_Fork(t *testing.T) {
	// Node 1 has: A1 -> A2 -> A3 -> F1
	// Node 2 has: A1 -> A2 -> A3 -> A4
	c := newTestchain(t, `
        A1 -> A2 -> A3 -> A4
                    | -> F1
        A1.hashSeed = 1
        A2.hashSeed = 2
        A3.hashSeed = 3
        A4.hashSeed = 4
    `)

	chonk1 := c.ChonkWith("A1", "A2", "A3", "F1")
	n1, err := Open(chonk1)
	if err != nil {
		t.Fatal(err)
	}
	offer1, err := n1.SyncOffer()
	if err != nil {
		t.Fatal(err)
	}

	chonk2 := c.ChonkWith("A1", "A2", "A3", "A4")
	n2, err := Open(chonk2)
	if err != nil {
		t.Fatal(err)
	}
	offer2, err := n2.SyncOffer()
	if err != nil {
		t.Fatal(err)
	}

	t.Run("n1", func(t *testing.T) {
		got, err := n1.MissingAUMs(offer2)
		if err != nil {
			t.Fatalf("MissingAUMs() failed: %v", err)
		}

		// Both sides have A1, so n1 will send everything it knows from
		// there to head.
		want := []AUM{
			c.AUMs["A2"],
			c.AUMs["A3"],
			c.AUMs["F1"],
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("MissingAUMs diff (-want, +got):\n%s", diff)
		}
	})

	t.Run("n2", func(t *testing.T) {
		got, err := n2.MissingAUMs(offer1)
		if err != nil {
			t.Fatalf("MissingAUMs() failed: %v", err)
		}

		// Both sides have A1, so n2 will send everything it knows from
		// there to head.
		want := []AUM{
			c.AUMs["A2"],
			c.AUMs["A3"],
			c.AUMs["A4"],
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("MissingAUMs diff (-want, +got):\n%s", diff)
		}
	})
}
|
||||
|
||||
// TestSyncSimpleE2E runs a full offer/missing/inform cycle between a
// node that only knows the genesis AUM and a 'control' that knows the
// whole chain, then checks both heads converge.
func TestSyncSimpleE2E(t *testing.T) {
	pub, priv := testingKey25519(t, 1)
	key := Key{Kind: Key25519, Public: pub, Votes: 2}

	c := newTestchain(t, `
        G1 -> L1 -> L2 -> L3
        G1.template = genesis
    `,
		optTemplate("genesis", AUM{MessageKind: AUMCheckpoint, State: &State{
			Keys:               []Key{key},
			DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
		}}),
		optKey("key", key, priv),
		optSignAllUsing("key"))

	node, err := Bootstrap(&Mem{}, c.AUMs["G1"])
	if err != nil {
		t.Fatalf("node Bootstrap() failed: %v", err)
	}
	control, err := Open(c.Chonk())
	if err != nil {
		t.Fatalf("control Open() failed: %v", err)
	}

	// Control knows the full chain, node only knows the genesis. Lets see
	// if they can sync.
	nodeOffer, err := node.SyncOffer()
	if err != nil {
		t.Fatal(err)
	}
	controlAUMs, err := control.MissingAUMs(nodeOffer)
	if err != nil {
		t.Fatalf("control.MissingAUMs(%v) failed: %v", nodeOffer, err)
	}
	if err := node.Inform(controlAUMs); err != nil {
		t.Fatalf("node.Inform(%v) failed: %v", controlAUMs, err)
	}

	// After informing the node of controls AUMs, both should agree on head.
	if cHash, nHash := control.Head(), node.Head(); cHash != nHash {
		t.Errorf("node & control are not synced: c=%x, n=%x", cHash, nHash)
	}
}
|
||||
539
tka/tka.go
539
tka/tka.go
@@ -4,3 +4,542 @@
|
||||
|
||||
// Package tka (WIP) implements the Tailnet Key Authority.
|
||||
package tka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ed25519"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Authority is a Tailnet Key Authority. This type is the main coupling
// point to the rest of the tailscale client.
//
// Authority objects can either be created from an existing, non-empty
// tailchonk (via tka.Open()), or created from scratch using tka.Bootstrap()
// or tka.Create().
type Authority struct {
	head           AUM   // the latest AUM applied on the active chain
	oldestAncestor AUM   // the AUM the active chain starts from
	state          State // the state machine as computed at head

	storage Chonk // durable storage for AUMs
}
|
||||
|
||||
// A chain describes a linear sequence of updates from Oldest to Head,
// resulting in some State at Head.
type chain struct {
	Oldest AUM // first AUM of the chain
	Head   AUM // last AUM of the chain

	// state is the state computed at Head. It is not populated by
	// computeChainCandidates; see computeActiveChain.
	state State

	// Set to true if the AUM chain intersects with the active
	// chain from a previous run.
	chainsThroughActive bool
}
|
||||
|
||||
// computeChainCandidates returns all possible chains based on AUMs stored
|
||||
// in the given tailchonk. A chain is defined as a unique (oldest, newest)
|
||||
// AUM tuple. chain.state is not yet populated in returned chains.
|
||||
//
|
||||
// If lastKnownOldest is provided, any chain that includes the given AUM
|
||||
// has the chainsThroughActive field set to true. This bit is leveraged
|
||||
// in computeActiveAncestor() to filter out irrelevant chains when determining
|
||||
// the active ancestor from a list of distinct chains.
|
||||
func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int) ([]chain, error) {
|
||||
heads, err := storage.Heads()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading heads: %v", err)
|
||||
}
|
||||
candidates := make([]chain, len(heads))
|
||||
for i := range heads {
|
||||
// Oldest is iteratively computed below.
|
||||
candidates[i] = chain{Oldest: heads[i], Head: heads[i]}
|
||||
}
|
||||
// Not strictly necessary, but simplifies checks in tests.
|
||||
sort.Slice(candidates, func(i, j int) bool {
|
||||
ih, jh := candidates[i].Oldest.Hash(), candidates[j].Oldest.Hash()
|
||||
return bytes.Compare(ih[:], jh[:]) < 0
|
||||
})
|
||||
|
||||
// candidates.Oldest needs to be computed by working backwards from
|
||||
// head as far as we can.
|
||||
iterAgain := true // if theres still work to be done.
|
||||
for i := 0; iterAgain; i++ {
|
||||
if i >= maxIter {
|
||||
return nil, fmt.Errorf("iteration limit exceeded (%d)", maxIter)
|
||||
}
|
||||
|
||||
iterAgain = false
|
||||
for j := range candidates {
|
||||
parent, hasParent := candidates[j].Oldest.Parent()
|
||||
if hasParent {
|
||||
parent, err := storage.AUM(parent)
|
||||
if err != nil {
|
||||
if err == os.ErrNotExist {
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("reading parent: %v", err)
|
||||
}
|
||||
candidates[j].Oldest = parent
|
||||
if lastKnownOldest != nil && *lastKnownOldest == parent.Hash() {
|
||||
candidates[j].chainsThroughActive = true
|
||||
}
|
||||
iterAgain = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return candidates, nil
|
||||
}
|
||||
|
||||
// pickNextAUM returns the AUM which should be used as the next
// AUM in the chain, possibly applying fork resolution logic.
//
// In other words: given an AUM with 3 children like this:
//   / - 1
//  P - 2
//   \ - 3
//
// pickNextAUM will determine and return the correct branch.
//
// This method takes ownership of the provided slice.
func pickNextAUM(state State, candidates []AUM) AUM {
	switch len(candidates) {
	case 0:
		// Callers (advanceChain) must never pass an empty slice.
		panic("pickNextAUM called with empty candidate set")
	case 1:
		// No fork: only one possible successor.
		return candidates[0]
	}

	// Oooof, we have some forks in the chain. We need to pick which
	// one to use by applying the Fork Resolution Algorithm ✨
	//
	// The rules are this:
	// 1. The child with the highest signature weight is chosen.
	// 2. If equal, the child which is a RemoveKey AUM is chosen.
	// 3. If equal, the child with the lowest AUM hash is chosen.
	//
	// NOTE: the comparator's parameters are deliberately named (j, i)
	// rather than the conventional (i, j), reversing the sort so that
	// the winning candidate ends up at index 0.
	sort.Slice(candidates, func(j, i int) bool {
		// Rule 1.
		iSigWeight, jSigWeight := candidates[i].Weight(state), candidates[j].Weight(state)
		if iSigWeight != jSigWeight {
			return iSigWeight < jSigWeight
		}

		// Rule 2.
		if iKind, jKind := candidates[i].MessageKind, candidates[j].MessageKind; iKind != jKind &&
			(iKind == AUMRemoveKey || jKind == AUMRemoveKey) {
			return jKind == AUMRemoveKey
		}

		// Rule 3.
		iHash, jHash := candidates[i].Hash(), candidates[j].Hash()
		return bytes.Compare(iHash[:], jHash[:]) > 0
	})

	return candidates[0]
}
|
||||
|
||||
// advanceChain computes the next AUM to advance with based on all child
|
||||
// AUMs, returning the chosen AUM & the state obtained by applying that
|
||||
// AUM.
|
||||
//
|
||||
// The return value for next is nil if there are no children AUMs, hence
|
||||
// the provided state is at head (up to date).
|
||||
func advanceChain(state State, candidates []AUM) (next *AUM, out State, err error) {
|
||||
if len(candidates) == 0 {
|
||||
return nil, state, nil
|
||||
}
|
||||
|
||||
aum := pickNextAUM(state, candidates)
|
||||
if state, err = state.applyVerifiedAUM(aum); err != nil {
|
||||
return nil, State{}, fmt.Errorf("advancing state: %v", err)
|
||||
}
|
||||
return &aum, state, nil
|
||||
}
|
||||
|
||||
// fastForward iteratively advances the current state based on known AUMs until
// the given termination function returns true or there is no more progress possible.
//
// The last-processed AUM, and the state computed after applying the last AUM,
// are returned.
//
// done may be nil, in which case iteration continues until head is reached.
func fastForward(storage Chonk, maxIter int, startState State, done func(curAUM AUM, curState State) bool) (AUM, State, error) {
	if startState.LastAUMHash == nil {
		return AUM{}, State{}, errors.New("invalid initial state")
	}
	// Load the AUM the starting state was computed at; iteration
	// proceeds from its children.
	nextAUM, err := storage.AUM(*startState.LastAUMHash)
	if err != nil {
		return AUM{}, State{}, fmt.Errorf("reading next: %v", err)
	}

	curs := nextAUM
	state := startState
	for i := 0; i < maxIter; i++ {
		// done is checked before advancing, so the terminating AUM and
		// its state are what get returned.
		if done != nil && done(curs, state) {
			return curs, state, nil
		}

		children, err := storage.ChildAUMs(curs.Hash())
		if err != nil {
			return AUM{}, State{}, fmt.Errorf("getting children of %X: %v", curs.Hash(), err)
		}
		next, nextState, err := advanceChain(state, children)
		if err != nil {
			return AUM{}, State{}, fmt.Errorf("advance %X: %v", curs.Hash(), err)
		}
		if next == nil {
			// There were no more children, we are at 'head'.
			return curs, state, nil
		}
		curs = *next
		state = nextState
	}

	return AUM{}, State{}, fmt.Errorf("iteration limit exceeded (%d)", maxIter)
}
|
||||
|
||||
// computeStateAt returns the State at wantHash.
//
// It walks backwards from wantHash to the nearest checkpoint or genesis
// AUM (a point where state can be established without prior history),
// then fast-forwards from there back to wantHash.
func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) {
	// TODO(tom): This is going to get expensive for really long
	//            chains. We should make nodes emit a checkpoint every
	//            X updates or something.

	topAUM, err := storage.AUM(wantHash)
	if err != nil {
		return State{}, err
	}

	// Iterate backwards till we find a starting point to compute
	// the state from.
	//
	// Valid starting points are either a checkpoint AUM, or a
	// genesis AUM.
	curs := topAUM
	var state State
	for i := 0; true; i++ {
		if i > maxIter {
			return State{}, fmt.Errorf("iteration limit exceeded (%d)", maxIter)
		}

		// Checkpoints encapsulate the state at that point, dope.
		if curs.MessageKind == AUMCheckpoint {
			state = curs.State.cloneForUpdate(&curs)
			break
		}
		parent, hasParent := curs.Parent()
		if !hasParent {
			// This is a 'genesis' update: there are none before it, so
			// this AUM can be applied to the empty state to determine
			// the state at this AUM.
			//
			// It is only valid for NoOp, AddKey, and Checkpoint AUMs
			// to be a genesis update. Checkpoint was handled earlier.
			if mk := curs.MessageKind; mk == AUMNoOp || mk == AUMAddKey {
				var err error
				if state, err = (State{}).applyVerifiedAUM(curs); err != nil {
					return State{}, fmt.Errorf("applying genesis (%+v): %v", curs, err)
				}
				break
			}
			return State{}, fmt.Errorf("invalid genesis update: %+v", curs)
		}

		// If we got here, the current state is dependent on the previous.
		// Keep iterating backwards till thats not the case.
		if curs, err = storage.AUM(parent); err != nil {
			return State{}, fmt.Errorf("reading parent: %v", err)
		}
	}

	// We now know some starting point state. Iterate forward till we
	// are at the AUM we want state for.
	_, state, err = fastForward(storage, maxIter, state, func(curs AUM, _ State) bool {
		return curs.Hash() == wantHash
	})
	// fastForward only terminates before the done condition if it
	// doesnt have any later AUMs to process. This cant be the case
	// as we've already iterated through them above so they must exist,
	// but we check anyway to be super duper sure.
	if err == nil && *state.LastAUMHash != wantHash {
		panic("unexpected fastForward outcome")
	}
	return state, err
}
|
||||
|
||||
// computeActiveAncestor determines which ancestor AUM to use as the
|
||||
// ancestor of the valid chain.
|
||||
//
|
||||
// If all the chains end up having the same ancestor, then thats the
|
||||
// only possible ancestor, ezpz. However if there are multiple distinct
|
||||
// ancestors, that means there are distinct chains, and we need some
|
||||
// hint to choose what to use. For that, we rely on the chainsThroughActive
|
||||
// bit, which signals to us that that ancestor was part of the
|
||||
// chain in a previous run.
|
||||
func computeActiveAncestor(storage Chonk, chains []chain) (AUMHash, error) {
|
||||
// Dedupe possible ancestors, tracking if they were part of
|
||||
// the active chain on a previous run.
|
||||
ancestors := make(map[AUMHash]bool, len(chains))
|
||||
for _, c := range chains {
|
||||
ancestors[c.Oldest.Hash()] = c.chainsThroughActive
|
||||
}
|
||||
|
||||
if len(ancestors) == 1 {
|
||||
// There's only one. DOPE.
|
||||
for k, _ := range ancestors {
|
||||
return k, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Theres more than one, so we need to use the ancestor that was
|
||||
// part of the active chain in a previous iteration.
|
||||
// Note that there can only be one distinct ancestor that was
|
||||
// formerly part of the active chain, because AUMs can only have
|
||||
// one parent and would have converged to a common ancestor.
|
||||
for k, chainsThroughActive := range ancestors {
|
||||
if chainsThroughActive {
|
||||
return k, nil
|
||||
}
|
||||
}
|
||||
|
||||
return AUMHash{}, errors.New("multiple distinct chains")
|
||||
}
|
||||
|
||||
// computeActiveChain bootstraps the runtime state of the Authority when
// starting entirely off stored state.
//
// TODO(tom): Don't look at head states, just iterate forward from
//            the ancestor.
//
// The algorithm is as follows:
//  1. Determine all possible 'head' (like in git) states.
//  2. Filter these possible chains based on whether the ancestor was
//     formerly (in a previous run) part of the chain.
//  3. Compute the state of the state machine at this ancestor. This is
//     needed for fast-forward, as each update operates on the state of
//     the update preceeding it.
//  4. Iteratively apply updates till we reach head ('fast forward').
func computeActiveChain(storage Chonk, lastKnownOldest *AUMHash, maxIter int) (chain, error) {
	chains, err := computeChainCandidates(storage, lastKnownOldest, maxIter)
	if err != nil {
		return chain{}, fmt.Errorf("computing candidates: %v", err)
	}

	// Find the right ancestor.
	oldestHash, err := computeActiveAncestor(storage, chains)
	if err != nil {
		return chain{}, fmt.Errorf("computing ancestor: %v", err)
	}
	ancestor, err := storage.AUM(oldestHash)
	if err != nil {
		return chain{}, err
	}

	// At this stage we know the ancestor AUM, so we have excluded distinct
	// chains but we might still have forks (so we don't know the head AUM).
	//
	// We iterate forward from the ancestor AUM, handling any forks as we go
	// till we arrive at a head.
	out := chain{Oldest: ancestor, Head: ancestor}
	if out.state, err = computeStateAt(storage, maxIter, oldestHash); err != nil {
		return chain{}, fmt.Errorf("bootstrapping state: %v", err)
	}
	// done == nil: run all the way to head, resolving forks as we go.
	out.Head, out.state, err = fastForward(storage, maxIter, out.state, nil)
	if err != nil {
		return chain{}, fmt.Errorf("fast forward: %v", err)
	}
	return out, nil
}
|
||||
|
||||
// aumVerify verifies if an AUM is well-formed, correctly signed, and
// can be accepted for storage.
//
// state must be the state the AUM is being applied on top of; it is
// used both for the parent check and to resolve signing keys. Genesis
// AUMs have no parent, so the parent check is skipped for them.
func aumVerify(aum AUM, state State, isGenesisAUM bool) error {
	if err := aum.StaticValidate(); err != nil {
		return fmt.Errorf("invalid: %v", err)
	}
	if !isGenesisAUM {
		if err := checkParent(aum, state); err != nil {
			return err
		}
	}

	if len(aum.Signatures) == 0 {
		return errors.New("unsigned AUM")
	}
	// Every signature present must verify against a key trusted by
	// the given state.
	sigHash := aum.SigHash()
	for i, sig := range aum.Signatures {
		key, err := state.GetKey(sig.KeyID)
		if err != nil {
			return fmt.Errorf("bad keyID on signature %d: %v", i, err)
		}
		if err := sig.Verify(sigHash, key); err != nil {
			return fmt.Errorf("signature %d: %v", i, err)
		}
	}
	return nil
}
|
||||
|
||||
func checkParent(aum AUM, state State) error {
|
||||
parent, hasParent := aum.Parent()
|
||||
if !hasParent {
|
||||
return errors.New("aum has no parent")
|
||||
}
|
||||
if state.LastAUMHash == nil {
|
||||
return errors.New("cannot check update parent hash against a state with no previous AUM")
|
||||
}
|
||||
if *state.LastAUMHash != parent {
|
||||
return fmt.Errorf("aum with parent %x cannot be applied to a state with parent %x", state.LastAUMHash, parent)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Head returns the AUM digest of the latest update applied to the state
// machine.
//
// NOTE(review): this dereferences state.LastAUMHash, which is non-nil
// for any Authority produced by Open/Create/Bootstrap; a zero-value
// Authority would panic here.
func (a *Authority) Head() AUMHash {
	return *a.state.LastAUMHash
}
|
||||
|
||||
// Open initializes an existing TKA from the given tailchonk.
//
// Only use this if the current node has initialized an Authority before.
// If a TKA exists on other nodes but theres nothing locally, use Bootstrap().
// If no TKA exists anywhere and you are creating it for the first
// time, use New().
func Open(storage Chonk) (*Authority, error) {
	// The stored last-active ancestor hints which chain was active on a
	// previous run; it is used to disambiguate distinct chains.
	a, err := storage.LastActiveAncestor()
	if err != nil {
		return nil, fmt.Errorf("reading last ancestor: %v", err)
	}

	// 2000 bounds the iteration count when walking the chain.
	c, err := computeActiveChain(storage, a, 2000)
	if err != nil {
		return nil, fmt.Errorf("active chain: %v", err)
	}

	return &Authority{
		head:           c.Head,
		oldestAncestor: c.Oldest,
		storage:        storage,
		state:          c.state,
	}, nil
}
|
||||
|
||||
// Create initializes a brand-new TKA, generating a genesis update
// and committing it to the given storage.
//
// The given signer must also be present in state as a trusted key.
//
// Do not use this to initialize a TKA that already exists, use Open()
// or Bootstrap() instead.
//
// Returns the initialized Authority and the genesis AUM, which other
// nodes can pass to Bootstrap().
func Create(storage Chonk, state State, signer ed25519.PrivateKey) (*Authority, AUM, error) {
	// Generate & sign a checkpoint, our genesis update.
	genesis := AUM{
		MessageKind: AUMCheckpoint,
		State:       &state,
	}
	if err := genesis.StaticValidate(); err != nil {
		// This serves as an easy way to validate the given state.
		return nil, AUM{}, fmt.Errorf("invalid state: %v", err)
	}
	genesis.sign25519(signer)

	// Bootstrap handles committing the genesis AUM and opening the
	// resulting authority.
	a, err := Bootstrap(storage, genesis)
	return a, genesis, err
}
|
||||
|
||||
// Bootstrap initializes a TKA based on the given checkpoint.
//
// Call this when setting up a new nodes' TKA, but other nodes
// with initialized TKA's exist.
//
// Pass the returned genesis AUM from Create(), or a later checkpoint AUM.
//
// TODO(tom): We should test an authority bootstrapped from a later checkpoint
//            works fine with sync and everything.
func Bootstrap(storage Chonk, bootstrap AUM) (*Authority, error) {
	// Bootstrap is only valid on empty storage: a non-empty tailchonk
	// means an authority already exists there.
	heads, err := storage.Heads()
	if err != nil {
		return nil, fmt.Errorf("reading heads: %v", err)
	}
	if len(heads) != 0 {
		return nil, errors.New("tailchonk is not empty")
	}

	// Check the AUM is well-formed.
	if bootstrap.MessageKind != AUMCheckpoint {
		return nil, fmt.Errorf("bootstrap AUMs must be checkpoint messages, got %v", bootstrap.MessageKind)
	}
	if bootstrap.State == nil {
		return nil, errors.New("bootstrap AUM is missing state")
	}
	// A checkpoint's own embedded state is used to verify its
	// signatures, and it is treated as a genesis AUM (no parent check).
	if err := aumVerify(bootstrap, *bootstrap.State, true); err != nil {
		return nil, fmt.Errorf("invalid bootstrap: %v", err)
	}

	// Everything looks good, write it to storage.
	if err := storage.CommitVerifiedAUMs([]AUM{bootstrap}); err != nil {
		return nil, fmt.Errorf("commit: %v", err)
	}
	if err := storage.SetLastActiveAncestor(bootstrap.Hash()); err != nil {
		return nil, fmt.Errorf("set ancestor: %v", err)
	}

	// Re-open from the storage we just populated.
	return Open(storage)
}
|
||||
|
||||
// Inform is called to tell the authority about new updates. Updates
// should be ordered oldest to newest. An error is returned if any
// of the updates could not be processed.
//
// Each update is verified against the state computed at its parent
// before anything is committed; after committing, the active chain and
// the Authority's in-memory head/state are recomputed from storage.
func (a *Authority) Inform(updates []AUM) error {
	// Memoize the state computed at each AUM hash, so consecutive
	// updates in the batch don't recompute state from scratch.
	stateAt := make(map[AUMHash]State, len(updates)+1)
	toCommit := make([]AUM, 0, len(updates))

	for i, update := range updates {
		hash := update.Hash()
		if _, err := a.storage.AUM(hash); err == nil {
			// Already have this AUM.
			continue
		}

		parent, hasParent := update.Parent()
		if !hasParent {
			return fmt.Errorf("update %d: missing parent", i)
		}

		state, hasState := stateAt[parent]
		var err error
		if !hasState {
			if state, err = computeStateAt(a.storage, 2000, parent); err != nil {
				return fmt.Errorf("update %d computing state: %v", i, err)
			}
			stateAt[parent] = state
		}

		if err := aumVerify(update, state, false); err != nil {
			return fmt.Errorf("update %d invalid: %v", i, err)
		}
		// Record the post-apply state so a later update in this batch
		// that chains onto this one finds it in stateAt.
		if stateAt[hash], err = state.applyVerifiedAUM(update); err != nil {
			return fmt.Errorf("update %d cannot be applied: %v", i, err)
		}
		toCommit = append(toCommit, update)
	}

	if err := a.storage.CommitVerifiedAUMs(toCommit); err != nil {
		return fmt.Errorf("commit: %v", err)
	}

	// TODO(tom): Theres no need to recompute the state from scratch
	//            in every case. We should detect when updates were
	//            a linear, non-forking series applied to head, and
	//            just use the last State we computed.
	oldestAncestor := a.oldestAncestor.Hash()
	c, err := computeActiveChain(a.storage, &oldestAncestor, 2000)
	if err != nil {
		return fmt.Errorf("recomputing active chain: %v", err)
	}
	a.head = c.Head
	a.oldestAncestor = c.Oldest
	a.state = c.state
	return nil
}
|
||||
|
||||
372
tka/tka_test.go
Normal file
372
tka/tka_test.go
Normal file
@@ -0,0 +1,372 @@
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
// TestComputeChainCandidates checks that one candidate chain is produced
// per stored head, and that chains passing through the provided
// last-known ancestor (I1) are marked chainsThroughActive.
func TestComputeChainCandidates(t *testing.T) {
	c := newTestchain(t, `
        G1 -> I1 -> I2 -> I3 -> L2
              | -> L1     | -> L3

        G2 -> L4

        // We tweak these AUMs so they are different hashes.
        G2.hashSeed = 2
        L1.hashSeed = 2
        L3.hashSeed = 2
        L4.hashSeed = 3
    `)
	// Should result in 4 chains:
	// G1->L1, G1->L2, G1->L3, G2->L4

	i1H := c.AUMHashes["I1"]
	got, err := computeChainCandidates(c.Chonk(), &i1H, 50)
	if err != nil {
		t.Fatalf("computeChainCandidates() failed: %v", err)
	}

	want := []chain{
		{Oldest: c.AUMs["G1"], Head: c.AUMs["L1"], chainsThroughActive: true},
		{Oldest: c.AUMs["G1"], Head: c.AUMs["L3"], chainsThroughActive: true},
		{Oldest: c.AUMs["G1"], Head: c.AUMs["L2"], chainsThroughActive: true},
		{Oldest: c.AUMs["G2"], Head: c.AUMs["L4"]},
	}
	if diff := cmp.Diff(want, got, cmp.AllowUnexported(chain{})); diff != "" {
		t.Errorf("chains differ (-want, +got):\n%s", diff)
	}
}
|
||||
|
||||
// TestForkResolutionHash checks fork-resolution rule 3: with equal
// signature weight and message kind, the fork with the lowest AUM hash
// wins.
func TestForkResolutionHash(t *testing.T) {
	c := newTestchain(t, `
        G1 -> L1
         | -> L2

        // tweak hashes so L1 & L2 are not identical
        L1.hashSeed = 2
        L2.hashSeed = 3
    `)

	got, err := computeActiveChain(c.Chonk(), nil, 50)
	if err != nil {
		t.Fatalf("computeActiveChain() failed: %v", err)
	}

	// The fork with the lowest AUM hash should have been chosen.
	l1H := c.AUMHashes["L1"]
	l2H := c.AUMHashes["L2"]
	want := l1H
	if bytes.Compare(l2H[:], l1H[:]) < 0 {
		want = l2H
	}

	if got := got.Head.Hash(); got != want {
		t.Errorf("head was %x, want %x", got, want)
	}
}
|
||||
|
||||
// TestForkResolutionSigWeight checks fork-resolution rule 1: a fork
// carrying more signature weight wins even when the hash rule would
// prefer the other fork.
func TestForkResolutionSigWeight(t *testing.T) {
	pub, priv := testingKey25519(t, 1)
	key := Key{Kind: Key25519, Public: pub, Votes: 2}

	c := newTestchain(t, `
        G1 -> L1
         | -> L2

        G1.template = addKey
        L1.hashSeed = 2
        L2.signedWith = key
    `,
		optTemplate("addKey", AUM{MessageKind: AUMAddKey, Key: &key}),
		optKey("key", key, priv))

	// Precondition: L1's hash must be lower, so rule 3 alone would
	// pick L1 and the test actually exercises rule 1.
	l1H := c.AUMHashes["L1"]
	l2H := c.AUMHashes["L2"]
	if bytes.Compare(l2H[:], l1H[:]) < 0 {
		t.Fatal("failed assert: h(l1) > h(l2)\nTweak hashSeed till this passes")
	}

	got, err := computeActiveChain(c.Chonk(), nil, 50)
	if err != nil {
		t.Fatalf("computeActiveChain() failed: %v", err)
	}

	// Based on the hash, l1H should be chosen.
	// But based on the signature weight (which has higher
	// precedence), it should be l2H
	want := l2H
	if got := got.Head.Hash(); got != want {
		t.Errorf("head was %x, want %x", got, want)
	}
}
|
||||
|
||||
// TestForkResolutionMessageType checks fork-resolution rule 2: with
// equal signature weight, an AUMRemoveKey fork wins over forks that the
// hash rule would otherwise prefer.
func TestForkResolutionMessageType(t *testing.T) {
	pub, _ := testingKey25519(t, 1)
	key := Key{Kind: Key25519, Public: pub, Votes: 2}

	c := newTestchain(t, `
        G1 -> L1
         | -> L2
         | -> L3

        G1.template = addKey
        L1.hashSeed = 11
        L2.template = removeKey
        L3.hashSeed = 18
    `,
		optTemplate("addKey", AUM{MessageKind: AUMAddKey, Key: &key}),
		optTemplate("removeKey", AUM{MessageKind: AUMRemoveKey, KeyID: key.ID()}))

	// Preconditions: L2's hash must be the lowest of the three so the
	// hash rule alone would never pick it.
	l1H := c.AUMHashes["L1"]
	l2H := c.AUMHashes["L2"]
	l3H := c.AUMHashes["L3"]
	if bytes.Compare(l2H[:], l1H[:]) < 0 {
		t.Fatal("failed assert: h(l1) > h(l2)\nTweak hashSeed till this passes")
	}
	if bytes.Compare(l2H[:], l3H[:]) < 0 {
		t.Fatal("failed assert: h(l3) > h(l2)\nTweak hashSeed till this passes")
	}

	got, err := computeActiveChain(c.Chonk(), nil, 50)
	if err != nil {
		t.Fatalf("computeActiveChain() failed: %v", err)
	}

	// Based on the hash, L1 or L3 should be chosen.
	// But based on the preference for AUMRemoveKey messages,
	// it should be L2.
	want := l2H
	if got := got.Head.Hash(); got != want {
		t.Errorf("head was %x, want %x", got, want)
	}
}
|
||||
|
||||
// TestComputeStateAt checks that computeStateAt() reconstructs the state
// as of a given AUM: a key added at I1 must be absent at G1 and present
// at I1 and I2.
func TestComputeStateAt(t *testing.T) {
	pub, _ := testingKey25519(t, 1)
	key := Key{Kind: Key25519, Public: pub, Votes: 2}

	c := newTestchain(t, `
        G1 -> I1 -> I2
        I1.template = addKey
    `,
		optTemplate("addKey", AUM{MessageKind: AUMAddKey, Key: &key}))

	// G1 is before the key, so there shouldn't be a key there.
	state, err := computeStateAt(c.Chonk(), 500, c.AUMHashes["G1"])
	if err != nil {
		t.Fatalf("computeStateAt(G1) failed: %v", err)
	}
	if _, err := state.GetKey(key.ID()); err != ErrNoSuchKey {
		t.Errorf("expected key to be missing: err = %v", err)
	}
	if *state.LastAUMHash != c.AUMHashes["G1"] {
		t.Errorf("LastAUMHash = %x, want %x", *state.LastAUMHash, c.AUMHashes["G1"])
	}

	// I1 & I2 are after the key, so the computed state should contain
	// the key.
	for _, wantHash := range []AUMHash{c.AUMHashes["I1"], c.AUMHashes["I2"]} {
		state, err = computeStateAt(c.Chonk(), 500, wantHash)
		if err != nil {
			t.Fatalf("computeStateAt(%X) failed: %v", wantHash, err)
		}
		if *state.LastAUMHash != wantHash {
			t.Errorf("LastAUMHash = %x, want %x", *state.LastAUMHash, wantHash)
		}
		if _, err := state.GetKey(key.ID()); err != nil {
			t.Errorf("expected key to be present at state: err = %v", err)
		}
	}
}
|
||||
|
||||
// fakeAUM generates an AUM structure based on the template.
|
||||
// If parent is provided, PrevAUMHash is set to that value.
|
||||
//
|
||||
// If template is an AUM, the returned AUM is based on that.
|
||||
// If template is an int, a NOOP AUM is returned, and the
|
||||
// provided int can be used to tweak the resulting hash (needed
|
||||
// for tests you want one AUM to be 'lower' than another, so that
|
||||
// that chain is taken based on fork resolution rules).
|
||||
func fakeAUM(t *testing.T, template interface{}, parent *AUMHash) (AUM, AUMHash) {
|
||||
if seed, ok := template.(int); ok {
|
||||
a := AUM{MessageKind: AUMNoOp, KeyID: []byte{byte(seed)}}
|
||||
if parent != nil {
|
||||
a.PrevAUMHash = (*parent)[:]
|
||||
}
|
||||
h := a.Hash()
|
||||
return a, h
|
||||
}
|
||||
|
||||
if a, ok := template.(AUM); ok {
|
||||
if parent != nil {
|
||||
a.PrevAUMHash = (*parent)[:]
|
||||
}
|
||||
h := a.Hash()
|
||||
return a, h
|
||||
}
|
||||
|
||||
panic("template must be an int or an AUM")
|
||||
}
|
||||
|
||||
// TestOpenAuthority builds a storage with two distinct chains and forks,
// then checks Open() picks the chain through the recorded ancestor and
// resolves forks per the fork-resolution rules, landing on head L2.
func TestOpenAuthority(t *testing.T) {
	pub, _ := testingKey25519(t, 1)
	key := Key{Kind: Key25519, Public: pub, Votes: 2}

	//           /- L1
	// G1 - I1 - I2 - I3 -L2
	//                 \-L3
	// G2 - L4
	//
	// We set the previous-known ancestor to G1, so the
	// ancestor to start from should be G1.
	g1, g1H := fakeAUM(t, AUM{MessageKind: AUMAddKey, Key: &key}, nil)
	i1, i1H := fakeAUM(t, 2, &g1H) // AUM{MessageKind: AUMAddKey, Key: &key2}
	l1, l1H := fakeAUM(t, 13, &i1H)

	i2, i2H := fakeAUM(t, 2, &i1H)
	i3, i3H := fakeAUM(t, 5, &i2H)
	l2, l2H := fakeAUM(t, AUM{MessageKind: AUMNoOp, KeyID: []byte{7}, Signatures: []Signature{{KeyID: key.ID()}}}, &i3H)
	l3, l3H := fakeAUM(t, 4, &i3H)

	g2, g2H := fakeAUM(t, 8, nil)
	l4, _ := fakeAUM(t, 9, &g2H)

	// We make sure that I2 has a lower hash than L1, so
	// it should take that path rather than L1.
	if bytes.Compare(l1H[:], i2H[:]) < 0 {
		t.Fatal("failed assert: h(i2) > h(l1)\nTweak parameters to fakeAUM till this passes")
	}
	// We make sure L2 has a signature with key, so it should
	// take that path over L3. We assert that the L3 hash
	// is less than L2 so the test will fail if the signature
	// preference logic is broken.
	if bytes.Compare(l2H[:], l3H[:]) < 0 {
		t.Fatal("failed assert: h(l3) > h(l2)\nTweak parameters to fakeAUM till this passes")
	}

	// Construct the state of durable storage.
	chonk := &Mem{}
	err := chonk.CommitVerifiedAUMs([]AUM{g1, i1, l1, i2, i3, l2, l3, g2, l4})
	if err != nil {
		t.Fatal(err)
	}
	chonk.SetLastActiveAncestor(i1H)

	a, err := Open(chonk)
	if err != nil {
		t.Fatalf("New() failed: %v", err)
	}
	// Should include the key added in G1
	if _, err := a.state.GetKey(key.ID()); err != nil {
		t.Errorf("missing G1 key: %v", err)
	}
	// The head of the chain should be L2.
	if a.Head() != l2H {
		t.Errorf("head was %x, want %x", a.state.LastAUMHash, l2H)
	}
}
|
||||
|
||||
func TestOpenAuthority_EmptyErrors(t *testing.T) {
|
||||
_, err := Open(&Mem{})
|
||||
if err == nil {
|
||||
t.Error("Expected an error initializing an empty authority, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthorityHead(t *testing.T) {
|
||||
c := newTestchain(t, `
|
||||
G1 -> L1
|
||||
| -> L2
|
||||
|
||||
L1.hashSeed = 2
|
||||
`)
|
||||
|
||||
a, _ := Open(c.Chonk())
|
||||
if got, want := a.head.Hash(), a.Head(); got != want {
|
||||
t.Errorf("Hash() returned %x, want %x", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateBootstrapAuthority(t *testing.T) {
|
||||
pub, priv := testingKey25519(t, 1)
|
||||
key := Key{Kind: Key25519, Public: pub, Votes: 2}
|
||||
|
||||
a1, genesisAUM, err := Create(&Mem{}, State{
|
||||
Keys: []Key{key},
|
||||
DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
|
||||
}, priv)
|
||||
if err != nil {
|
||||
t.Fatalf("Create() failed: %v", err)
|
||||
}
|
||||
|
||||
a2, err := Bootstrap(&Mem{}, genesisAUM)
|
||||
if err != nil {
|
||||
t.Fatalf("Bootstrap() failed: %v", err)
|
||||
}
|
||||
|
||||
if a1.Head() != a2.Head() {
|
||||
t.Fatal("created and bootstrapped authority differ")
|
||||
}
|
||||
|
||||
// Both authorities should trust the key laid down in the genesis state.
|
||||
if _, err := a1.state.GetKey(key.ID()); err != nil {
|
||||
t.Errorf("reading genesis key from a1: %v", err)
|
||||
}
|
||||
if _, err := a2.state.GetKey(key.ID()); err != nil {
|
||||
t.Errorf("reading genesis key from a2: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthorityInform(t *testing.T) {
|
||||
pub, priv := testingKey25519(t, 1)
|
||||
key := Key{Kind: Key25519, Public: pub, Votes: 2}
|
||||
|
||||
c := newTestchain(t, `
|
||||
G1 -> L1
|
||||
| -> L2 -> L3
|
||||
| -> L4 -> L5
|
||||
|
||||
G1.template = genesis
|
||||
L2.hashSeed = 1
|
||||
L4.hashSeed = 2
|
||||
`,
|
||||
optTemplate("genesis", AUM{MessageKind: AUMCheckpoint, State: &State{
|
||||
Keys: []Key{key},
|
||||
DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
|
||||
}}),
|
||||
optKey("key", key, priv),
|
||||
optSignAllUsing("key"))
|
||||
|
||||
storage := &Mem{}
|
||||
a, err := Bootstrap(storage, c.AUMs["G1"])
|
||||
if err != nil {
|
||||
t.Fatalf("Bootstrap() failed: %v", err)
|
||||
}
|
||||
|
||||
informAUMs := []AUM{c.AUMs["L1"], c.AUMs["L2"], c.AUMs["L3"], c.AUMs["L4"], c.AUMs["L5"]}
|
||||
|
||||
if err := a.Inform(informAUMs); err != nil {
|
||||
t.Fatalf("Inform() failed: %v", err)
|
||||
}
|
||||
for i, update := range informAUMs {
|
||||
stored, err := storage.AUM(update.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("reading stored update %d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if diff := cmp.Diff(update, stored); diff != "" {
|
||||
t.Errorf("update %d differs (-want, +got):\n%s", i, diff)
|
||||
}
|
||||
}
|
||||
|
||||
if a.Head() != c.AUMHashes["L3"] {
|
||||
t.Fatal("authority did not converge to correct AUM")
|
||||
}
|
||||
}
|
||||
@@ -14,9 +14,9 @@ import (
|
||||
// tests netstack's AlignedAtomicInt64.
|
||||
func TestAlignedAtomicInt64(t *testing.T) {
|
||||
type T struct {
|
||||
A atomicbitops.AlignedAtomicInt64
|
||||
A atomicbitops.Int64
|
||||
x int32
|
||||
B atomicbitops.AlignedAtomicInt64
|
||||
B atomicbitops.Int64
|
||||
}
|
||||
|
||||
t.Logf("I am %v/%v\n", runtime.GOOS, runtime.GOARCH)
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
// Package jsdeps is a just a list of the packages we import in the
|
||||
// JavaScript/WASM build, to let us test that our transitive closure of
|
||||
// dependencies on iOS doesn't accidentally grow too large, since binary size
|
||||
// is more of a concern there.
|
||||
// dependencies doesn't accidentally grow too large, since binary size
|
||||
// is more of a concern.
|
||||
package jsdeps
|
||||
|
||||
import (
|
||||
|
||||
@@ -56,6 +56,11 @@ func (k ControlPrivate) SealTo(p MachinePublic, cleartext []byte) (ciphertext []
|
||||
return k.mkey.SealTo(p, cleartext)
|
||||
}
|
||||
|
||||
// SharedKey returns the precomputed Nacl box shared key between k and p.
|
||||
func (k ControlPrivate) SharedKey(p MachinePublic) MachinePrecomputedSharedKey {
|
||||
return k.mkey.SharedKey(p)
|
||||
}
|
||||
|
||||
// OpenFrom opens the NaCl box ciphertext, which must be a value
|
||||
// created by SealTo, and returns the inner cleartext if ciphertext is
|
||||
// a valid box from p to k.
|
||||
|
||||
@@ -105,6 +105,33 @@ func (k MachinePrivate) SealTo(p MachinePublic, cleartext []byte) (ciphertext []
|
||||
return box.Seal(nonce[:], cleartext, &nonce, &p.k, &k.k)
|
||||
}
|
||||
|
||||
// SharedKey returns the precomputed Nacl box shared key between k and p.
|
||||
func (k MachinePrivate) SharedKey(p MachinePublic) MachinePrecomputedSharedKey {
|
||||
var shared MachinePrecomputedSharedKey
|
||||
box.Precompute(&shared.k, &p.k, &k.k)
|
||||
return shared
|
||||
}
|
||||
|
||||
// MachinePrecomputedSharedKey is a precomputed shared NaCl box shared key.
|
||||
type MachinePrecomputedSharedKey struct {
|
||||
k [32]byte
|
||||
}
|
||||
|
||||
// Seal wraps cleartext into a NaCl box (see
|
||||
// golang.org/x/crypto/nacl) using the shared key k as generated
|
||||
// by MachinePrivate.SharedKey.
|
||||
//
|
||||
// The returned ciphertext is a 24-byte nonce concatenated with the
|
||||
// box value.
|
||||
func (k MachinePrecomputedSharedKey) Seal(cleartext []byte) (ciphertext []byte) {
|
||||
if k == (MachinePrecomputedSharedKey{}) {
|
||||
panic("can't seal with zero keys")
|
||||
}
|
||||
var nonce [24]byte
|
||||
rand(nonce[:])
|
||||
return box.SealAfterPrecomputation(nonce[:], cleartext, &nonce, &k.k)
|
||||
}
|
||||
|
||||
// OpenFrom opens the NaCl box ciphertext, which must be a value
|
||||
// created by SealTo, and returns the inner cleartext if ciphertext is
|
||||
// a valid box from p to k.
|
||||
|
||||
@@ -90,3 +90,23 @@ func TestMachineSerialization(t *testing.T) {
|
||||
t.Error("json serialization doesn't roundtrip")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSealViaSharedKey(t *testing.T) {
|
||||
// encrypt a message from a to b
|
||||
a := NewMachine()
|
||||
b := NewMachine()
|
||||
apub, bpub := a.Public(), b.Public()
|
||||
|
||||
shared := a.SharedKey(bpub)
|
||||
|
||||
const clear = "the eagle flies at midnight"
|
||||
enc := shared.Seal([]byte(clear))
|
||||
|
||||
back, ok := b.OpenFrom(apub, enc)
|
||||
if !ok {
|
||||
t.Fatal("failed to decrypt")
|
||||
}
|
||||
if string(back) != clear {
|
||||
t.Errorf("got %q; want cleartext %q", back, clear)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -132,6 +132,43 @@ func (v Slice[T]) AsSlice() []T {
|
||||
return v.AppendTo(v.ж[:0:0])
|
||||
}
|
||||
|
||||
// IndexFunc returns the first index of an element in v satisfying f(e),
|
||||
// or -1 if none do.
|
||||
//
|
||||
// As it runs in O(n) time, use with care.
|
||||
func (v Slice[T]) IndexFunc(f func(T) bool) int {
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if f(v.At(i)) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// ContainsFunc reports whether any element in v satisfies f(e).
|
||||
//
|
||||
// As it runs in O(n) time, use with care.
|
||||
func (v Slice[T]) ContainsFunc(f func(T) bool) bool {
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if f(v.At(i)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SliceContains reports whether v contains element e.
|
||||
//
|
||||
// As it runs in O(n) time, use with care.
|
||||
func SliceContains[T comparable](v Slice[T], e T) bool {
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if v.At(i) == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IPPrefixSlice is a read-only accessor for a slice of netaddr.IPPrefix.
|
||||
type IPPrefixSlice struct {
|
||||
ж Slice[netaddr.IPPrefix]
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
qt "github.com/frankban/quicktest"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
@@ -73,3 +74,15 @@ func TestViewsJSON(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestViewUtils(t *testing.T) {
|
||||
v := SliceOf([]string{"foo", "bar"})
|
||||
c := qt.New(t)
|
||||
|
||||
c.Check(v.ContainsFunc(func(s string) bool { return strings.HasPrefix(s, "f") }), qt.Equals, true)
|
||||
c.Check(v.ContainsFunc(func(s string) bool { return strings.HasPrefix(s, "g") }), qt.Equals, false)
|
||||
c.Check(v.IndexFunc(func(s string) bool { return strings.HasPrefix(s, "b") }), qt.Equals, 1)
|
||||
c.Check(v.IndexFunc(func(s string) bool { return strings.HasPrefix(s, "z") }), qt.Equals, -1)
|
||||
c.Check(SliceContains(v, "bar"), qt.Equals, true)
|
||||
c.Check(SliceContains(v, "baz"), qt.Equals, false)
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
"log"
|
||||
"math"
|
||||
"reflect"
|
||||
"sync"
|
||||
@@ -142,6 +143,35 @@ func Hash(v any) (s Sum) {
|
||||
return h.sum()
|
||||
}
|
||||
|
||||
// HasherForType is like Hash, but it returns a Hash func that's specialized for
|
||||
// the provided reflect type, avoiding a map lookup per value.
|
||||
func HasherForType[T any]() func(T) Sum {
|
||||
var zeroT T
|
||||
ti := getTypeInfo(reflect.TypeOf(zeroT))
|
||||
seedOnce.Do(initSeed)
|
||||
|
||||
return func(v T) Sum {
|
||||
h := hasherPool.Get().(*hasher)
|
||||
defer hasherPool.Put(h)
|
||||
h.reset()
|
||||
h.hashUint64(seed)
|
||||
|
||||
rv := reflect.ValueOf(v)
|
||||
|
||||
if rv.IsValid() {
|
||||
// Always treat the Hash input as an interface (it is), including hashing
|
||||
// its type, otherwise two Hash calls of different types could hash to the
|
||||
// same bytes off the different types and get equivalent Sum values. This is
|
||||
// the same thing that we do for reflect.Kind Interface in hashValue, but
|
||||
// the initial reflect.ValueOf from an interface value effectively strips
|
||||
// the interface box off so we have to do it at the top level by hand.
|
||||
h.hashType(rv.Type())
|
||||
h.hashValueWithType(rv, ti, false)
|
||||
}
|
||||
return h.sum()
|
||||
}
|
||||
}
|
||||
|
||||
// Update sets last to the hash of v and reports whether its value changed.
|
||||
func Update(last *Sum, v ...any) (changed bool) {
|
||||
sum := Hash(v)
|
||||
@@ -170,14 +200,26 @@ func (h *hasher) hashUint32(i uint32) {
|
||||
binary.LittleEndian.PutUint32(h.scratch[:4], i)
|
||||
h.bw.Write(h.scratch[:4])
|
||||
}
|
||||
func (h *hasher) hashLen(n int) {
|
||||
binary.LittleEndian.PutUint64(h.scratch[:8], uint64(n))
|
||||
h.bw.Write(h.scratch[:8])
|
||||
}
|
||||
func (h *hasher) hashUint64(i uint64) {
|
||||
binary.LittleEndian.PutUint64(h.scratch[:8], i)
|
||||
h.bw.Write(h.scratch[:8])
|
||||
}
|
||||
|
||||
var uint8Type = reflect.TypeOf(byte(0))
|
||||
var (
|
||||
uint8Type = reflect.TypeOf(byte(0))
|
||||
timeTimeType = reflect.TypeOf(time.Time{})
|
||||
)
|
||||
|
||||
// typeInfo describes properties of a type.
|
||||
//
|
||||
// A non-nil typeInfo is populated into the typeHasher map
|
||||
// when its type is first requested, before its func is created.
|
||||
// Its func field fn is only populated once the type has been created.
|
||||
// This is used for recursive types.
|
||||
type typeInfo struct {
|
||||
rtype reflect.Type
|
||||
canMemHash bool
|
||||
@@ -190,11 +232,394 @@ type typeInfo struct {
|
||||
// keyTypeInfo is the map key type's typeInfo.
|
||||
// It's set when rtype is of Kind Map.
|
||||
keyTypeInfo *typeInfo
|
||||
|
||||
hashFuncOnce sync.Once
|
||||
hashFuncLazy typeHasherFunc // nil until created
|
||||
}
|
||||
|
||||
// returns ok if it was handled; else slow path runs
|
||||
type typeHasherFunc func(h *hasher, v reflect.Value) (ok bool)
|
||||
|
||||
var typeInfoMap sync.Map // map[reflect.Type]*typeInfo
|
||||
var typeInfoMapPopulate sync.Mutex // just for adding to typeInfoMap
|
||||
|
||||
func (ti *typeInfo) hasher() typeHasherFunc {
|
||||
ti.hashFuncOnce.Do(ti.buildHashFuncOnce)
|
||||
return ti.hashFuncLazy
|
||||
}
|
||||
|
||||
func (ti *typeInfo) buildHashFuncOnce() {
|
||||
ti.hashFuncLazy = genTypeHasher(ti.rtype)
|
||||
}
|
||||
|
||||
func (h *hasher) hashBoolv(v reflect.Value) bool {
|
||||
var b byte
|
||||
if v.Bool() {
|
||||
b = 1
|
||||
}
|
||||
h.hashUint8(b)
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashUint8v(v reflect.Value) bool {
|
||||
h.hashUint8(uint8(v.Uint()))
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashInt8v(v reflect.Value) bool {
|
||||
h.hashUint8(uint8(v.Int()))
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashUint16v(v reflect.Value) bool {
|
||||
h.hashUint16(uint16(v.Uint()))
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashInt16v(v reflect.Value) bool {
|
||||
h.hashUint16(uint16(v.Int()))
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashUint32v(v reflect.Value) bool {
|
||||
h.hashUint32(uint32(v.Uint()))
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashInt32v(v reflect.Value) bool {
|
||||
h.hashUint32(uint32(v.Int()))
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashUint64v(v reflect.Value) bool {
|
||||
h.hashUint64(v.Uint())
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashInt64v(v reflect.Value) bool {
|
||||
h.hashUint64(uint64(v.Int()))
|
||||
return true
|
||||
}
|
||||
|
||||
func hashStructAppenderTo(h *hasher, v reflect.Value) bool {
|
||||
if !v.CanInterface() {
|
||||
return false // slow path
|
||||
}
|
||||
var a appenderTo
|
||||
if v.CanAddr() {
|
||||
a = v.Addr().Interface().(appenderTo)
|
||||
} else {
|
||||
a = v.Interface().(appenderTo)
|
||||
}
|
||||
size := h.scratch[:8]
|
||||
record := a.AppendTo(size)
|
||||
binary.LittleEndian.PutUint64(record, uint64(len(record)-len(size)))
|
||||
h.bw.Write(record)
|
||||
return true
|
||||
}
|
||||
|
||||
// hashPointerAppenderTo hashes v, a reflect.Ptr, that implements appenderTo.
|
||||
func hashPointerAppenderTo(h *hasher, v reflect.Value) bool {
|
||||
if !v.CanInterface() {
|
||||
return false // slow path
|
||||
}
|
||||
if v.IsNil() {
|
||||
h.hashUint8(0) // indicates nil
|
||||
return true
|
||||
}
|
||||
h.hashUint8(1) // indicates visiting a pointer
|
||||
a := v.Interface().(appenderTo)
|
||||
size := h.scratch[:8]
|
||||
record := a.AppendTo(size)
|
||||
binary.LittleEndian.PutUint64(record, uint64(len(record)-len(size)))
|
||||
h.bw.Write(record)
|
||||
return true
|
||||
}
|
||||
|
||||
// fieldInfo describes a struct field.
|
||||
type fieldInfo struct {
|
||||
index int // index of field for reflect.Value.Field(n)
|
||||
typeInfo *typeInfo
|
||||
canMemHash bool
|
||||
offset uintptr // when we can memhash the field
|
||||
size uintptr // when we can memhash the field
|
||||
}
|
||||
|
||||
// mergeContiguousFieldsCopy returns a copy of f with contiguous memhashable fields
|
||||
// merged together. Such fields get a bogus index and fu value.
|
||||
func mergeContiguousFieldsCopy(in []fieldInfo) []fieldInfo {
|
||||
ret := make([]fieldInfo, 0, len(in))
|
||||
var last *fieldInfo
|
||||
for _, f := range in {
|
||||
// Combine two fields if they're both contiguous & memhash-able.
|
||||
if f.canMemHash && last != nil && last.canMemHash && last.offset+last.size == f.offset {
|
||||
last.size += f.size
|
||||
last.index = -1
|
||||
last.typeInfo = nil
|
||||
} else {
|
||||
ret = append(ret, f)
|
||||
last = &ret[len(ret)-1]
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// genHashStructFields generates a typeHasherFunc for t, which must be of kind Struct.
|
||||
func genHashStructFields(t reflect.Type) typeHasherFunc {
|
||||
fields := make([]fieldInfo, 0, t.NumField())
|
||||
for i, n := 0, t.NumField(); i < n; i++ {
|
||||
sf := t.Field(i)
|
||||
if sf.Type.Size() == 0 {
|
||||
continue
|
||||
}
|
||||
fields = append(fields, fieldInfo{
|
||||
index: i,
|
||||
typeInfo: getTypeInfo(sf.Type),
|
||||
canMemHash: canMemHash(sf.Type),
|
||||
offset: sf.Offset,
|
||||
size: sf.Type.Size(),
|
||||
})
|
||||
}
|
||||
fieldsIfCanAddr := mergeContiguousFieldsCopy(fields)
|
||||
return structHasher{fields, fieldsIfCanAddr}.hash
|
||||
}
|
||||
|
||||
type structHasher struct {
|
||||
fields, fieldsIfCanAddr []fieldInfo
|
||||
}
|
||||
|
||||
func (sh structHasher) hash(h *hasher, v reflect.Value) bool {
|
||||
var base unsafe.Pointer
|
||||
if v.CanAddr() {
|
||||
base = v.Addr().UnsafePointer()
|
||||
for _, f := range sh.fieldsIfCanAddr {
|
||||
if f.canMemHash {
|
||||
h.bw.Write(unsafe.Slice((*byte)(unsafe.Pointer(uintptr(base)+f.offset)), f.size))
|
||||
} else if !f.typeInfo.hasher()(h, v.Field(f.index)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, f := range sh.fields {
|
||||
if !f.typeInfo.hasher()(h, v.Field(f.index)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// genHashPtrToMemoryRange returns a hasher where the reflect.Value is a Ptr to
|
||||
// the provided eleType.
|
||||
func genHashPtrToMemoryRange(eleType reflect.Type) typeHasherFunc {
|
||||
size := eleType.Size()
|
||||
return func(h *hasher, v reflect.Value) bool {
|
||||
if v.IsNil() {
|
||||
h.hashUint8(0) // indicates nil
|
||||
} else {
|
||||
h.hashUint8(1) // indicates visiting a pointer
|
||||
h.bw.Write(unsafe.Slice((*byte)(v.UnsafePointer()), size))
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
const debug = false
|
||||
|
||||
func genTypeHasher(t reflect.Type) typeHasherFunc {
|
||||
if debug {
|
||||
log.Printf("generating func for %v", t)
|
||||
}
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return (*hasher).hashBoolv
|
||||
case reflect.Int8:
|
||||
return (*hasher).hashInt8v
|
||||
case reflect.Int16:
|
||||
return (*hasher).hashInt16v
|
||||
case reflect.Int32:
|
||||
return (*hasher).hashInt32v
|
||||
case reflect.Int, reflect.Int64:
|
||||
return (*hasher).hashInt64v
|
||||
case reflect.Uint8:
|
||||
return (*hasher).hashUint8v
|
||||
case reflect.Uint16:
|
||||
return (*hasher).hashUint16v
|
||||
case reflect.Uint32:
|
||||
return (*hasher).hashUint32v
|
||||
case reflect.Uint, reflect.Uintptr, reflect.Uint64:
|
||||
return (*hasher).hashUint64v
|
||||
case reflect.Float32:
|
||||
return (*hasher).hashFloat32v
|
||||
case reflect.Float64:
|
||||
return (*hasher).hashFloat64v
|
||||
case reflect.Complex64:
|
||||
return (*hasher).hashComplex64v
|
||||
case reflect.Complex128:
|
||||
return (*hasher).hashComplex128v
|
||||
case reflect.String:
|
||||
return (*hasher).hashString
|
||||
case reflect.Slice:
|
||||
et := t.Elem()
|
||||
if canMemHash(et) {
|
||||
return (*hasher).hashSliceMem
|
||||
}
|
||||
eti := getTypeInfo(et)
|
||||
return genHashSliceElements(eti)
|
||||
case reflect.Array:
|
||||
et := t.Elem()
|
||||
eti := getTypeInfo(et)
|
||||
return genHashArray(t, eti)
|
||||
case reflect.Struct:
|
||||
if t == timeTimeType {
|
||||
return (*hasher).hashTimev
|
||||
}
|
||||
if t.Implements(appenderToType) {
|
||||
return hashStructAppenderTo
|
||||
}
|
||||
return genHashStructFields(t)
|
||||
case reflect.Pointer:
|
||||
et := t.Elem()
|
||||
if canMemHash(et) {
|
||||
return genHashPtrToMemoryRange(et)
|
||||
}
|
||||
if t.Implements(appenderToType) {
|
||||
return hashPointerAppenderTo
|
||||
}
|
||||
if !typeIsRecursive(t) {
|
||||
eti := getTypeInfo(et)
|
||||
return func(h *hasher, v reflect.Value) bool {
|
||||
if v.IsNil() {
|
||||
h.hashUint8(0) // indicates nil
|
||||
return true
|
||||
}
|
||||
h.hashUint8(1) // indicates visiting a pointer
|
||||
return eti.hasher()(h, v.Elem())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return func(h *hasher, v reflect.Value) bool {
|
||||
if debug {
|
||||
log.Printf("unhandled type %v", v.Type())
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// hashString hashes v, of kind String.
|
||||
func (h *hasher) hashString(v reflect.Value) bool {
|
||||
s := v.String()
|
||||
h.hashLen(len(s))
|
||||
h.bw.WriteString(s)
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashFloat32v(v reflect.Value) bool {
|
||||
h.hashUint32(math.Float32bits(float32(v.Float())))
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashFloat64v(v reflect.Value) bool {
|
||||
h.hashUint64(math.Float64bits(v.Float()))
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashComplex64v(v reflect.Value) bool {
|
||||
c := complex64(v.Complex())
|
||||
h.hashUint32(math.Float32bits(real(c)))
|
||||
h.hashUint32(math.Float32bits(imag(c)))
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *hasher) hashComplex128v(v reflect.Value) bool {
|
||||
c := v.Complex()
|
||||
h.hashUint64(math.Float64bits(real(c)))
|
||||
h.hashUint64(math.Float64bits(imag(c)))
|
||||
return true
|
||||
}
|
||||
|
||||
// hashString hashes v, of kind time.Time.
|
||||
func (h *hasher) hashTimev(v reflect.Value) bool {
|
||||
var t time.Time
|
||||
if v.CanAddr() {
|
||||
t = *(v.Addr().Interface().(*time.Time))
|
||||
} else {
|
||||
t = v.Interface().(time.Time)
|
||||
}
|
||||
b := t.AppendFormat(h.scratch[:1], time.RFC3339Nano)
|
||||
b[0] = byte(len(b) - 1) // more than sufficient width; if not, good enough.
|
||||
h.bw.Write(b)
|
||||
return true
|
||||
}
|
||||
|
||||
// hashSliceMem hashes v, of kind Slice, with a memhash-able element type.
|
||||
func (h *hasher) hashSliceMem(v reflect.Value) bool {
|
||||
vLen := v.Len()
|
||||
h.hashUint64(uint64(vLen))
|
||||
if vLen == 0 {
|
||||
return true
|
||||
}
|
||||
h.bw.Write(unsafe.Slice((*byte)(v.UnsafePointer()), v.Type().Elem().Size()*uintptr(vLen)))
|
||||
return true
|
||||
}
|
||||
|
||||
func genHashArrayMem(n int, arraySize uintptr, efu *typeInfo) typeHasherFunc {
|
||||
byElement := genHashArrayElements(n, efu)
|
||||
return func(h *hasher, v reflect.Value) bool {
|
||||
if v.CanAddr() {
|
||||
h.bw.Write(unsafe.Slice((*byte)(v.Addr().UnsafePointer()), arraySize))
|
||||
return true
|
||||
}
|
||||
return byElement(h, v)
|
||||
}
|
||||
}
|
||||
|
||||
func genHashArrayElements(n int, eti *typeInfo) typeHasherFunc {
|
||||
return func(h *hasher, v reflect.Value) bool {
|
||||
for i := 0; i < n; i++ {
|
||||
if !eti.hasher()(h, v.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func noopHasherFunc(h *hasher, v reflect.Value) bool { return true }
|
||||
|
||||
func genHashArray(t reflect.Type, eti *typeInfo) typeHasherFunc {
|
||||
if t.Size() == 0 {
|
||||
return noopHasherFunc
|
||||
}
|
||||
et := t.Elem()
|
||||
if canMemHash(et) {
|
||||
return genHashArrayMem(t.Len(), t.Size(), eti)
|
||||
}
|
||||
n := t.Len()
|
||||
return genHashArrayElements(n, eti)
|
||||
}
|
||||
|
||||
func genHashSliceElements(eti *typeInfo) typeHasherFunc {
|
||||
return sliceElementHasher{eti}.hash
|
||||
}
|
||||
|
||||
type sliceElementHasher struct {
|
||||
eti *typeInfo
|
||||
}
|
||||
|
||||
func (seh sliceElementHasher) hash(h *hasher, v reflect.Value) bool {
|
||||
vLen := v.Len()
|
||||
h.hashUint64(uint64(vLen))
|
||||
for i := 0; i < vLen; i++ {
|
||||
if !seh.eti.hasher()(h, v.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func getTypeInfo(t reflect.Type) *typeInfo {
|
||||
if f, ok := typeInfoMap.Load(t); ok {
|
||||
return f.(*typeInfo)
|
||||
@@ -353,6 +778,13 @@ func (h *hasher) hashValueWithType(v reflect.Value, ti *typeInfo, forceCycleChec
|
||||
w := h.bw
|
||||
doCheckCycles := forceCycleChecking || ti.isRecursive
|
||||
|
||||
if !doCheckCycles {
|
||||
hf := ti.hasher()
|
||||
if hf(h, v) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Generic handling.
|
||||
switch v.Kind() {
|
||||
default:
|
||||
|
||||
@@ -392,6 +392,238 @@ func TestCanMemHash(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetTypeHasher(t *testing.T) {
|
||||
switch runtime.GOARCH {
|
||||
case "amd64", "arm64", "arm", "386", "riscv64":
|
||||
default:
|
||||
// Test outputs below are specifically for little-endian machines.
|
||||
// Just skip everything else for now. Feel free to add more above if
|
||||
// you have the hardware to test and it's little-endian.
|
||||
t.Skipf("skipping on %v", runtime.GOARCH)
|
||||
}
|
||||
type typedString string
|
||||
var (
|
||||
someInt = int('A')
|
||||
someComplex128 = complex128(1 + 2i)
|
||||
someIP = netaddr.MustParseIP("1.2.3.4")
|
||||
)
|
||||
tests := []struct {
|
||||
name string
|
||||
val any
|
||||
want bool // set true automatically if out != ""
|
||||
out string
|
||||
out32 string // overwrites out if 32-bit
|
||||
}{
|
||||
{
|
||||
name: "int",
|
||||
val: int(1),
|
||||
out: "\x01\x00\x00\x00\x00\x00\x00\x00",
|
||||
},
|
||||
{
|
||||
name: "int_negative",
|
||||
val: int(-1),
|
||||
out: "\xff\xff\xff\xff\xff\xff\xff\xff",
|
||||
},
|
||||
{
|
||||
name: "int8",
|
||||
val: int8(1),
|
||||
out: "\x01",
|
||||
},
|
||||
{
|
||||
name: "float64",
|
||||
val: float64(1.0),
|
||||
out: "\x00\x00\x00\x00\x00\x00\xf0?",
|
||||
},
|
||||
{
|
||||
name: "float32",
|
||||
val: float32(1.0),
|
||||
out: "\x00\x00\x80?",
|
||||
},
|
||||
{
|
||||
name: "string",
|
||||
val: "foo",
|
||||
out: "\x03\x00\x00\x00\x00\x00\x00\x00foo",
|
||||
},
|
||||
{
|
||||
name: "typedString",
|
||||
val: typedString("foo"),
|
||||
out: "\x03\x00\x00\x00\x00\x00\x00\x00foo",
|
||||
},
|
||||
{
|
||||
name: "string_slice",
|
||||
val: []string{"foo", "bar"},
|
||||
out: "\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00foo\x03\x00\x00\x00\x00\x00\x00\x00bar",
|
||||
},
|
||||
{
|
||||
name: "int_slice",
|
||||
val: []int{1, 0, -1},
|
||||
out: "\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff",
|
||||
out32: "\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff",
|
||||
},
|
||||
{
|
||||
name: "struct",
|
||||
val: struct {
|
||||
a, b int
|
||||
c uint16
|
||||
}{1, -1, 2},
|
||||
out: "\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x02\x00",
|
||||
},
|
||||
{
|
||||
name: "nil_int_ptr",
|
||||
val: (*int)(nil),
|
||||
out: "\x00",
|
||||
},
|
||||
{
|
||||
name: "int_ptr",
|
||||
val: &someInt,
|
||||
out: "\x01A\x00\x00\x00\x00\x00\x00\x00",
|
||||
out32: "\x01A\x00\x00\x00",
|
||||
},
|
||||
{
|
||||
name: "nil_uint32_ptr",
|
||||
val: (*uint32)(nil),
|
||||
out: "\x00",
|
||||
},
|
||||
{
|
||||
name: "complex128_ptr",
|
||||
val: &someComplex128,
|
||||
out: "\x01\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@",
|
||||
},
|
||||
{
|
||||
name: "packet_filter",
|
||||
val: filterRules,
|
||||
out: "\x04\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00*\v\x00\x00\x00\x00\x00\x00\x0010.1.3.4/32\v\x00\x00\x00\x00\x00\x00\x0010.0.0.0/24\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x001.2.3.4/32\x01 \x00\x00\x00\x00\x00\x00\x00\x01\x00\x02\x00\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x001.2.3.4/32\x01\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00foo\x01\x00\x00\x00\x00\x00\x00\x00\v\x00\x00\x00\x00\x00\x00\x00foooooooooo\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\f\x00\x00\x00\x00\x00\x00\x00baaaaaarrrrr\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\v\x00\x00\x00\x00\x00\x00\x00foooooooooo\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\f\x00\x00\x00\x00\x00\x00\x00baaaaaarrrrr\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\v\x00\x00\x00\x00\x00\x00\x00foooooooooo\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\f\x00\x00\x00\x00\x00\x00\x00baaaaaarrrrr\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
|
||||
out32: "\x04\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00*\v\x00\x00\x00\x00\x00\x00\x0010.1.3.4/32\v\x00\x00\x00\x00\x00\x00\x0010.0.0.0/24\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x001.2.3.4/32\x01 \x00\x00\x00\x01\x00\x02\x00\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x001.2.3.4/32\x01\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00foo\x01\x00\x00\x00\x00\x00\x00\x00\v\x00\x00\x00\x00\x00\x00\x00foooooooooo\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\f\x00\x00\x00\x00\x00\x00\x00baaaaaarrrrr\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\v\x00\x00\x00\x00\x00\x00\x00foooooooooo\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\f\x00\x00\x00\x00\x00\x00\x00baaaaaarrrrr\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\v\x00\x00\x00\x00\x00\x00\x00foooooooooo\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\f\x00\x00\x00\x00\x00\x00\x00baaaaaarrrrr\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
|
||||
},
|
||||
{
|
||||
name: "netaddr.IP",
|
||||
val: netaddr.MustParseIP("fe80::123%foo"),
|
||||
out: "\r\x00\x00\x00\x00\x00\x00\x00fe80::123%foo",
|
||||
},
|
||||
{
|
||||
name: "ptr-netaddr.IP",
|
||||
val: &someIP,
|
||||
out: "\x01\a\x00\x00\x00\x00\x00\x00\x001.2.3.4",
|
||||
},
|
||||
{
|
||||
name: "ptr-nil-netaddr.IP",
|
||||
val: (*netaddr.IP)(nil),
|
||||
out: "\x00",
|
||||
},
|
||||
{
|
||||
name: "time",
|
||||
val: time.Unix(0, 0).In(time.UTC),
|
||||
out: "\x141970-01-01T00:00:00Z",
|
||||
},
|
||||
{
|
||||
name: "time_custom_zone",
|
||||
val: time.Unix(1655311822, 0).In(time.FixedZone("FOO", -60*60)),
|
||||
out: "\x192022-06-15T15:50:22-01:00",
|
||||
},
|
||||
{
|
||||
name: "time_nil",
|
||||
val: (*time.Time)(nil),
|
||||
out: "\x00",
|
||||
},
|
||||
{
|
||||
name: "array_memhash",
|
||||
val: [4]byte{1, 2, 3, 4},
|
||||
out: "\x01\x02\x03\x04",
|
||||
},
|
||||
{
|
||||
name: "array_ptr_memhash",
|
||||
val: ptrTo([4]byte{1, 2, 3, 4}),
|
||||
out: "\x01\x01\x02\x03\x04",
|
||||
},
|
||||
{
|
||||
name: "ptr_to_struct_partially_memhashable",
|
||||
val: &struct {
|
||||
A int16
|
||||
B int16
|
||||
C *int
|
||||
}{5, 6, nil},
|
||||
out: "\x01\x05\x00\x06\x00\x00",
|
||||
},
|
||||
{
|
||||
name: "struct_partially_memhashable_but_cant_addr",
|
||||
val: struct {
|
||||
A int16
|
||||
B int16
|
||||
C *int
|
||||
}{5, 6, nil},
|
||||
out: "\x05\x00\x06\x00\x00",
|
||||
},
|
||||
{
|
||||
name: "array_elements",
|
||||
val: [4]byte{1, 2, 3, 4},
|
||||
out: "\x01\x02\x03\x04",
|
||||
},
|
||||
{
|
||||
name: "bool",
|
||||
val: true,
|
||||
out: "\x01",
|
||||
},
|
||||
{
|
||||
name: "IntIntByteInt",
|
||||
val: IntIntByteInt{1, 2, 3, 4},
|
||||
out: "\x01\x00\x00\x00\x02\x00\x00\x00\x03\x04\x00\x00\x00",
|
||||
},
|
||||
{
|
||||
name: "IntIntByteInt-canddr",
|
||||
val: &IntIntByteInt{1, 2, 3, 4},
|
||||
out: "\x01\x01\x00\x00\x00\x02\x00\x00\x00\x03\x04\x00\x00\x00",
|
||||
},
|
||||
{
|
||||
name: "array-IntIntByteInt",
|
||||
val: [2]IntIntByteInt{
|
||||
{1, 2, 3, 4},
|
||||
{5, 6, 7, 8},
|
||||
},
|
||||
out: "\x01\x00\x00\x00\x02\x00\x00\x00\x03\x04\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00\a\b\x00\x00\x00",
|
||||
},
|
||||
{
|
||||
name: "array-IntIntByteInt-canaddr",
|
||||
val: &[2]IntIntByteInt{
|
||||
{1, 2, 3, 4},
|
||||
{5, 6, 7, 8},
|
||||
},
|
||||
out: "\x01\x01\x00\x00\x00\x02\x00\x00\x00\x03\x04\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00\a\b\x00\x00\x00",
|
||||
},
|
||||
{
|
||||
name: "tailcfg.Node",
|
||||
val: &tailcfg.Node{},
|
||||
out: "\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x140001-01-01T00:00:00Z\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x140001-01-01T00:00:00Z\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
rv := reflect.ValueOf(tt.val)
|
||||
fn := getTypeInfo(rv.Type()).hasher()
|
||||
var buf bytes.Buffer
|
||||
h := &hasher{
|
||||
bw: bufio.NewWriter(&buf),
|
||||
}
|
||||
got := fn(h, rv)
|
||||
const ptrSize = 32 << uintptr(^uintptr(0)>>63)
|
||||
if tt.out32 != "" && ptrSize == 32 {
|
||||
tt.out = tt.out32
|
||||
}
|
||||
if tt.out != "" {
|
||||
tt.want = true
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Fatalf("func returned %v; want %v", got, tt.want)
|
||||
}
|
||||
if err := h.bw.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := buf.String(); got != tt.out {
|
||||
t.Fatalf("got %q; want %q", got, tt.out)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var sink = Hash("foo")
|
||||
|
||||
func BenchmarkHash(b *testing.B) {
|
||||
@@ -448,8 +680,9 @@ var filterRules = []tailcfg.FilterRule{
|
||||
func BenchmarkHashPacketFilter(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
|
||||
hash := HasherForType[[]tailcfg.FilterRule]()
|
||||
for i := 0; i < b.N; i++ {
|
||||
sink = Hash(filterRules)
|
||||
sink = hash(filterRules)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ package distro
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
@@ -94,3 +95,17 @@ func freebsdDistro() Distro {
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// DSMVersion reports the Synology DSM major version.
|
||||
//
|
||||
// If not Synology, it reports 0.
|
||||
func DSMVersion() int {
|
||||
if runtime.GOOS != "linux" {
|
||||
return 0
|
||||
}
|
||||
if Get() != Synology {
|
||||
return 0
|
||||
}
|
||||
v, _ := strconv.Atoi(os.Getenv("SYNOPKG_DSM_VERSION_MAJOR"))
|
||||
return v
|
||||
}
|
||||
|
||||
@@ -442,14 +442,7 @@ func (c *Conn) addDerpPeerRoute(peer key.NodePublic, derpID int, dc *derphttp.Cl
|
||||
mak.Set(&c.derpRoute, peer, derpRoute{derpID, dc})
|
||||
}
|
||||
|
||||
// DerpMagicIP is a fake WireGuard endpoint IP address that means
|
||||
// to use DERP. When used, the port number of the WireGuard endpoint
|
||||
// is the DERP server number to use.
|
||||
//
|
||||
// Mnemonic: 3.3.40 are numbers above the keys D, E, R, P.
|
||||
const DerpMagicIP = "127.3.3.40"
|
||||
|
||||
var derpMagicIPAddr = netaddr.MustParseIP(DerpMagicIP)
|
||||
var derpMagicIPAddr = netaddr.MustParseIP(tailcfg.DerpMagicIP)
|
||||
|
||||
// activeDerp contains fields for an active DERP connection.
|
||||
type activeDerp struct {
|
||||
|
||||
@@ -21,10 +21,10 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/bufferv2"
|
||||
"gvisor.dev/gvisor/pkg/refs"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/buffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
|
||||
@@ -402,9 +402,9 @@ func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) filter.Re
|
||||
if debugPackets {
|
||||
ns.logf("[v2] service packet in (from %v): % x", p.Src, p.Buffer())
|
||||
}
|
||||
vv := buffer.View(append([]byte(nil), p.Buffer()...)).ToVectorisedView()
|
||||
|
||||
packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{
|
||||
Data: vv,
|
||||
Payload: bufferv2.MakeWithData(append([]byte(nil), p.Buffer()...)),
|
||||
})
|
||||
ns.linkEP.InjectInbound(pn, packetBuf)
|
||||
packetBuf.DecRef()
|
||||
@@ -477,7 +477,7 @@ func (ns *Impl) inject() {
|
||||
// TODO(tom): Figure out if its safe to modify packet.Parsed to fill in
|
||||
// the IP src/dest even if its missing the rest of the pkt.
|
||||
// That way we dont have to do this twitchy-af byte-yeeting.
|
||||
if b := pkt.NetworkHeader().View(); len(b) >= 20 { // min ipv4 header
|
||||
if b := pkt.NetworkHeader().Slice(); len(b) >= 20 { // min ipv4 header
|
||||
switch b[0] >> 4 { // ip proto field
|
||||
case 4:
|
||||
if srcIP := netaddr.IPv4(b[12], b[13], b[14], b[15]); magicDNSIP == srcIP {
|
||||
@@ -687,9 +687,8 @@ func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper) filter.Respons
|
||||
if debugPackets {
|
||||
ns.logf("[v2] packet in (from %v): % x", p.Src, p.Buffer())
|
||||
}
|
||||
vv := buffer.View(append([]byte(nil), p.Buffer()...)).ToVectorisedView()
|
||||
packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{
|
||||
Data: vv,
|
||||
Payload: bufferv2.MakeWithData(append([]byte(nil), p.Buffer()...)),
|
||||
})
|
||||
ns.linkEP.InjectInbound(pn, packetBuf)
|
||||
packetBuf.DecRef()
|
||||
|
||||
@@ -182,11 +182,7 @@ func useAmbientCaps() bool {
|
||||
if distro.Get() != distro.Synology {
|
||||
return false
|
||||
}
|
||||
v, err := strconv.Atoi(os.Getenv("SYNOPKG_DSM_VERSION_MAJOR"))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return v >= 7
|
||||
return distro.DSMVersion() >= 7
|
||||
}
|
||||
|
||||
var forceIPCommand = envknob.Bool("TS_DEBUG_USE_IP_COMMAND")
|
||||
|
||||
Reference in New Issue
Block a user