Compare commits

10 Commits

Brad Fitzpatrick
3579452856 wgengine/netstack: add netstack port rewriting mechanism
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-05-13 20:57:41 -07:00
Josh Bleecher Snyder
6f62bbae79 cmd/tailscale: make ping --until-direct require direct connection to exit 0
If --until-direct is set, the goal is to make a direct connection.
If we failed at that, say so, and exit with an error.

RELNOTE=tailscale ping --until-direct (the default) now exits with
a non-zero exit code if no direct connection was established.

Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
2021-05-13 15:07:19 -07:00
Avery Pennarun
6fd4e8d244 ipnlocal: fix switching users while logged in + Stopped.
This code path is very tricky since it was originally designed for the
"re-authenticate to refresh my keys" use case, which didn't want to
lose the original session even if the refresh cycle failed. This is why
it acts differently from the Logout(); Login(); case.

Maybe that's too fancy, considering that it probably never quite worked
at all for switching between users without logging out first. But it
works now.

This was more invasive than I hoped, but the necessary fixes actually
removed several other suspicious BUG: lines from state_test.go, so I'm
pretty confident this is a significant net improvement.

Fixes tailscale/corp#1756.

Signed-off-by: Avery Pennarun <apenwarr@tailscale.com>
2021-05-12 23:21:22 -04:00
Avery Pennarun
6307a9285d controlclient: update Persist.LoginName when it changes.
Well, that was anticlimactic.

Fixes tailscale/corp#461.

Signed-off-by: Avery Pennarun <apenwarr@tailscale.com>
2021-05-12 23:21:11 -04:00
Avery Pennarun
285d0e3b4d ipnlocal: fix deadlock in RequestEngineStatusAndWait() error path.
If the engine was shutting down from a previous session
(e.closing=true), it would return an error code when trying to get
status. In that case, ipnlocal would never unblock any callers that
were waiting on the status.

Not sure if this ever happened in real life, but I accidentally
triggered it while writing a test.

Signed-off-by: Avery Pennarun <apenwarr@tailscale.com>
2021-05-12 23:21:11 -04:00
Brad Fitzpatrick
5a7c6f1678 tstest/integration{,/testcontrol}: add node update support, two node test
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-05-12 14:43:43 -07:00
Brad Fitzpatrick
d32667011d tstest/integration: build test binaries with -race if test itself is
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-05-12 13:13:08 -07:00
Brad Fitzpatrick
314d15b3fb version: add func IsRace to report whether race detector enabled
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-05-12 13:12:41 -07:00
Brad Fitzpatrick
ed9d825552 tstest/integration: fix integration test on linux/386
Apparently can't use GOBIN with GOARCH.

Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-05-12 11:56:00 -07:00
Brad Fitzpatrick
c0158bcd0b tstest/integration{,/testcontrol}: add testcontrol.RequireAuth mode, new test
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-05-12 11:37:27 -07:00
9 changed files with 573 additions and 141 deletions

File: cmd/tailscale/cli/ping.go

@@ -139,6 +139,9 @@ func runPing(ctx context.Context, args []string) error {
if !anyPong {
return errors.New("no reply")
}
if pingArgs.untilDirect {
return errors.New("direct connection not established")
}
return nil
}
}
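For scripts that rely on the new exit-code contract, a minimal sketch (hypothetical node name; assumes only the behavior described in the RELNOTE above):

package main

import (
	"log"
	"os/exec"
)

func main() {
	// `tailscale ping --until-direct my-node` now exits non-zero when no
	// direct connection was established, so err below doubles as the signal.
	out, err := exec.Command("tailscale", "ping", "--until-direct", "my-node").CombinedOutput()
	if err != nil {
		log.Fatalf("no direct connection to my-node: %v\n%s", err, out)
	}
	log.Printf("direct connection established:\n%s", out)
}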

File: control/controlclient/direct.go

@@ -460,10 +460,10 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new
request.NodeKey.ShortString())
return true, "", nil
}
if persist.Provider == "" {
if resp.Login.Provider != "" {
persist.Provider = resp.Login.Provider
}
if persist.LoginName == "" {
if resp.Login.LoginName != "" {
persist.LoginName = resp.Login.LoginName
}

File: ipn/ipnlocal/local.go

@@ -434,14 +434,15 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
}
return
}
if st.LoginFinished != nil {
b.mu.Lock()
wasBlocked := b.blocked
b.mu.Unlock()
if st.LoginFinished != nil && wasBlocked {
// Auth completed, unblock the engine
b.blockEngineUpdates(false)
b.authReconfig()
b.EditPrefs(&ipn.MaskedPrefs{
LoggedOutSet: true,
Prefs: ipn.Prefs{LoggedOut: false},
})
b.send(ipn.Notify{LoginFinished: &empty.Message{}})
}
@@ -480,11 +481,15 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
b.authURL = st.URL
b.authURLSticky = st.URL
}
if b.state == ipn.NeedsLogin {
if !b.prefs.WantRunning {
if wasBlocked && st.LoginFinished != nil {
// Interactive login finished successfully (URL visited).
// After an interactive login, the user always wants
// WantRunning.
if !b.prefs.WantRunning || b.prefs.LoggedOut {
prefsChanged = true
}
b.prefs.WantRunning = true
b.prefs.LoggedOut = false
}
// Prefs will be written out; this is not safe unless locked or cloned.
if prefsChanged {
@@ -566,10 +571,18 @@ func (b *LocalBackend) findExitNodeIDLocked(nm *netmap.NetworkMap) (prefsChanged
func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) {
if err != nil {
b.logf("wgengine status error: %v", err)
b.statusLock.Lock()
b.statusChanged.Broadcast()
b.statusLock.Unlock()
return
}
if s == nil {
b.logf("[unexpected] non-error wgengine update with status=nil: %v", s)
b.statusLock.Lock()
b.statusChanged.Broadcast()
b.statusLock.Unlock()
return
}
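The deadlock fix above follows a general sync.Cond rule: every exit path on the notifier side must still wake waiters. A stripped-down sketch of the pattern (names invented for illustration, not the actual ipnlocal types):

package main

import (
	"fmt"
	"sync"
	"time"
)

type backend struct {
	mu     sync.Mutex
	cond   *sync.Cond // signals that status changed
	status string
}

func (b *backend) setStatus(s string, err error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if err != nil {
		s = "error" // record *something* so waiters can observe completion
	}
	b.status = s
	// Without a Broadcast on the error path too, waitStatus would
	// block forever -- the bug fixed in the hunk above.
	b.cond.Broadcast()
}

func (b *backend) waitStatus() string {
	b.mu.Lock()
	defer b.mu.Unlock()
	for b.status == "" {
		b.cond.Wait()
	}
	return b.status
}

func main() {
	b := &backend{}
	b.cond = sync.NewCond(&b.mu)
	go func() {
		time.Sleep(10 * time.Millisecond)
		b.setStatus("running", nil)
	}()
	fmt.Println(b.waitStatus()) // "running"
}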
@@ -2113,8 +2126,8 @@ func (b *LocalBackend) enterState(newState ipn.State) {
if oldState == newState {
return
}
b.logf("Switching ipn state %v -> %v (WantRunning=%v)",
oldState, newState, prefs.WantRunning)
b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)",
oldState, newState, prefs.WantRunning, netMap != nil)
health.SetIPNState(newState.String(), prefs.WantRunning)
b.send(ipn.Notify{State: &newState})
@@ -2170,13 +2183,14 @@ func (b *LocalBackend) nextState() ipn.State {
cc = b.cc
netMap = b.netMap
state = b.state
blocked = b.blocked
wantRunning = b.prefs.WantRunning
loggedOut = b.prefs.LoggedOut
)
b.mu.Unlock()
switch {
case !wantRunning && !loggedOut && b.hasNodeKey():
case !wantRunning && !loggedOut && !blocked && b.hasNodeKey():
return ipn.Stopped
case netMap == nil:
if cc.AuthCantContinue() || loggedOut {

File: ipn/ipnlocal/state_test.go

@@ -368,13 +368,11 @@ func TestStateMachine(t *testing.T) {
{
c.Assert(cc.getCalls(), qt.DeepEquals, []string{"Login"})
notifies.drain(0)
// BUG: this should immediately set WantRunning to true.
// Users don't log in if they don't want to also connect.
// (Generally, we're inconsistent about who is supposed to
// update Prefs at what time. But the overall philosophy is:
// update it when the user's intent changes. This is clearly
// at the time the user *requests* Login, not at the time
// the login finishes.)
// Note: WantRunning isn't true yet. It'll switch to true
// after a successful login finishes.
// (This behaviour is needed so that b.Login() won't
// start connecting to an old account right away, if one
// exists when you launch another login.)
}
// Attempted non-interactive login with no key; indicate that
@@ -384,18 +382,16 @@ func TestStateMachine(t *testing.T) {
url1 := "http://localhost:1/1"
cc.send(nil, url1, false, nil)
{
c.Assert(cc.getCalls(), qt.HasLen, 0)
c.Assert(cc.getCalls(), qt.DeepEquals, []string{})
// ...but backend eats that notification, because the user
// didn't explicitly request interactive login yet, and
// we're already in NeedsLogin state.
nn := notifies.drain(1)
// Trying to log in automatically sets WantRunning.
// BUG: that should have happened right after Login().
c.Assert(nn[0].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[0].Prefs.LoggedOut, qt.IsFalse)
c.Assert(nn[0].Prefs.WantRunning, qt.IsTrue)
c.Assert(nn[0].Prefs.WantRunning, qt.IsFalse)
}
// Now we'll try an interactive login.
@@ -451,11 +447,12 @@ func TestStateMachine(t *testing.T) {
// same time.
// The backend should propagate this upward for the UI.
t.Logf("\n\nLoginFinished")
notifies.expect(2)
notifies.expect(3)
cc.setAuthBlocked(false)
cc.persist.LoginName = "user1"
cc.send(nil, "", true, &netmap.NetworkMap{})
{
nn := notifies.drain(2)
nn := notifies.drain(3)
// BUG: still too soon for UpdateEndpoints.
//
// Arguably it makes sense to unpause now, since the machine
@@ -468,15 +465,12 @@ func TestStateMachine(t *testing.T) {
// it's visible in the logs)
c.Assert([]string{"unpause", "UpdateEndpoints"}, qt.DeepEquals, cc.getCalls())
c.Assert(nn[0].LoginFinished, qt.Not(qt.IsNil))
c.Assert(nn[1].State, qt.Not(qt.IsNil))
c.Assert(ipn.NeedsMachineAuth, qt.Equals, *nn[1].State)
c.Assert(nn[1].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[2].State, qt.Not(qt.IsNil))
c.Assert(nn[1].Prefs.Persist.LoginName, qt.Equals, "user1")
c.Assert(ipn.NeedsMachineAuth, qt.Equals, *nn[2].State)
}
// TODO: check that the logged-in username propagates from control
// through to the UI notifications. I think it's used as a hint
// for future logins, to pre-fill the username box? Not really sure
// how it works.
// Pretend that the administrator has authorized our machine.
t.Logf("\n\nMachineAuthorized")
notifies.expect(1)
@@ -581,77 +575,72 @@ func TestStateMachine(t *testing.T) {
// Let's make the logout succeed.
t.Logf("\n\nLogout (async) - succeed")
notifies.expect(1)
notifies.expect(0)
cc.setAuthBlocked(true)
cc.send(nil, "", false, nil)
{
nn := notifies.drain(1)
notifies.drain(0)
c.Assert(cc.getCalls(), qt.HasLen, 0)
c.Assert(nn[0].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[0].Prefs.LoggedOut, qt.IsTrue)
// BUG: WantRunning should be false after manual logout.
c.Assert(nn[0].Prefs.WantRunning, qt.IsTrue)
c.Assert(b.Prefs().LoggedOut, qt.IsTrue)
c.Assert(b.Prefs().WantRunning, qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
// A second logout should do nothing, since the prefs haven't changed.
t.Logf("\n\nLogout2 (async)")
notifies.expect(1)
notifies.expect(0)
b.Logout()
{
nn := notifies.drain(1)
notifies.drain(0)
// BUG: the backend has already called StartLogout, and we're
// still logged out. So it shouldn't call it again.
c.Assert([]string{"StartLogout"}, qt.DeepEquals, cc.getCalls())
// BUG: Prefs should not change here. Already logged out.
c.Assert(nn[0].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[0].Prefs.LoggedOut, qt.IsTrue)
c.Assert(nn[0].Prefs.WantRunning, qt.IsFalse)
c.Assert(cc.getCalls(), qt.HasLen, 0)
c.Assert(b.Prefs().LoggedOut, qt.IsTrue)
c.Assert(b.Prefs().WantRunning, qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
// Let's acknowledge the second logout too.
t.Logf("\n\nLogout2 (async) - succeed")
notifies.expect(1)
notifies.expect(0)
cc.setAuthBlocked(true)
cc.send(nil, "", false, nil)
{
nn := notifies.drain(1)
notifies.drain(0)
c.Assert(cc.getCalls(), qt.HasLen, 0)
c.Assert(nn[0].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[0].Prefs.LoggedOut, qt.IsTrue)
// BUG: second logout shouldn't cause WantRunning->true !!
c.Assert(nn[0].Prefs.WantRunning, qt.IsTrue)
c.Assert(cc.getCalls(), qt.HasLen, 0)
c.Assert(b.Prefs().LoggedOut, qt.IsTrue)
c.Assert(b.Prefs().WantRunning, qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
// Try the synchronous logout feature.
t.Logf("\n\nLogout3 (sync)")
notifies.expect(1)
notifies.expect(0)
b.LogoutSync(context.Background())
// NOTE: This returns as soon as cc.Logout() returns, which is okay
// I guess, since that's supposed to be synchronous.
{
nn := notifies.drain(1)
notifies.drain(0)
c.Assert([]string{"Logout"}, qt.DeepEquals, cc.getCalls())
c.Assert(nn[0].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[0].Prefs.LoggedOut, qt.IsTrue)
c.Assert(nn[0].Prefs.WantRunning, qt.IsFalse)
c.Assert(cc.getCalls(), qt.HasLen, 0)
c.Assert(b.Prefs().LoggedOut, qt.IsTrue)
c.Assert(b.Prefs().WantRunning, qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
// Generate the third logout event.
t.Logf("\n\nLogout3 (sync) - succeed")
notifies.expect(1)
notifies.expect(0)
cc.setAuthBlocked(true)
cc.send(nil, "", false, nil)
{
nn := notifies.drain(1)
notifies.drain(0)
c.Assert(cc.getCalls(), qt.HasLen, 0)
c.Assert(nn[0].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[0].Prefs.LoggedOut, qt.IsTrue)
// BUG: third logout shouldn't cause WantRunning->true !!
c.Assert(nn[0].Prefs.WantRunning, qt.IsTrue)
c.Assert(cc.getCalls(), qt.HasLen, 0)
c.Assert(b.Prefs().LoggedOut, qt.IsTrue)
c.Assert(b.Prefs().WantRunning, qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
@@ -669,10 +658,6 @@ func TestStateMachine(t *testing.T) {
// happens if the user exits and restarts while logged out.
// Note that it's explicitly okay to call b.Start() over and over
// again, every time the frontend reconnects.
//
// BUG: WantRunning is true here (because of the bug above).
// We'll have to adjust the following test's expectations if we
// fix that.
// TODO: test user switching between statekeys.
@@ -691,7 +676,7 @@ func TestStateMachine(t *testing.T) {
c.Assert(nn[0].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[1].State, qt.Not(qt.IsNil))
c.Assert(nn[0].Prefs.LoggedOut, qt.IsTrue)
c.Assert(nn[0].Prefs.WantRunning, qt.IsTrue)
c.Assert(nn[0].Prefs.WantRunning, qt.IsFalse)
c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State)
c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
}
@@ -703,16 +688,20 @@ func TestStateMachine(t *testing.T) {
t.Logf("\n\nLoginFinished3")
notifies.expect(3)
cc.setAuthBlocked(false)
cc.persist.LoginName = "user2"
cc.send(nil, "", true, &netmap.NetworkMap{
MachineStatus: tailcfg.MachineAuthorized,
})
{
nn := notifies.drain(3)
c.Assert([]string{"unpause"}, qt.DeepEquals, cc.getCalls())
c.Assert(nn[0].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[1].LoginFinished, qt.Not(qt.IsNil))
c.Assert(nn[0].LoginFinished, qt.Not(qt.IsNil))
c.Assert(nn[1].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[2].State, qt.Not(qt.IsNil))
c.Assert(nn[0].Prefs.LoggedOut, qt.IsFalse)
// Prefs after finishing the login, so LoginName updated.
c.Assert(nn[1].Prefs.Persist.LoginName, qt.Equals, "user2")
c.Assert(nn[1].Prefs.LoggedOut, qt.IsFalse)
c.Assert(nn[1].Prefs.WantRunning, qt.IsTrue)
c.Assert(ipn.Starting, qt.Equals, *nn[2].State)
}
@@ -773,6 +762,63 @@ func TestStateMachine(t *testing.T) {
c.Assert(ipn.Starting, qt.Equals, *nn[0].State)
}
// Disconnect.
t.Logf("\n\nStop")
notifies.expect(2)
b.EditPrefs(&ipn.MaskedPrefs{
WantRunningSet: true,
Prefs: ipn.Prefs{WantRunning: false},
})
{
nn := notifies.drain(2)
c.Assert([]string{"unpause"}, qt.DeepEquals, cc.getCalls())
// BUG: I would expect Prefs to change first, and state after.
c.Assert(nn[0].State, qt.Not(qt.IsNil))
c.Assert(nn[1].Prefs, qt.Not(qt.IsNil))
c.Assert(ipn.Stopped, qt.Equals, *nn[0].State)
}
// We want to try logging in as a different user, while Stopped.
// First, start the login process (without logging out first).
t.Logf("\n\nLoginDifferent")
notifies.expect(2)
b.StartLoginInteractive()
url3 := "http://localhost:1/3"
cc.send(nil, url3, false, nil)
{
nn := notifies.drain(2)
// It might seem like WantRunning should switch to true here,
// but that would be risky since we already have a valid
// user account. It might try to reconnect to the old account
// before the new one is ready. So no change yet.
c.Assert([]string{"Login", "unpause"}, qt.DeepEquals, cc.getCalls())
c.Assert(nn[0].BrowseToURL, qt.Not(qt.IsNil))
c.Assert(nn[1].State, qt.Not(qt.IsNil))
c.Assert(*nn[0].BrowseToURL, qt.Equals, url3)
c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State)
}
// Now, let's say the interactive login completed, using a different
// user account than before.
t.Logf("\n\nLoginDifferent URL visited")
notifies.expect(3)
cc.persist.LoginName = "user3"
cc.send(nil, "", true, &netmap.NetworkMap{
MachineStatus: tailcfg.MachineAuthorized,
})
{
nn := notifies.drain(3)
c.Assert([]string{"unpause"}, qt.DeepEquals, cc.getCalls())
c.Assert(nn[0].LoginFinished, qt.Not(qt.IsNil))
c.Assert(nn[1].Prefs, qt.Not(qt.IsNil))
c.Assert(nn[2].State, qt.Not(qt.IsNil))
// Prefs after finishing the login, so LoginName updated.
c.Assert(nn[1].Prefs.Persist.LoginName, qt.Equals, "user3")
c.Assert(nn[1].Prefs.LoggedOut, qt.IsFalse)
c.Assert(nn[1].Prefs.WantRunning, qt.IsTrue)
c.Assert(ipn.Starting, qt.Equals, *nn[2].State)
}
// The last test case is the most common one: restarting when both
// logged in and WantRunning.
t.Logf("\n\nStart5")
@@ -793,17 +839,18 @@ func TestStateMachine(t *testing.T) {
// Control server accepts our valid key from before.
t.Logf("\n\nLoginFinished5")
notifies.expect(2)
notifies.expect(1)
cc.setAuthBlocked(false)
cc.send(nil, "", true, &netmap.NetworkMap{
MachineStatus: tailcfg.MachineAuthorized,
})
{
nn := notifies.drain(2)
nn := notifies.drain(1)
c.Assert([]string{"unpause"}, qt.DeepEquals, cc.getCalls())
c.Assert(nn[0].LoginFinished, qt.Not(qt.IsNil))
c.Assert(nn[1].State, qt.Not(qt.IsNil))
c.Assert(ipn.Starting, qt.Equals, *nn[1].State)
// NOTE: No LoginFinished message since no interactive
// login was needed.
c.Assert(nn[0].State, qt.Not(qt.IsNil))
c.Assert(ipn.Starting, qt.Equals, *nn[0].State)
// NOTE: No prefs change this time. WantRunning stays true.
// We were in Starting in the first place, so that doesn't
// change either.

File: tstest/integration/integration_test.go

@@ -10,6 +10,8 @@ import (
crand "crypto/rand"
"crypto/tls"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
@@ -21,6 +23,7 @@ import (
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
@@ -41,8 +44,11 @@ import (
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/nettype"
"tailscale.com/version"
)
var verbose = flag.Bool("verbose", false, "verbose debug logs")
var mainError atomic.Value // of error
func TestMain(m *testing.M) {
@@ -57,11 +63,8 @@ func TestMain(m *testing.M) {
os.Exit(0)
}
func TestIntegration(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("not tested/working on Windows yet")
}
func TestOneNodeUp_NoAuth(t *testing.T) {
t.Parallel()
bins := buildTestBinaries(t)
env := newTestEnv(t, bins)
@@ -69,8 +72,8 @@ func TestIntegration(t *testing.T) {
n1 := newTestNode(t, env)
dcmd := n1.StartDaemon(t)
defer dcmd.Process.Kill()
d1 := n1.StartDaemon(t)
defer d1.Kill()
n1.AwaitListening(t)
@@ -87,44 +90,110 @@ func TestIntegration(t *testing.T) {
t.Error(err)
}
t.Logf("Running up --login-server=%s ...", env.ControlServer.URL)
if err := n1.Tailscale("up", "--login-server="+env.ControlServer.URL).Run(); err != nil {
t.Fatalf("up: %v", err)
}
n1.MustUp()
if d, _ := time.ParseDuration(os.Getenv("TS_POST_UP_SLEEP")); d > 0 {
t.Logf("Sleeping for %v to give 'up' time to misbehave (https://github.com/tailscale/tailscale/issues/1840) ...", d)
time.Sleep(d)
}
var ip string
if err := tstest.WaitFor(20*time.Second, func() error {
out, err := n1.Tailscale("ip").Output()
if err != nil {
return err
t.Logf("Got IP: %v", n1.AwaitIP(t))
n1.AwaitRunning(t)
d1.MustCleanShutdown(t)
t.Logf("number of HTTP logcatcher requests: %v", env.LogCatcher.numRequests())
}
func TestOneNodeUp_Auth(t *testing.T) {
t.Parallel()
bins := buildTestBinaries(t)
env := newTestEnv(t, bins)
defer env.Close()
env.Control.RequireAuth = true
n1 := newTestNode(t, env)
d1 := n1.StartDaemon(t)
defer d1.Kill()
n1.AwaitListening(t)
st := n1.MustStatus(t)
t.Logf("Status: %s", st.BackendState)
t.Logf("Running up --login-server=%s ...", env.ControlServer.URL)
cmd := n1.Tailscale("up", "--login-server="+env.ControlServer.URL)
var authCountAtomic int32
cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error {
if env.Control.CompleteAuth(urlStr) {
atomic.AddInt32(&authCountAtomic, 1)
t.Logf("completed auth path %s", urlStr)
return nil
}
err := fmt.Errorf("Failed to complete auth path to %q", urlStr)
t.Log(err)
return err
}}
cmd.Stderr = cmd.Stdout
if err := cmd.Run(); err != nil {
t.Fatalf("up: %v", err)
}
t.Logf("Got IP: %v", n1.AwaitIP(t))
n1.AwaitRunning(t)
if n := atomic.LoadInt32(&authCountAtomic); n != 1 {
t.Errorf("Auth URLs completed = %d; want 1", n)
}
d1.MustCleanShutdown(t)
}
func TestTwoNodes(t *testing.T) {
t.Parallel()
bins := buildTestBinaries(t)
env := newTestEnv(t, bins)
defer env.Close()
// Create two nodes:
n1 := newTestNode(t, env)
d1 := n1.StartDaemon(t)
defer d1.Kill()
n2 := newTestNode(t, env)
d2 := n2.StartDaemon(t)
defer d2.Kill()
n1.AwaitListening(t)
n2.AwaitListening(t)
n1.MustUp()
n2.MustUp()
n1.AwaitRunning(t)
n2.AwaitRunning(t)
if err := tstest.WaitFor(2*time.Second, func() error {
st := n1.MustStatus(t)
if len(st.Peer) == 0 {
return errors.New("no peers")
}
if len(st.Peer) > 1 {
return fmt.Errorf("got %d peers; want 1", len(st.Peer))
}
peer := st.Peer[st.Peers()[0]]
if peer.ID == st.Self.ID {
return errors.New("peer is self")
}
ip = string(out)
return nil
}); err != nil {
t.Error(err)
}
t.Logf("Got IP: %v", ip)
dcmd.Process.Signal(os.Interrupt)
ps, err := dcmd.Process.Wait()
if err != nil {
t.Fatalf("tailscaled Wait: %v", err)
}
if ps.ExitCode() != 0 {
t.Errorf("tailscaled ExitCode = %d; want 0", ps.ExitCode())
}
t.Logf("number of HTTP logcatcher requests: %v", env.LogCatcher.numRequests())
if err := env.TrafficTrap.Err(); err != nil {
t.Errorf("traffic trap: %v", err)
t.Logf("logs: %s", env.LogCatcher.logsString())
}
d1.MustCleanShutdown(t)
d2.MustCleanShutdown(t)
}
// testBinaries are the paths to a tailscaled and tailscale binary.
@@ -139,16 +208,18 @@ type testBinaries struct {
// if they fail to compile.
func buildTestBinaries(t testing.TB) *testBinaries {
td := t.TempDir()
build(t, td, "tailscale.com/cmd/tailscaled", "tailscale.com/cmd/tailscale")
return &testBinaries{
dir: td,
daemon: build(t, td, "tailscale.com/cmd/tailscaled"),
cli: build(t, td, "tailscale.com/cmd/tailscale"),
daemon: filepath.Join(td, "tailscaled"+exe()),
cli: filepath.Join(td, "tailscale"+exe()),
}
}
// testEnv contains the test environment (set of servers) used by one
// or more nodes.
type testEnv struct {
t testing.TB
Binaries *testBinaries
LogCatcher *logCatcher
@@ -168,6 +239,9 @@ type testEnv struct {
//
// Call Close to shut everything down.
func newTestEnv(t testing.TB, bins *testBinaries) *testEnv {
if runtime.GOOS == "windows" {
t.Skip("not tested/working on Windows yet")
}
derpMap, derpShutdown := runDERPAndStun(t, logger.Discard)
logc := new(logCatcher)
control := &testcontrol.Server{
@@ -175,6 +249,7 @@ func newTestEnv(t testing.TB, bins *testBinaries) *testEnv {
}
trafficTrap := new(trafficTrap)
e := &testEnv{
t: t,
Binaries: bins,
LogCatcher: logc,
LogCatcherServer: httptest.NewServer(logc),
@@ -184,10 +259,16 @@ func newTestEnv(t testing.TB, bins *testBinaries) *testEnv {
TrafficTrapServer: httptest.NewServer(trafficTrap),
derpShutdown: derpShutdown,
}
e.Control.BaseURL = e.ControlServer.URL
return e
}
func (e *testEnv) Close() error {
if err := e.TrafficTrap.Err(); err != nil {
e.t.Errorf("traffic trap: %v", err)
e.t.Logf("logs: %s", e.LogCatcher.logsString())
}
e.LogCatcherServer.Close()
e.TrafficTrapServer.Close()
e.ControlServer.Close()
@@ -218,9 +299,28 @@ func newTestNode(t *testing.T, env *testEnv) *testNode {
}
}
type Daemon struct {
Process *os.Process
}
func (d *Daemon) Kill() {
d.Process.Kill()
}
func (d *Daemon) MustCleanShutdown(t testing.TB) {
d.Process.Signal(os.Interrupt)
ps, err := d.Process.Wait()
if err != nil {
t.Fatalf("tailscaled Wait: %v", err)
}
if ps.ExitCode() != 0 {
t.Errorf("tailscaled ExitCode = %d; want 0", ps.ExitCode())
}
}
// StartDaemon starts the node's tailscaled, failing if it fails to
// start.
func (n *testNode) StartDaemon(t testing.TB) *exec.Cmd {
func (n *testNode) StartDaemon(t testing.TB) *Daemon {
cmd := exec.Command(n.env.Binaries.daemon,
"--tun=userspace-networking",
"--state="+n.stateFile,
@@ -234,7 +334,17 @@ func (n *testNode) StartDaemon(t testing.TB) *exec.Cmd {
if err := cmd.Start(); err != nil {
t.Fatalf("starting tailscaled: %v", err)
}
return cmd
return &Daemon{
Process: cmd.Process,
}
}
func (n *testNode) MustUp() {
t := n.env.t
t.Logf("Running up --login-server=%s ...", n.env.ControlServer.URL)
if err := n.Tailscale("up", "--login-server="+n.env.ControlServer.URL).Run(); err != nil {
t.Fatalf("up: %v", err)
}
}
// AwaitListening waits for the tailscaled to be serving local clients
@@ -252,6 +362,40 @@ func (n *testNode) AwaitListening(t testing.TB) {
}
}
func (n *testNode) AwaitIP(t testing.TB) (ips string) {
t.Helper()
if err := tstest.WaitFor(20*time.Second, func() error {
out, err := n.Tailscale("ip").Output()
if err != nil {
return err
}
ips = string(out)
return nil
}); err != nil {
t.Fatalf("awaiting an IP address: %v", err)
}
if ips == "" {
t.Fatalf("returned IP address was blank")
}
return ips
}
func (n *testNode) AwaitRunning(t testing.TB) {
t.Helper()
if err := tstest.WaitFor(20*time.Second, func() error {
st, err := n.Status()
if err != nil {
return err
}
if st.BackendState != "Running" {
return fmt.Errorf("in state %q", st.BackendState)
}
return nil
}); err != nil {
t.Fatalf("failure/timeout waiting for transition to Running status: %v", err)
}
}
// Tailscale returns a command that runs the tailscale CLI with the provided arguments.
// It does not start the process.
func (n *testNode) Tailscale(arg ...string) *exec.Cmd {
@@ -261,15 +405,23 @@ func (n *testNode) Tailscale(arg ...string) *exec.Cmd {
return cmd
}
func (n *testNode) MustStatus(tb testing.TB) *ipnstate.Status {
tb.Helper()
func (n *testNode) Status() (*ipnstate.Status, error) {
out, err := n.Tailscale("status", "--json").CombinedOutput()
if err != nil {
tb.Fatalf("getting status: %v, %s", err, out)
return nil, fmt.Errorf("running tailscale status: %v, %s", err, out)
}
st := new(ipnstate.Status)
if err := json.Unmarshal(out, st); err != nil {
tb.Fatalf("parsing status json: %v, from: %s", err, out)
return nil, fmt.Errorf("decoding tailscale status JSON: %w", err)
}
return st, nil
}
func (n *testNode) MustStatus(tb testing.TB) *ipnstate.Status {
tb.Helper()
st, err := n.Status()
if err != nil {
tb.Fatal(err)
}
return st
}
@@ -291,21 +443,44 @@ func findGo(t testing.TB) string {
} else if !fi.Mode().IsRegular() {
t.Fatalf("%v is unexpected %v", goBin, fi.Mode())
}
t.Logf("using go binary %v", goBin)
return goBin
}
func build(t testing.TB, outDir, target string) string {
exe := ""
if runtime.GOOS == "windows" {
exe = ".exe"
// buildMu limits our use of "go build" to one at a time, so we don't
// fight Go's built-in caching trying to do the same build concurrently.
var buildMu sync.Mutex
func build(t testing.TB, outDir string, targets ...string) {
buildMu.Lock()
defer buildMu.Unlock()
t0 := time.Now()
defer func() { t.Logf("built %s in %v", targets, time.Since(t0).Round(time.Millisecond)) }()
goBin := findGo(t)
cmd := exec.Command(goBin, "install")
if version.IsRace() {
cmd.Args = append(cmd.Args, "-race")
}
bin := filepath.Join(outDir, path.Base(target)) + exe
errOut, err := exec.Command(findGo(t), "build", "-o", bin, target).CombinedOutput()
if err != nil {
t.Fatalf("failed to build %v: %v, %s", target, err, errOut)
cmd.Args = append(cmd.Args, targets...)
cmd.Env = append(os.Environ(), "GOARCH="+runtime.GOARCH, "GOBIN="+outDir)
errOut, err := cmd.CombinedOutput()
if err == nil {
return
}
return bin
if strings.Contains(string(errOut), "when GOBIN is set") {
// Fallback slow path for cross-compiled binaries.
for _, target := range targets {
outFile := filepath.Join(outDir, path.Base(target)+exe())
cmd := exec.Command(goBin, "build", "-o", outFile, target)
cmd.Env = append(os.Environ(), "GOARCH="+runtime.GOARCH)
if errOut, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("failed to build %v with %v: %v, %s", target, goBin, err, errOut)
}
}
return
}
t.Fatalf("failed to build %v with %v: %v, %s", targets, goBin, err, errOut)
}
// logCatcher is a minimal logcatcher for the logtail upload client.
@@ -378,6 +553,9 @@ func (lc *logCatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {
} else {
for _, ent := range jreq {
fmt.Fprintf(&lc.buf, "%s\n", strings.TrimSpace(ent.Text))
if *verbose {
fmt.Fprintf(os.Stderr, "%s\n", strings.TrimSpace(ent.Text))
}
}
}
w.WriteHeader(200) // must have no content, but not a 204
@@ -454,3 +632,23 @@ func runDERPAndStun(t testing.TB, logf logger.Logf) (derpMap *tailcfg.DERPMap, c
return m, cleanup
}
type authURLParserWriter struct {
buf bytes.Buffer
fn func(urlStr string) error
}
var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`)
func (w *authURLParserWriter) Write(p []byte) (n int, err error) {
n, err = w.buf.Write(p)
m := authURLRx.FindSubmatch(w.buf.Bytes())
if m != nil {
urlStr := string(m[1])
w.buf.Reset() // so it's not matched again
if err := w.fn(urlStr); err != nil {
return 0, err
}
}
return n, err
}
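The regexp is the load-bearing part of that writer; a tiny, self-contained check of what it extracts (the sample output line is made up):

package main

import (
	"fmt"
	"regexp"
)

var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`)

func main() {
	// Simulated chunk of `tailscale up` output containing an auth URL.
	out := "To authenticate, visit:\n\n\thttp://127.0.0.1:1234/auth/deadbeef\n"
	if m := authURLRx.FindStringSubmatch(out); m != nil {
		fmt.Println("auth URL:", m[1]) // http://127.0.0.1:1234/auth/deadbeef
	}
}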

File: tstest/integration/testcontrol/testcontrol.go

@@ -17,6 +17,8 @@ import (
"log"
"math/rand"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"time"
@@ -34,19 +36,43 @@ import (
// Server is a control plane server. Its zero value is ready for use.
// Everything is stored in-memory in one tailnet.
type Server struct {
Logf logger.Logf // nil means to use the log package
DERPMap *tailcfg.DERPMap // nil means to use prod DERP map
Logf logger.Logf // nil means to use the log package
DERPMap *tailcfg.DERPMap // nil means to use prod DERP map
RequireAuth bool
BaseURL string // must be set to e.g. "http://127.0.0.1:1234" with no trailing URL
Verbose bool
initMuxOnce sync.Once
mux *http.ServeMux
mu sync.Mutex
pubKey wgkey.Key
privKey wgkey.Private
nodes map[tailcfg.NodeKey]*tailcfg.Node
users map[tailcfg.NodeKey]*tailcfg.User
logins map[tailcfg.NodeKey]*tailcfg.Login
updates map[tailcfg.NodeID]chan updateType
mu sync.Mutex
pubKey wgkey.Key
privKey wgkey.Private
nodes map[tailcfg.NodeKey]*tailcfg.Node
users map[tailcfg.NodeKey]*tailcfg.User
logins map[tailcfg.NodeKey]*tailcfg.Login
updates map[tailcfg.NodeID]chan updateType
authPath map[string]*AuthPath
nodeKeyAuthed map[tailcfg.NodeKey]bool // key => true once authenticated
}
type AuthPath struct {
nodeKey tailcfg.NodeKey
closeOnce sync.Once
ch chan struct{}
success bool
}
func (ap *AuthPath) completeSuccessfully() {
ap.success = true
close(ap.ch)
}
// CompleteSuccessfully completes the login path successfully, as if
// the user did the whole auth dance.
func (ap *AuthPath) CompleteSuccessfully() {
ap.closeOnce.Do(ap.completeSuccessfully)
}
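The closeOnce guard is what makes CompleteSuccessfully safe to call more than once (e.g. if a test completes the same auth path twice); closing a bare channel twice would panic. A minimal demonstration of the idiom:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once
	done := make(chan struct{})

	complete := func() { close(done) }
	once.Do(complete)
	once.Do(complete) // no-op: the channel is closed exactly once

	<-done
	fmt.Println("completed exactly once, no panic")
}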
func (s *Server) logf(format string, a ...interface{}) {
@@ -142,6 +168,18 @@ func (s *Server) Node(nodeKey tailcfg.NodeKey) *tailcfg.Node {
return s.nodes[nodeKey].Clone()
}
func (s *Server) AllNodes() (nodes []*tailcfg.Node) {
s.mu.Lock()
defer s.mu.Unlock()
for _, n := range s.nodes {
nodes = append(nodes, n.Clone())
}
sort.Slice(nodes, func(i, j int) bool {
return nodes[i].StableID < nodes[j].StableID
})
return nodes
}
func (s *Server) getUser(nodeKey tailcfg.NodeKey) (*tailcfg.User, *tailcfg.Login) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -178,6 +216,56 @@ func (s *Server) getUser(nodeKey tailcfg.NodeKey) (*tailcfg.User, *tailcfg.Login
return user, login
}
// authPathDone returns a close-only struct that's closed when the
// authPath ("/auth/XXXXXX") has authenticated.
func (s *Server) authPathDone(authPath string) <-chan struct{} {
s.mu.Lock()
defer s.mu.Unlock()
if a, ok := s.authPath[authPath]; ok {
return a.ch
}
return nil
}
func (s *Server) addAuthPath(authPath string, nodeKey tailcfg.NodeKey) {
s.mu.Lock()
defer s.mu.Unlock()
if s.authPath == nil {
s.authPath = map[string]*AuthPath{}
}
s.authPath[authPath] = &AuthPath{
ch: make(chan struct{}),
nodeKey: nodeKey,
}
}
// CompleteAuth marks the provided path or URL (containing
// "/auth/...") as successfully authenticated, unblocking any
// requests blocked on that in serveRegister.
func (s *Server) CompleteAuth(authPathOrURL string) bool {
i := strings.Index(authPathOrURL, "/auth/")
if i == -1 {
return false
}
authPath := authPathOrURL[i:]
s.mu.Lock()
defer s.mu.Unlock()
ap, ok := s.authPath[authPath]
if !ok {
return false
}
if ap.nodeKey.IsZero() {
panic("zero AuthPath.NodeKey")
}
if s.nodeKeyAuthed == nil {
s.nodeKeyAuthed = map[tailcfg.NodeKey]bool{}
}
s.nodeKeyAuthed[ap.nodeKey] = true
ap.CompleteSuccessfully()
return true
}
func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey tailcfg.MachineKey) {
var req tailcfg.RegisterRequest
if err := s.decode(mkey, r.Body, &req); err != nil {
@@ -189,28 +277,65 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey tail
if req.NodeKey.IsZero() {
panic("serveRegister: request has zero node key")
}
if s.Verbose {
j, _ := json.MarshalIndent(req, "", "\t")
log.Printf("Got %T: %s", req, j)
}
// If this is a followup request, wait until interactive followup URL visit complete.
if req.Followup != "" {
followupURL, err := url.Parse(req.Followup)
if err != nil {
panic(err)
}
doneCh := s.authPathDone(followupURL.Path)
select {
case <-r.Context().Done():
return
case <-doneCh:
}
// TODO(bradfitz): support a side test API to mark an
// auth as failed so we can send an error response in
// some follow-ups? For now all are successes.
}
user, login := s.getUser(req.NodeKey)
s.mu.Lock()
if s.nodes == nil {
s.nodes = map[tailcfg.NodeKey]*tailcfg.Node{}
}
machineAuthorized := true // TODO: add Server.RequireMachineAuth
s.nodes[req.NodeKey] = &tailcfg.Node{
ID: tailcfg.NodeID(user.ID),
StableID: tailcfg.StableNodeID(fmt.Sprintf("TESTCTRL%08x", int(user.ID))),
User: user.ID,
Machine: mkey,
Key: req.NodeKey,
MachineAuthorized: true,
MachineAuthorized: machineAuthorized,
}
requireAuth := s.RequireAuth
if requireAuth && s.nodeKeyAuthed[req.NodeKey] {
requireAuth = false
}
s.mu.Unlock()
authURL := ""
if requireAuth {
randHex := make([]byte, 10)
crand.Read(randHex)
authPath := fmt.Sprintf("/auth/%x", randHex)
s.addAuthPath(authPath, req.NodeKey)
authURL = s.BaseURL + authPath
}
res, err := s.encode(mkey, false, tailcfg.RegisterResponse{
User: *user,
Login: *login,
NodeKeyExpired: false,
MachineAuthorized: true,
AuthURL: "", // all good; TODO(bradfitz): add ways to not start all good.
MachineAuthorized: machineAuthorized,
AuthURL: authURL,
})
if err != nil {
go panic(fmt.Sprintf("serveRegister: encode: %v", err))
@@ -254,6 +379,21 @@ func sendUpdate(dst chan<- updateType, updateType updateType) {
}
}
func (s *Server) UpdateNode(n *tailcfg.Node) (peersToUpdate []tailcfg.NodeID) {
s.mu.Lock()
defer s.mu.Unlock()
if n.Key.IsZero() {
panic("zero nodekey")
}
s.nodes[n.Key] = n.Clone()
for _, n2 := range s.nodes {
if n.ID != n2.ID {
peersToUpdate = append(peersToUpdate, n2.ID)
}
}
return peersToUpdate
}
func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey tailcfg.MachineKey) {
ctx := r.Context()
@@ -279,10 +419,8 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey tailcfg.M
if !req.ReadOnly {
endpoints := filterInvalidIPv6Endpoints(req.Endpoints)
node.Endpoints = endpoints
// TODO: more
// TODO: register node,
//s.UpdateEndpoint(mkey, req.NodeKey,
// XXX
node.DiscoKey = req.DiscoKey
peersToUpdate = s.UpdateNode(node)
}
nodeID := node.ID
@@ -389,6 +527,12 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse,
CollectServices: "true",
PacketFilter: tailcfg.FilterAllowAll,
}
for _, p := range s.AllNodes() {
if p.StableID != node.StableID {
res.Peers = append(res.Peers, p)
}
}
res.Node.Addresses = []netaddr.IPPrefix{
netaddr.MustParseIPPrefix(fmt.Sprintf("100.64.%d.%d/32", uint8(node.ID>>8), uint8(node.ID))),
}

File: version/race.go (new file, 11 lines)

@@ -0,0 +1,11 @@
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
package version
// IsRace reports whether the current binary was built with the Go
// race detector enabled.
func IsRace() bool { return true }

File: version/race_off.go (new file, 11 lines)

@@ -0,0 +1,11 @@
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package version
// IsRace reports whether the current binary was built with the Go
// race detector enabled.
func IsRace() bool { return false }

File: wgengine/netstack/netstack.go

@@ -12,6 +12,7 @@ import (
"io"
"log"
"net"
"os"
"strconv"
"strings"
"sync"
@@ -452,6 +453,9 @@ func (ns *Impl) acceptTCP(r *tcp.ForwarderRequest) {
func (ns *Impl) forwardTCP(client *gonet.TCPConn, wq *waiter.Queue, dialAddr tcpip.Address, dialPort uint16) {
defer client.Close()
dialAddrStr := net.JoinHostPort(dialAddr.String(), strconv.Itoa(int(dialPort)))
if alt := os.Getenv(fmt.Sprintf("TAILSCALE_INCOMING_REMAP_%s_%d", dialAddr, dialPort)); alt != "" {
dialAddrStr = alt
}
ns.logf("[v2] netstack: forwarding incoming connection to %s", dialAddrStr)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
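The rewrite hook is keyed entirely off an environment variable whose name embeds the original dial address and port; a hedged example of priming it from a test (the addresses are placeholders, not values the code requires):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Ask netstack to dial 127.0.0.1:8080 instead of 100.101.102.103:80.
	// The variable-name format comes from forwardTCP above; the values
	// here are illustrative only.
	key := fmt.Sprintf("TAILSCALE_INCOMING_REMAP_%s_%d", "100.101.102.103", 80)
	os.Setenv(key, "127.0.0.1:8080")
	fmt.Println(key, "=>", os.Getenv(key))
}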