Compare commits

...

2 Commits

Author SHA1 Message Date
Irbe Krumina
73289838e4 tstest/integration: adds an integration test for logout
Signed-off-by: Irbe Krumina <irbe@tailscale.com>
2023-08-01 20:36:01 +01:00
Irbe Krumina
e43a6cbc29 control/controlclient,ipn/ipn: ensures that logout succeeds when no nodekey is present
Ensures that if a user runs the logout command when not logged in, the command
does not fail.
Ensures that if logout is run whilst the backend is in a non-logged-in state
_and_ does not have a node key, the backend and the control client will be brought
to a logged out state
(for example, a continuous loop over a failed auth attempt will
now be cancelled when the logout command is run)

Updates #3833

Signed-off-by: Irbe Krumina <irbe@tailscale.com>
2023-08-01 20:36:01 +01:00
4 changed files with 73 additions and 3 deletions

View File

@@ -237,6 +237,9 @@ func (c *Auto) sendNewMapRequest() {
}()
}
// cancelAuth ensures that the auth routine, that might be currently sleeping,
// wakes up and checks for a new login/logout request that needs to be
// fulfilled.
func (c *Auto) cancelAuth() {
c.mu.Lock()
if c.authCancel != nil {

View File

@@ -446,6 +446,17 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new
regen := opt.Regen
if opt.Logout {
c.logf("logging out...")
// check if logout from control server is a no-op early so that we don't
// call control server unnecessarily
if persist.PrivateNodeKey.IsZero() {
// This is an attempt to log out a node that is not currently
// logged in. We are not erroring out here to ensure that a user who
// hits logout twice will not be faced with errors, as well as so
// that the logout command can be used to cancel failed login attempts
// (which would otherwise be retried in a loop).
c.logf("no nodekey found, skipping logging out from control server")
return false, "", nil, nil
}
} else {
if expired {
c.logf("Old key expired -> regen=true")
@@ -504,9 +515,6 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new
nlPub := persist.NetworkLockKey.Public()
if tryingNewKey.IsZero() {
if opt.Logout {
return false, "", nil, errors.New("no nodekey to log out")
}
log.Fatalf("tryingNewKey is empty, give up")
}

View File

@@ -3881,6 +3881,14 @@ func (b *LocalBackend) Logout() {
b.logout(context.Background(), false)
}
// LogoutSync tells the controlclient that we want to log out, and transitions
// the local engine to the logged-out state. It also waits for one auth routine
// attempt to log out from the control plane.
// After the local engine has been transitioned and the control client has
// successfully logged out (auth routine):
//   - local backend state should be NeedsLogin
//   - auth routine should be sleeping and waiting for a login/logout attempt
//   - client login goal should be set to nil
//   - node key should be removed from control server and local state
func (b *LocalBackend) LogoutSync(ctx context.Context) error {
	return b.logout(ctx, true)
}

View File

@@ -670,6 +670,37 @@ func TestLogoutRemovesAllPeers(t *testing.T) {
wantNode0PeerCount(expectedPeers) // all existing peers and the new node
}
// TestLogout verifies that the "tailscale logout" command succeeds and
// brings the backend to the NeedsLogin state even when the node is not
// currently logged in, or is stuck retrying a failing login attempt.
func TestLogout(t *testing.T) {
t.Parallel()
env := newTestEnv(t)
n := newTestNode(t, env)
d := n.StartDaemon()
n.MustLogin()
n.AwaitRunning()
// 1. a user hits logout twice - should not error
n.MustLogOut()
n.AwaitNeedsLogin()
n.MustLogOut() // a second logout should not error out
n.AwaitNeedsLogin()
// 2. a user tries to log out after a failed login attempt - should not
// error and should bring client and backend to a logged out state
n.MustLogin()
n.AwaitRunning()
// Point the client at an unreachable control server so the login attempt
// fails; --timeout bounds how long the CLI waits before giving up.
cmd := n.Tailscale("login", "--login-server=fakecontrol.io", "--timeout=2s")
if err := cmd.Run(); err == nil {
t.Fatalf("wanted login to fakecontrol.io to fail, but it did not: %v", cmd.Stdout)
}
n.AwaitNeedsLogin()
n.MustLogOut()
n.AwaitNeedsLogin()
// TODO (irbekrm): figure out a way to test that the local backend and
// client have actually logged out; maybe look at the state file.
d.MustCleanShutdown(t)
}
// testEnv contains the test environment (set of servers) used by one
// or more nodes.
type testEnv struct {
@@ -959,6 +990,22 @@ func (n *testNode) MustUp(extraArgs ...string) {
}
}
// MustLogin runs "tailscale login" against the test control server,
// passing through any extraArgs, and fails the test if the command errors.
func (n *testNode) MustLogin(extraArgs ...string) {
	t := n.env.t
	args := []string{
		"login",
		"--login-server=" + n.env.ControlServer.URL,
	}
	args = append(args, extraArgs...)
	cmd := n.Tailscale(args...)
	t.Logf("Running %v ...", cmd)
	cmd.Stdout = nil // in case --verbose-tailscale was set
	cmd.Stderr = nil // in case --verbose-tailscale was set
	if b, err := cmd.CombinedOutput(); err != nil {
		// Say "login", not "up": this was copy-pasted from MustUp and
		// previously mislabeled which command failed.
		t.Fatalf("login: %v, %v", string(b), err)
	}
}
func (n *testNode) MustDown() {
t := n.env.t
t.Logf("Running down ...")
@@ -1058,6 +1105,10 @@ func (n *testNode) AwaitRunning() {
}
}
// TODO (irbekrm): for this and other funcs that wait for state we should
// actually test that the backend remains in the desired state for some period
// of time (i.e 5s) - some operations involve a sequence of state changes and we
// really want to check the final state, not to catch some intermediate one
// AwaitNeedsLogin waits for n to reach the IPN state "NeedsLogin".
func (n *testNode) AwaitNeedsLogin() {
t := n.env.t