Compare commits

...

4 Commits

Author SHA1 Message Date
Josh Bleecher Snyder
2e8d9a47c1 convert more things to use coarsetime 2021-07-19 15:32:57 -07:00
Josh Bleecher Snyder
b9351f1dcf switch to monotonicCoarse in magicsock
this is all fine...except for the part where we are measuring latency.
2021-07-19 15:32:57 -07:00
Josh Bleecher Snyder
068ff32ccd temporarily disable filter logging
to get rid of time.Now calls as I experiment
2021-07-19 15:32:56 -07:00
Josh Bleecher Snyder
bcf23313b7 tstime: add MonotonicCoarse
MonotonicCoarse provides a coarse monotonic time.
On some platforms, it is implemented in assembly,
which lets us do much less work than time.Now,
which gets a high precision monotonic time and
a high precision wall time.

The assembly code is tied to a particular Go release
because it reaches into the Go internals
in order to switch to the system stack for the vdso call.

On my darwin/arm64 machine, there is no perf difference.
On my linux/amd64 machine, MonotonicCoarse is 5x faster (50ns -> 10ns).
On my linux/arm64 VM, MonotonicCoarse is 16x faster (64ns -> 4ns).

We could also use this in the rate limiter and magicsock,
which are two other uses of time.Now that show up in the CPU pprof
when doing throughput benchmarking.

Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
2021-07-19 15:32:56 -07:00
10 changed files with 355 additions and 59 deletions

View File

@@ -20,6 +20,7 @@ import (
"inet.af/netaddr"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/util/dnsname"
)
@@ -90,7 +91,7 @@ type PeerStatus struct {
RxBytes int64
TxBytes int64
Created time.Time // time registered with tailcontrol
LastWrite time.Time // time last packet sent
LastWrite int64 // time last packet sent
LastSeen time.Time // last seen to tailcontrol
LastHandshake time.Time // with local wireguard
KeepAlive bool
@@ -256,7 +257,7 @@ func (sb *StatusBuilder) AddPeer(peer key.Public, st *PeerStatus) {
if v := st.LastSeen; !v.IsZero() {
e.LastSeen = v
}
if v := st.LastWrite; !v.IsZero() {
if v := st.LastWrite; v != 0 {
e.LastWrite = v
}
if st.InNetworkMap {
@@ -320,7 +321,7 @@ table tbody tr:nth-child(even) td { background-color: #f5f5f5; }
f("<tr><th>Peer</th><th>OS</th><th>Node</th><th>Owner</th><th>Rx</th><th>Tx</th><th>Activity</th><th>Connection</th></tr>\n")
f("</thead>\n<tbody>\n")
now := time.Now()
now := tstime.MonotonicCoarse()
var peers []*PeerStatus
for _, peer := range st.Peers() {
@@ -334,10 +335,10 @@ table tbody tr:nth-child(even) td { background-color: #f5f5f5; }
for _, ps := range peers {
var actAgo string
if !ps.LastWrite.IsZero() {
ago := now.Sub(ps.LastWrite)
actAgo = ago.Round(time.Second).String() + " ago"
if ago < 5*time.Minute {
if ps.LastWrite != 0 {
ago := now - ps.LastWrite
actAgo = fmt.Sprintf("%ds ago", ago)
if ago < 5*60 /*time.Minute */ {
actAgo = "<b>" + actAgo + "</b>"
}
}
@@ -378,7 +379,7 @@ table tbody tr:nth-child(even) td { background-color: #f5f5f5; }
f("<td>")
// TODO: let server report this active bool instead
active := !ps.LastWrite.IsZero() && time.Since(ps.LastWrite) < 2*time.Minute
active := ps.LastWrite != 0 && tstime.MonotonicCoarse()-ps.LastWrite < 120 /*2*time.Minute*/
if active {
if ps.Relay != "" && ps.CurAddr == "" {
f("relay <b>%s</b>", html.EscapeString(ps.Relay))

View File

@@ -18,6 +18,7 @@ import (
"golang.zx2c4.com/wireguard/tun"
"inet.af/netaddr"
"tailscale.com/net/packet"
"tailscale.com/tstime"
"tailscale.com/types/ipproto"
"tailscale.com/types/logger"
"tailscale.com/wgengine/filter"
@@ -363,7 +364,7 @@ func (t *Wrapper) filterOut(p *packet.Parsed) filter.Response {
// noteActivity records that there was a read or write at the current time.
func (t *Wrapper) noteActivity() {
atomic.StoreInt64(&t.lastActivityAtomic, time.Now().Unix())
atomic.StoreInt64(&t.lastActivityAtomic, tstime.MonotonicCoarse())
}
// IdleDuration reports how long it's been since the last read or write to this device.
@@ -371,8 +372,8 @@ func (t *Wrapper) noteActivity() {
// Its value is only accurate to roughly second granularity.
// If there's never been activity, the duration is since 1970.
func (t *Wrapper) IdleDuration() time.Duration {
sec := atomic.LoadInt64(&t.lastActivityAtomic)
return time.Since(time.Unix(sec, 0))
elapsed := tstime.MonotonicCoarse() - atomic.LoadInt64(&t.lastActivityAtomic)
return time.Duration(elapsed) * time.Second
}
func (t *Wrapper) Read(buf []byte, offset int) (int, error) {

11
tstime/monocoarse_asm.go Normal file
View File

@@ -0,0 +1,11 @@
// +build go1.16,!go1.17
// +build linux,amd64 linux,arm64
package tstime
// MonotonicCoarse returns the number of monotonic seconds elapsed
// since an unspecified starting point, at low precision.
// It is only meaningful when compared to the
// result of previous MonotonicCoarse calls.
// On some platforms, MonotonicCoarse is much faster than time.Now.
//
// On the platforms matched by this file's build tags it is implemented
// in assembly (see the accompanying .s files), which reads
// CLOCK_MONOTONIC_COARSE via the vDSO.
func MonotonicCoarse() int64

View File

@@ -0,0 +1,105 @@
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted from code in the Go runtime package at Go 1.16.6:
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.16
// +build !go1.17
#include "go_asm.h"
#include "textflag.h"
#define get_tls(r) MOVQ TLS, r
#define g(r) 0(r)(TLS*1)
// SYS_clock_gettime is the Linux syscall number used by the
// non-vDSO fallback path below.
#define SYS_clock_gettime 228
// Hard-coded offsets into runtime structs.
// Generated by adding the following code
// to package runtime and then executing
// an empty func main:
//
// func init() {
// println("#define g_m", unsafe.Offsetof(g{}.m))
// println("#define g_sched", unsafe.Offsetof(g{}.sched))
// println("#define gobuf_sp", unsafe.Offsetof(g{}.sched.sp))
// println("#define m_g0", unsafe.Offsetof(m{}.g0))
// println("#define m_curg", unsafe.Offsetof(m{}.curg))
// println("#define m_vdsoSP", unsafe.Offsetof(m{}.vdsoSP))
// println("#define m_vdsoPC", unsafe.Offsetof(m{}.vdsoPC))
// }
#define g_m 48
#define g_sched 56
#define gobuf_sp 0
#define m_g0 0
#define m_curg 192
#define m_vdsoSP 832
#define m_vdsoPC 840
// CLOCK_MONOTONIC_COARSE is the clockid passed to clock_gettime
// (the coarse monotonic clock).
#define CLOCK_MONOTONIC_COARSE 6
// func MonotonicCoarse() int64
//
// Only the whole-seconds value of the timespec is returned;
// the nanoseconds read into DX are discarded.
TEXT ·MonotonicCoarse(SB),NOSPLIT,$16-8
// Switch to g0 stack.
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
get_tls(CX)
MOVQ g(CX), AX
MOVQ g_m(AX), BX // BX unchanged by C code.
// Set vdsoPC and vdsoSP for SIGPROF traceback.
// Save the old values on stack and restore them on exit,
// so this function is reentrant.
MOVQ m_vdsoPC(BX), CX
MOVQ m_vdsoSP(BX), DX
MOVQ CX, 0(SP)
MOVQ DX, 8(SP)
LEAQ ret+0(FP), DX
MOVQ -8(DX), CX
MOVQ CX, m_vdsoPC(BX)
MOVQ DX, m_vdsoSP(BX)
CMPQ AX, m_curg(BX) // Only switch if on curg.
JNE noswitch
MOVQ m_g0(BX), DX
MOVQ (g_sched+gobuf_sp)(DX), SP // Set SP to g0 stack
noswitch:
SUBQ $16, SP // Space for results
ANDQ $~15, SP // Align for C code
MOVL $CLOCK_MONOTONIC_COARSE, DI
LEAQ 0(SP), SI
MOVQ runtime·vdsoClockgettimeSym(SB), AX
CMPQ AX, $0
JEQ fallback
CALL AX
ret:
MOVQ 0(SP), AX // sec
MOVQ 8(SP), DX // nsec
MOVQ R12, SP // Restore real SP
// Restore vdsoPC, vdsoSP
// We don't worry about being signaled between the two stores.
// If we are not in a signal handler, we'll restore vdsoSP to 0,
// and no one will care about vdsoPC. If we are in a signal handler,
// we cannot receive another signal.
MOVQ 8(SP), CX
MOVQ CX, m_vdsoSP(BX)
MOVQ 0(SP), CX
MOVQ CX, m_vdsoPC(BX)
// sec is in AX; return it
MOVQ AX, ret+0(FP)
RET
fallback:
// vDSO symbol unavailable; make a plain clock_gettime syscall
// with the same arguments, then rejoin the common exit path.
MOVQ $SYS_clock_gettime, AX
SYSCALL
JMP ret

View File

@@ -0,0 +1,132 @@
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted from code in the Go runtime package at Go 1.16.6:
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.16
// +build !go1.17
#include "go_asm.h"
#include "textflag.h"
// SYS_clock_gettime is the Linux syscall number used by the
// non-vDSO fallback path below.
#define SYS_clock_gettime 113
// Hard-coded offsets into runtime structs.
// Generated by adding the following code
// to package runtime and then executing
// an empty func main:
//
// func init() {
// println("#define g_m", unsafe.Offsetof(g{}.m))
// println("#define g_sched", unsafe.Offsetof(g{}.sched))
// println("#define gobuf_sp", unsafe.Offsetof(g{}.sched.sp))
// println("#define g_stack", unsafe.Offsetof(g{}.stack))
// println("#define stack_lo", unsafe.Offsetof(g{}.stack.lo))
// println("#define m_g0", unsafe.Offsetof(m{}.g0))
// println("#define m_curg", unsafe.Offsetof(m{}.curg))
// println("#define m_vdsoSP", unsafe.Offsetof(m{}.vdsoSP))
// println("#define m_vdsoPC", unsafe.Offsetof(m{}.vdsoPC))
// println("#define m_gsignal", unsafe.Offsetof(m{}.gsignal))
// }
#define g_m 48
#define g_sched 56
#define gobuf_sp 0
#define g_stack 0
#define stack_lo 0
#define m_g0 0
#define m_curg 192
#define m_vdsoSP 832
#define m_vdsoPC 840
#define m_gsignal 80
// CLOCK_MONOTONIC_COARSE is the clockid passed to clock_gettime
// (the coarse monotonic clock).
#define CLOCK_MONOTONIC_COARSE 6
// func MonotonicCoarse() int64
//
// Only the whole-seconds value of the timespec is returned;
// the nanoseconds read into R5 are discarded.
TEXT ·MonotonicCoarse(SB),NOSPLIT,$24-8
MOVD RSP, R20 // R20 is unchanged by C code
MOVD RSP, R1
MOVD g_m(g), R21 // R21 = m
// Set vdsoPC and vdsoSP for SIGPROF traceback.
// Save the old values on stack and restore them on exit,
// so this function is reentrant.
MOVD m_vdsoPC(R21), R2
MOVD m_vdsoSP(R21), R3
MOVD R2, 8(RSP)
MOVD R3, 16(RSP)
MOVD LR, m_vdsoPC(R21)
MOVD R20, m_vdsoSP(R21)
MOVD m_curg(R21), R0
CMP g, R0
BNE noswitch
MOVD m_g0(R21), R3
MOVD (g_sched+gobuf_sp)(R3), R1 // Set RSP to g0 stack
noswitch:
SUB $32, R1
BIC $15, R1
MOVD R1, RSP
MOVW $CLOCK_MONOTONIC_COARSE, R0
MOVD runtime·vdsoClockgettimeSym(SB), R2
CBZ R2, fallback
// Store g on gsignal's stack, so if we receive a signal
// during VDSO code we can find the g.
// If we don't have a signal stack, we won't receive signal,
// so don't bother saving g.
// When using cgo, we already saved g on TLS, also don't save
// g here.
// Also don't save g if we are already on the signal stack.
// We won't get a nested signal.
MOVBU runtime·iscgo(SB), R22
CBNZ R22, nosaveg
MOVD m_gsignal(R21), R22 // g.m.gsignal
CBZ R22, nosaveg
CMP g, R22
BEQ nosaveg
MOVD (g_stack+stack_lo)(R22), R22 // g.m.gsignal.stack.lo
MOVD g, (R22)
BL (R2)
MOVD ZR, (R22) // clear g slot, R22 is unchanged by C code
B finish
nosaveg:
BL (R2)
B finish
fallback:
// vDSO symbol unavailable; make a plain clock_gettime syscall
// with the same arguments, then rejoin the common exit path.
MOVD $SYS_clock_gettime, R8
SVC
finish:
MOVD 0(RSP), R3 // sec
MOVD 8(RSP), R5 // nsec
MOVD R20, RSP // restore SP
// Restore vdsoPC, vdsoSP
// We don't worry about being signaled between the two stores.
// If we are not in a signal handler, we'll restore vdsoSP to 0,
// and no one will care about vdsoPC. If we are in a signal handler,
// we cannot receive another signal.
MOVD 16(RSP), R1
MOVD R1, m_vdsoSP(R21)
MOVD 8(RSP), R1
MOVD R1, m_vdsoPC(R21)
// sec is in R3; return it
MOVD R3, ret+0(FP)
RET

16
tstime/monocoarse_std.go Normal file
View File

@@ -0,0 +1,16 @@
// +build !go1.16 go1.17 !linux !amd64,!arm64
package tstime
import "time"
// referenceTime anchors MonotonicCoarse measurements. time.Now embeds a
// monotonic clock reading, so time.Since(referenceTime) is monotonic.
var referenceTime = time.Now()

// MonotonicCoarse returns the number of monotonic seconds elapsed
// since an unspecified starting point, at low precision.
// It is only meaningful when compared to the
// result of previous MonotonicCoarse calls.
// On some platforms, MonotonicCoarse is much faster than time.Now.
func MonotonicCoarse() int64 {
	// Integer division is exact and avoids the float64 round-trip of
	// Duration.Seconds, which loses precision for very long durations.
	return int64(time.Since(referenceTime) / time.Second)
}

29
tstime/monocoarse_test.go Normal file
View File

@@ -0,0 +1,29 @@
package tstime
import (
"testing"
"time"
)
// TestMonotonicCoarse polls MonotonicCoarse every 100ms for up to 3s and
// verifies that it eventually advances, and by exactly one second.
func TestMonotonicCoarse(t *testing.T) {
	t.Parallel()
	before := MonotonicCoarse()
	for attempt := 0; attempt < 30; attempt++ {
		after := MonotonicCoarse()
		if after != before {
			if delta := after - before; delta != 1 {
				t.Errorf("monotonic coarse time jumped: %v seconds", delta)
			}
			return // success
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Errorf("monotonic coarse time did not progress after 3s")
}
// sinkMonotonicCoarse keeps the benchmark's result observable so the
// compiler cannot dead-code-eliminate the (inlinable, pure-Go fallback)
// MonotonicCoarse call being measured.
var sinkMonotonicCoarse int64

// BenchmarkMonotonicCoarse measures the per-call cost of MonotonicCoarse.
func BenchmarkMonotonicCoarse(b *testing.B) {
	var v int64
	for i := 0; i < b.N; i++ {
		v = MonotonicCoarse()
	}
	sinkMonotonicCoarse = v
}

View File

@@ -232,6 +232,7 @@ func init() {
}
func (f *Filter) logRateLimit(runflags RunFlags, q *packet.Parsed, dir direction, r Response, why string) {
return
if !f.loggingAllowed(q) {
return
}

View File

@@ -353,7 +353,7 @@ type addrSet struct {
mu sync.Mutex // guards following fields
lastSend time.Time
lastSend int64
// roamAddr is non-nil if/when we receive a correctly signed
// WireGuard packet from an unexpected address. If so, we

View File

@@ -884,8 +884,8 @@ func (c *Conn) Ping(peer *tailcfg.Node, res *ipnstate.PingResult, cb func(*ipnst
}
// c.mu must be held
func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency time.Duration, ep netaddr.IPPort) {
res.LatencySeconds = latency.Seconds()
func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency int64, ep netaddr.IPPort) {
res.LatencySeconds = float64(latency)
if ep.IP() != derpMagicIPAddr {
res.Endpoint = ep.String()
return
@@ -3145,13 +3145,13 @@ type discoEndpoint struct {
mu sync.Mutex // Lock ordering: Conn.mu, then discoEndpoint.mu
heartBeatTimer *time.Timer // nil when idle
lastSend time.Time // last time there was outgoing packets sent to this peer (from wireguard-go)
lastFullPing time.Time // last time we pinged all endpoints
lastSend int64 // last time there was outgoing packets sent to this peer (from wireguard-go)
lastFullPing int64 // last time we pinged all endpoints
derpAddr netaddr.IPPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients)
bestAddr addrLatency // best non-DERP path; zero if none
bestAddrAt time.Time // time best address re-confirmed
trustBestAddrUntil time.Time // time when bestAddr expires
bestAddrAt int64 // time best address re-confirmed
trustBestAddrUntil int64 // time when bestAddr expires
sentPing map[stun.TxID]sentPing
endpointState map[netaddr.IPPort]*endpointState
isCallMeMaybeEP map[netaddr.IPPort]bool
@@ -3169,11 +3169,11 @@ const (
// try to keep an established discoEndpoint peering alive.
// It's also the idle time at which we stop doing STUN queries to
// keep NAT mappings alive.
sessionActiveTimeout = 2 * time.Minute
sessionActiveTimeout = 120 // seconds (was 2 * time.Minute)
// upgradeInterval is how often we try to upgrade to a better path
// even if we have some non-DERP route that works.
upgradeInterval = 1 * time.Minute
upgradeInterval = 60 // seconds
// heartbeatInterval is how often pings to the best UDP address
// are sent.
@@ -3183,7 +3183,7 @@ const (
// to an endpoint. (Except in the case of CallMeMaybe frames
resetting the counter, as the first pings likely didn't go through
// the firewall)
discoPingInterval = 5 * time.Second
discoPingInterval = 5 // seconds
// pingTimeoutDuration is how long we wait for a pong reply before
// assuming it's never coming.
@@ -3191,11 +3191,11 @@ const (
// trustUDPAddrDuration is how long we trust a UDP address as the exclusive
// path (without using DERP) without having heard a Pong reply.
trustUDPAddrDuration = 5 * time.Second
trustUDPAddrDuration = 5 // seconds (was 5 * time.Second)
// goodEnoughLatency is the latency at or under which we don't
// try to upgrade to a better path.
goodEnoughLatency = 5 * time.Millisecond
goodEnoughLatency = 1 // second (was 5 * time.Millisecond; the coarse clock has whole-second granularity)
// derpInactiveCleanupTime is how long a non-home DERP connection
// needs to be idle (last written to) before we close it.
@@ -3218,7 +3218,7 @@ type endpointState struct {
// all fields guarded by discoEndpoint.mu
// lastPing is the last (outgoing) ping time.
lastPing time.Time
lastPing int64
// lastGotPing, if non-zero, means that this was an endpoint
// that we learned about at runtime (from an incoming ping)
@@ -3265,15 +3265,15 @@ func (de *discoEndpoint) deleteEndpointLocked(ep netaddr.IPPort) {
const pongHistoryCount = 64
type pongReply struct {
latency time.Duration
pongAt time.Time // when we received the pong
latency int64 // seconds
pongAt int64 // when we received the pong
from netaddr.IPPort // the pong's src (usually same as endpoint map key)
pongSrc netaddr.IPPort // what they reported they heard
}
type sentPing struct {
to netaddr.IPPort
at time.Time
at int64
timer *time.Timer // timeout timer
purpose discoPingPurpose
}
@@ -3321,9 +3321,9 @@ func (de *discoEndpoint) DstToBytes() []byte { return packIPPort(de.fakeWGAddr)
// addr may be non-zero.
//
// de.mu must be held.
func (de *discoEndpoint) addrForSendLocked(now time.Time) (udpAddr, derpAddr netaddr.IPPort) {
func (de *discoEndpoint) addrForSendLocked(now int64) (udpAddr, derpAddr netaddr.IPPort) {
udpAddr = de.bestAddr.IPPort
if udpAddr.IsZero() || now.After(de.trustBestAddrUntil) {
if udpAddr.IsZero() || now > de.trustBestAddrUntil {
// We had a bestAddr but it expired so send both to it
// and DERP.
derpAddr = de.derpAddr
@@ -3339,18 +3339,18 @@ func (de *discoEndpoint) heartbeat() {
de.heartBeatTimer = nil
if de.lastSend.IsZero() {
if de.lastSend == 0 {
// Shouldn't happen.
return
}
if time.Since(de.lastSend) > sessionActiveTimeout {
if tstime.MonotonicCoarse()-de.lastSend > sessionActiveTimeout {
// Session's idle. Stop heartbeating.
de.c.logf("[v1] magicsock: disco: ending heartbeats for idle session to %v (%v)", de.publicKey.ShortString(), de.discoShort)
return
}
now := time.Now()
now := tstime.MonotonicCoarse()
udpAddr, _ := de.addrForSendLocked(now)
if !udpAddr.IsZero() {
// We have a preferred path. Ping that every 2 seconds.
@@ -3368,24 +3368,24 @@ func (de *discoEndpoint) heartbeat() {
// a better path.
//
// de.mu must be held.
func (de *discoEndpoint) wantFullPingLocked(now time.Time) bool {
if de.bestAddr.IsZero() || de.lastFullPing.IsZero() {
func (de *discoEndpoint) wantFullPingLocked(now int64) bool {
if de.bestAddr.IsZero() || de.lastFullPing == 0 {
return true
}
if now.After(de.trustBestAddrUntil) {
if now > de.trustBestAddrUntil {
return true
}
if de.bestAddr.latency <= goodEnoughLatency {
return false
}
if now.Sub(de.lastFullPing) >= upgradeInterval {
if now-de.lastFullPing >= upgradeInterval {
return true
}
return false
}
func (de *discoEndpoint) noteActiveLocked() {
de.lastSend = time.Now()
de.lastSend = tstime.MonotonicCoarse()
if de.heartBeatTimer == nil {
de.heartBeatTimer = time.AfterFunc(heartbeatInterval, de.heartbeat)
}
@@ -3399,12 +3399,12 @@ func (de *discoEndpoint) cliPing(res *ipnstate.PingResult, cb func(*ipnstate.Pin
de.pendingCLIPings = append(de.pendingCLIPings, pendingCLIPing{res, cb})
now := time.Now()
now := tstime.MonotonicCoarse()
udpAddr, derpAddr := de.addrForSendLocked(now)
if !derpAddr.IsZero() {
de.startPingLocked(derpAddr, now, pingCLI)
}
if !udpAddr.IsZero() && now.Before(de.trustBestAddrUntil) {
if !udpAddr.IsZero() && now < de.trustBestAddrUntil {
// Already have an active session, so just ping the address we're using.
// Otherwise "tailscale ping" results to a node on the local network
// can look like they're bouncing between, say 10.0.0.0/9 and the peer's
@@ -3419,11 +3419,11 @@ func (de *discoEndpoint) cliPing(res *ipnstate.PingResult, cb func(*ipnstate.Pin
}
func (de *discoEndpoint) send(b []byte) error {
now := time.Now()
now := tstime.MonotonicCoarse()
de.mu.Lock()
udpAddr, derpAddr := de.addrForSendLocked(now)
if udpAddr.IsZero() || now.After(de.trustBestAddrUntil) {
if udpAddr.IsZero() || now > de.trustBestAddrUntil {
de.sendPingsLocked(now, true)
}
de.noteActiveLocked()
@@ -3452,7 +3452,7 @@ func (de *discoEndpoint) pingTimeout(txid stun.TxID) {
if !ok {
return
}
if debugDisco || de.bestAddr.IsZero() || time.Now().After(de.trustBestAddrUntil) {
if debugDisco || de.bestAddr.IsZero() || tstime.MonotonicCoarse() > de.trustBestAddrUntil {
de.c.logf("[v1] magicsock: disco: timeout waiting for pong %x from %v (%v, %v)", txid[:6], sp.to, de.publicKey.ShortString(), de.discoShort)
}
de.removeSentPingLocked(txid, sp)
@@ -3504,7 +3504,7 @@ const (
pingCLI
)
func (de *discoEndpoint) startPingLocked(ep netaddr.IPPort, now time.Time, purpose discoPingPurpose) {
func (de *discoEndpoint) startPingLocked(ep netaddr.IPPort, now int64, purpose discoPingPurpose) {
if purpose != pingCLI {
st, ok := de.endpointState[ep]
if !ok {
@@ -3530,7 +3530,7 @@ func (de *discoEndpoint) startPingLocked(ep netaddr.IPPort, now time.Time, purpo
go de.sendDiscoPing(ep, txid, logLevel)
}
func (de *discoEndpoint) sendPingsLocked(now time.Time, sendCallMeMaybe bool) {
func (de *discoEndpoint) sendPingsLocked(now int64, sendCallMeMaybe bool) {
de.lastFullPing = now
var sentAny bool
for ep, st := range de.endpointState {
@@ -3538,7 +3538,7 @@ func (de *discoEndpoint) sendPingsLocked(now time.Time, sendCallMeMaybe bool) {
de.deleteEndpointLocked(ep)
continue
}
if !st.lastPing.IsZero() && now.Sub(st.lastPing) < discoPingInterval {
if st.lastPing != 0 && now-st.lastPing < discoPingInterval {
continue
}
@@ -3652,7 +3652,7 @@ func (de *discoEndpoint) noteConnectivityChange() {
de.mu.Lock()
defer de.mu.Unlock()
de.trustBestAddrUntil = time.Time{}
de.trustBestAddrUntil = 0
}
// handlePongConnLocked handles a Pong message (a reply to an earlier ping).
@@ -3670,8 +3670,8 @@ func (de *discoEndpoint) handlePongConnLocked(m *disco.Pong, src netaddr.IPPort)
}
de.removeSentPingLocked(m.TxID, sp)
now := time.Now()
latency := now.Sub(sp.at)
now := tstime.MonotonicCoarse()
latency := now - sp.at
if !isDerp {
st, ok := de.endpointState[sp.to]
@@ -3691,7 +3691,7 @@ func (de *discoEndpoint) handlePongConnLocked(m *disco.Pong, src netaddr.IPPort)
}
if sp.purpose != pingHeartbeat {
de.c.logf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pong.src=%v%v", de.c.discoShort, de.discoShort, de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), m.Src, logger.ArgWriter(func(bw *bufio.Writer) {
de.c.logf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pong.src=%v%v", de.c.discoShort, de.discoShort, de.publicKey.ShortString(), src, m.TxID[:6], latency, m.Src, logger.ArgWriter(func(bw *bufio.Writer) {
if sp.to != src {
fmt.Fprintf(bw, " ping.to=%v", sp.to)
}
@@ -3715,7 +3715,7 @@ func (de *discoEndpoint) handlePongConnLocked(m *disco.Pong, src netaddr.IPPort)
if de.bestAddr.IPPort == thisPong.IPPort {
de.bestAddr.latency = latency
de.bestAddrAt = now
de.trustBestAddrUntil = now.Add(trustUDPAddrDuration)
de.trustBestAddrUntil = now + trustUDPAddrDuration
}
}
}
@@ -3723,7 +3723,7 @@ func (de *discoEndpoint) handlePongConnLocked(m *disco.Pong, src netaddr.IPPort)
// addrLatency is an IPPort with an associated latency.
type addrLatency struct {
netaddr.IPPort
latency time.Duration
latency int64 // seconds
}
// betterAddr reports whether a is a better addr to use than b.
@@ -3822,22 +3822,22 @@ func (de *discoEndpoint) handleCallMeMaybe(m *disco.CallMeMaybe) {
// Zero out all the lastPing times to force sendPingsLocked to send new ones,
// even if it's been less than 5 seconds ago.
for _, st := range de.endpointState {
st.lastPing = time.Time{}
st.lastPing = 0
}
de.sendPingsLocked(time.Now(), false)
de.sendPingsLocked(tstime.MonotonicCoarse(), false)
}
func (de *discoEndpoint) populatePeerStatus(ps *ipnstate.PeerStatus) {
de.mu.Lock()
defer de.mu.Unlock()
if de.lastSend.IsZero() {
if de.lastSend == 0 {
return
}
ps.LastWrite = de.lastSend
now := time.Now()
now := tstime.MonotonicCoarse()
if udpAddr, derpAddr := de.addrForSendLocked(now); !udpAddr.IsZero() && derpAddr.IsZero() {
ps.CurAddr = udpAddr.String()
}
@@ -3856,12 +3856,12 @@ func (de *discoEndpoint) stopAndReset() {
// Zero these fields so if the user re-starts the network, the discovery
// state isn't a mix of before & after two sessions.
de.lastSend = time.Time{}
de.lastFullPing = time.Time{}
de.lastFullPing = 0
de.bestAddr = addrLatency{}
de.bestAddrAt = time.Time{}
de.trustBestAddrUntil = time.Time{}
de.bestAddrAt = 0
de.trustBestAddrUntil = 0
for _, es := range de.endpointState {
es.lastPing = time.Time{}
es.lastPing = 0
}
for txid, sp := range de.sentPing {