Compare commits

13 Commits: icio/json-...jonathan/d

| Author | SHA1 | Date |
|---|---|---|
|  | 82ca894ff5 |  |
|  | 156cd53e77 |  |
|  | 5c0e08fbbd |  |
|  | d0c50c6072 |  |
|  | 6bbf98bef4 |  |
|  | e1078686b3 |  |
|  | c261fb198f |  |
|  | 5668de272c |  |
|  | 005e20a45e |  |
|  | 196ae1cd74 |  |
|  | f3f2f72f96 |  |
|  | e07c1573f6 |  |
|  | 984cd1cab0 |  |
@@ -28,6 +28,7 @@ import (
 	"strings"

 	"tailscale.com/hostinfo"
+	"tailscale.com/types/lazy"
 	"tailscale.com/types/logger"
 	"tailscale.com/util/cmpver"
 	"tailscale.com/version"
@@ -249,9 +250,13 @@ func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) {
 	return nil, false
 }

+var canAutoUpdateCache lazy.SyncValue[bool]
+
 // CanAutoUpdate reports whether auto-updating via the clientupdate package
 // is supported for the current os/distro.
-func CanAutoUpdate() bool {
+func CanAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) }
+
+func canAutoUpdateUncached() bool {
 	if version.IsMacSysExt() {
 		// Macsys uses Sparkle for auto-updates, which doesn't have an update
 		// function in this package.
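The rewritten CanAutoUpdate memoizes the os/distro probe with lazy.SyncValue: the fill function runs once and every later call returns the cached result. A minimal sketch of the same pattern (the probe below is a stand-in, not the real detection logic):

```go
package main

import (
	"fmt"

	"tailscale.com/types/lazy"
)

var cache lazy.SyncValue[bool]

// probe stands in for an expensive, effectively-constant check.
func probe() bool {
	fmt.Println("probing...") // printed only once
	return true
}

func main() {
	fmt.Println(cache.Get(probe)) // runs probe, prints true
	fmt.Println(cache.Get(probe)) // cached, prints true
}
```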
@@ -2215,6 +2215,22 @@ spec:
                       https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices
                       Defaults to false.
                     type: boolean
+              useLetsEncryptStagingEnvironment:
+                description: |-
+                  Set UseLetsEncryptStagingEnvironment to true to issue TLS
+                  certificates for any HTTPS endpoints exposed to the tailnet from
+                  LetsEncrypt's staging environment.
+                  https://letsencrypt.org/docs/staging-environment/
+                  This setting only affects Tailscale Ingress resources.
+                  By default Ingress TLS certificates are issued from LetsEncrypt's
+                  production environment.
+                  Changing this setting true -> false, will result in any
+                  existing certs being re-issued from the production environment.
+                  Changing this setting false (default) -> true, when certs have already
+                  been provisioned from production environment will NOT result in certs
+                  being re-issued from the staging environment before they need to be
+                  renewed.
+                type: boolean
           status:
             description: |-
               Status of the ProxyClass. This is set and managed automatically.
@@ -2685,6 +2685,22 @@ spec:
                       Defaults to false.
                     type: boolean
                 type: object
+              useLetsEncryptStagingEnvironment:
+                description: |-
+                  Set UseLetsEncryptStagingEnvironment to true to issue TLS
+                  certificates for any HTTPS endpoints exposed to the tailnet from
+                  LetsEncrypt's staging environment.
+                  https://letsencrypt.org/docs/staging-environment/
+                  This setting only affects Tailscale Ingress resources.
+                  By default Ingress TLS certificates are issued from LetsEncrypt's
+                  production environment.
+                  Changing this setting true -> false, will result in any
+                  existing certs being re-issued from the production environment.
+                  Changing this setting false (default) -> true, when certs have already
+                  been provisioned from production environment will NOT result in certs
+                  being re-issued from the staging environment before they need to be
+                  renewed.
+                type: boolean
             type: object
           status:
             description: |-
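In cluster manifests, the new field sits on a ProxyClass spec. A hypothetical resource using it might look like this (the metadata name is illustrative):

```yaml
apiVersion: tailscale.com/v1alpha1
kind: ProxyClass
metadata:
  name: le-staging # illustrative name
spec:
  useLetsEncryptStagingEnvironment: true
```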
@@ -228,12 +228,11 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string ...
 			return false, fmt.Errorf("error getting VIPService %q: %w", hostname, err)
 		}
 	}
-	// Generate the VIPService comment for new or existing VIPService. This
-	// checks and ensures that VIPService's owner references are updated for
-	// this Ingress and errors if that is not possible (i.e. because it
-	// appears that the VIPService has been created by a non-operator
-	// actor).
-	svcComment, err := r.ownerRefsComment(existingVIPSvc)
+	// Generate the VIPService owner annotation for new or existing VIPService.
+	// This checks and ensures that VIPService's owner references are updated
+	// for this Ingress and errors if that is not possible (i.e. because it
+	// appears that the VIPService has been created by a non-operator actor).
+	updatedAnnotations, err := r.ownerAnnotations(existingVIPSvc)
 	if err != nil {
 		const instr = "To proceed, you can either manually delete the existing VIPService or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition"
 		msg := fmt.Sprintf("error ensuring ownership of VIPService %s: %v. %s", hostname, err, instr)
@@ -313,11 +312,13 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string ...
 		vipPorts = append(vipPorts, "80")
 	}

+	const managedVIPServiceComment = "This VIPService is managed by the Tailscale Kubernetes Operator, do not modify"
 	vipSvc := &tailscale.VIPService{
-		Name:    serviceName,
-		Tags:    tags,
-		Ports:   vipPorts,
-		Comment: svcComment,
+		Name:        serviceName,
+		Tags:        tags,
+		Ports:       vipPorts,
+		Comment:     managedVIPServiceComment,
+		Annotations: updatedAnnotations,
 	}
 	if existingVIPSvc != nil {
 		vipSvc.Addrs = existingVIPSvc.Addrs
@@ -328,7 +329,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string ...
 	if existingVIPSvc == nil ||
 		!reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) ||
 		!reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) ||
-		!strings.EqualFold(vipSvc.Comment, existingVIPSvc.Comment) {
+		!ownersAreSetAndEqual(vipSvc, existingVIPSvc) {
 		logger.Infof("Ensuring VIPService exists and is up to date")
 		if err := r.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil {
 			return false, fmt.Errorf("error creating VIPService: %w", err)
@@ -669,34 +670,34 @@ func (r *HAIngressReconciler) cleanupVIPService(ctx context.Context, name tailcfg.ServiceName ...
 	if svc == nil {
 		return false, nil
 	}
-	c, err := parseComment(svc)
+	o, err := parseOwnerAnnotation(svc)
 	if err != nil {
-		return false, fmt.Errorf("error parsing VIPService comment")
+		return false, fmt.Errorf("error parsing VIPService owner annotation")
 	}
-	if c == nil || len(c.OwnerRefs) == 0 {
+	if o == nil || len(o.OwnerRefs) == 0 {
 		return false, nil
 	}
 	// Comparing with the operatorID only means that we will not be able to
 	// clean up VIPServices in cases where the operator was deleted from the
 	// cluster before deleting the Ingress. Perhaps the comparison could be
 	// 'if or.OperatorID === r.operatorID || or.ingressUID == r.ingressUID'.
-	ix := slices.IndexFunc(c.OwnerRefs, func(or OwnerRef) bool {
+	ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool {
 		return or.OperatorID == r.operatorID
 	})
 	if ix == -1 {
 		return false, nil
 	}
-	if len(c.OwnerRefs) == 1 {
+	if len(o.OwnerRefs) == 1 {
 		logger.Infof("Deleting VIPService %q", name)
 		return false, r.tsClient.DeleteVIPService(ctx, name)
 	}
-	c.OwnerRefs = slices.Delete(c.OwnerRefs, ix, ix+1)
+	o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1)
 	logger.Infof("Deleting VIPService %q", name)
-	json, err := json.Marshal(c)
+	json, err := json.Marshal(o)
 	if err != nil {
 		return false, fmt.Errorf("error marshalling updated VIPService owner reference: %w", err)
 	}
-	svc.Comment = string(json)
+	svc.Annotations[ownerAnnotation] = string(json)
 	return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc)
 }

@@ -783,6 +784,15 @@ func (a *HAIngressReconciler) numberPodsAdvertising(ctx context.Context, pgName string ...
 	return count, nil
 }

+const ownerAnnotation = "tailscale.com/owner-references"
+
+// ownerAnnotationValue is the content of the VIPService.Annotation[ownerAnnotation] field.
+type ownerAnnotationValue struct {
+	// OwnerRefs is a list of owner references that identify all operator
+	// instances that manage this VIPService.
+	OwnerRefs []OwnerRef `json:"ownerRefs,omitempty"`
+}
+
 // OwnerRef is an owner reference that uniquely identifies a Tailscale
 // Kubernetes operator instance.
 type OwnerRef struct {
@@ -790,48 +800,67 @@ type OwnerRef struct {
 	OperatorID string `json:"operatorID,omitempty"`
 }

-// comment is the content of the VIPService.Comment field.
-type comment struct {
-	// OwnerRefs is a list of owner references that identify all operator
-	// instances that manage this VIPService.
-	OwnerRefs []OwnerRef `json:"ownerRefs,omitempty"`
-}
-
-// ownerRefsComment returns a VIPService Comment that includes the owner reference for this
-// operator instance for the provided VIPService. If the VIPService is nil, a
-// new comment with owner ref is returned. If the VIPService is not nil, the
-// existing comment is returned with the owner reference added, if not already
-// present. If the VIPService is not nil, but does not contain a comment we
-// return an error as this likely means that the VIPService was created by
-// something other than a Tailscale Kubernetes operator.
-func (r *HAIngressReconciler) ownerRefsComment(svc *tailscale.VIPService) (string, error) {
+// ownerAnnotations returns the updated annotations required to ensure this
+// instance of the operator is included as an owner. If the VIPService is not
+// nil, but does not contain an owner we return an error as this likely means
+// that the VIPService was created by something other than a Tailscale
+// Kubernetes operator.
+func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) {
 	ref := OwnerRef{
 		OperatorID: r.operatorID,
 	}
 	if svc == nil {
-		c := &comment{OwnerRefs: []OwnerRef{ref}}
+		c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}}
 		json, err := json.Marshal(c)
 		if err != nil {
-			return "", fmt.Errorf("[unexpected] unable to marshal VIPService comment contents: %w, please report this", err)
+			return nil, fmt.Errorf("[unexpected] unable to marshal VIPService owner annotation contents: %w, please report this", err)
 		}
-		return string(json), nil
+		return map[string]string{
+			ownerAnnotation: string(json),
+		}, nil
 	}
-	c, err := parseComment(svc)
+	o, err := parseOwnerAnnotation(svc)
 	if err != nil {
-		return "", fmt.Errorf("error parsing existing VIPService comment: %w", err)
+		return nil, err
 	}
-	if c == nil || len(c.OwnerRefs) == 0 {
-		return "", fmt.Errorf("VIPService %s exists, but does not contain Comment field with owner references- not proceeding as this is likely a resource created by something other than a Tailscale Kubernetes Operator", svc.Name)
+	if o == nil || len(o.OwnerRefs) == 0 {
+		return nil, fmt.Errorf("VIPService %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name)
 	}
-	if slices.Contains(c.OwnerRefs, ref) { // up to date
-		return svc.Comment, nil
+	if slices.Contains(o.OwnerRefs, ref) { // up to date
+		return svc.Annotations, nil
 	}
-	c.OwnerRefs = append(c.OwnerRefs, ref)
-	json, err := json.Marshal(c)
+	o.OwnerRefs = append(o.OwnerRefs, ref)
+	json, err := json.Marshal(o)
 	if err != nil {
-		return "", fmt.Errorf("error marshalling updated owner references: %w", err)
+		return nil, fmt.Errorf("error marshalling updated owner references: %w", err)
 	}
-	return string(json), nil
+
+	newAnnots := make(map[string]string, len(svc.Annotations)+1)
+	for k, v := range svc.Annotations {
+		newAnnots[k] = v
+	}
+	newAnnots[ownerAnnotation] = string(json)
+	return newAnnots, nil
+}
+
+// parseOwnerAnnotation returns nil if no valid owner found.
+func parseOwnerAnnotation(vipSvc *tailscale.VIPService) (*ownerAnnotationValue, error) {
+	if vipSvc.Annotations == nil || vipSvc.Annotations[ownerAnnotation] == "" {
+		return nil, nil
+	}
+	o := &ownerAnnotationValue{}
+	if err := json.Unmarshal([]byte(vipSvc.Annotations[ownerAnnotation]), o); err != nil {
+		return nil, fmt.Errorf("error parsing VIPService %s annotation %q: %w", ownerAnnotation, vipSvc.Annotations[ownerAnnotation], err)
+	}
+	return o, nil
+}
+
+func ownersAreSetAndEqual(a, b *tailscale.VIPService) bool {
+	return a != nil && b != nil &&
+		a.Annotations != nil && b.Annotations != nil &&
+		a.Annotations[ownerAnnotation] != "" &&
+		b.Annotations[ownerAnnotation] != "" &&
+		strings.EqualFold(a.Annotations[ownerAnnotation], b.Annotations[ownerAnnotation])
+}

 // ensureCertResources ensures that the TLS Secret for an HA Ingress and RBAC
@@ -877,18 +906,6 @@ func (r *HAIngressReconciler) cleanupCertResources(ctx context.Context, pgName string ...
 	return nil
 }

-// parseComment returns VIPService comment or nil if none found or not matching the expected format.
-func parseComment(vipSvc *tailscale.VIPService) (*comment, error) {
-	if vipSvc.Comment == "" {
-		return nil, nil
-	}
-	c := &comment{}
-	if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil {
-		return nil, fmt.Errorf("error parsing VIPService Comment field %q: %w", vipSvc.Comment, err)
-	}
-	return c, nil
-}
-
 // requeueInterval returns a time duration between 5 and 10 minutes, which is
 // the period of time after which an HA Ingress, whose VIPService has been newly
 // created or changed, needs to be requeued. This is to protect against
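For reference, the value stored under the tailscale.com/owner-references annotation is just the JSON encoding of ownerAnnotationValue. With two operator instances sharing a VIPService it would look roughly like this (operator IDs illustrative):

```json
{"ownerRefs":[{"operatorID":"operator-1"},{"operatorID":"operator-2"}]}
```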
@@ -745,8 +745,10 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) {

 	// Simulate existing VIPService from another cluster
 	existingVIPSvc := &tailscale.VIPService{
-		Name:    "svc:my-svc",
-		Comment: `{"ownerrefs":[{"operatorID":"operator-2"}]}`,
+		Name: "svc:my-svc",
+		Annotations: map[string]string{
+			ownerAnnotation: `{"ownerrefs":[{"operatorID":"operator-2"}]}`,
+		},
 	}
 	ft.vipServices = map[tailcfg.ServiceName]*tailscale.VIPService{
 		"svc:my-svc": existingVIPSvc,
@@ -763,17 +765,17 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) {
 		t.Fatal("VIPService not found")
 	}

-	c := &comment{}
-	if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil {
-		t.Fatalf("parsing comment: %v", err)
+	o, err := parseOwnerAnnotation(vipSvc)
+	if err != nil {
+		t.Fatalf("parsing owner annotation: %v", err)
 	}

 	wantOwnerRefs := []OwnerRef{
 		{OperatorID: "operator-2"},
 		{OperatorID: "operator-1"},
 	}
-	if !reflect.DeepEqual(c.OwnerRefs, wantOwnerRefs) {
-		t.Errorf("incorrect owner refs\ngot: %+v\nwant: %+v", c.OwnerRefs, wantOwnerRefs)
+	if !reflect.DeepEqual(o.OwnerRefs, wantOwnerRefs) {
+		t.Errorf("incorrect owner refs\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs)
 	}

 	// Delete the Ingress and verify VIPService still exists with one owner ref
@@ -790,15 +792,15 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) {
 		t.Fatal("VIPService was incorrectly deleted")
 	}

-	c = &comment{}
-	if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil {
-		t.Fatalf("parsing comment after deletion: %v", err)
+	o, err = parseOwnerAnnotation(vipSvc)
+	if err != nil {
+		t.Fatalf("parsing owner annotation: %v", err)
 	}

 	wantOwnerRefs = []OwnerRef{
 		{OperatorID: "operator-2"},
 	}
-	if !reflect.DeepEqual(c.OwnerRefs, wantOwnerRefs) {
-		t.Errorf("incorrect owner refs after deletion\ngot: %+v\nwant: %+v", c.OwnerRefs, wantOwnerRefs)
+	if !reflect.DeepEqual(o.OwnerRefs, wantOwnerRefs) {
+		t.Errorf("incorrect owner refs after deletion\ngot: %+v\nwant: %+v", o.OwnerRefs, wantOwnerRefs)
 	}
 }
@@ -6,6 +6,7 @@
 package main

 import (
+	"context"
 	"testing"

 	"go.uber.org/zap"
@@ -15,17 +16,18 @@ import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"tailscale.com/ipn"
 	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
 	"tailscale.com/kube/kubetypes"
+	"tailscale.com/tstest"
 	"tailscale.com/types/ptr"
 	"tailscale.com/util/mak"
 )

 func TestTailscaleIngress(t *testing.T) {
-	tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}}
-	fc := fake.NewFakeClient(tsIngressClass)
+	fc := fake.NewFakeClient(ingressClass())
 	ft := &fakeTSClient{}
 	fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
 	zl, err := zap.NewDevelopment()
@@ -46,45 +48,8 @@ func TestTailscaleIngress(t *testing.T) {
 	}

 	// 1. Resources get created for regular Ingress
-	ing := &networkingv1.Ingress{
-		TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test",
-			Namespace: "default",
-			// The apiserver is supposed to set the UID, but the fake client
-			// doesn't. So, set it explicitly because other code later depends
-			// on it being set.
-			UID: types.UID("1234-UID"),
-		},
-		Spec: networkingv1.IngressSpec{
-			IngressClassName: ptr.To("tailscale"),
-			DefaultBackend: &networkingv1.IngressBackend{
-				Service: &networkingv1.IngressServiceBackend{
-					Name: "test",
-					Port: networkingv1.ServiceBackendPort{
-						Number: 8080,
-					},
-				},
-			},
-			TLS: []networkingv1.IngressTLS{
-				{Hosts: []string{"default-test"}},
-			},
-		},
-	}
-	mustCreate(t, fc, ing)
-	mustCreate(t, fc, &corev1.Service{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test",
-			Namespace: "default",
-		},
-		Spec: corev1.ServiceSpec{
-			ClusterIP: "1.2.3.4",
-			Ports: []corev1.ServicePort{{
-				Port: 8080,
-				Name: "http"},
-			},
-		},
-	})
+	mustCreate(t, fc, ingress())
+	mustCreate(t, fc, service())

 	expectReconciled(t, ingR, "default", "test")
@@ -114,6 +79,9 @@ func TestTailscaleIngress(t *testing.T) {
 		mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net"))
 	})
 	expectReconciled(t, ingR, "default", "test")
+
+	// Get the ingress and update it with expected changes
+	ing := ingress()
 	ing.Finalizers = append(ing.Finalizers, "tailscale.com/finalizer")
 	ing.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{
 		Ingress: []networkingv1.IngressLoadBalancerIngress{
@@ -143,8 +111,7 @@ func TestTailscaleIngress(t *testing.T) {
 }

 func TestTailscaleIngressHostname(t *testing.T) {
-	tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}}
-	fc := fake.NewFakeClient(tsIngressClass)
+	fc := fake.NewFakeClient(ingressClass())
 	ft := &fakeTSClient{}
 	fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
 	zl, err := zap.NewDevelopment()
@@ -165,45 +132,8 @@ func TestTailscaleIngressHostname(t *testing.T) {
 	}

 	// 1. Resources get created for regular Ingress
-	ing := &networkingv1.Ingress{
-		TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test",
-			Namespace: "default",
-			// The apiserver is supposed to set the UID, but the fake client
-			// doesn't. So, set it explicitly because other code later depends
-			// on it being set.
-			UID: types.UID("1234-UID"),
-		},
-		Spec: networkingv1.IngressSpec{
-			IngressClassName: ptr.To("tailscale"),
-			DefaultBackend: &networkingv1.IngressBackend{
-				Service: &networkingv1.IngressServiceBackend{
-					Name: "test",
-					Port: networkingv1.ServiceBackendPort{
-						Number: 8080,
-					},
-				},
-			},
-			TLS: []networkingv1.IngressTLS{
-				{Hosts: []string{"default-test"}},
-			},
-		},
-	}
-	mustCreate(t, fc, ing)
-	mustCreate(t, fc, &corev1.Service{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test",
-			Namespace: "default",
-		},
-		Spec: corev1.ServiceSpec{
-			ClusterIP: "1.2.3.4",
-			Ports: []corev1.ServicePort{{
-				Port: 8080,
-				Name: "http"},
-			},
-		},
-	})
+	mustCreate(t, fc, ingress())
+	mustCreate(t, fc, service())

 	expectReconciled(t, ingR, "default", "test")
@@ -241,8 +171,10 @@ func TestTailscaleIngressHostname(t *testing.T) {
 		mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net"))
 	})
 	expectReconciled(t, ingR, "default", "test")
-	ing.Finalizers = append(ing.Finalizers, "tailscale.com/finalizer")
+
+	// Get the ingress and update it with expected changes
+	ing := ingress()
+	ing.Finalizers = append(ing.Finalizers, "tailscale.com/finalizer")
 	expectEqual(t, fc, ing)

 	// 3. Ingress proxy with capability version >= 110 advertises HTTPS endpoint
@@ -299,10 +231,9 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) {
 		Annotations: map[string]string{"bar.io/foo": "some-val"},
 		Pod:         &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}},
 	}
-	tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}}
 	fc := fake.NewClientBuilder().
 		WithScheme(tsapi.GlobalScheme).
-		WithObjects(pc, tsIngressClass).
+		WithObjects(pc, ingressClass()).
 		WithStatusSubresource(pc).
 		Build()
 	ft := &fakeTSClient{}
@@ -326,45 +257,8 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) {

 	// 1. Ingress is created with no ProxyClass specified, default proxy
 	// resources get configured.
-	ing := &networkingv1.Ingress{
-		TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test",
-			Namespace: "default",
-			// The apiserver is supposed to set the UID, but the fake client
-			// doesn't. So, set it explicitly because other code later depends
-			// on it being set.
-			UID: types.UID("1234-UID"),
-		},
-		Spec: networkingv1.IngressSpec{
-			IngressClassName: ptr.To("tailscale"),
-			DefaultBackend: &networkingv1.IngressBackend{
-				Service: &networkingv1.IngressServiceBackend{
-					Name: "test",
-					Port: networkingv1.ServiceBackendPort{
-						Number: 8080,
-					},
-				},
-			},
-			TLS: []networkingv1.IngressTLS{
-				{Hosts: []string{"default-test"}},
-			},
-		},
-	}
-	mustCreate(t, fc, ing)
-	mustCreate(t, fc, &corev1.Service{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test",
-			Namespace: "default",
-		},
-		Spec: corev1.ServiceSpec{
-			ClusterIP: "1.2.3.4",
-			Ports: []corev1.ServicePort{{
-				Port: 8080,
-				Name: "http"},
-			},
-		},
-	})
+	mustCreate(t, fc, ingress())
+	mustCreate(t, fc, service())

 	expectReconciled(t, ingR, "default", "test")
@@ -432,54 +326,19 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) {
 		ObservedGeneration: 1,
 	}}},
 	}
-	ing := &networkingv1.Ingress{
-		TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test",
-			Namespace: "default",
-			// The apiserver is supposed to set the UID, but the fake client
-			// doesn't. So, set it explicitly because other code later depends
-			// on it being set.
-			UID: types.UID("1234-UID"),
-			Labels: map[string]string{
-				"tailscale.com/proxy-class": "metrics",
-			},
-		},
-		Spec: networkingv1.IngressSpec{
-			IngressClassName: ptr.To("tailscale"),
-			DefaultBackend: &networkingv1.IngressBackend{
-				Service: &networkingv1.IngressServiceBackend{
-					Name: "test",
-					Port: networkingv1.ServiceBackendPort{
-						Number: 8080,
-					},
-				},
-			},
-			TLS: []networkingv1.IngressTLS{
-				{Hosts: []string{"default-test"}},
-			},
-		},
-	}
-	svc := &corev1.Service{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test",
-			Namespace: "default",
-		},
-		Spec: corev1.ServiceSpec{
-			ClusterIP: "1.2.3.4",
-			Ports: []corev1.ServicePort{{
-				Port: 8080,
-				Name: "http"},
-			},
-		},
-	}
 	crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}}
-	tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}}

+	// Create fake client with ProxyClass, IngressClass, Ingress with metrics ProxyClass, and Service
+	ing := ingress()
+	ing.Labels = map[string]string{
+		LabelProxyClass: "metrics",
+	}
 	fc := fake.NewClientBuilder().
 		WithScheme(tsapi.GlobalScheme).
-		WithObjects(pc, tsIngressClass, ing, svc).
+		WithObjects(pc, ingressClass(), ing, service()).
 		WithStatusSubresource(pc).
 		Build()

 	ft := &fakeTSClient{}
 	fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
 	zl, err := zap.NewDevelopment()
@@ -560,3 +419,118 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) {
 	expectMissing[corev1.Service](t, fc, "operator-ns", metricsResourceName(shortName))
 	// ServiceMonitor gets garbage collected when the Service is deleted - we cannot test that here.
 }
+
+func TestIngressLetsEncryptStaging(t *testing.T) {
+	cl := tstest.NewClock(tstest.ClockOpts{})
+	zl := zap.Must(zap.NewDevelopment())
+
+	pcLEStaging, pcLEStagingFalse, pcOther := proxyClassesForLEStagingTest()
+
+	testCases := testCasesForLEStagingTests(pcLEStaging, pcLEStagingFalse, pcOther)
+
+	for _, tt := range testCases {
+		t.Run(tt.name, func(t *testing.T) {
+			builder := fake.NewClientBuilder().
+				WithScheme(tsapi.GlobalScheme)
+
+			builder = builder.WithObjects(pcLEStaging, pcLEStagingFalse, pcOther).
+				WithStatusSubresource(pcLEStaging, pcLEStagingFalse, pcOther)
+
+			fc := builder.Build()
+
+			if tt.proxyClassPerResource != "" || tt.defaultProxyClass != "" {
+				name := tt.proxyClassPerResource
+				if name == "" {
+					name = tt.defaultProxyClass
+				}
+				setProxyClassReady(t, fc, cl, name)
+			}
+
+			mustCreate(t, fc, ingressClass())
+			mustCreate(t, fc, service())
+			ing := ingress()
+			if tt.proxyClassPerResource != "" {
+				ing.Labels = map[string]string{
+					LabelProxyClass: tt.proxyClassPerResource,
+				}
+			}
+			mustCreate(t, fc, ing)
+
+			ingR := &IngressReconciler{
+				Client: fc,
+				ssr: &tailscaleSTSReconciler{
+					Client:            fc,
+					tsClient:          &fakeTSClient{},
+					tsnetServer:       &fakeTSNetServer{certDomains: []string{"test-host"}},
+					defaultTags:       []string{"tag:test"},
+					operatorNamespace: "operator-ns",
+					proxyImage:        "tailscale/tailscale:test",
+				},
+				logger:            zl.Sugar(),
+				defaultProxyClass: tt.defaultProxyClass,
+			}
+
+			expectReconciled(t, ingR, "default", "test")
+
+			_, shortName := findGenName(t, fc, "default", "test", "ingress")
+			sts := &appsv1.StatefulSet{}
+			if err := fc.Get(context.Background(), client.ObjectKey{Namespace: "operator-ns", Name: shortName}, sts); err != nil {
+				t.Fatalf("failed to get StatefulSet: %v", err)
+			}
+
+			if tt.useLEStagingEndpoint {
+				verifyEnvVar(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint)
+			} else {
+				verifyEnvVarNotPresent(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL")
+			}
+		})
+	}
+}
+
+func ingressClass() *networkingv1.IngressClass {
+	return &networkingv1.IngressClass{
+		ObjectMeta: metav1.ObjectMeta{Name: "tailscale"},
+		Spec:       networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"},
+	}
+}
+
+func service() *corev1.Service {
+	return &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "default",
+		},
+		Spec: corev1.ServiceSpec{
+			ClusterIP: "1.2.3.4",
+			Ports: []corev1.ServicePort{{
+				Port: 8080,
+				Name: "http"},
+			},
+		},
+	}
+}
+
+func ingress() *networkingv1.Ingress {
+	return &networkingv1.Ingress{
+		TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "default",
+			UID:       types.UID("1234-UID"),
+		},
+		Spec: networkingv1.IngressSpec{
+			IngressClassName: ptr.To("tailscale"),
+			DefaultBackend: &networkingv1.IngressBackend{
+				Service: &networkingv1.IngressServiceBackend{
+					Name: "test",
+					Port: networkingv1.ServiceBackendPort{
+						Number: 8080,
+					},
+				},
+			},
+			TLS: []networkingv1.IngressTLS{
+				{Hosts: []string{"default-test"}},
+			},
		},
	}
+}
@@ -302,7 +302,10 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup ...
 	if err != nil {
 		return fmt.Errorf("error generating StatefulSet spec: %w", err)
 	}
-	ss = applyProxyClassToStatefulSet(proxyClass, ss, nil, logger)
+	cfg := &tailscaleSTSConfig{
+		proxyType: string(pg.Spec.Type),
+	}
+	ss = applyProxyClassToStatefulSet(proxyClass, ss, cfg, logger)
 	capver, err := r.capVerForPG(ctx, pg, logger)
 	if err != nil {
 		return fmt.Errorf("error getting device info: %w", err)
@@ -518,6 +518,60 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) {
 	})
 }

+func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsapi.ProxyClass) {
+	pcLEStaging := &tsapi.ProxyClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "le-staging",
+			Generation: 1,
+		},
+		Spec: tsapi.ProxyClassSpec{
+			UseLetsEncryptStagingEnvironment: true,
+		},
+	}
+
+	pcLEStagingFalse := &tsapi.ProxyClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "le-staging-false",
+			Generation: 1,
+		},
+		Spec: tsapi.ProxyClassSpec{
+			UseLetsEncryptStagingEnvironment: false,
+		},
+	}
+
+	pcOther := &tsapi.ProxyClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "other",
+			Generation: 1,
+		},
+		Spec: tsapi.ProxyClassSpec{},
+	}
+
+	return pcLEStaging, pcLEStagingFalse, pcOther
+}
+
+func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name string) *tsapi.ProxyClass {
+	t.Helper()
+	pc := &tsapi.ProxyClass{}
+	if err := fc.Get(context.Background(), client.ObjectKey{Name: name}, pc); err != nil {
+		t.Fatal(err)
+	}
+	pc.Status = tsapi.ProxyClassStatus{
+		Conditions: []metav1.Condition{{
+			Type:               string(tsapi.ProxyClassReady),
+			Status:             metav1.ConditionTrue,
+			Reason:             reasonProxyClassValid,
+			Message:            reasonProxyClassValid,
+			LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)},
+			ObservedGeneration: pc.Generation,
+		}},
+	}
+	if err := fc.Status().Update(context.Background(), pc); err != nil {
+		t.Fatal(err)
+	}
+	return pc
+}
+
 func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) {
 	t.Helper()
 	if r.ingressProxyGroups.Len() != wantIngress {
@@ -541,6 +595,16 @@ func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, expectedValue string) {
 	t.Errorf("%s environment variable not found", name)
 }

+func verifyEnvVarNotPresent(t *testing.T, sts *appsv1.StatefulSet, name string) {
+	t.Helper()
+	for _, env := range sts.Spec.Template.Spec.Containers[0].Env {
+		if env.Name == name {
+			t.Errorf("environment variable %s should not be present", name)
+			return
+		}
+	}
+}
+
 func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string, proxyClass *tsapi.ProxyClass) {
 	t.Helper()
@@ -618,3 +682,146 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup) {
 	})
 }
+
+func TestProxyGroupLetsEncryptStaging(t *testing.T) {
+	cl := tstest.NewClock(tstest.ClockOpts{})
+	zl := zap.Must(zap.NewDevelopment())
+
+	// Set up test cases - most are shared with non-HA Ingress.
+	type proxyGroupLETestCase struct {
+		leStagingTestCase
+		pgType tsapi.ProxyGroupType
+	}
+	pcLEStaging, pcLEStagingFalse, pcOther := proxyClassesForLEStagingTest()
+	sharedTestCases := testCasesForLEStagingTests(pcLEStaging, pcLEStagingFalse, pcOther)
+	var tests []proxyGroupLETestCase
+	for _, tt := range sharedTestCases {
+		tests = append(tests, proxyGroupLETestCase{
+			leStagingTestCase: tt,
+			pgType:            tsapi.ProxyGroupTypeIngress,
+		})
+	}
+	tests = append(tests, proxyGroupLETestCase{
+		leStagingTestCase: leStagingTestCase{
+			name:                  "egress_pg_with_staging_proxyclass",
+			proxyClassPerResource: "le-staging",
+			useLEStagingEndpoint:  false,
+		},
+		pgType: tsapi.ProxyGroupTypeEgress,
+	})
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			builder := fake.NewClientBuilder().
+				WithScheme(tsapi.GlobalScheme)
+
+			// Pre-populate the fake client with ProxyClasses.
+			builder = builder.WithObjects(pcLEStaging, pcLEStagingFalse, pcOther).
+				WithStatusSubresource(pcLEStaging, pcLEStagingFalse, pcOther)
+
+			fc := builder.Build()
+
+			// If the test case needs a ProxyClass to exist, ensure it is set to Ready.
+			if tt.proxyClassPerResource != "" || tt.defaultProxyClass != "" {
+				name := tt.proxyClassPerResource
+				if name == "" {
+					name = tt.defaultProxyClass
+				}
+				setProxyClassReady(t, fc, cl, name)
+			}
+
+			// Create ProxyGroup
+			pg := &tsapi.ProxyGroup{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test",
+				},
+				Spec: tsapi.ProxyGroupSpec{
+					Type:       tt.pgType,
+					Replicas:   ptr.To[int32](1),
+					ProxyClass: tt.proxyClassPerResource,
+				},
+			}
+			mustCreate(t, fc, pg)
+
+			reconciler := &ProxyGroupReconciler{
+				tsNamespace:       tsNamespace,
+				proxyImage:        testProxyImage,
+				defaultTags:       []string{"tag:test"},
+				defaultProxyClass: tt.defaultProxyClass,
+				Client:            fc,
+				tsClient:          &fakeTSClient{},
+				l:                 zl.Sugar(),
+				clock:             cl,
+			}
+
+			expectReconciled(t, reconciler, "", pg.Name)
+
+			// Verify that the StatefulSet created for the ProxyGroup has
+			// the expected setting for the staging endpoint.
+			sts := &appsv1.StatefulSet{}
+			if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil {
+				t.Fatalf("failed to get StatefulSet: %v", err)
+			}
+
+			if tt.useLEStagingEndpoint {
+				verifyEnvVar(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint)
+			} else {
+				verifyEnvVarNotPresent(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL")
+			}
+		})
+	}
+}
+
+type leStagingTestCase struct {
+	name string
+	// ProxyClass set on ProxyGroup or Ingress resource.
+	proxyClassPerResource string
+	// Default ProxyClass.
+	defaultProxyClass    string
+	useLEStagingEndpoint bool
+}
+
+// Shared test cases for LE staging endpoint configuration for ProxyGroup and
+// non-HA Ingress.
+func testCasesForLEStagingTests(pcLEStaging, pcLEStagingFalse, pcOther *tsapi.ProxyClass) []leStagingTestCase {
+	return []leStagingTestCase{
+		{
+			name:                  "with_staging_proxyclass",
+			proxyClassPerResource: "le-staging",
+			useLEStagingEndpoint:  true,
+		},
+		{
+			name:                  "with_staging_proxyclass_false",
+			proxyClassPerResource: "le-staging-false",
+			useLEStagingEndpoint:  false,
+		},
+		{
+			name:                  "with_other_proxyclass",
+			proxyClassPerResource: "other",
+			useLEStagingEndpoint:  false,
+		},
+		{
+			name:                  "no_proxyclass",
+			proxyClassPerResource: "",
+			useLEStagingEndpoint:  false,
+		},
+		{
+			name:                  "with_default_staging_proxyclass",
+			proxyClassPerResource: "",
+			defaultProxyClass:     "le-staging",
+			useLEStagingEndpoint:  true,
+		},
+		{
+			name:                  "with_default_other_proxyclass",
+			proxyClassPerResource: "",
+			defaultProxyClass:     "other",
+			useLEStagingEndpoint:  false,
+		},
+		{
+			name:                  "with_default_staging_proxyclass_false",
+			proxyClassPerResource: "",
+			defaultProxyClass:     "le-staging-false",
+			useLEStagingEndpoint:  false,
+		},
+	}
+}
@@ -102,6 +102,8 @@ const (

 	envVarTSLocalAddrPort = "TS_LOCAL_ADDR_PORT"
 	defaultLocalAddrPort  = 9002 // metrics and health check port
+
+	letsEncryptStagingEndpoint = "https://acme-staging-v02.api.letsencrypt.org/directory"
 )

 var (
@@ -783,6 +785,17 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, ...
 			enableEndpoints(ss, metricsEnabled, debugEnabled)
 		}
 	}
+	if pc.Spec.UseLetsEncryptStagingEnvironment && (stsCfg.proxyType == proxyTypeIngressResource || stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress)) {
+		for i, c := range ss.Spec.Template.Spec.Containers {
+			if c.Name == "tailscale" {
+				ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{
+					Name:  "TS_DEBUG_ACME_DIRECTORY_URL",
+					Value: letsEncryptStagingEndpoint,
+				})
+				break
+			}
+		}
+	}

 	if pc.Spec.StatefulSet == nil {
 		return ss
@@ -136,6 +136,17 @@ func debugCmd() *ffcli.Command {
 		Exec:      runLocalCreds,
 		ShortHelp: "Print how to access Tailscale LocalAPI",
 	},
+	{
+		Name:       "localapi",
+		ShortUsage: "tailscale debug localapi [<method>] <path> [<body| \"-\">]",
+		Exec:       runLocalAPI,
+		ShortHelp:  "Call a LocalAPI method directly",
+		FlagSet: (func() *flag.FlagSet {
+			fs := newFlagSet("localapi")
+			fs.BoolVar(&localAPIFlags.verbose, "v", false, "verbose; dump HTTP headers")
+			return fs
+		})(),
+	},
 	{
 		Name:       "restun",
 		ShortUsage: "tailscale debug restun",
@@ -451,6 +462,81 @@ func runLocalCreds(ctx context.Context, args []string) error {
 	return nil
 }

+func looksLikeHTTPMethod(s string) bool {
+	if len(s) > len("OPTIONS") {
+		return false
+	}
+	for _, r := range s {
+		if r < 'A' || r > 'Z' {
+			return false
+		}
+	}
+	return true
+}
+
+var localAPIFlags struct {
+	verbose bool
+}
+
+func runLocalAPI(ctx context.Context, args []string) error {
+	if len(args) == 0 {
+		return errors.New("expected at least one argument")
+	}
+	method := "GET"
+	if looksLikeHTTPMethod(args[0]) {
+		method = args[0]
+		args = args[1:]
+		if len(args) == 0 {
+			return errors.New("expected at least one argument after method")
+		}
+	}
+	path := args[0]
+	if !strings.HasPrefix(path, "/localapi/") {
+		if !strings.Contains(path, "/") {
+			path = "/localapi/v0/" + path
+		} else {
+			path = "/localapi/" + path
+		}
+	}
+
+	var body io.Reader
+	if len(args) > 1 {
+		if args[1] == "-" {
+			fmt.Fprintf(Stderr, "# reading request body from stdin...\n")
+			all, err := io.ReadAll(os.Stdin)
+			if err != nil {
+				return fmt.Errorf("reading Stdin: %q", err)
+			}
+			body = bytes.NewReader(all)
+		} else {
+			body = strings.NewReader(args[1])
+		}
+	}
+	req, err := http.NewRequest(method, "http://local-tailscaled.sock"+path, body)
+	if err != nil {
+		return err
+	}
+	fmt.Fprintf(Stderr, "# doing request %s %s\n", method, path)
+
+	res, err := localClient.DoLocalRequest(req)
+	if err != nil {
+		return err
+	}
+	is2xx := res.StatusCode >= 200 && res.StatusCode <= 299
+	if localAPIFlags.verbose {
+		res.Write(Stdout)
+	} else {
+		if !is2xx {
+			fmt.Fprintf(Stderr, "# Response status %s\n", res.Status)
+		}
+		io.Copy(Stdout, res.Body)
+	}
+	if is2xx {
+		return nil
+	}
+	return errors.New(res.Status)
+}
+
 type localClientRoundTripper struct{}

 func (localClientRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
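Per the path-expansion logic above, a bare endpoint name becomes /localapi/v0/<name> and the method defaults to GET. Hypothetical invocations showing the argument shapes (the endpoint names are illustrative examples of LocalAPI paths, not an exhaustive or authoritative list):

```sh
# GET, bare name expanded to /localapi/v0/status
tailscale debug localapi status

# explicit method
tailscale debug localapi POST bugreport

# request body from stdin ("-"), with verbose header dump
echo '{}' | tailscale debug localapi -v PATCH prefs -
```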
@@ -27,6 +27,8 @@ type VIPService struct {
 	Addrs []string `json:"addrs,omitempty"`
 	// Comment is an optional text string for display in the admin panel.
 	Comment string `json:"comment,omitempty"`
+	// Annotations are optional key-value pairs that can be used to store arbitrary metadata.
+	Annotations map[string]string `json:"annotations,omitempty"`
 	// Ports are the ports of a VIPService that will be configured via Tailscale serve config.
 	// If set, any node wishing to advertise this VIPService must have this port configured via Tailscale serve.
 	Ports []string `json:"ports,omitempty"`
@@ -2380,12 +2380,10 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
 	}
 	b.applyPrefsToHostinfoLocked(hostinfo, prefs)

-	b.setNetMapLocked(nil)
 	persistv := prefs.Persist().AsStruct()
 	if persistv == nil {
 		persistv = new(persist.Persist)
 	}
-	b.updateFilterLocked(nil, ipn.PrefsView{})

 	if b.portpoll != nil {
 		b.portpollOnce.Do(func() {
@@ -3481,18 +3479,20 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) {
 		// can still manually enable auto-updates on this node.
 		return
 	}
-	b.logf("using tailnet default auto-update setting: %v", au)
-	prefsClone := prefs.AsStruct()
-	prefsClone.AutoUpdate.Apply = opt.NewBool(au)
-	_, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{
-		Prefs: *prefsClone,
-		AutoUpdateSet: ipn.AutoUpdatePrefsMask{
-			ApplySet: true,
-		},
-	}, unlock)
-	if err != nil {
-		b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err)
-		return
+	if clientupdate.CanAutoUpdate() {
+		b.logf("using tailnet default auto-update setting: %v", au)
+		prefsClone := prefs.AsStruct()
+		prefsClone.AutoUpdate.Apply = opt.NewBool(au)
+		_, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{
+			Prefs: *prefsClone,
+			AutoUpdateSet: ipn.AutoUpdatePrefsMask{
+				ApplySet: true,
+			},
+		}, unlock)
+		if err != nil {
+			b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err)
+			return
+		}
 	}
 }

@@ -5920,6 +5920,9 @@ func (b *LocalBackend) requestEngineStatusAndWait() {
 	b.logf("requestEngineStatusAndWait: got status update.")
 }

+// [controlclient.Auto] implements [auditlog.Transport].
+var _ auditlog.Transport = (*controlclient.Auto)(nil)
+
 // setControlClientLocked sets the control client to cc,
 // which may be nil.
 //
@@ -5927,12 +5930,12 @@ func (b *LocalBackend) requestEngineStatusAndWait() {
 func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) {
 	b.cc = cc
 	b.ccAuto, _ = cc.(*controlclient.Auto)
-	if b.auditLogger != nil {
+	if t, ok := b.cc.(auditlog.Transport); ok && b.auditLogger != nil {
 		if err := b.auditLogger.SetProfileID(b.pm.CurrentProfile().ID()); err != nil {
 			b.logf("audit logger set profile ID failure: %v", err)
 		}

-		if err := b.auditLogger.Start(b.ccAuto); err != nil {
+		if err := b.auditLogger.Start(t); err != nil {
 			b.logf("audit logger start failure: %v", err)
 		}
 	}
@@ -7531,6 +7534,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) error {
 		return nil
 	}
 	b.setNetMapLocked(nil) // Reset netmap.
+	b.updateFilterLocked(nil, ipn.PrefsView{})
 	// Reset the NetworkMap in the engine
 	b.e.SetNetworkMap(new(netmap.NetworkMap))
 	if prevCC := b.resetControlClientLocked(); prevCC != nil {
@@ -1510,6 +1510,15 @@ func TestReconfigureAppConnector(t *testing.T) {
 func TestBackfillAppConnectorRoutes(t *testing.T) {
 	// Create backend with an empty app connector.
 	b := newTestBackend(t)
+	// newTestBackend creates a backend with a non-nil netmap,
+	// but this test requires a nil netmap.
+	// Otherwise, instead of backfilling, [LocalBackend.reconfigAppConnectorLocked]
+	// uses the domains and routes from netmap's [appctype.AppConnectorAttr].
+	// Additionally, a non-nil netmap makes reconfigAppConnectorLocked
+	// asynchronous, resulting in a flaky test.
+	// Therefore, we set the netmap to nil to simulate a fresh backend start
+	// or a profile switch where the netmap is not yet available.
+	b.setNetMapLocked(nil)
 	if err := b.Start(ipn.Options{}); err != nil {
 		t.Fatal(err)
 	}
@@ -735,12 +735,10 @@ func TestStateMachine(t *testing.T) {
 	// b.Shutdown() explicitly ourselves.
 	previousCC.assertShutdown(false)

-	// Note: unpause happens because ipn needs to get at least one netmap
-	// on startup, otherwise UIs can't show the node list, login
-	// name, etc when in state ipn.Stopped.
-	// Arguably they shouldn't try. But they currently do.
 	nn := notifies.drain(2)
-	cc.assertCalls("New", "Login")
+	// We already have a netmap for this node,
+	// and WantRunning is false, so cc should be paused.
+	cc.assertCalls("New", "Login", "pause")
 	c.Assert(nn[0].Prefs, qt.IsNotNil)
 	c.Assert(nn[1].State, qt.IsNotNil)
 	c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse)
@@ -751,7 +749,11 @@ func TestStateMachine(t *testing.T) {
 	// When logged in but !WantRunning, ipn leaves us unpaused to retrieve
 	// the first netmap. Simulate that netmap being received, after which
 	// it should pause us, to avoid wasting CPU retrieving unnecessarily
-	// additional netmap updates.
+	// additional netmap updates. Since our LocalBackend instance already
+	// has a netmap, we will reset it to nil to simulate the first netmap
+	// retrieval.
+	b.setNetMapLocked(nil)
 	cc.assertCalls("unpause")
 	//
 	// TODO: really the various GUIs and prefs should be refactored to
 	// not require the netmap structure at all when starting while
@@ -853,7 +855,7 @@ func TestStateMachine(t *testing.T) {
 	// The last test case is the most common one: restarting when both
 	// logged in and WantRunning.
 	t.Logf("\n\nStart5")
-	notifies.expect(1)
+	notifies.expect(2)
 	c.Assert(b.Start(ipn.Options{}), qt.IsNil)
 	{
 		// NOTE: cc.Shutdown() is correct here, since we didn't call
@@ -861,30 +863,32 @@ func TestStateMachine(t *testing.T) {
 		previousCC.assertShutdown(false)
 		cc.assertCalls("New", "Login")

-		nn := notifies.drain(1)
+		nn := notifies.drain(2)
 		cc.assertCalls()
 		c.Assert(nn[0].Prefs, qt.IsNotNil)
 		c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse)
 		c.Assert(nn[0].Prefs.WantRunning(), qt.IsTrue)
-		c.Assert(b.State(), qt.Equals, ipn.NoState)
+		// We're logged in and have a valid netmap, so we should
+		// be in the Starting state.
+		c.Assert(nn[1].State, qt.IsNotNil)
+		c.Assert(*nn[1].State, qt.Equals, ipn.Starting)
+		c.Assert(b.State(), qt.Equals, ipn.Starting)
 	}

 	// Control server accepts our valid key from before.
 	t.Logf("\n\nLoginFinished5")
-	notifies.expect(1)
+	notifies.expect(0)
 	cc.send(nil, "", true, &netmap.NetworkMap{
 		SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(),
 	})
 	{
-		nn := notifies.drain(1)
+		notifies.drain(0)
 		cc.assertCalls()
 		// NOTE: No LoginFinished message since no interactive
 		// login was needed.
-		c.Assert(nn[0].State, qt.IsNotNil)
-		c.Assert(ipn.Starting, qt.Equals, *nn[0].State)
 		// NOTE: No prefs change this time. WantRunning stays true.
 		// We were in Starting in the first place, so that doesn't
-		// change either.
+		// change either, so we don't expect any notifications.
 		c.Assert(ipn.Starting, qt.Equals, b.State())
 	}
 	t.Logf("\n\nExpireKey")
@@ -517,6 +517,7 @@ _Appears in:_
 | `statefulSet` _[StatefulSet](#statefulset)_ | Configuration parameters for the proxy's StatefulSet. Tailscale<br />Kubernetes operator deploys a StatefulSet for each of the user<br />configured proxies (Tailscale Ingress, Tailscale Service, Connector). | | |
 | `metrics` _[Metrics](#metrics)_ | Configuration for proxy metrics. Metrics are currently not supported<br />for egress proxies and for Ingress proxies that have been configured<br />with tailscale.com/experimental-forward-cluster-traffic-via-ingress<br />annotation. Note that the metrics are currently considered unstable<br />and will likely change in breaking ways in the future - we only<br />recommend that you use those for debugging purposes. | | |
 | `tailscale` _[TailscaleConfig](#tailscaleconfig)_ | TailscaleConfig contains options to configure the tailscale-specific<br />parameters of proxies. | | |
+| `useLetsEncryptStagingEnvironment` _boolean_ | Set UseLetsEncryptStagingEnvironment to true to issue TLS<br />certificates for any HTTPS endpoints exposed to the tailnet from<br />LetsEncrypt's staging environment.<br />https://letsencrypt.org/docs/staging-environment/<br />This setting only affects Tailscale Ingress resources.<br />By default Ingress TLS certificates are issued from LetsEncrypt's<br />production environment.<br />Changing this setting true -> false, will result in any<br />existing certs being re-issued from the production environment.<br />Changing this setting false (default) -> true, when certs have already<br />been provisioned from production environment will NOT result in certs<br />being re-issued from the staging environment before they need to be<br />renewed. | | |


 #### ProxyClassStatus
@@ -66,6 +66,21 @@ type ProxyClassSpec struct {
 	// parameters of proxies.
 	// +optional
 	TailscaleConfig *TailscaleConfig `json:"tailscale,omitempty"`
+	// Set UseLetsEncryptStagingEnvironment to true to issue TLS
+	// certificates for any HTTPS endpoints exposed to the tailnet from
+	// LetsEncrypt's staging environment.
+	// https://letsencrypt.org/docs/staging-environment/
+	// This setting only affects Tailscale Ingress resources.
+	// By default Ingress TLS certificates are issued from LetsEncrypt's
+	// production environment.
+	// Changing this setting true -> false, will result in any
+	// existing certs being re-issued from the production environment.
+	// Changing this setting false (default) -> true, when certs have already
+	// been provisioned from production environment will NOT result in certs
+	// being re-issued from the staging environment before they need to be
+	// renewed.
+	// +optional
+	UseLetsEncryptStagingEnvironment bool `json:"useLetsEncryptStagingEnvironment,omitempty"`
 }

 type TailscaleConfig struct {
@@ -13,7 +13,7 @@ import (
 )

 func TestGetState(t *testing.T) {
-	st, err := GetState()
+	st, err := getState()
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -161,7 +161,7 @@ func (m *Monitor) InterfaceState() *State {
 }

 func (m *Monitor) interfaceStateUncached() (*State, error) {
-	return GetState()
+	return getState()
 }

 // SetTailscaleInterfaceName sets the name of the Tailscale interface. For

@@ -466,7 +466,7 @@ var getPAC func() string
 // It does not set the returned State.IsExpensive. The caller can populate that.
 //
-// Deprecated: use netmon.Monitor.InterfaceState instead.
-func GetState() (*State, error) {
+func getState() (*State, error) {
 	s := &State{
 		InterfaceIPs: make(map[string][]netip.Prefix),
 		Interface:    make(map[string]Interface),
@@ -26,6 +26,9 @@ func TestPackageDocs(t *testing.T) {
 	if err != nil {
 		return err
 	}
+	if fi.Mode().IsDir() && path == ".git" {
+		return filepath.SkipDir // No documentation lives in .git
+	}
 	if fi.Mode().IsRegular() && strings.HasSuffix(path, ".go") {
 		if strings.HasSuffix(path, "_test.go") {
 			return nil
@@ -61,7 +61,11 @@ func ConnectContext(ctx context.Context, path string) (net.Conn, error) {
 		if ctx.Err() != nil {
 			return nil, ctx.Err()
 		}
-		time.Sleep(250 * time.Millisecond)
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		case <-time.After(250 * time.Millisecond):
+		}
 		continue
 	}
 	return c, err
@@ -120,6 +120,7 @@ func startControl(t *testing.T) (controlURL string, control *testcontrol.Server) {
 		Proxied: true,
 	},
 	MagicDNSDomain: "tail-scale.ts.net",
+	Logf:           t.Logf,
 }
 control.HTTPTestServer = httptest.NewUnstartedServer(control)
 control.HTTPTestServer.Start()
@@ -221,7 +222,7 @@ func startServer(t *testing.T, ctx context.Context, controlURL, hostname string) ...
 		getCertForTesting: testCertRoot.getCert,
 	}
 	if *verboseNodes {
-		s.Logf = log.Printf
+		s.Logf = t.Logf
 	}
 	t.Cleanup(func() { s.Close() })

@@ -1942,6 +1942,8 @@ func (n *testNode) AwaitIP6() netip.Addr {

 // AwaitRunning waits for n to reach the IPN state "Running".
 func (n *testNode) AwaitRunning() {
+	t := n.env.t
+	t.Helper()
 	n.AwaitBackendState("Running")
 }

@@ -2015,7 +2017,7 @@ func (n *testNode) Status() (*ipnstate.Status, error) {
 	}
 	st := new(ipnstate.Status)
 	if err := json.Unmarshal(out, st); err != nil {
-		return nil, fmt.Errorf("decoding tailscale status JSON: %w", err)
+		return nil, fmt.Errorf("decoding tailscale status JSON: %w\njson:\n%s", err, out)
 	}
 	return st, nil
 }
|
||||
|
||||
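The new error message appends the raw CLI output while still wrapping the decode error with %w, so a reader sees the offending JSON and callers can still inspect the cause programmatically. A standalone illustration of why %w matters here:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

func main() {
	out := []byte(`{"BackendState": 42}`) // deliberately ill-typed input
	var st map[string]string
	if err := json.Unmarshal(out, &st); err != nil {
		wrapped := fmt.Errorf("decoding tailscale status JSON: %w\njson:\n%s", err, out)
		// %w keeps the original error in the chain even with the
		// raw payload appended for debugging.
		var typeErr *json.UnmarshalTypeError
		fmt.Println(errors.As(wrapped, &typeErr)) // true
		fmt.Println(wrapped)                      // message ends with the raw JSON
	}
}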
tstest/mts/mts.go (new file, 599 lines)
@@ -0,0 +1,599 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build linux || darwin

// The mts ("Multiple Tailscale") command runs multiple tailscaled instances for
// development, managing their directories and sockets, and lets you easily direct
// tailscale CLI commands to them.
package main

import (
	"bufio"
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"log"
	"maps"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"slices"
	"strings"
	"sync"
	"syscall"
	"time"

	"tailscale.com/client/local"
	"tailscale.com/types/bools"
	"tailscale.com/types/lazy"
	"tailscale.com/util/mak"
)
func usage(args ...any) {
	var format string
	if len(args) > 0 {
		format, args = args[0].(string), args[1:]
	}
	if format != "" {
		format = strings.TrimSpace(format) + "\n\n"
		fmt.Fprintf(os.Stderr, format, args...)
	}
	io.WriteString(os.Stderr, strings.TrimSpace(`
usage:

   mts server <subcommand>              # manage tailscaled instances
   mts server run                       # run the mts server (parent process of all tailscaled)
   mts server list                      # list all tailscaled and their state
   mts server list <name>               # show details of named instance
   mts server add <name>                # add+start new named tailscaled
   mts server start <name>              # start a previously added tailscaled
   mts server stop <name>               # stop a named tailscaled, keeping its state
   mts server rm <name>                 # stop & remove a named tailscaled
   mts server logs [-f] <name>          # get/follow tailscaled logs

   mts <inst-name> [tailscale CLI args] # run Tailscale CLI against a named instance
   e.g.
      mts gmail1 up
      mts github2 status --json
`)+"\n")
	os.Exit(1)
}
func main() {
	// Don't use flag.Parse here; we mostly just delegate through
	// to the Tailscale CLI.

	if len(os.Args) < 2 {
		usage()
	}
	firstArg, args := os.Args[1], os.Args[2:]
	if firstArg == "server" || firstArg == "s" {
		if err := runMTSServer(args); err != nil {
			log.Fatal(err)
		}
	} else {
		var c Client
		inst := firstArg
		c.RunCommand(inst, args)
	}
}
func runMTSServer(args []string) error {
	if len(args) == 0 {
		usage()
	}
	cmd, args := args[0], args[1:]
	if cmd == "run" {
		var s Server
		return s.Run()
	}

	// Commands other than "run" all use the HTTP client to
	// hit the mts server over its unix socket.
	var c Client

	switch cmd {
	default:
		usage("unknown mts server subcommand %q", cmd)
	case "list", "ls":
		list, err := c.List()
		if err != nil {
			return err
		}
		if len(args) == 0 {
			names := slices.Sorted(maps.Keys(list.Instances))
			for _, name := range names {
				running := list.Instances[name].Running
				fmt.Printf("%10s %s\n", bools.IfElse(running, "RUNNING", "stopped"), name)
			}
		} else {
			for _, name := range args {
				inst, ok := list.Instances[name]
				if !ok {
					return fmt.Errorf("no instance named %q", name)
				}
				je := json.NewEncoder(os.Stdout)
				je.SetIndent("", " ")
				if err := je.Encode(inst); err != nil {
					return err
				}
			}
		}

	case "rm":
		if len(args) == 0 {
			return fmt.Errorf("missing instance name(s) to remove")
		}
		log.SetFlags(0)
		for _, name := range args {
			ok, err := c.Remove(name)
			if err != nil {
				return err
			}
			if ok {
				log.Printf("%s deleted.", name)
			} else {
				log.Printf("%s didn't exist.", name)
			}
		}
	case "stop":
		if len(args) == 0 {
			return fmt.Errorf("missing instance name(s) to stop")
		}
		log.SetFlags(0)
		for _, name := range args {
			ok, err := c.Stop(name)
			if err != nil {
				return err
			}
			if ok {
				log.Printf("%s stopped.", name)
			} else {
				log.Printf("%s didn't exist.", name)
			}
		}
	case "start", "restart":
		list, err := c.List()
		if err != nil {
			return err
		}
		shouldStop := cmd == "restart"
		for _, arg := range args {
			is, ok := list.Instances[arg]
			if !ok {
				return fmt.Errorf("no instance named %q", arg)
			}
			if is.Running {
				if shouldStop {
					if _, err := c.Stop(arg); err != nil {
						return fmt.Errorf("stopping %q: %w", arg, err)
					}
				} else {
					log.SetFlags(0)
					log.Printf("%s already running.", arg)
					continue
				}
			}
			// Creating an existing one starts it up.
			if err := c.Create(arg); err != nil {
				return fmt.Errorf("starting %q: %w", arg, err)
			}
		}
	case "add":
		if len(args) == 0 {
			return fmt.Errorf("missing instance name(s) to add")
		}
		for _, name := range args {
			if err := c.Create(name); err != nil {
				return fmt.Errorf("creating %q: %w", name, err)
			}
		}
	case "logs":
		fs := flag.NewFlagSet("logs", flag.ExitOnError)
		fs.Usage = func() { usage() }
		follow := fs.Bool("f", false, "follow logs")
		fs.Parse(args)
		log.Printf("Parsed; following=%v, args=%q", *follow, fs.Args())
		if fs.NArg() != 1 {
			usage()
		}
		cmd := bools.IfElse(*follow, "tail", "cat")
		args := []string{cmd}
		if *follow {
			args = append(args, "-f")
		}
		path, err := exec.LookPath(cmd)
		if err != nil {
			return fmt.Errorf("looking up %q: %w", cmd, err)
		}
		args = append(args, instLogsFile(fs.Arg(0)))
		log.Fatal(syscall.Exec(path, args, os.Environ()))
	}
	return nil
}
// Client is the mts CLI's client to the mts server, spoken
// as HTTP over the mts unix socket.
type Client struct{}

func (c *Client) client() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				return net.Dial("unix", mtsSock())
			},
		},
	}
}
// getJSON decodes a JSON response body into T, treating any
// non-200 status as an error.
func getJSON[T any](res *http.Response, err error) (T, error) {
	var ret T
	if err != nil {
		return ret, err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(res.Body)
		return ret, fmt.Errorf("unexpected status: %v: %s", res.Status, body)
	}
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return ret, err
	}
	return ret, nil
}
func (c *Client) List() (listResponse, error) {
	return getJSON[listResponse](c.client().Get("http://mts/list"))
}

func (c *Client) Remove(name string) (found bool, err error) {
	return getJSON[bool](c.client().PostForm("http://mts/rm", url.Values{
		"name": []string{name},
	}))
}

func (c *Client) Stop(name string) (found bool, err error) {
	return getJSON[bool](c.client().PostForm("http://mts/stop", url.Values{
		"name": []string{name},
	}))
}
func (c *Client) Create(name string) error {
	req, err := http.NewRequest("POST", "http://mts/create/"+name, nil)
	if err != nil {
		return err
	}
	resp, err := c.client().Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("unexpected status: %v: %s", resp.Status, body)
	}
	return nil
}
func (c *Client) RunCommand(name string, args []string) {
	sock := instSock(name)
	lc := &local.Client{
		Socket:        sock,
		UseSocketOnly: true,
	}
	// Probe the instance's tailscaled socket first so we can give a
	// friendlier error than the CLI would.
	probeCtx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	if _, err := lc.StatusWithoutPeers(probeCtx); err != nil {
		log.Fatalf("instance %q not running? start with 'mts server start %q'; got error: %v", name, name, err)
	}
	args = append([]string{"run", "tailscale.com/cmd/tailscale", "--socket=" + sock}, args...)
	cmd := exec.Command("go", args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin
	err := cmd.Run()
	if err == nil {
		os.Exit(0)
	}
	if exitErr, ok := err.(*exec.ExitError); ok {
		os.Exit(exitErr.ExitCode())
	}
	panic(err)
}
// Server is the long-running mts parent process that owns and
// supervises all tailscaled child processes.
type Server struct {
	lazyTailscaled lazy.GValue[string]

	mu   sync.Mutex
	cmds map[string]*exec.Cmd // running tailscaled instances
}
// tailscaled returns the install target path of the tailscaled
// binary, resolving (and caching) it via "go list" on first use.
func (s *Server) tailscaled() string {
	v, err := s.lazyTailscaled.GetErr(func() (string, error) {
		out, err := exec.Command("go", "list", "-f", "{{.Target}}", "tailscale.com/cmd/tailscaled").CombinedOutput()
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(out)), nil
	})
	if err != nil {
		panic(err)
	}
	return v
}
func (s *Server) Run() error {
	if err := os.MkdirAll(mtsRoot(), 0700); err != nil {
		return err
	}
	sock := mtsSock()
	os.Remove(sock)
	log.Printf("Multi-Tailscaled Server running; listening on %q ...", sock)
	ln, err := net.Listen("unix", sock)
	if err != nil {
		return err
	}
	return http.Serve(ln, s)
}
var validNameRx = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)

func validInstanceName(name string) bool {
	return validNameRx.MatchString(name)
}

func (s *Server) InstanceRunning(name string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, ok := s.cmds[name]
	return ok
}
func (s *Server) Stop(name string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if cmd, ok := s.cmds[name]; ok {
		if err := cmd.Process.Kill(); err != nil {
			log.Printf("error killing %q: %v", name, err)
		}
		delete(s.cmds, name)
	}
}
func (s *Server) RunInstance(name string) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if _, ok := s.cmds[name]; ok {
		return fmt.Errorf("instance %q already running", name)
	}

	if !validInstanceName(name) {
		return fmt.Errorf("invalid instance name %q", name)
	}
	dir := filepath.Join(mtsRoot(), name)
	if err := os.MkdirAll(dir, 0700); err != nil {
		return err
	}

	env := os.Environ()
	env = append(env, "TS_DEBUG_LOG_RATE=all")
	if ef, err := os.Open(instEnvFile(name)); err == nil {
		defer ef.Close()
		sc := bufio.NewScanner(ef)
		for sc.Scan() {
			t := strings.TrimSpace(sc.Text())
			if strings.HasPrefix(t, "#") || !strings.Contains(t, "=") {
				continue
			}
			env = append(env, t)
		}
	} else if os.IsNotExist(err) {
		// Write an example one.
		os.WriteFile(instEnvFile(name), fmt.Appendf(nil, "# Example mts env.txt file; uncomment/add stuff you want for %q\n\n#TS_DEBUG_MAP=1\n#TS_DEBUG_REGISTER=1\n#TS_NO_LOGS_NO_SUPPORT=1\n", name), 0600)
	}

	extraArgs := []string{"--verbose=1"}
	if af, err := os.Open(instArgsFile(name)); err == nil {
		extraArgs = nil // clear default args
		defer af.Close()
		sc := bufio.NewScanner(af)
		for sc.Scan() {
			t := strings.TrimSpace(sc.Text())
			if strings.HasPrefix(t, "#") || t == "" {
				continue
			}
			extraArgs = append(extraArgs, t)
		}
	} else if os.IsNotExist(err) {
		// Write an example one.
		os.WriteFile(instArgsFile(name), fmt.Appendf(nil, "# Example mts args.txt file for instance %q.\n# One line per extra arg to tailscaled; no magic string quoting\n\n--verbose=1\n#--socks5-server=127.0.0.1:5000\n", name), 0600)
	}

	log.Printf("Running Tailscale daemon %q in %q", name, dir)

	args := []string{
		"--tun=userspace-networking",
		"--statedir=" + filepath.Join(dir),
		"--socket=" + filepath.Join(dir, "tailscaled.sock"),
	}
	args = append(args, extraArgs...)

	cmd := exec.Command(s.tailscaled(), args...)
	cmd.Dir = dir
	cmd.Env = env

	out, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	cmd.Stderr = cmd.Stdout

	logs := instLogsFile(name)
	logFile, err := os.OpenFile(logs, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_TRUNC, 0644)
	if err != nil {
		return fmt.Errorf("opening logs file: %w", err)
	}

	go func() {
		bs := bufio.NewScanner(out)
		for bs.Scan() {
			// TODO(bradfitz): record in memory too, serve via HTTP
			line := strings.TrimSpace(bs.Text())
			fmt.Fprintf(logFile, "%s\n", line)
			fmt.Printf("tailscaled[%s]: %s\n", name, line)
		}
	}()

	if err := cmd.Start(); err != nil {
		return err
	}
	go func() {
		err := cmd.Wait()
		logFile.Close()
		log.Printf("Tailscale daemon %q exited: %v", name, err)
		s.mu.Lock()
		defer s.mu.Unlock()
		delete(s.cmds, name)
	}()

	mak.Set(&s.cmds, name, cmd)
	return nil
}
type listResponse struct {
	// Instances maps instance name to its details.
	Instances map[string]listResponseInstance `json:"instances"`
}

type listResponseInstance struct {
	Name    string `json:"name"`
	Dir     string `json:"dir"`
	Sock    string `json:"sock"`
	Running bool   `json:"running"`
	Env     string `json:"env"`
	Args    string `json:"args"`
	Logs    string `json:"logs"`
}
func writeJSON(w http.ResponseWriter, v any) {
	w.Header().Set("Content-Type", "application/json")
	e := json.NewEncoder(w)
	e.SetIndent("", " ")
	e.Encode(v)
}
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/list" {
		var res listResponse
		for _, name := range s.InstanceNames() {
			mak.Set(&res.Instances, name, listResponseInstance{
				Name:    name,
				Dir:     instDir(name),
				Sock:    instSock(name),
				Running: s.InstanceRunning(name),
				Env:     instEnvFile(name),
				Args:    instArgsFile(name),
				Logs:    instLogsFile(name),
			})
		}
		writeJSON(w, res)
		return
	}
	if r.URL.Path == "/rm" || r.URL.Path == "/stop" {
		shouldRemove := r.URL.Path == "/rm"
		if r.Method != "POST" {
			http.Error(w, "POST required", http.StatusMethodNotAllowed)
			return
		}
		target := r.FormValue("name")
		var ok bool
		for _, name := range s.InstanceNames() {
			if name != target {
				continue
			}
			ok = true
			s.Stop(name)
			if shouldRemove {
				if err := os.RemoveAll(instDir(name)); err != nil {
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
			}
			break
		}
		writeJSON(w, ok)
		return
	}
	if inst, ok := strings.CutPrefix(r.URL.Path, "/create/"); ok {
		if !s.InstanceRunning(inst) {
			if err := s.RunInstance(inst); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		fmt.Fprintf(w, "OK\n")
		return
	}
	if r.URL.Path == "/" {
		fmt.Fprintf(w, "This is mts, the multi-tailscaled server.\n")
		return
	}
	http.NotFound(w, r)
}
// InstanceNames returns the names of all instances on disk,
// running or not.
func (s *Server) InstanceNames() []string {
	var ret []string
	des, err := os.ReadDir(mtsRoot())
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		panic(err)
	}
	for _, de := range des {
		if !de.IsDir() {
			continue
		}
		ret = append(ret, de.Name())
	}
	return ret
}
func mtsRoot() string {
	dir, err := os.UserConfigDir()
	if err != nil {
		panic(err)
	}
	return filepath.Join(dir, "multi-tailscale-dev")
}

func instDir(name string) string {
	return filepath.Join(mtsRoot(), name)
}

func instSock(name string) string {
	return filepath.Join(instDir(name), "tailscaled.sock")
}

func instEnvFile(name string) string {
	return filepath.Join(mtsRoot(), name, "env.txt")
}

func instArgsFile(name string) string {
	return filepath.Join(mtsRoot(), name, "args.txt")
}

func instLogsFile(name string) string {
	return filepath.Join(mtsRoot(), name, "logs.txt")
}

func mtsSock() string {
	return filepath.Join(mtsRoot(), "mts.sock")
}
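Client.client above uses the standard trick for speaking HTTP over a unix socket: the transport's DialContext ignores the URL's host and always dials the socket, so the "http://mts" host in the request URLs is only a routing placeholder. A self-contained version of the same technique (the /tmp/mts.sock path is an assumption for the example, not the path mts uses):

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
)

func main() {
	c := &http.Client{
		Transport: &http.Transport{
			// Dial the fixed socket regardless of the request's host.
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", "/tmp/mts.sock") // assumed path
			},
		},
	}
	res, err := c.Get("http://mts/list")
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Printf("%s\n", body)
}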
@@ -1580,6 +1580,11 @@ type fwdDNSLinkSelector struct {
}

func (ls fwdDNSLinkSelector) PickLink(ip netip.Addr) (linkName string) {
	// sandboxed macOS needs some extra hand-holding for loopback addresses.
	if ip.IsLoopback() && version.IsSandboxedMacOS() {
		return "lo0"
	}

	if ls.ue.isDNSIPOverTailscale.Load()(ip) {
		return ls.tunName
	}
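For reference, netip.Addr.IsLoopback covers both IPv4 and IPv6 loopback, so the new check pins 127.0.0.1 and ::1 alike to lo0 on sandboxed macOS. A quick standalone check of the predicate:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	fmt.Println(netip.MustParseAddr("127.0.0.1").IsLoopback())  // true
	fmt.Println(netip.MustParseAddr("::1").IsLoopback())        // true
	fmt.Println(netip.MustParseAddr("100.64.0.1").IsLoopback()) // false
}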