operator_command.go
package main

import (
	"context"
	"time"

	pipeline "github.com/ccremer/go-command-pipeline"
	"github.com/go-logr/logr"
	"github.com/urfave/cli/v2"
	"github.com/vshn/provider-exoscale/apis"
	"github.com/vshn/provider-exoscale/operator"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)
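
// operatorCommand holds the CLI flags and runtime dependencies for running the provider in operator mode.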
type operatorCommand struct {
	LeaderElectionEnabled bool
	WebhookCertDir        string
	manager               manager.Manager
	kubeconfig            *rest.Config
}
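
// newOperatorCommand returns the cli.Command that starts the provider in operator mode.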
func newOperatorCommand() *cli.Command {
	command := &operatorCommand{}
	return &cli.Command{
		Name:   "operator",
		Usage:  "Start provider in operator mode",
		Action: command.execute,
		Flags: []cli.Flag{
			newLeaderElectionEnabledFlag(&command.LeaderElectionEnabled),
			newWebhookTLSCertDirFlag(&command.WebhookCertDir),
		},
	}
}
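
// execute sets up the controller manager via a step pipeline and runs it until the context is cancelled.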
func (c *operatorCommand) execute(ctx *cli.Context) error {
	_ = LogMetadata(ctx)
	log := logr.FromContextOrDiscard(ctx.Context).WithName(ctx.Command.Name)
	log.Info("Setting up controllers", "config", c)
	ctrl.SetLogger(log)
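
	// Build the startup sequence as a pipeline of named steps.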
	p := pipeline.NewPipeline[context.Context]()
	p.WithBeforeHooks(
		// Log each step name at debug level before it runs.
		func(step pipeline.Step[context.Context]) {
			log.V(1).Info(step.Name)
		},
	)
	p.AddStepFromFunc("get config", func(ctx context.Context) error {
		cfg, err := ctrl.GetConfig()
		c.kubeconfig = cfg
		return err
	})
p.AddStepFromFunc("create manager", func(ctx context.Context) error {
mgr, err := ctrl.NewManager(c.kubeconfig, ctrl.Options{
// controller-runtime uses both ConfigMaps and Leases for leader election by default.
// Leases expire after 15 seconds, with a 10-second renewal deadline.
// We've observed leader loss due to renewal deadlines being exceeded when under high load - i.e.
// hundreds of reconciles per second and ~200rps to the API server.
// Switching to Leases only and longer leases appears to alleviate this.
LeaderElection: c.LeaderElectionEnabled,
LeaderElectionID: "leader-election-provider-exoscale",
LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
LeaseDuration: func() *time.Duration { d := 60 * time.Second; return &d }(),
RenewDeadline: func() *time.Duration { d := 50 * time.Second; return &d }(),
WebhookServer: webhook.NewServer(webhook.Options{
Port: 9443,
CertDir: c.WebhookCertDir,
}),
})
c.manager = mgr
return err
})
	p.AddStep(p.WithNestedSteps("register schemes", nil,
		p.NewStep("register API schemes", func(ctx context.Context) error {
			return apis.AddToScheme(c.manager.GetScheme())
		}),
	))
	p.AddStepFromFunc("setup controllers", func(ctx context.Context) error {
		return operator.SetupControllers(c.manager)
	})
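	// The webhook server is only set up when a certificate directory has been configured.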
	p.AddStep(p.When(pipeline.Bool[context.Context](c.WebhookCertDir != ""), "setup webhook server",
		func(ctx context.Context) error {
			return operator.SetupWebhooks(c.manager)
		}))
	p.AddStepFromFunc("run manager", func(ctx context.Context) error {
		log.Info("Starting manager")
		return c.manager.Start(ctx)
	})
	return p.RunWithContext(ctx.Context)
}