
Commit 5903907

detangles reconciler and pool manager
1 parent 7464dae
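
This commit extracts all node-pool management out of the launch controller: the PoolManager interface, the PoolStatus struct, and the ConcretePoolManager implementation with its helpers move from pkg/controller/launch/controller.go into a file of their own, and the constructor is renamed from newNodePoolManager to newPoolManager. The reconciler keeps only the reconciliation loop and now talks to the pool manager solely through the PoolManager interface.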

2 files changed: +184 −174 lines


pkg/controller/launch/controller.go

Lines changed: 1 addition & 174 deletions
@@ -3,48 +3,21 @@ package launch
 import (
     "github.com/sapcc/kubernikus/pkg/api/models"
     "github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1"
-    "github.com/sapcc/kubernikus/pkg/client/openstack"
     "github.com/sapcc/kubernikus/pkg/controller/base"
     "github.com/sapcc/kubernikus/pkg/controller/config"
     "github.com/sapcc/kubernikus/pkg/controller/metrics"
-    "github.com/sapcc/kubernikus/pkg/templates"
 
     "github.com/go-kit/kit/log"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/tools/record"
 )
 
-type PoolManager interface {
-    GetStatus() (*PoolStatus, error)
-    SetStatus(*PoolStatus) error
-    CreateNode() (string, error)
-    DeleteNode(string) error
-}
-
 type LaunchReconciler struct {
     config.Clients
 
     Recorder record.EventRecorder
     Logger   log.Logger
 }
 
-type PoolStatus struct {
-    Nodes    []string
-    Running  int
-    Starting int
-    Stopping int
-    Needed   int
-    UnNeeded int
-}
-
-type ConcretePoolManager struct {
-    config.Clients
-
-    Kluster *v1.Kluster
-    Pool    *models.NodePool
-    Logger  log.Logger
-}
-
 func NewController(factories config.Factories, clients config.Clients, recorder record.EventRecorder, logger log.Logger) base.Controller {
     logger = log.With(logger,
         "controller", "launch")
@@ -85,7 +58,7 @@ func (lr *LaunchReconciler) Reconcile(kluster *v1.Kluster) (requeueRequested boo
 
 func (lr *LaunchReconciler) reconcilePool(kluster *v1.Kluster, pool *models.NodePool) (status *PoolStatus, requeue bool, err error) {
 
-    pm := lr.newNodePoolManager(kluster, pool)
+    pm := lr.newPoolManager(kluster, pool)
     status, err = pm.GetStatus()
     if err != nil {
         return
@@ -127,149 +100,3 @@ func (lr *LaunchReconciler) reconcilePool(kluster *v1.Kluster, pool *models.Node
     err = pm.SetStatus(status)
     return
 }
-
-func (lr *LaunchReconciler) newNodePoolManager(kluster *v1.Kluster, pool *models.NodePool) PoolManager {
-    logger := log.With(lr.Logger,
-        "kluster", kluster.Spec.Name,
-        "project", kluster.Account(),
-        "pool", pool.Name)
-
-    var pm PoolManager
-    pm = &ConcretePoolManager{lr.Clients, kluster, pool, logger}
-    pm = &EventingPoolManager{pm, kluster, lr.Recorder}
-    pm = &LoggingPoolManager{pm, logger}
-    pm = &InstrumentingPoolManager{pm,
-        metrics.LaunchOperationsLatency,
-        metrics.LaunchOperationsTotal,
-        metrics.LaunchSuccessfulOperationsTotal,
-        metrics.LaunchFailedOperationsTotal,
-    }
-
-    return pm
-}
-
-func (cpm *ConcretePoolManager) GetStatus() (status *PoolStatus, err error) {
-    status = &PoolStatus{}
-    nodes, err := cpm.Clients.Openstack.GetNodes(cpm.Kluster, cpm.Pool)
-    if err != nil {
-        return status, err
-    }
-
-    return &PoolStatus{
-        Nodes:    cpm.nodeIDs(nodes),
-        Running:  cpm.running(nodes),
-        Starting: cpm.starting(nodes),
-        Stopping: cpm.stopping(nodes),
-        Needed:   cpm.needed(nodes),
-        UnNeeded: cpm.unNeeded(nodes),
-    }, nil
-}
-
-func (cpm *ConcretePoolManager) SetStatus(status *PoolStatus) error {
-    newInfo := models.NodePoolInfo{
-        Name:        cpm.Pool.Name,
-        Size:        cpm.Pool.Size,
-        Running:     int64(status.Running + status.Starting),
-        Healthy:     int64(status.Running),
-        Schedulable: int64(status.Running),
-    }
-
-    copy, err := cpm.Clients.Kubernikus.Kubernikus().Klusters(cpm.Kluster.Namespace).Get(cpm.Kluster.Name, metav1.GetOptions{})
-    if err != nil {
-        return err
-    }
-
-    for i, curInfo := range copy.Status.NodePools {
-        if curInfo.Name == newInfo.Name {
-            if curInfo == newInfo {
-                return nil
-            }
-
-            copy.Status.NodePools[i] = newInfo
-            _, err = cpm.Clients.Kubernikus.Kubernikus().Klusters(copy.Namespace).Update(copy)
-            return err
-        }
-    }
-
-    return nil
-}
-
-func (cpm *ConcretePoolManager) CreateNode() (id string, err error) {
-    secret, err := cpm.Clients.Kubernetes.CoreV1().Secrets(cpm.Kluster.Namespace).Get(cpm.Kluster.GetName(), metav1.GetOptions{})
-    if err != nil {
-        return "", err
-    }
-
-    userdata, err := templates.Ignition.GenerateNode(cpm.Kluster, secret)
-    if err != nil {
-        return "", err
-    }
-
-    id, err = cpm.Clients.Openstack.CreateNode(cpm.Kluster, cpm.Pool, userdata)
-    if err != nil {
-        return "", err
-    }
-
-    return id, nil
-}
-
-func (cpm *ConcretePoolManager) DeleteNode(id string) (err error) {
-    if err = cpm.Clients.Openstack.DeleteNode(cpm.Kluster, id); err != nil {
-        return err
-    }
-    return nil
-}
-
-func (cpm *ConcretePoolManager) nodeIDs(nodes []openstack.Node) []string {
-    result := []string{}
-    for _, n := range nodes {
-        result = append(result, n.ID)
-    }
-    return result
-}
-
-func (cpm *ConcretePoolManager) starting(nodes []openstack.Node) int {
-    var count int = 0
-    for _, n := range nodes {
-        if n.Starting() {
-            count = count + 1
-        }
-    }
-    return count
-}
-
-func (cpm *ConcretePoolManager) stopping(nodes []openstack.Node) int {
-    var count int = 0
-    for _, n := range nodes {
-        if n.Stopping() {
-            count = count + 1
-        }
-    }
-    return count
-}
-
-func (cpm *ConcretePoolManager) running(nodes []openstack.Node) int {
-    var count int = 0
-    for _, n := range nodes {
-        if n.Running() {
-            count = count + 1
-        }
-    }
-    return count
-}
-
-func (cpm *ConcretePoolManager) needed(nodes []openstack.Node) int {
-    needed := int(cpm.Pool.Size) - cpm.running(nodes) - cpm.starting(nodes)
-    if needed < 0 {
-        return 0
-    }
-    return needed
-}
-
-func (cpm ConcretePoolManager) unNeeded(nodes []openstack.Node) int {
-    unneeded := cpm.running(nodes) + cpm.starting(nodes) - int(cpm.Pool.Size)
-    if unneeded < 0 {
-        return 0
-    }
-    return unneeded
-}
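
The code removed above reappears below, unchanged apart from the constructor rename, in a new file holding the pool manager.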
pkg/controller/launch/pool_manager.go (new file; the filename is not shown in this capture and is assumed from its contents)

Lines changed: 183 additions & 0 deletions

@@ -0,0 +1,183 @@
+package launch
+
+import (
+    "github.com/sapcc/kubernikus/pkg/api/models"
+    "github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1"
+    "github.com/sapcc/kubernikus/pkg/client/openstack"
+    "github.com/sapcc/kubernikus/pkg/controller/config"
+    "github.com/sapcc/kubernikus/pkg/controller/metrics"
+    "github.com/sapcc/kubernikus/pkg/templates"
+
+    "github.com/go-kit/kit/log"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type PoolManager interface {
+    GetStatus() (*PoolStatus, error)
+    SetStatus(*PoolStatus) error
+    CreateNode() (string, error)
+    DeleteNode(string) error
+}
+
+type PoolStatus struct {
+    Nodes    []string
+    Running  int
+    Starting int
+    Stopping int
+    Needed   int
+    UnNeeded int
+}
+
+type ConcretePoolManager struct {
+    config.Clients
+
+    Kluster *v1.Kluster
+    Pool    *models.NodePool
+    Logger  log.Logger
+}
+
+func (lr *LaunchReconciler) newPoolManager(kluster *v1.Kluster, pool *models.NodePool) PoolManager {
+    logger := log.With(lr.Logger,
+        "kluster", kluster.Spec.Name,
+        "project", kluster.Account(),
+        "pool", pool.Name)
+
+    var pm PoolManager
+    pm = &ConcretePoolManager{lr.Clients, kluster, pool, logger}
+    pm = &EventingPoolManager{pm, kluster, lr.Recorder}
+    pm = &LoggingPoolManager{pm, logger}
+    pm = &InstrumentingPoolManager{pm,
+        metrics.LaunchOperationsLatency,
+        metrics.LaunchOperationsTotal,
+        metrics.LaunchSuccessfulOperationsTotal,
+        metrics.LaunchFailedOperationsTotal,
+    }
+
+    return pm
+}
+
+func (cpm *ConcretePoolManager) GetStatus() (status *PoolStatus, err error) {
+    status = &PoolStatus{}
+    nodes, err := cpm.Clients.Openstack.GetNodes(cpm.Kluster, cpm.Pool)
+    if err != nil {
+        return status, err
+    }
+
+    return &PoolStatus{
+        Nodes:    cpm.nodeIDs(nodes),
+        Running:  cpm.running(nodes),
+        Starting: cpm.starting(nodes),
+        Stopping: cpm.stopping(nodes),
+        Needed:   cpm.needed(nodes),
+        UnNeeded: cpm.unNeeded(nodes),
+    }, nil
+}
+
+func (cpm *ConcretePoolManager) SetStatus(status *PoolStatus) error {
+    newInfo := models.NodePoolInfo{
+        Name:        cpm.Pool.Name,
+        Size:        cpm.Pool.Size,
+        Running:     int64(status.Running + status.Starting),
+        Healthy:     int64(status.Running),
+        Schedulable: int64(status.Running),
+    }
+
+    copy, err := cpm.Clients.Kubernikus.Kubernikus().Klusters(cpm.Kluster.Namespace).Get(cpm.Kluster.Name, metav1.GetOptions{})
+    if err != nil {
+        return err
+    }
+
+    for i, curInfo := range copy.Status.NodePools {
+        if curInfo.Name == newInfo.Name {
+            if curInfo == newInfo {
+                return nil
+            }
+
+            copy.Status.NodePools[i] = newInfo
+            _, err = cpm.Clients.Kubernikus.Kubernikus().Klusters(copy.Namespace).Update(copy)
+            return err
+        }
+    }
+
+    return nil
+}
+
+func (cpm *ConcretePoolManager) CreateNode() (id string, err error) {
+    secret, err := cpm.Clients.Kubernetes.CoreV1().Secrets(cpm.Kluster.Namespace).Get(cpm.Kluster.GetName(), metav1.GetOptions{})
+    if err != nil {
+        return "", err
+    }
+
+    userdata, err := templates.Ignition.GenerateNode(cpm.Kluster, secret)
+    if err != nil {
+        return "", err
+    }
+
+    id, err = cpm.Clients.Openstack.CreateNode(cpm.Kluster, cpm.Pool, userdata)
+    if err != nil {
+        return "", err
+    }
+
+    return id, nil
+}
+
+func (cpm *ConcretePoolManager) DeleteNode(id string) (err error) {
+    if err = cpm.Clients.Openstack.DeleteNode(cpm.Kluster, id); err != nil {
+        return err
+    }
+    return nil
+}
+
+func (cpm *ConcretePoolManager) nodeIDs(nodes []openstack.Node) []string {
+    result := []string{}
+    for _, n := range nodes {
+        result = append(result, n.ID)
+    }
+    return result
+}
+
+func (cpm *ConcretePoolManager) starting(nodes []openstack.Node) int {
+    var count int = 0
+    for _, n := range nodes {
+        if n.Starting() {
+            count = count + 1
+        }
+    }
+    return count
+}
+
+func (cpm *ConcretePoolManager) stopping(nodes []openstack.Node) int {
+    var count int = 0
+    for _, n := range nodes {
+        if n.Stopping() {
+            count = count + 1
+        }
+    }
+    return count
+}
+
+func (cpm *ConcretePoolManager) running(nodes []openstack.Node) int {
+    var count int = 0
+    for _, n := range nodes {
+        if n.Running() {
+            count = count + 1
+        }
+    }
+    return count
+}
+
+func (cpm *ConcretePoolManager) needed(nodes []openstack.Node) int {
+    needed := int(cpm.Pool.Size) - cpm.running(nodes) - cpm.starting(nodes)
+    if needed < 0 {
+        return 0
+    }
+    return needed
+}
+
+func (cpm ConcretePoolManager) unNeeded(nodes []openstack.Node) int {
+    unneeded := cpm.running(nodes) + cpm.starting(nodes) - int(cpm.Pool.Size)
+    if unneeded < 0 {
+        return 0
+    }
+    return unneeded
+}
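
The new file makes the layering explicit: newPoolManager wraps ConcretePoolManager, which does the actual OpenStack and Kubernetes work, in EventingPoolManager, LoggingPoolManager, and InstrumentingPoolManager. Each wrapper satisfies the same PoolManager interface and adds one cross-cutting concern: Kubernetes events, structured logs, or Prometheus metrics. The sketch below is illustrative only, not code from this commit: it shows how the reconciler's loop, only partially visible in the hunks above, would plausibly drive the returned PoolManager; the loop bodies and the choice of which nodes to delete are assumptions.

func (lr *LaunchReconciler) reconcilePoolSketch(kluster *v1.Kluster, pool *models.NodePool) error {
    // Illustrative sketch, not part of the commit. Every call below passes
    // through the Instrumenting, Logging, and Eventing layers before it
    // reaches ConcretePoolManager.
    pm := lr.newPoolManager(kluster, pool)

    status, err := pm.GetStatus()
    if err != nil {
        return err
    }

    // Boot nodes until Running+Starting matches the pool's target Size.
    for i := 0; i < status.Needed; i++ {
        if _, err := pm.CreateNode(); err != nil {
            return err
        }
    }

    // Terminate surplus nodes; selecting victims from status.Nodes is an
    // assumption made for this sketch.
    for i := 0; i < status.UnNeeded && i < len(status.Nodes); i++ {
        if err := pm.DeleteNode(status.Nodes[i]); err != nil {
            return err
        }
    }

    // Persist the observed counts into the Kluster's NodePool status.
    return pm.SetStatus(status)
}

Because every layer implements PoolManager, decorators can be added, dropped, or reordered without touching the reconciler or the concrete implementation, which is the payoff of detangling the two.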
