@@ -11,9 +11,11 @@ import (
 	autopilot "github.com/k0sproject/k0s/pkg/apis/autopilot/v1beta2"
 	"github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1"
 	"github.com/k0sproject/k0s/pkg/etcd"
+	embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli/v2"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/replicatedhq/embedded-cluster/pkg/defaults"
@@ -55,6 +57,8 @@
 	k0s = "/usr/local/bin/k0s"
 )
 
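+// haWarningMessage is printed when a reset would leave a high-availability
+// cluster with fewer than three controller nodes.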
+var haWarningMessage = "WARNING: High-availability clusters must maintain at least three controller nodes, but resetting this node will leave only two. This can lead to a loss of functionality and non-recoverable failures. You should re-add a third node as soon as possible."
+
 // deleteNode removes the node from the cluster
 func (h *hostInfo) deleteNode(ctx context.Context) error {
 	if h.KclientError != nil {
@@ -287,6 +291,43 @@ func checkErrPrompt(c *cli.Context, err error) bool {
 	return prompts.New().Confirm("Do you want to continue anyway?", false)
 }
 
+// maybePrintHAWarning prints a warning message when the user is resetting a
+// node in a high-availability cluster that has only 3 control nodes.
+func maybePrintHAWarning(c *cli.Context) error {
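+	// If no kubeconfig exists on disk, this node is not part of a cluster
+	// (or has already been reset), so there is nothing to warn about.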
+	kubeconfig := defaults.PathToKubeConfig()
+	if _, err := os.Stat(kubeconfig); err != nil {
+		return nil
+	}
+
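+	// Build a client and register the embedded-cluster types with its
+	// scheme so the Installation custom resource can be decoded.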
+	os.Setenv("KUBECONFIG", kubeconfig)
+	kubecli, err := kubeutils.KubeClient()
+	if err != nil {
+		return fmt.Errorf("unable to create kube client: %w", err)
+	}
+	embeddedclusterv1beta1.AddToScheme(kubecli.Scheme())
+
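+	// The warning only applies to clusters installed in HA mode.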
+	if in, err := kubeutils.GetLatestInstallation(c.Context, kubecli); err != nil {
+		return fmt.Errorf("unable to get installation: %w", err)
+	} else if !in.Spec.HighAvailability {
+		return nil
+	}
+
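+	// Count the controllers by their well-known control-plane role label.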
+	opts := &client.ListOptions{
+		LabelSelector: labels.SelectorFromSet(
+			labels.Set{"node-role.kubernetes.io/control-plane": "true"},
+		),
+	}
+	var nodes corev1.NodeList
+	if err := kubecli.List(c.Context, &nodes, opts); err != nil {
+		return fmt.Errorf("unable to list nodes: %w", err)
+	}
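+	// With exactly three controllers, removing this one drops the cluster
+	// below the HA minimum, so warn before proceeding.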
+	if len(nodes.Items) == 3 {
+		logrus.Warn(haWarningMessage)
+		logrus.Info("")
+	}
+	return nil
+}
+
 var resetCommand = &cli.Command{
 	Name: "reset",
 	Before: func(c *cli.Context) error {
@@ -315,6 +356,10 @@ var resetCommand = &cli.Command{
 	},
 	Usage: fmt.Sprintf("Remove %s from the current node", binName),
 	Action: func(c *cli.Context) error {
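+		// Surface the HA warning first; a failure here aborts the reset
+		// unless --force was passed.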
+		if err := maybePrintHAWarning(c); err != nil && !c.Bool("force") {
+			return err
+		}
+
 		logrus.Info("This will remove this node from the cluster and completely reset it, removing all data stored on the node.")
 		logrus.Info("Do not reset another node until this is complete.")
 		if !c.Bool("force") && !c.Bool("no-prompt") && !prompts.New().Confirm("Do you want to continue?", false) {
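For reference, a minimal standalone sketch of the controller-runtime pattern the new function relies on: build a client whose scheme knows the types being read, then list nodes by the control-plane role label. The project helpers in the diff (kubeutils.KubeClient, kubeutils.GetLatestInstallation) are assumed to wrap similar wiring; the sketch below uses plain controller-runtime instead, so treat the setup as illustrative rather than the project's actual implementation.

    package main

    import (
    	"context"
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/labels"
    	"k8s.io/apimachinery/pkg/runtime"
    	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    	"sigs.k8s.io/controller-runtime/pkg/client/config"
    )

    func main() {
    	// The client can only decode types registered in its scheme; CRD types
    	// (like the Installation resource above) need their own AddToScheme call.
    	scheme := runtime.NewScheme()
    	if err := clientgoscheme.AddToScheme(scheme); err != nil {
    		panic(err)
    	}

    	cli, err := client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme})
    	if err != nil {
    		panic(err)
    	}

    	// Select control-plane nodes via the same role label used in the diff.
    	sel := labels.SelectorFromSet(labels.Set{"node-role.kubernetes.io/control-plane": "true"})
    	var nodes corev1.NodeList
    	if err := cli.List(context.Background(), &nodes, &client.ListOptions{LabelSelector: sel}); err != nil {
    		panic(err)
    	}
    	fmt.Printf("control-plane nodes: %d\n", len(nodes.Items))
    }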