@@ -31,12 +31,12 @@ As a workaround, update the value of the `spec.persistentVolumeClaimRetentionPol
31
31
[source,terminal]
32
32
----
33
33
# Just in case
34
- export KUBECONFIG=${MGMT_KUBECONFIG}
34
+ $ export KUBECONFIG=${MGMT_KUBECONFIG}
35
35
36
36
# Scale down deployments
37
- oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all
38
- oc scale statefulset.apps -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all
39
- sleep 15
37
+ $ oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all
38
+ $ oc scale statefulset.apps -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all
39
+ $ sleep 15
40
40
----
41
41
42
42
. Delete the `NodePool` objects by entering these commands:
@@ -60,17 +60,17 @@ for m in $(oc get machines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do
60
60
oc delete -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} || true
61
61
done
62
62
63
- oc delete machineset -n ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME} -- all || true
63
+ $ oc delete machineset -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all || true
64
64
----
65
65
66
66
. Delete the cluster object by entering these commands:
67
67
+
68
68
[source,terminal]
69
69
----
70
70
# Cluster
71
- C_NAME=$(oc get cluster -n ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME} -o name)
72
- oc patch -n ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME} ${C_NAME} -- type=json -- patch='[ { "op":"remove", "path": "/metadata/finalizers" }]'
73
- oc delete cluster.cluster.x-k8s.io -n ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME} -- all
71
+ $ C_NAME=$(oc get cluster -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name)
72
+ $ oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${C_NAME} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]'
73
+ $ oc delete cluster.cluster.x-k8s.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all
74
74
----
75
75
76
76
. Delete the AWS machines (Kubernetes objects) by entering these commands. Do not worry about deleting the real AWS machines. The cloud instances will not be affected.
90
90
[source,terminal]
91
91
----
92
92
# Delete HCP and ControlPlane HC NS
93
- oc patch -n ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME} hostedcontrolplane.hypershift.openshift.io ${HC_CLUSTER_NAME} -- type=json -- patch='[ { "op":"remove", "path": "/metadata/finalizers" }]'
94
- oc delete hostedcontrolplane.hypershift.openshift.io -n ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME} -- all
95
- oc delete ns ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME} || true
93
+ $ oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} hostedcontrolplane.hypershift.openshift.io ${HC_CLUSTER_NAME} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]'
94
+ $ oc delete hostedcontrolplane.hypershift.openshift.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all
95
+ $ oc delete ns ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} || true
96
96
----
97
97
98
98
. Delete the `HostedCluster` and HC namespace objects by entering these commands:
99
99
+
100
100
[source,terminal]
101
101
----
102
102
# Delete HC and HC Namespace
103
- oc -n ${HC_CLUSTER_NS} patch hostedclusters ${HC_CLUSTER_NAME} -p '{"metadata":{"finalizers":null}}' -- type merge || true
104
- oc delete hc -n ${HC_CLUSTER_NS} ${HC_CLUSTER_NAME} || true
105
- oc delete ns ${HC_CLUSTER_NS} || true
103
+ $ oc -n ${HC_CLUSTER_NS} patch hostedclusters ${HC_CLUSTER_NAME} -p '{"metadata":{"finalizers":null}}' --type merge || true
104
+ $ oc delete hc -n ${HC_CLUSTER_NS} ${HC_CLUSTER_NAME} || true
105
+ $ oc delete ns ${HC_CLUSTER_NS} || true
106
106
----
107
107
108
108
.Verification
@@ -112,17 +112,17 @@ oc delete ns ${HC_CLUSTER_NS} || true
112
112
[source,terminal]
113
113
----
114
114
# Validations
115
- export KUBECONFIG=${MGMT2_KUBECONFIG}
115
+ $ export KUBECONFIG=${MGMT2_KUBECONFIG}
116
116
117
- oc get hc -n ${HC_CLUSTER_NS}
118
- oc get np -n ${HC_CLUSTER_NS}
119
- oc get pod -n ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME}
120
- oc get machines -n ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME}
117
+ $ oc get hc -n ${HC_CLUSTER_NS}
118
+ $ oc get np -n ${HC_CLUSTER_NS}
119
+ $ oc get pod -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}
120
+ $ oc get machines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}
121
121
122
122
# Inside the HostedCluster
123
- export KUBECONFIG=${HC_KUBECONFIG}
124
- oc get clusterversion
125
- oc get nodes
123
+ $ export KUBECONFIG=${HC_KUBECONFIG}
124
+ $ oc get clusterversion
125
+ $ oc get nodes
126
126
----
127
127
128
128
.Next steps
0 commit comments