Skip to content

feat: Support add data disks for storing container data and images #229

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,17 @@ spec:
type: string
type: object
type: array
formatDataDisk:
default: false
description: FormatDataDisk specifies whether to mount data disks
to an existing instance when adding it to the cluster. This allows
you to add data disks for storing container data and images. If
FormatDataDisk is set to true, and the Elastic Compute Service (ECS)
instances already have data disks mounted, but the file system on
the last data disk is not initialized, the system will automatically
format the disk to ext4 and mount it to /var/lib/containerd and
/var/lib/kubelet.
type: boolean
imageSelectorTerms:
description: ImageSelectorTerms is a list of or image selector terms.
The terms are ORed.
Expand Down
4 changes: 4 additions & 0 deletions pkg/apis/v1alpha1/ecsnodeclass.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,10 @@ type ECSNodeClassSpec struct {
// +kubebuilder:validation:Items=Enum=cloud;cloud_efficiency;cloud_ssd;cloud_essd;cloud_auto;cloud_essd_entry
// +optional
DataDisksCategories []string `json:"dataDiskCategories,omitempty"`
// FormatDataDisk specifies whether to mount data disks to an existing instance when adding it to the cluster. This allows you to add data disks for storing container data and images. If FormatDataDisk is set to true, and the Elastic Compute Service (ECS) instances already have data disks mounted, but the file system on the last data disk is not initialized, the system will automatically format the disk to ext4 and mount it to /var/lib/containerd and /var/lib/kubelet.
// +kubebuilder:default:=false
// +optional
FormatDataDisk bool `json:"formatDataDisk,omitempty"`
// Tags to be applied on ecs resources like instances and launch templates.
// +kubebuilder:validation:XValidation:message="empty tag keys aren't supported",rule="self.all(k, k != '')"
// +kubebuilder:validation:XValidation:message="tag contains a restricted tag matching ecs:ecs-cluster-name",rule="self.all(k, k !='ecs:ecs-cluster-name')"
Expand Down
8 changes: 5 additions & 3 deletions pkg/providers/cluster/ackmanaged.go
Original file line number Diff line number Diff line change
Expand Up @@ -142,9 +142,10 @@ func (a *ACKManaged) UserData(ctx context.Context,
labels map[string]string,
taints []corev1.Taint,
kubeletCfg *v1alpha1.KubeletConfiguration,
userData *string) (string, error) {
userData *string,
formatDataDisk bool) (string, error) {

attach, err := a.getClusterAttachScripts(ctx)
attach, err := a.getClusterAttachScripts(formatDataDisk, ctx)
if err != nil {
return "", err
}
Expand Down Expand Up @@ -174,13 +175,14 @@ func (a *ACKManaged) FeatureFlags() FeatureFlags {
}
}

func (a *ACKManaged) getClusterAttachScripts(ctx context.Context) (string, error) {
func (a *ACKManaged) getClusterAttachScripts(formatDataDisk bool, ctx context.Context) (string, error) {
if cachedScript, ok := a.cache.Get(a.clusterID); ok {
return cachedScript.(string), nil
}

reqPara := &ackclient.DescribeClusterAttachScriptsRequest{
KeepInstanceName: tea.Bool(true),
FormatDisk: tea.Bool(formatDataDisk),
}
resp, err := a.ackClient.DescribeClusterAttachScripts(tea.String(a.clusterID), reqPara)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion pkg/providers/cluster/custom.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ func NewCustom() *Custom {
return &Custom{}
}

func (c *Custom) UserData(ctx context.Context, labels map[string]string, taints []corev1.Taint, configuration *v1alpha1.KubeletConfiguration, userData *string) (string, error) {
// UserData implements the cluster Provider interface for custom (non-ACK-managed)
// clusters. It ignores every cluster-derived input — labels, taints, kubelet
// configuration, and formatDataDisk — and simply base64-encodes the raw user
// data supplied on the node class (an empty string when userData is nil).
func (c *Custom) UserData(ctx context.Context, labels map[string]string, taints []corev1.Taint, configuration *v1alpha1.KubeletConfiguration, userData *string, formatDataDisk bool) (string, error) {
	raw := lo.FromPtr(userData)
	encoded := base64.StdEncoding.EncodeToString([]byte(raw))
	return encoded, nil
}

Expand Down
2 changes: 1 addition & 1 deletion pkg/providers/cluster/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ type Image struct {
// Provider can be implemented to generate userdata
type Provider interface {
ClusterType() string
UserData(context.Context, map[string]string, []corev1.Taint, *v1alpha1.KubeletConfiguration, *string) (string, error)
UserData(context.Context, map[string]string, []corev1.Taint, *v1alpha1.KubeletConfiguration, *string, bool) (string, error)
GetClusterCNI(context.Context) (string, error)
LivenessProbe(*http.Request) error
GetSupportedImages(string) ([]Image, error)
Expand Down
2 changes: 1 addition & 1 deletion pkg/providers/instance/instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -688,7 +688,7 @@ func (p *DefaultProvider) buildUserData(ctx context.Context, capacityType string
}) {
taints = append(taints, karpv1.UnregisteredNoExecuteTaint)
}
return p.clusterProvider.UserData(ctx, labels, taints, kubeletCfg, nodeClass.Spec.UserData)
return p.clusterProvider.UserData(ctx, labels, taints, kubeletCfg, nodeClass.Spec.UserData, nodeClass.Spec.FormatDataDisk)
}

func resolveKubeletConfiguration(nodeClass *v1alpha1.ECSNodeClass) *v1alpha1.KubeletConfiguration {
Expand Down