Skip to content

Commit 951e6eb

Browse files
YashasG98l-technicore
authored and committed
Made changes to support UHP volumes
1 parent 40b3cbb commit 951e6eb

File tree

18 files changed

+1195
-142
lines changed

18 files changed

+1195
-142
lines changed

hack/existing-standalone-cluster-env-template.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -75,3 +75,4 @@ export MNT_TARGET_SUBNET_ID=""
7575
export MNT_TARGET_COMPARTMENT_ID=""
7676

7777
export STATIC_SNAPSHOT_COMPARTMENT_ID=""
78+
export CREATE_UHP_NODEPOOL="false"

hack/run_e2e_test.sh

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@ check-env "FSS_VOLUME_HANDLE" $FSS_VOLUME_HANDLE
3030
check-env "MNT_TARGET_ID" $MNT_TARGET_ID
3131
check-env "MNT_TARGET_SUBNET_ID" $MNT_TARGET_SUBNET_ID
3232
check-env "MNT_TARGET_COMPARTMENT_ID" $MNT_TARGET_COMPARTMENT_ID
33+
check-env "CREATE_UHP_NODEPOOL" $CREATE_UHP_NODEPOOL
34+
3335

3436
function set_image_pull_repo_and_delete_namespace_flag () {
3537
if [ -z "$IMAGE_PULL_REPO" ]; then
@@ -56,7 +58,8 @@ function run_e2e_tests_existing_cluster() {
5658
--reserved-ip=${RESERVED_IP} \
5759
--architecture=${ARCHITECTURE} \
5860
--volume-handle=${FSS_VOLUME_HANDLE} \
59-
--static-snapshot-compartment-id=${STATIC_SNAPSHOT_COMPARTMENT_ID}
61+
--static-snapshot-compartment-id=${STATIC_SNAPSHOT_COMPARTMENT_ID} \
62+
--create-uhp-nodepool=${CREATE_UHP_NODEPOOL}
6063
retval=$?
6164
return $retval
6265
}

pkg/csi-util/utils.go

Lines changed: 8 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -59,10 +59,11 @@ const (
5959
ociVolumeBackupID = "volume.beta.kubernetes.io/oci-volume-source"
6060

6161
// Block Volume Performance Units
62-
VpusPerGB = "vpusPerGB"
63-
LowCostPerformanceOption = 0
64-
BalancedPerformanceOption = 10
65-
HigherPerformanceOption = 20
62+
VpusPerGB = "vpusPerGB"
63+
LowCostPerformanceOption = 0
64+
BalancedPerformanceOption = 10
65+
HigherPerformanceOption = 20
66+
MaxUltraHighPerformanceOption = 120
6667

6768
InTransitEncryptionPackageName = "oci-fss-utils"
6869
FIPS_ENABLED_FILE_PATH = "/host/proc/sys/crypto/fips_enabled"
@@ -78,7 +79,7 @@ type Util struct {
7879

7980
var (
8081
DiskByPathPatternPV = `/dev/disk/by-path/pci-\w{4}:\w{2}:\w{2}\.\d+-scsi-\d+:\d+:\d+:\d+$`
81-
DiskByPathPatternISCSI = `/dev/disk/by-path/ip-[\w\.]+:\d+-iscsi-[\w\.\-:]+-lun-1$`
82+
DiskByPathPatternISCSI = `/dev/disk/by-path/ip-[\w\.]+:\d+-iscsi-[\w\.\-:]+-lun-\d+$`
8283
)
8384

8485
type FSSVolumeHandler struct {
@@ -187,10 +188,9 @@ func ExtractBlockVolumePerformanceLevel(attribute string) (int64, error) {
187188
if err != nil {
188189
return 0, status.Errorf(codes.InvalidArgument, "unable to parse performance level value %s as int64", attribute)
189190
}
190-
if vpusPerGB != LowCostPerformanceOption && vpusPerGB != BalancedPerformanceOption && vpusPerGB != HigherPerformanceOption {
191+
if vpusPerGB < LowCostPerformanceOption || vpusPerGB > MaxUltraHighPerformanceOption {
191192
return 0, status.Errorf(codes.InvalidArgument, "invalid performance option : %s provided for "+
192-
"storage class. supported performance options are 0 for low cost, 10 for balanced and 20 for higher"+
193-
" performance", attribute)
193+
"storage class. Supported values for performance options are between %d and %d", attribute, LowCostPerformanceOption, MaxUltraHighPerformanceOption)
194194
}
195195
return vpusPerGB, nil
196196
}
@@ -416,43 +416,6 @@ func FindMount(target string) ([]string, error) {
416416
return sources, nil
417417
}
418418

419-
func Rescan(logger *zap.SugaredLogger, devicePath string) error {
420-
421-
lsblkargs := []string{"-n", "-o", "NAME", devicePath}
422-
lsblkcmd := exec.Command("lsblk", lsblkargs...)
423-
lsblkoutput, err := lsblkcmd.CombinedOutput()
424-
if err != nil {
425-
return fmt.Errorf("Failed to find device name associated with devicePath %s", devicePath)
426-
}
427-
deviceName := strings.TrimSpace(string(lsblkoutput))
428-
if strings.HasPrefix(deviceName, "/dev/") {
429-
deviceName = strings.TrimPrefix(deviceName, "/dev/")
430-
}
431-
logger.With("deviceName", deviceName).Info("Rescanning")
432-
433-
// run command dd iflag=direct if=/dev/<device_name> of=/dev/null count=1
434-
// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
435-
devicePathFileArg := fmt.Sprintf("if=%s", devicePath)
436-
args := []string{"iflag=direct", devicePathFileArg, "of=/dev/null", "count=1"}
437-
cmd := exec.Command("dd", args...)
438-
output, err := cmd.CombinedOutput()
439-
if err != nil {
440-
return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, "dd", string(output))
441-
}
442-
logger.With("command", "dd", "output", string(output)).Debug("dd output")
443-
// run command echo 1 | tee /sys/class/block/%s/device/rescan
444-
// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
445-
cmdStr := fmt.Sprintf("echo 1 | tee /sys/class/block/%s/device/rescan", deviceName)
446-
cmd = exec.Command("bash", "-c", cmdStr)
447-
output, err = cmd.CombinedOutput()
448-
if err != nil {
449-
return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, cmdStr, string(output))
450-
}
451-
logger.With("command", cmdStr, "output", string(output)).Debug("rescan output")
452-
453-
return nil
454-
}
455-
456419
func GetBlockSizeBytes(logger *zap.SugaredLogger, devicePath string) (int64, error) {
457420
args := []string{"--getsize64", devicePath}
458421
cmd := exec.Command("blockdev", args...)

pkg/csi/driver/bv_controller.go

Lines changed: 66 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,8 @@ const (
6262
newBackupAvailableTimeout = 45 * time.Second
6363
needResize = "needResize"
6464
newSize = "newSize"
65+
multipathEnabled = "multipathEnabled"
66+
multipathDevices = "multipathDevices"
6567
//device is the consistent device path that would be used for paravirtualized attachment
6668
device = "device"
6769
)
@@ -239,7 +241,18 @@ func (d *BlockVolumeControllerDriver) CreateVolume(ctx context.Context, req *csi
239241
volumeName := req.Name
240242

241243
dimensionsMap := make(map[string]string)
244+
245+
volumeParams, err := extractVolumeParameters(log, req.GetParameters())
246+
if err != nil {
247+
log.With(zap.Error(err)).Error("Failed to parse storageclass parameters.")
248+
metricDimension = util.GetMetricDimensionForComponent(util.ErrValidation, util.CSIStorageType)
249+
dimensionsMap[metrics.ComponentDimension] = metricDimension
250+
metrics.SendMetricData(d.metricPusher, metrics.PVProvision, time.Since(startTime).Seconds(), dimensionsMap)
251+
return nil, status.Errorf(codes.InvalidArgument, "failed to parse storageclass parameters %v", err)
252+
}
253+
242254
dimensionsMap[metrics.ResourceOCIDDimension] = volumeName
255+
dimensionsMap[metrics.VolumeVpusPerGBDimension] = strconv.Itoa(int(volumeParams.vpusPerGB))
243256

244257
srcSnapshotId := ""
245258
srcVolumeId := ""
@@ -370,15 +383,6 @@ func (d *BlockVolumeControllerDriver) CreateVolume(ctx context.Context, req *csi
370383
return nil, fmt.Errorf("duplicate volume %q exists", volumeName)
371384
}
372385

373-
volumeParams, err := extractVolumeParameters(log, req.GetParameters())
374-
if err != nil {
375-
log.With(zap.Error(err)).Error("Failed to parse storageclass parameters.")
376-
metricDimension = util.GetMetricDimensionForComponent(util.ErrValidation, metricType)
377-
dimensionsMap[metrics.ComponentDimension] = metricDimension
378-
metrics.SendMetricData(d.metricPusher, metric, time.Since(startTime).Seconds(), dimensionsMap)
379-
return nil, status.Errorf(codes.InvalidArgument, "failed to parse storageclass parameters %v", err)
380-
}
381-
382386
provisionedVolume := core.Volume{}
383387

384388
if len(volumes) > 0 {
@@ -641,7 +645,16 @@ func (d *BlockVolumeControllerDriver) ControllerPublishVolume(ctx context.Contex
641645
//Checking if Volume state is already Attached or Attachment (from above condition) is completed
642646
if volumeAttached.GetLifecycleState() == core.VolumeAttachmentLifecycleStateAttached {
643647
log.With("instanceID", id).Info("Volume is already ATTACHED to the Node.")
644-
return generatePublishContext(volumeAttachmentOptions, log, volumeAttached, vpusPerGB, req.VolumeContext[needResize], req.VolumeContext[newSize]), nil
648+
resp, err := generatePublishContext(volumeAttachmentOptions, log, volumeAttached, vpusPerGB, req.VolumeContext[needResize], req.VolumeContext[newSize])
649+
if err != nil {
650+
log.With(zap.Error(err)).Error("Failed to generate publish context")
651+
errorType = util.GetError(err)
652+
csiMetricDimension = util.GetMetricDimensionForComponent(errorType, util.CSIStorageType)
653+
dimensionsMap[metrics.ComponentDimension] = csiMetricDimension
654+
metrics.SendMetricData(d.metricPusher, metrics.PVAttach, time.Since(startTime).Seconds(), dimensionsMap)
655+
return nil, status.Errorf(codes.Internal, "Failed to generate publish context: %s", err)
656+
}
657+
return resp, nil
645658
}
646659
}
647660
}
@@ -686,11 +699,25 @@ func (d *BlockVolumeControllerDriver) ControllerPublishVolume(ctx context.Contex
686699
csiMetricDimension = util.GetMetricDimensionForComponent(util.Success, util.CSIStorageType)
687700
dimensionsMap[metrics.ComponentDimension] = csiMetricDimension
688701
metrics.SendMetricData(d.metricPusher, metrics.PVAttach, time.Since(startTime).Seconds(), dimensionsMap)
689-
return generatePublishContext(volumeAttachmentOptions, log, volumeAttached, vpusPerGB, req.VolumeContext[needResize], req.VolumeContext[newSize]), nil
690-
702+
resp, err := generatePublishContext(volumeAttachmentOptions, log, volumeAttached, vpusPerGB, req.VolumeContext[needResize], req.VolumeContext[newSize])
703+
if err != nil {
704+
log.With(zap.Error(err)).Error("Failed to generate publish context")
705+
errorType = util.GetError(err)
706+
csiMetricDimension = util.GetMetricDimensionForComponent(errorType, util.CSIStorageType)
707+
dimensionsMap[metrics.ComponentDimension] = csiMetricDimension
708+
metrics.SendMetricData(d.metricPusher, metrics.PVAttach, time.Since(startTime).Seconds(), dimensionsMap)
709+
return nil, status.Errorf(codes.Internal, "Failed to generate publish context: %s", err)
710+
}
711+
return resp, nil
691712
}
692713

693-
func generatePublishContext(volumeAttachmentOptions VolumeAttachmentOption, log *zap.SugaredLogger, volumeAttached core.VolumeAttachment, vpusPerGB string, needsResize string, expectedSize string) *csi.ControllerPublishVolumeResponse {
714+
func generatePublishContext(volumeAttachmentOptions VolumeAttachmentOption, log *zap.SugaredLogger, volumeAttached core.VolumeAttachment, vpusPerGB string, needsResize string, expectedSize string) (*csi.ControllerPublishVolumeResponse, error) {
715+
multipath := "false"
716+
717+
if volumeAttached.GetIsMultipath() != nil {
718+
multipath = strconv.FormatBool(*volumeAttached.GetIsMultipath())
719+
}
720+
694721
if volumeAttachmentOptions.useParavirtualizedAttachment {
695722
log.With("volumeAttachedId", *volumeAttached.GetId()).Info("Publishing paravirtualized Volume Completed.")
696723
return &csi.ControllerPublishVolumeResponse{
@@ -700,24 +727,36 @@ func generatePublishContext(volumeAttachmentOptions VolumeAttachmentOption, log
700727
csi_util.VpusPerGB: vpusPerGB,
701728
needResize: needsResize,
702729
newSize: expectedSize,
730+
multipathEnabled: multipath,
703731
},
704-
}
732+
}, nil
705733
}
706734
iSCSIVolumeAttached := volumeAttached.(core.IScsiVolumeAttachment)
735+
multiPathDevicesJson := []byte{}
736+
if len(iSCSIVolumeAttached.MultipathDevices) > 0 {
737+
var err error
738+
multiPathDevicesJson, err = json.Marshal(iSCSIVolumeAttached.MultipathDevices)
739+
if err != nil {
740+
return nil, err
741+
}
742+
}
707743

708744
log.With("volumeAttachedId", *volumeAttached.GetId()).Info("Publishing iSCSI Volume Completed.")
709745

710746
return &csi.ControllerPublishVolumeResponse{
711747
PublishContext: map[string]string{
712748
attachmentType: attachmentTypeISCSI,
749+
device: *volumeAttached.GetDevice(),
713750
disk.ISCSIIQN: *iSCSIVolumeAttached.Iqn,
714751
disk.ISCSIIP: *iSCSIVolumeAttached.Ipv4,
715752
disk.ISCSIPORT: strconv.Itoa(*iSCSIVolumeAttached.Port),
716753
csi_util.VpusPerGB: vpusPerGB,
717754
needResize: needsResize,
718755
newSize: expectedSize,
756+
multipathEnabled: multipath,
757+
multipathDevices: string(multiPathDevicesJson),
719758
},
720-
}
759+
}, nil
721760
}
722761

723762
// ControllerUnpublishVolume detaches the given volume from the node
@@ -797,6 +836,18 @@ func (d *BlockVolumeControllerDriver) ControllerUnpublishVolume(ctx context.Cont
797836
return nil, status.Errorf(codes.Unknown, "timed out waiting for volume to be detached %s", err)
798837
}
799838

839+
multipath := false
840+
841+
if attachedVolume.GetIsMultipath() != nil {
842+
multipath = *attachedVolume.GetIsMultipath()
843+
}
844+
845+
// sleeping to ensure block volume plugin logs out of iscsi connections on nodes before delete
846+
if multipath {
847+
log.Info("Waiting for 90 seconds to ensure block volume plugin logs out of iscsi connections on nodes")
848+
time.Sleep(90 * time.Second)
849+
}
850+
800851
log.Info("Un-publishing Volume Completed")
801852
csiMetricDimension = util.GetMetricDimensionForComponent(util.Success, util.CSIStorageType)
802853
dimensionsMap[metrics.ComponentDimension] = csiMetricDimension

pkg/csi/driver/bv_controller_test.go

Lines changed: 36 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -278,12 +278,25 @@ func (c *MockBlockStorageClient) GetVolume(ctx context.Context, id string) (*cor
278278
}, nil
279279
} else if id == "valid_volume_id_valid_old_size_fail" {
280280
ad := "zkJl:US-ASHBURN-AD-1"
281+
vpuspergb := int64(10)
281282
var oldSizeInBytes int64 = 2147483648
282283
oldSizeInGB := csi_util.RoundUpSize(oldSizeInBytes, 1*client.GiB)
283284
return &core.Volume{
284285
Id: &id,
285286
AvailabilityDomain: &ad,
286287
SizeInGBs: &oldSizeInGB,
288+
VpusPerGB: &vpuspergb,
289+
}, nil
290+
} else if id == "uhp_volume_id" {
291+
ad := "zkJl:US-ASHBURN-AD-1"
292+
vpuspergb := int64(40)
293+
var oldSizeInBytes int64 = 2147483648
294+
oldSizeInGB := csi_util.RoundUpSize(oldSizeInBytes, 1*client.GiB)
295+
return &core.Volume{
296+
Id: &id,
297+
AvailabilityDomain: &ad,
298+
SizeInGBs: &oldSizeInGB,
299+
VpusPerGB: &vpuspergb,
287300
}, nil
288301
} else {
289302
return volumes[id], nil
@@ -1222,6 +1235,24 @@ func TestControllerDriver_ControllerExpandVolume(t *testing.T) {
12221235
want: nil,
12231236
wantErr: errors.New("Update volume failed"),
12241237
},
1238+
{
1239+
name: "Uhp volume expand success in ControllerExpandVolume",
1240+
fields: fields{},
1241+
args: args{
1242+
ctx: nil,
1243+
req: &csi.ControllerExpandVolumeRequest{
1244+
VolumeId: "uhp_volume_id",
1245+
CapacityRange: &csi.CapacityRange{
1246+
RequiredBytes: int64(csi_util.MaximumVolumeSizeInBytes),
1247+
},
1248+
},
1249+
},
1250+
want: &csi.ControllerExpandVolumeResponse{
1251+
CapacityBytes: int64(csi_util.MaximumVolumeSizeInBytes),
1252+
NodeExpansionRequired: true,
1253+
},
1254+
wantErr: nil,
1255+
},
12251256
}
12261257
for _, tt := range tests {
12271258
t.Run(tt.name, func(t *testing.T) {
@@ -1236,7 +1267,9 @@ func TestControllerDriver_ControllerExpandVolume(t *testing.T) {
12361267
if tt.wantErr == nil && err != nil {
12371268
t.Errorf("got error %q, want none", err)
12381269
}
1239-
if tt.wantErr != nil && !strings.Contains(err.Error(), tt.wantErr.Error()) {
1270+
if tt.wantErr != nil && err == nil {
1271+
t.Errorf("want error %q, got none", tt.wantErr)
1272+
} else if tt.wantErr != nil && !strings.Contains(err.Error(), tt.wantErr.Error()) {
12401273
t.Errorf("want error %q to include %q", err, tt.wantErr)
12411274
}
12421275
if !reflect.DeepEqual(got, tt.want) {
@@ -1503,9 +1536,9 @@ func TestExtractVolumeParameters(t *testing.T) {
15031536
volumeParameters: VolumeParameters{
15041537
diskEncryptionKey: "",
15051538
attachmentParameter: make(map[string]string),
1506-
vpusPerGB: 10,
1539+
vpusPerGB: 40,
15071540
},
1508-
wantErr: true,
1541+
wantErr: false,
15091542
},
15101543
"if invalid parameter for performance level then return error": {
15111544
storageParameters: map[string]string{

0 commit comments

Comments
 (0)