
Commit 902cd63

Fixed NVMe-based deployments. Added logic to detect data tiering when using DenseIO shapes with one or more Block Volumes for HDFS.

1 parent: 5fbd880
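In rough terms, the detection rule introduced here amounts to the following minimal sketch (detect_data_tiering and block_volume_count are illustrative names for this note only, not identifiers from the script):

# Illustrative sketch of the auto-detection rule (not part of the commit):
# data tiering turns on when the worker shape is a DenseIO shape (local NVMe)
# and one or more Block Volumes are attached for HDFS.
def detect_data_tiering(worker_shape, block_volume_count):
    data_tiering = 'False'
    if 'DenseIO' in worker_shape and int(block_volume_count) >= 1:
        data_tiering = 'True'
    return data_tiering


print(detect_data_tiering('BM.DenseIO2.52', 3))    # 'True'  - NVMe shape plus Block Volumes
print(detect_data_tiering('VM.Standard2.16', 3))   # 'False' - no local NVMe, so no tiering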

File tree

1 file changed (+26, -29 lines)

v6/scripts/deploy_on_oci.py

Lines changed: 26 additions & 29 deletions
@@ -31,6 +31,10 @@
 input_host_list = 'None'
 license_file = 'None'
 host_fqdn_list = []
+# Data Tiering - Enabled when using Heterogenous storage in your cluster, automatically turns on if using DenseIO shapes
+# and a Block Volume count greater than 0. Defaults to 'False"
+data_tiering = 'False'
+nvme_disks = 0
 
 #
 # Custom Global Parameters - Customize below here
@@ -72,10 +76,6 @@
 # For Postgres this is 5432
 meta_db_port = '3306'
 
-# Data Tiering - set this to 'True' if you wish to use Heterogenous storage in your cluster.
-# This enables mixing local storage and Block Volumes to achieve higher HDFS capacity
-data_tiering = 'False'
-
 # Define Remote Parcel URL & Distribution Rate if desired
 remote_parcel_url = 'https://archive.cloudera.com/cdh6/' + cluster_version + '/parcels' # type: str
 parcel_distribution_rate = "1024000" # type: int
@@ -636,7 +636,7 @@ def build_cluster_host_list(host_fqdn_list):
     print('Cluster Host List: %s' % cluster_host_list)
 
 
-def build_disk_lists():
+def build_disk_lists(disk_count, data_tiering):
     """
     Build Disk Lists for use with HDFS and YARN
     :return:
@@ -645,7 +645,23 @@ def build_disk_lists():
     dfs_data_dir_list = ''
     global yarn_data_dir_list
     yarn_data_dir_list = ''
+    if 'DenseIO' in worker_shape:
+        if int(disk_count) >= 1:
+            data_tiering = 'True'
+    if worker_shape == 'BM.DenseIO2.52':
+        nvme_disks = 8
+    if worker_shape == 'VM.DenseIO2.24':
+        nvme_disks = 4
+    if worker_shape == 'VM.DenseIO2.16':
+        nvme_disks = 2
+    if worker_shape == 'VM.DenseIO2.8':
+        nvme_disks = 1
+    if worker_shape == 'BM.HPC2.36':
+        nvme_disks = 1
+
     if data_tiering == 'False':
+        if nvme_disks >= 1:
+            disk_count = nvme_disks
         for x in range(0, int(disk_count)):
             if x is 0:
                 dfs_data_dir_list += "/data%d/dfs/dn" % x
@@ -654,37 +670,18 @@ def build_disk_lists():
                 dfs_data_dir_list += ",/data%d/dfs/dn" % x
                 yarn_data_dir_list += ",/data%d/yarn/nm" % x
 
-    elif data_tiering == 'True':
-        global local_disks
-        if worker_shape == 'BM.DenseIO2.52':
-            local_disks = 8
-
-        if worker_shape == 'VM.DenseIO2.24':
-            local_disks = 4
-
-        if worker_shape == 'VM.DenseIO2.16':
-            local_disks = 2
-
-        if worker_shape == 'VM.DenseIO2.8':
-            local_disks = 1
-
-        if worker_shape == 'BM.HPC2.36':
-            local_disks = 1
-
-        total_disk_count = disk_count + local_disks
+    if data_tiering == 'True':
+        total_disk_count = int(disk_count) + nvme_disks
         for x in range(0, int(total_disk_count)):
             if x is 0:
                 dfs_data_dir_list += "[DISK]/data%d/dfs/dn" % x
-            elif x < local_disks:
+                yarn_data_dir_list += "/data%d/yarn/nm" % x
+            elif x < nvme_disks:
                 dfs_data_dir_list += ",[DISK]/data%d/dfs/dn" % x
             else:
                 dfs_data_dir_list += ",[ARCHIVE]/data%d/dfs/dn" % x
                 yarn_data_dir_list += ",/data%d/yarn/nm" % x
 
-    else:
-        print('Invalid Data Tiering flag - expected True or False: %s\n' % data_tiering)
-        sys.exit()
-
 
 def add_hosts_to_cluster(cluster_host_list):
     """
@@ -2423,7 +2420,7 @@ def build_cloudera_cluster():
     :return:
     """
     parse_ssh_key()
-    build_disk_lists()
+    build_disk_lists(disk_count, data_tiering)
     try:
         api_response = users_api.read_user2(admin_user_name)
         if api_response.auth_roles:
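For reference, here is a self-contained sketch of how the reworked disk-list building behaves, using the shape-to-NVMe mapping shown in the diff. build_disk_lists_sketch is an illustrative stand-in, not the function in deploy_on_oci.py (which assigns the global dfs_data_dir_list and yarn_data_dir_list rather than returning values):

# Illustrative approximation of the reworked logic; not the actual build_disk_lists().
NVME_PER_SHAPE = {
    'BM.DenseIO2.52': 8,
    'VM.DenseIO2.24': 4,
    'VM.DenseIO2.16': 2,
    'VM.DenseIO2.8': 1,
    'BM.HPC2.36': 1,
}


def build_disk_lists_sketch(worker_shape, disk_count):
    nvme_disks = NVME_PER_SHAPE.get(worker_shape, 0)
    # Tiering auto-enables for DenseIO shapes with one or more Block Volumes.
    data_tiering = 'DenseIO' in worker_shape and int(disk_count) >= 1
    dfs, yarn = [], []
    if not data_tiering:
        # No tiering: if the shape has local NVMe, only the NVMe disks are used.
        count = nvme_disks if nvme_disks >= 1 else int(disk_count)
        for x in range(count):
            dfs.append('/data%d/dfs/dn' % x)
            yarn.append('/data%d/yarn/nm' % x)
    else:
        # Tiering: NVMe disks become [DISK] tiers, Block Volumes become [ARCHIVE] tiers.
        total_disk_count = int(disk_count) + nvme_disks
        for x in range(total_disk_count):
            tier = '[DISK]' if x < nvme_disks else '[ARCHIVE]'
            dfs.append('%s/data%d/dfs/dn' % (tier, x))
            yarn.append('/data%d/yarn/nm' % x)
    return ','.join(dfs), ','.join(yarn)


# Example: BM.DenseIO2.52 with 2 Block Volumes -> 8 [DISK] dirs plus 2 [ARCHIVE] dirs.
dfs_list, yarn_list = build_disk_lists_sketch('BM.DenseIO2.52', 2)
print(dfs_list)
print(yarn_list)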
