Skip to content

Commit 3122e9f

Browse files
author
dmytro_velychko3
committed
feat: refactoring
1 parent 3e3477e commit 3122e9f

File tree

6 files changed

+150
-97
lines changed

6 files changed

+150
-97
lines changed

README.md

Lines changed: 22 additions & 17 deletions
Large diffs are not rendered by default.

cluster.tf

Lines changed: 37 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,37 @@
1+
# Provisions one Databricks cluster per entry in var.clusters,
# keyed by cluster_name so each instance keeps a stable resource address.
resource "databricks_cluster" "cluster" {
  for_each = { for cluster in var.clusters : cluster.cluster_name => cluster }

  cluster_name            = each.value.cluster_name
  spark_version           = each.value.spark_version
  spark_conf              = each.value.spark_conf
  spark_env_vars          = each.value.spark_env_vars
  data_security_mode      = each.value.data_security_mode
  node_type_id            = each.value.node_type_id
  autotermination_minutes = each.value.autotermination_minutes

  # Worker count scales between the configured bounds.
  autoscale {
    min_workers = each.value.min_workers
    max_workers = each.value.max_workers
  }

  azure_attributes {
    availability       = each.value.availability
    first_on_demand    = each.value.first_on_demand
    spot_bid_max_price = each.value.spot_bid_max_price
  }

  # Ship cluster logs to DBFS only when a destination was configured.
  dynamic "cluster_log_conf" {
    for_each = each.value.cluster_log_conf_destination != null ? [each.value.cluster_log_conf_destination] : []
    content {
      dbfs {
        destination = cluster_log_conf.value
      }
    }
  }

  lifecycle {
    # The cluster's run state changes outside Terraform; do not diff on it.
    ignore_changes = [
      state
    ]
  }
}

main.tf

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -47,14 +47,14 @@ resource "databricks_sql_endpoint" "this" {
4747
for_each = { for endpoint in var.sql_endpoint : (endpoint.name) => endpoint }
4848

4949
name = "${each.key}${local.suffix}"
50-
cluster_size = coalesce(each.value.cluster_size, "2X-Small")
51-
min_num_clusters = coalesce(each.value.min_num_clusters, 0)
52-
max_num_clusters = coalesce(each.value.max_num_clusters, 1)
53-
auto_stop_mins = coalesce(each.value.auto_stop_mins, "30")
54-
enable_photon = coalesce(each.value.enable_photon, false)
55-
enable_serverless_compute = coalesce(each.value.enable_serverless_compute, false)
56-
spot_instance_policy = coalesce(each.value.spot_instance_policy, "COST_OPTIMIZED")
57-
warehouse_type = coalesce(each.value.warehouse_type, "PRO")
50+
cluster_size = each.value.cluster_size
51+
min_num_clusters = each.value.min_num_clusters
52+
max_num_clusters = each.value.max_num_clusters
53+
auto_stop_mins = each.value.auto_stop_mins
54+
enable_photon = each.value.enable_photon
55+
enable_serverless_compute = each.value.enable_serverless_compute
56+
spot_instance_policy = each.value.spot_instance_policy
57+
warehouse_type = each.value.warehouse_type
5858

5959
lifecycle {
6060
ignore_changes = [state, num_clusters]

permissions.tf

Lines changed: 21 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -1,40 +1,39 @@
11
locals {
2-
secrets_acl_objects_list = flatten([for param in local.secret_scope_object : [
3-
#secrets_acl_objects_list = flatten([for param in var.secret_scope_object : [
2+
secrets_acl_objects_list = flatten([for param in var.secret_scope_object : [
43
for permission in param.acl : {
54
scope = param.scope_name, principal = permission.principal, permission = permission.permission
65
}] if param.acl != null
76
])
8-
}
9-
10-
resource "databricks_permissions" "default_cluster" {
11-
for_each = coalesce(flatten([values(var.iam)[*].default_cluster_permission, "none"])...) != "none" ? var.default_cluster_id : {}
127

13-
cluster_id = each.value
8+
secret_scope_object = [for param in var.secret_scope : {
9+
scope_name = databricks_secret_scope.this[param.scope_name].name
10+
acl = param.acl
11+
} if param.acl != null]
12+
}
1413

15-
dynamic "access_control" {
16-
for_each = { for k, v in var.iam : k => v.default_cluster_permission if v.default_cluster_permission != null }
17-
content {
18-
group_name = databricks_group.this[access_control.key].display_name
19-
permission_level = access_control.value
20-
}
14+
resource "databricks_cluster_policy" "this" {
15+
for_each = {
16+
for param in var.custom_cluster_policies : (param.name) => param.definition
17+
if param.definition != null
2118
}
19+
20+
name = each.key
21+
definition = jsonencode(each.value)
2222
}
2323

24-
resource "databricks_permissions" "cluster_policy" {
24+
resource "databricks_permissions" "clusters" {
2525
for_each = {
26-
for policy in var.cluster_policies_object : (policy.name) => policy
27-
#for policy in var.cluster_policies_object : (policy.name) => policy
28-
if policy.can_use != null
26+
for v in var.clusters : (v.cluster_name) => v
27+
if length(v.permissions) != 0
2928
}
3029

31-
cluster_policy_id = each.value.id
30+
cluster_id = databricks_cluster.cluster[each.key].id
3231

3332
dynamic "access_control" {
34-
for_each = each.value.can_use
33+
for_each = each.value.permissions
3534
content {
36-
group_name = databricks_group.this[access_control.value].display_name
37-
permission_level = "CAN_USE"
35+
group_name = databricks_group.this[access_control.value.group_name].display_name
36+
permission_level = access_control.value.permission_level
3837
}
3938
}
4039
}
@@ -56,7 +55,7 @@ resource "databricks_permissions" "unity_cluster" {
5655
resource "databricks_permissions" "sql_endpoint" {
5756
for_each = {
5857
for endpoint in var.sql_endpoint : (endpoint.name) => endpoint
59-
if endpoint.permissions != null
58+
if length(endpoint.permissions) != 0
6059
}
6160

6261
sql_endpoint_id = databricks_sql_endpoint.this[each.key].id

secrets.tf

Lines changed: 2 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -9,23 +9,12 @@ locals {
99
scope_name = param.scope_name, key = secret.key, string_value = secret.string_value
1010
}] if param.secrets != null
1111
])
12-
13-
secret_scope_object = [for param in var.secret_scope : {
14-
scope_name = databricks_secret_scope.this[param.scope_name].name
15-
acl = param.acl
16-
} if param.acl != null]
17-
18-
#cluster_policies_object = [for policy in var.custom_cluster_policies : {
19-
# id = databricks_cluster_policy.this[policy.name].id
20-
# name = databricks_cluster_policy.this[policy.name].name
21-
# can_use = policy.can_use
22-
#} if policy.definition != null && var.sku == "premium"]
2312
}
2413

2514
# Secret Scope with SP secrets for mounting Azure Data Lake Storage
2615
resource "databricks_secret_scope" "main" {
2716
name = "main"
28-
initial_manage_principal = var.sku == "premium" ? null : "users"
17+
initial_manage_principal = null
2918
}
3019

3120
resource "databricks_secret" "main" {
@@ -44,7 +33,7 @@ resource "databricks_secret_scope" "this" {
4433
}
4534

4635
name = each.key
47-
initial_manage_principal = var.sku == "premium" ? null : "users"
36+
initial_manage_principal = null
4837
}
4938

5039
resource "databricks_secret" "this" {

variables.tf

Lines changed: 60 additions & 37 deletions
Original file line number | Diff line number | Diff line change
@@ -61,43 +61,22 @@ variable "iam" {
6161
}
6262
}
6363

64-
# Default Cluster and Cluster Policy variables
65-
variable "default_cluster_id" {
66-
type = map(string)
67-
description = "Single value of default Cluster id created by 'databricks-runtime' module"
68-
default = {}
69-
}
70-
71-
variable "cluster_policies_object" {
72-
type = list(object({
73-
id = string
74-
name = string
75-
can_use = list(string)
76-
}))
77-
description = "List of objects that provides an ability to grant custom workspace group a permission to use(CAN_USE) cluster policy"
78-
default = [{
79-
id = null
80-
name = null
81-
can_use = null
82-
}]
83-
}
84-
8564
# SQL Endpoint variables
8665
variable "sql_endpoint" {
8766
type = set(object({
8867
name = string
89-
cluster_size = optional(string)
90-
min_num_clusters = optional(number)
91-
max_num_clusters = optional(number)
92-
auto_stop_mins = optional(string)
93-
enable_photon = optional(bool)
94-
enable_serverless_compute = optional(bool)
95-
spot_instance_policy = optional(string)
96-
warehouse_type = optional(string)
68+
cluster_size = optional(string, "2X-Small")
69+
min_num_clusters = optional(number, 0)
70+
max_num_clusters = optional(number, 1)
71+
auto_stop_mins = optional(string, "30")
72+
enable_photon = optional(bool, false)
73+
enable_serverless_compute = optional(bool, false)
74+
spot_instance_policy = optional(string, "COST_OPTIMIZED")
75+
warehouse_type = optional(string, "PRO")
9776
permissions = optional(set(object({
9877
group_name = string
9978
permission_level = string
100-
})))
79+
})), [])
10180
}))
10281
description = "Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups"
10382
default = []
@@ -254,12 +233,6 @@ EOT
254233
}]
255234
}
256235

257-
variable "sku" {
258-
type = string
259-
description = "The sku to use for the Databricks Workspace: [standard|premium|trial]"
260-
default = "premium"
261-
}
262-
263236
variable "key_vault_id" {
264237
type = string
265238
description = "ID of the Key Vault instance where the Secret resides"
@@ -277,4 +250,54 @@ variable "mountpoints" {
277250
}))
278251
description = "Mountpoints for databricks"
279252
default = {}
280-
}
253+
}
254+
255+
variable "custom_cluster_policies" {
  type = list(object({
    name       = string
    can_use    = list(string)
    definition = any
    assigned   = bool
  }))
  description = <<-EOT
  Provides an ability to create custom cluster policy, assign it to cluster and grant CAN_USE permissions on it to certain custom groups
  name - name of custom cluster policy to create
  can_use - list of string, where values are custom group names; these groups have to be created with Terraform;
  definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value;
  assigned - boolean flag which assigns policy to default 'shared autoscaling' cluster; only a single custom policy could be assigned;
  EOT
  # Null-filled default keeps downstream for_each expressions (which filter on
  # 'definition != null') from creating any policy when none are supplied.
  default = [{
    name       = null
    can_use    = null
    definition = null
    assigned   = false
  }]

  # Guard: at most one policy may be flagged as assigned to the default cluster.
  validation {
    condition     = length([for policy in var.custom_cluster_policies : policy.assigned if policy.assigned]) <= 1
    error_message = "Only a single cluster policy assignment is allowed. Please set the 'assigned' parameter to 'true' for exactly one policy, or none."
  }
}
280+
281+
# Cluster definitions consumed by databricks_cluster.cluster; every attribute
# except cluster_name carries a default, so callers may supply the name alone.
variable "clusters" {
  type = set(object({
    cluster_name                 = string
    spark_version                = optional(string, "11.3.x-scala2.12")
    spark_conf                   = optional(map(any), {})
    spark_env_vars               = optional(map(any), {})
    data_security_mode           = optional(string, "USER_ISOLATION")
    node_type_id                 = optional(string, "Standard_D3_v2")
    autotermination_minutes      = optional(number, 30)
    min_workers                  = optional(number, 1)
    max_workers                  = optional(number, 2)
    availability                 = optional(string, "ON_DEMAND_AZURE")
    first_on_demand              = optional(number, 0)
    spot_bid_max_price           = optional(number, 1)
    cluster_log_conf_destination = optional(string, null)
    # Per-cluster grants applied by databricks_permissions; empty set means
    # no permissions resource is created for that cluster.
    permissions = optional(set(object({
      group_name       = string
      permission_level = string
    })), [])
  }))
  description = "Set of objects with parameters to configure Databricks clusters and assign permissions to it for certain custom groups"
  default     = []
}

0 commit comments

Comments
 (0)