
Commit 44fa819

junior committed: variables consolidation
Signed-off-by: junior <junior@users.noreply.github.com>
1 parent a8bd969 · commit 44fa819
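
This commit removes 240 lines of variable declarations from main.tf and adds 231 lines in a second changed file that is not shown in this view; a dedicated variables.tf is the conventional destination for this kind of consolidation and is assumed here. The declaration blocks carry over unchanged, for example:

# variables.tf (assumed filename -- the second changed file does not appear on this page)
variable "create_new_oke_cluster" {
  default     = true
  description = "Creates a new OKE cluster, node pool and network resources"
}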

2 files changed: +231 -241 lines changed

main.tf

Lines changed: 0 additions & 240 deletions
@@ -177,159 +177,6 @@ module "oke_cluster_autoscaler" {
   depends_on = [module.oke, module.oke_node_pool]
 }

-################################################################################
-# Variables: OKE Cluster
-################################################################################
-## OKE Cluster Details
-variable "create_new_oke_cluster" {
-  default     = true
-  description = "Creates a new OKE cluster, node pool and network resources"
-}
-variable "existent_oke_cluster_id" {
-  default     = ""
-  description = "Using existent OKE Cluster. Only the application and services will be provisioned. If select cluster autoscaler feature, you need to get the node pool id and enter when required"
-}
-variable "create_new_compartment_for_oke" {
-  default     = false
-  description = "Creates new compartment for OKE Nodes and OCI Services deployed. NOTE: The creation of the compartment increases the deployment time by at least 3 minutes, and can increase by 15 minutes when destroying"
-}
-variable "oke_compartment_description" {
-  default = "Compartment for OKE, Nodes and Services"
-}
-variable "cluster_cni_type" {
-  default     = "FLANNEL_OVERLAY"
-  description = "The CNI type to use for the cluster. Valid values are: FLANNEL_OVERLAY or OCI_VCN_IP_NATIVE"
-
-  validation {
-    condition     = var.cluster_cni_type == "FLANNEL_OVERLAY" || var.cluster_cni_type == "OCI_VCN_IP_NATIVE"
-    error_message = "Sorry, but OKE currently only supports FLANNEL_OVERLAY or OCI_VCN_IP_NATIVE CNI types."
-  }
-}
-
-## OKE Encryption details
-variable "use_encryption_from_oci_vault" {
-  default     = false
-  description = "By default, Oracle manages the keys that encrypts Kubernetes Secrets at Rest in Etcd, but you can choose a key from a vault that you have access to, if you want greater control over the key's lifecycle and how it's used"
-}
-variable "create_new_encryption_key" {
-  default     = false
-  description = "Creates new vault and key on OCI Vault/Key Management/KMS and assign to boot volume of the worker nodes"
-}
-variable "existent_encryption_key_id" {
-  default     = ""
-  description = "Use an existent master encryption key to encrypt boot volume and object storage bucket. NOTE: If the key resides in a different compartment or in a different tenancy, make sure you have the proper policies to access, or the provision of the worker nodes will fail"
-}
-variable "create_vault_policies_for_group" {
-  default     = false
-  description = "Creates policies to allow the user applying the stack to manage vault and keys. If you are on the Administrators group or already have the policies for a compartment, this policy is not needed. If you do not have access to allow the policy, ask your administrator to include it for you"
-}
-variable "user_admin_group_for_vault_policy" {
-  default     = "Administrators"
-  description = "User Identity Group to allow manage vault and keys. The user running the Terraform scripts or Applying the ORM Stack need to be on this group"
-}
-
-## OKE Autoscaler
-variable "cluster_autoscaler_enabled" {
-  default     = true
-  description = "Enables OKE cluster autoscaler. Node pools will auto scale based on the resources usage"
-}
-variable "node_pool_initial_num_worker_nodes_1" {
-  default     = 3
-  description = "The number of worker nodes in the node pool. If enable Cluster Autoscaler, will assume the minimum number of nodes on the node pool to be scheduled by the Kubernetes (pool1)"
-}
-variable "node_pool_max_num_worker_nodes_1" {
-  default     = 10
-  description = "Maximum number of nodes on the node pool to be scheduled by the Kubernetes (pool1)"
-}
-variable "existent_oke_nodepool_id_for_autoscaler_1" {
-  default     = ""
-  description = "Nodepool Id of the existent OKE to use with Cluster Autoscaler (pool1)"
-}
-
-################################################################################
-# Variables: OKE Node Pool 1
-################################################################################
-## OKE Node Pool Details
-variable "k8s_version" {
-  default     = "Latest"
-  description = "Kubernetes version installed on your Control Plane and worker nodes. If not version select, will use the latest available."
-}
-### Node Pool 1
-variable "node_pool_name_1" {
-  default     = "pool1"
-  description = "Name of the node pool"
-}
-variable "extra_initial_node_labels_1" {
-  default     = []
-  description = "Extra initial node labels to be added to the node pool"
-}
-variable "node_pool_cni_type_1" {
-  default     = "FLANNEL_OVERLAY"
-  description = "The CNI type to use for the cluster. Valid values are: FLANNEL_OVERLAY or OCI_VCN_IP_NATIVE"
-
-  validation {
-    condition     = var.node_pool_cni_type_1 == "FLANNEL_OVERLAY" || var.node_pool_cni_type_1 == "OCI_VCN_IP_NATIVE"
-    error_message = "Sorry, but OKE currently only supports FLANNEL_OVERLAY or OCI_VCN_IP_NATIVE CNI types."
-  }
-}
-
-#### ocpus and memory are only used if flex shape is selected
-variable "node_pool_instance_shape_1" {
-  type = map(any)
-  default = {
-    "instanceShape" = "VM.Standard.E4.Flex"
-    "ocpus"         = 2
-    "memory"        = 16
-  }
-  description = "A shape is a template that determines the number of OCPUs, amount of memory, and other resources allocated to a newly created instance for the Worker Node. Select at least 2 OCPUs and 16GB of memory if using Flex shapes"
-}
-variable "node_pool_shape_specific_ad_1" {
-  description = "The number of the AD to get the shape for the node pool"
-  type        = number
-  default     = 0
-
-  validation {
-    condition     = var.node_pool_shape_specific_ad_1 >= 0 && var.node_pool_shape_specific_ad_1 <= 3
-    error_message = "Invalid AD number, should be 0 to get all ADs or 1, 2 or 3 to be a specific AD."
-  }
-}
-variable "node_pool_boot_volume_size_in_gbs_1" {
-  default     = "60"
-  description = "Specify a custom boot volume size (in GB)"
-}
-variable "image_operating_system_1" {
-  default     = "Oracle Linux"
-  description = "The OS/image installed on all nodes in the node pool."
-}
-variable "image_operating_system_version_1" {
-  default     = "8"
-  description = "The OS/image version installed on all nodes in the node pool."
-}
-variable "generate_public_ssh_key" {
-  default = true
-}
-variable "public_ssh_key" {
-  default     = ""
-  description = "In order to access your private nodes with a public SSH key you will need to set up a bastion host (a.k.a. jump box). If using public nodes, bastion is not needed. Left blank to not import keys."
-}
-
-################################################################################
-# Variables: Dynamic Group and Policies for OKE
-################################################################################
-# Create Dynamic Group and Policies
-variable "create_dynamic_group_for_nodes_in_compartment" {
-  default     = true
-  description = "Creates dynamic group of Nodes in the compartment. Note: You need to have proper rights on the Tenancy. If you only have rights in a compartment, uncheck and ask you administrator to create the Dynamic Group for you"
-}
-variable "existent_dynamic_group_for_nodes_in_compartment" {
-  default     = ""
-  description = "Enter previous created Dynamic Group for the policies"
-}
-variable "create_compartment_policies" {
-  default     = true
-  description = "Creates policies that will reside on the compartment. e.g.: Policies to support Cluster Autoscaler, OCI Logging datasource on Grafana"
-}
-
 resource "oci_identity_compartment" "oke_compartment" {
   compartment_id = var.compartment_ocid
   name           = "${local.app_name_normalized}-${local.deploy_id}"
@@ -342,93 +189,6 @@ locals {
   oke_compartment_ocid = var.create_new_compartment_for_oke ? oci_identity_compartment.oke_compartment.0.id : var.compartment_ocid
 }

-# Available OCI Services
-data "oci_core_services" "all_services_network" {
-  filter {
-    name   = "name"
-    values = ["All .* Services In Oracle Services Network"]
-    regex  = true
-  }
-}
-
-################################################################################
-# Variables: OKE Network
-################################################################################
-# OKE Network Visibility (Workers, Endpoint and Load Balancers)
-variable "cluster_workers_visibility" {
-  default     = "Private"
-  description = "The Kubernetes worker nodes that are created will be hosted in public or private subnet(s)"
-
-  validation {
-    condition     = var.cluster_workers_visibility == "Private" || var.cluster_workers_visibility == "Public"
-    error_message = "Sorry, but cluster visibility can only be Private or Public."
-  }
-}
-variable "cluster_endpoint_visibility" {
-  default     = "Public"
-  description = "The Kubernetes cluster that is created will be hosted on a public subnet with a public IP address auto-assigned or on a private subnet. If Private, additional configuration will be necessary to run kubectl commands"
-
-  validation {
-    condition     = var.cluster_endpoint_visibility == "Private" || var.cluster_endpoint_visibility == "Public"
-    error_message = "Sorry, but cluster endpoint visibility can only be Private or Public."
-  }
-}
-variable "cluster_load_balancer_visibility" {
-  default     = "Public"
-  description = "The Load Balancer that is created will be hosted on a public subnet with a public IP address auto-assigned or on a private subnet. This affects the Kubernetes services, ingress controller and other load balancers resources"
-
-  validation {
-    condition     = var.cluster_load_balancer_visibility == "Private" || var.cluster_load_balancer_visibility == "Public"
-    error_message = "Sorry, but cluster load balancer visibility can only be Private or Public."
-  }
-}
-variable "pods_network_visibility" {
-  default     = "Public"
-  description = "The PODs that are created will be hosted on a public subnet with a public IP address auto-assigned or on a private subnet. This affects the Kubernetes services and pods"
-
-  validation {
-    condition     = var.pods_network_visibility == "Private" || var.pods_network_visibility == "Public"
-    error_message = "Sorry, but PODs Network visibility can only be Private or Public."
-  }
-}
-
-# OKE Network Resources
-## Subnets
-# VCN Variables
-variable "create_subnets" {
-  default     = true
-  description = "Create subnets for OKE: Endpoint, Nodes, Load Balancers. If CNI Type OCI_VCN_IP_NATIVE, also creates the PODs VCN. If FSS Mount Targets, also creates the FSS Mount Targets Subnet"
-}
-variable "create_pod_network_subnet" {
-  default     = false
-  description = "Create PODs Network subnet for OKE. To be used with CNI Type OCI_VCN_IP_NATIVE"
-}
-variable "existent_oke_k8s_endpoint_subnet_ocid" {
-  default     = ""
-  description = "The OCID of the subnet where the Kubernetes cluster endpoint will be hosted"
-}
-variable "existent_oke_nodes_subnet_ocid" {
-  default     = ""
-  description = "The OCID of the subnet where the Kubernetes worker nodes will be hosted"
-}
-variable "existent_oke_load_balancer_subnet_ocid" {
-  default     = ""
-  description = "The OCID of the subnet where the Kubernetes load balancers will be hosted"
-}
-variable "existent_oke_vcn_native_pod_networking_subnet_ocid" {
-  default     = ""
-  description = "The OCID of the subnet where the Kubernetes VCN Native Pod Networking will be hosted"
-}
-variable "existent_oke_fss_mount_targets_subnet_ocid" {
-  default     = ""
-  description = "The OCID of the subnet where the Kubernetes FSS mount targets will be hosted"
-}
-# variable "existent_apigw_fn_subnet_ocid" {
-#   default     = ""
-#   description = "The OCID of the subnet where the API Gateway and Functions will be hosted"
-# }
-
-
 # OKE Subnets definitions
 locals {
   subnets_oke = concat(local.subnets_oke_standard, local.subnet_vcn_native_pod_networking, local.subnet_bastion, local.subnet_fss_mount_targets)
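
Because Terraform loads every *.tf file in a module's directory, moving declarations between files is behavior-neutral: callers keep supplying values exactly as before. A minimal terraform.tfvars sketch against the variables shown in this diff (values are illustrative, not taken from the commit):

# terraform.tfvars -- illustrative values only
create_new_oke_cluster               = true
cluster_autoscaler_enabled           = true
node_pool_initial_num_worker_nodes_1 = 3    # autoscaler minimum for pool1
node_pool_max_num_worker_nodes_1     = 10   # autoscaler maximum for pool1
cluster_endpoint_visibility          = "Private" # per its description, Private requires extra setup to run kubectl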
