This project sets up a Kubernetes cluster using kubeadm, Flannel, and Vagrant, then deploys an NGINX server using Helm and Terraform.
- Vagrant and VirtualBox installed on your system
- Helm and Terraform installed on your system
- Basic knowledge of Kubernetes, Helm, and Terraform
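A quick sanity check that the prerequisite tools are installed (the reported versions will vary):
vagrant --version
VBoxManage --version
helm version
terraform version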
mkdir k8s-vagrant
cd k8s-vagrant
# Create a Vagrantfile in this directory with the following content:
Vagrant.configure("2") do |config|
  config.vm.define "master" do |master|
    master.vm.box = "ubuntu/bionic64"
    master.vm.hostname = "master"
    master.vm.network "private_network", ip: "192.168.50.10"
    master.vm.provider "virtualbox" do |vb|
      vb.memory = "2048"
      vb.cpus = 2
    end
  end

  (1..2).each do |i|
    config.vm.define "worker#{i}" do |worker|
      worker.vm.box = "ubuntu/bionic64"
      worker.vm.hostname = "worker#{i}"
      worker.vm.network "private_network", ip: "192.168.50.1#{i}"
      worker.vm.provider "virtualbox" do |vb|
        vb.memory = "2048"
        vb.cpus = 2
      end
    end
  end
end
vagrant up
# SSH into each node (use a separate terminal per node; the setup steps below run on all three):
vagrant ssh master
vagrant ssh worker1
vagrant ssh worker2
# On every node, edit the hosts file using vim or nano:
sudo vim /etc/hosts
# Add DNS entries for the master and workers:
192.168.50.10 master
192.168.50.11 worker1
192.168.50.12 worker2
#save and quit
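To confirm the entries work, you can ping each node by name from any of the VMs:
ping -c 2 master
ping -c 2 worker1
ping -c 2 worker2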
# Disable swap:
sudo swapoff -a
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

# Create config file for modules:
sudo tee /etc/modules-load.d/containerd.conf <<EOF
overlay
br_netfilter
EOF

# Load modules:
sudo modprobe overlay
sudo modprobe br_netfilter

# Create another config file for sysctl:
sudo tee /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

# Apply sysctl parameters:
sudo sysctl --system
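You can verify the modules are loaded and the sysctl values took effect:
lsmod | grep -E 'overlay|br_netfilter'
sysctl net.ipv4.ip_forward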
# Update the apt source list:
sudo apt-get update

# Install containerd (the docker.io package also ships containerd):
sudo apt-get install docker.io -y

# Configure containerd for the cgroup driver used by kubeadm (systemd):
sudo mkdir -p /etc/containerd
sudo containerd config default | sudo tee /etc/containerd/config.toml
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml

# Restart and enable containerd:
sudo systemctl restart containerd
sudo systemctl enable containerd
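containerd should now be running with the systemd cgroup driver; a quick check:
sudo systemctl status containerd --no-pager
sudo grep SystemdCgroup /etc/containerd/config.toml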
# Install helper tools:
sudo apt-get install -y apt-transport-https ca-certificates curl gpg
sudo mkdir -p -m 755 /etc/apt/keyrings

# Download the public signing key for the Kubernetes package repositories:
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

# Add the Kubernetes apt repository for v1.29:
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list

# Install kubelet, kubeadm, and kubectl, then pin their versions:
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
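Confirm the tools installed and are pinned:
kubeadm version
kubectl version --client
apt-mark showhold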
# Initialize the cluster on the master node (this takes a few minutes).
# If the master and workers are on a 192.168.x.x subnet (our case), use the 10.244.0.0/16 pod CIDR:
sudo kubeadm init --apiserver-advertise-address=192.168.50.10 --pod-network-cidr=10.244.0.0/16
# If your nodes are on another subnet (e.g. 10.10.x.x), use the 192.168.0.0/16 pod CIDR instead:
sudo kubeadm init --apiserver-advertise-address=10.10.X.X --pod-network-cidr=192.168.0.0/16
#Copy the kubeconfig file to the user's home directory and change ownership:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
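kubectl should now reach the API server; note that the master reports NotReady until a network plugin is installed:
kubectl get nodes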
# Install a network plugin:
# If the master and workers are on a 192.168.x.x subnet, apply Flannel (it matches the 10.244.0.0/16 pod CIDR):
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
# If the nodes are on another subnet (e.g. 10.10.x.x), install Calico instead (its default pod CIDR is 192.168.0.0/16):
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/tigera-operator.yaml
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/custom-resources.yaml
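Once the network plugin pods are running, the master should transition to Ready:
kubectl get pods -A
kubectl get nodes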
# Use the kubeadm join command printed when the cluster was initialized.
# If you missed it, regenerate it on the master (don't forget sudo):
sudo kubeadm token create --print-join-command
# Run the generated command on each worker node to join the cluster.
# It looks something like this:
# sudo kubeadm join 192.168.50.10:6443 --token zbgmvn.oc1keoodyvdqrnko \
#   --discovery-token-ca-cert-hash sha256:acbc86ffda15234ceb3493a81d10ef5d9601eee59c548303c7251a90336031fe
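Back on the master, the workers should appear and become Ready after a minute or two:
kubectl get nodes -o wide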
helm create nginx-chart
This generates the following structure:
nginx-chart
|-- Chart.yaml
|-- charts
|-- templates
| |-- NOTES.txt
| |-- _helpers.tpl
| |-- deployment.yaml
| |-- ingress.yaml
| `-- service.yaml
`-- values.yaml
cd nginx-chart
# Remove the generated templates; we will write minimal ones instead:
rm -rf templates/*
Create templates/deployment.yaml with the following content:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}
  labels:
    {{- include "nginx-chart.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "nginx-chart.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "nginx-chart.selectorLabels" . | nindent 8 }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default "latest" }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
Create templates/service.yaml:
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-svc
  labels:
    {{- include "nginx-chart.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "nginx-chart.selectorLabels" . | nindent 4 }}
Create templates/_helpers.tpl with the named templates the manifests above reference:
{{/* Expand the name of the chart. */}}
{{- define "nginx-chart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/* Create chart name and version as used by the chart label. */}}
{{- define "nginx-chart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/* Common labels */}}
{{- define "nginx-chart.labels" -}}
helm.sh/chart: {{ include "nginx-chart.chart" . }}
{{ include "nginx-chart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/* Selector labels */}}
{{- define "nginx-chart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "nginx-chart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app: {{ .Release.Name }}
{{- end }}
Replace the contents of values.yaml with:
replicaCount: 1

image:
  repository: nginx
  pullPolicy: IfNotPresent
  tag: ""

service:
  type: NodePort
  port: 80
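Before wiring the chart into Terraform, it is worth validating it; this assumes you run the commands from inside the nginx-chart directory on a machine with Helm installed:
helm lint .
# Renders the manifests locally without installing anything:
helm template test-release .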
mkdir terraform-k8s-cluster
cd terraform-k8s-cluster
# From the k8s-vagrant folder (where the Vagrantfile lives), SSH to the master node:
vagrant ssh master
cat ~/.kube/config
# Copy the output, then exit the SSH session.
# Inside the terraform-k8s-cluster folder, create a directory named .kube:
mkdir .kube
cd .kube
vim config
# Paste the copied kubeconfig, save the file, and return to the parent folder:
cd ..
Create main.tf with the following content:
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.0"
    }
  }
  required_version = ">= 1.0"
}

provider "kubernetes" {
  config_path = var.kube_config_path
}

provider "helm" {
  kubernetes {
    config_path = var.kube_config_path
  }
}

module "nginx" {
  source           = "../terraform-apps/nginx"
  kube_config_path = var.kube_config_path
}

resource "kubernetes_namespace" "nginx-k8s" {
  metadata {
    name = "nginx-k8s"
  }
}
- Specifies the required Helm and Kubernetes providers.
- Configures the Kubernetes provider to use your kubeconfig file.
- Configures the Helm provider to use your Kubernetes context.
- Calls the nginx module, which deploys the local Nginx chart as a Helm release, and creates an nginx-k8s namespace.
variable "kube_config_path" {
description = "Path to the kubeconfig file"
type = string
default = "./.kube/config"
}
output "nginx_release_name" { description = "The name of the Nginx Helm release" value = module.nginx.nginx_release_name }
output "nginx_release_status" { description = "The status of the Nginx Helm release" value = module.nginx.nginx_release_status }
Next, create the nginx module alongside the cluster folder:
terraform-apps/
├── nginx/
│   ├── main.tf
│   ├── variables.tf
│   └── outputs.tf
The module's main.tf:
terraform {
  required_providers {
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2"
    }
  }
}

provider "helm" {
  kubernetes {
    config_path = var.kube_config_path
  }
}

resource "helm_release" "nginx" {
  name    = "nginx"
  chart   = "../terraform-apps/nginx/nginx-chart" # Path to your local Helm chart
  version = "0.1.0" # Must match the version field in the chart's Chart.yaml

  set {
    name  = "service.type"
    value = "NodePort"
  }
}
variable "kube_config_path" {
description = "Path to the kubeconfig file"
type = string
default = "./.kube/config"
}
output "nginx_release_name" {
description = "The name of the Helm release"
value = helm_release.nginx.name
}
output "nginx_release_status" {
description = "The status of the Helm release"
value = helm_release.nginx.status
}
data "kubernetes_service" "nginx" {
metadata {
name = helm_release.nginx.name
namespace = helm_release.nginx.namespace
}
}
# Move the Helm chart into the nginx module folder:
mv nginx-chart/ terraform-apps/nginx/
The final project layout:
project/
├── terraform-k8s-cluster/
│   ├── .kube/
│   │   └── config
│   ├── main.tf
│   ├── variables.tf
│   └── outputs.tf
└── terraform-apps/
    └── nginx/
        ├── nginx-chart/
        ├── main.tf
        ├── variables.tf
        └── outputs.tf
cd terraform-k8s-cluster
terraform init
terraform apply
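If the apply succeeds, you can inspect the release and its pods through the same kubeconfig (the --kubeconfig paths below assume you are still inside terraform-k8s-cluster):
terraform output
helm list --kubeconfig ./.kube/config
kubectl --kubeconfig ./.kube/config get pods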
# Find the NodePort that Kubernetes assigned to the nginx service:
kubectl --kubeconfig ./.kube/config get svc
# Then open the service in a browser at http://<WorkerNodeIP>:<NodePort>, for example:
http://192.168.50.11:31126
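The NodePort (31126 here) is assigned by Kubernetes, so substitute the port reported by kubectl get svc; a quick check from the host:
curl http://192.168.50.11:31126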