Skip to content

Commit c8a5e87

Browse files
authored
Merge pull request #67 from Lemoncode/aks-segunda-edicion
Aks segunda edición
2 parents dac0cfb + 0e7448f commit c8a5e87

File tree

12 files changed

+229
-45
lines changed

12 files changed

+229
-45
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,3 +30,4 @@ app/
3030
.aws/
3131
lab/
3232
private.readme.md
33+
04-cloud/00-aks/03-virtual-kubelet/auth.json
Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
# Local .terraform directories
**/.terraform/*

# .tfstate files
*.tfstate
*.tfstate.*

# Crash log files
crash.log
crash.*.log

# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# passwords, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
#
*.tfvars

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*

# Ignore CLI configuration files
.terraformrc
terraform.rc

# NOTE(review): HashiCorp recommends COMMITTING .terraform.lock.hcl so provider
# versions are reproducible across machines — confirm this ignore is intentional.
.terraform.lock.hcl

04-cloud/00-aks/00-mi-primer-aks/00-crear-mi-primer-aks.sh

Lines changed: 13 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,20 +7,24 @@ brew install azure-cli
77
#También podemos crear un contenedor con Azure CLI. Así no tenemos que instalarlo en nuestro local ;-)
88
docker run -it --rm microsoft/azure-cli sh
99

10+
# Para ver el subconjunto de servicios que puedes gestionar a través de la línea de comandos:
11+
az
12+
1013
#Iniciamos sesión en nuestra cuenta de Azure
1114
az login
1215

1316
#Creamos un grupo de recursos en una ubicación concreta
14-
RESOURCE_GROUP="Mi-Primer-AKS"
17+
RESOURCE_GROUP="Lemoncode-CLI"
1518
LOCATION="northeurope"
1619

1720
az group create -n ${RESOURCE_GROUP} -l ${LOCATION}
1821

1922
#Creamos el clúster de AKS
20-
AKS_NAME="lemoncode-aks"
23+
AKS_NAME="aks-lemoncode-cli"
2124

2225
#https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az_aks_create
23-
az aks create -g ${RESOURCE_GROUP} -n ${AKS_NAME} \
26+
az aks create -g ${RESOURCE_GROUP} \
27+
-n ${AKS_NAME} \
2428
--node-count 1 --generate-ssh-keys
2529

2630
#Instalamos kubectl en local si no lo tenemos. En este caso en el contenedor con Azure CLI
@@ -32,14 +36,17 @@ az aks get-credentials -g ${RESOURCE_GROUP} -n ${AKS_NAME}
3236
#Recuperamos los nodos de nuestro clúster (en este ejemplo solo deberíamos de tener 1)
3337
kubectl get nodes
3438

35-
#Recuperamos todos los servicios desplegados en nuestro clúster
36-
kubectl get services --all-namespaces
37-
3839
#Escalar el número de nodos en el clúster
3940
az aks scale -g ${RESOURCE_GROUP} -n ${AKS_NAME} --node-count 3
4041

4142
#Ahora deberíamos tener 3 nodos en lugar de 1
4243
kubectl get nodes
4344

45+
# Crear un Wordpress en nuestro clúster
46+
kubectl apply -f wordpress.yaml
47+
48+
# Comprobar lo que acabamos de desplegar
49+
kubectl get all -n wordpress
50+
4451
#Si eliminamos el grupo de recursos eliminaremos el clúster
4552
az group delete -n ${RESOURCE_GROUP} --yes --no-wait
Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
# Azure Resource Manager provider; the empty features block is required.
provider "azurerm" {
  features {}
}

# Resource group that will hold the AKS cluster.
resource "azurerm_resource_group" "rg" {
  name     = "Lemoncode-Terraform"
  location = "West Europe"
}

# Minimal AKS cluster: one system node and a system-assigned managed identity
# (no service principal credentials to rotate).
resource "azurerm_kubernetes_cluster" "k8s" {
  name                = "aks-lemoncode-tf"
  resource_group_name = azurerm_resource_group.rg.name
  location            = azurerm_resource_group.rg.location
  dns_prefix          = "aks-lemoncode-tf"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"
  }

  identity {
    type = "SystemAssigned"
  }
}
Lines changed: 113 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,113 @@
# Namespace holding every WordPress resource.
apiVersion: v1
kind: Namespace
metadata:
  name: wordpress

---
# MySQL backend for WordPress.
# NOTE(review): the DB password is an inline plain-text env value and the data
# lives in an emptyDir (lost on pod restart) — fine for a demo, but use a
# Kubernetes Secret and a PersistentVolumeClaim for anything real.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wordpress-mysql
  namespace: wordpress
  labels:
    app: wordpress
spec:
  selector:
    matchLabels:
      app: wordpress
      tier: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: wordpress
        tier: mysql
    spec:
      containers:
        - image: mysql:5.6
          name: mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: wp_password
          ports:
            - containerPort: 3306
              name: mysql
          volumeMounts:
            - name: mysql-storage
              mountPath: /var/lib/mysql
      volumes:
        - name: mysql-storage
          emptyDir: {}

---
# Headless service (clusterIP: None) so WordPress reaches MySQL by DNS name.
apiVersion: v1
kind: Service
metadata:
  name: wordpress-mysql
  namespace: wordpress
  labels:
    app: wordpress
spec:
  ports:
    - port: 3306
  selector:
    app: wordpress
    tier: mysql
  clusterIP: None

---
# WordPress frontend, two replicas; DB host/password must match the MySQL
# deployment above.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wordpress
  namespace: wordpress
  labels:
    app: wordpress
spec:
  selector:
    matchLabels:
      app: wordpress
      tier: frontend
  replicas: 2
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: wordpress
        tier: frontend
    spec:
      containers:
        - image: wordpress:4.8-apache
          name: wordpress
          env:
            - name: WORDPRESS_DB_HOST
              value: wordpress-mysql
            - name: WORDPRESS_DB_PASSWORD
              value: wp_password
          ports:
            - containerPort: 80
              name: wordpress
          volumeMounts:
            - name: wordpress-storage
              mountPath: /var/www/html
      volumes:
        - name: wordpress-storage
          emptyDir: {}

---
# Public entry point: LoadBalancer service exposing WordPress on port 80.
apiVersion: v1
kind: Service
metadata:
  name: wordpress
  namespace: wordpress
  labels:
    app: wordpress
spec:
  ports:
    - port: 80
  selector:
    app: wordpress
    tier: frontend
  type: LoadBalancer

04-cloud/00-aks/02-cluster-autoscaler/cluster-autoscaler.sh

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
21
# Variables
32
RESOURCE_GROUP="Cluster-Autoscaler"
43
AKS_NAME="lemoncode-autoscaler"
@@ -8,8 +7,10 @@ LOCATION="northeurope"
87
az group create -n ${RESOURCE_GROUP} -l ${LOCATION}
98

109
#Crear cluster de AKS
11-
az aks create -g ${RESOURCE_GROUP} -n ${AKS_NAME} \
12-
--node-count 1 --generate-ssh-keys
10+
az aks create -g ${RESOURCE_GROUP} \
11+
-n ${AKS_NAME} \
12+
--node-count 1 \
13+
--generate-ssh-keys
1314

1415
# Recuperar el contexto para este clúster
1516
az aks get-credentials -n $AKS_NAME -g $RESOURCE_GROUP
@@ -30,7 +31,7 @@ az aks update \
3031
--name $AKS_NAME \
3132
--enable-cluster-autoscaler \
3233
--min-count 1 \
33-
--max-count 3
34+
--max-count 5
3435

3536
#Mientras esto se materializa puedes ver en el portal de Azure que el virtual machine scaleset está aumentando
3637
#el número de instancias. Este proceso puede llevar varios minutos ya que tiene que dar de alta las VMs

04-cloud/00-aks/03-virtual-kubelet/02-virtual-kubelet.sh

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -36,13 +36,13 @@ az network vnet subnet create \
3636
--address-prefixes 192.168.2.0/24
3737

3838
#Crear un service principal
39-
az ad sp create-for-rbac --skip-assignment
40-
SP_ID="a53cffe7-cb13-40d5-aefd-e36c5569869c"
41-
SP_PASSWORD="qqbZ0z3l2Zac104_wvh0HBj_-KyTzFeVO~"
39+
az ad sp create-for-rbac --name kubelet-demo > auth.json
40+
CLIENT_ID=$(jq -r '.appId' auth.json)
41+
PASSWORD=$(jq -r '.password' auth.json)
4242

4343
#Asignamos permisos a la red virtual para que el cluster pueda gestionarla
4444
VNET_ID=$(az network vnet show --resource-group $RESOURCE_GROUP --name $AKS_VNET --query id -o tsv)
45-
az role assignment create --assignee $SP_ID --scope $VNET_ID --role Contributor
45+
az role assignment create --assignee $CLIENT_ID --scope $VNET_ID --role Contributor
4646

4747
#Obtenemos el ID de la subnet donde va a ir el cluster de AKS
4848
SUBNET_ID=$(az network vnet subnet show --resource-group $RESOURCE_GROUP --vnet-name $AKS_VNET --name $AKS_SUBNET --query id -o tsv)
@@ -53,12 +53,9 @@ az aks create \
5353
--name $AKS_NAME \
5454
--node-count 1 \
5555
--network-plugin azure \
56-
--service-cidr 10.0.0.0/16 \
57-
--dns-service-ip 10.0.0.10 \
58-
--docker-bridge-address 172.17.0.1/16 \
5956
--vnet-subnet-id $SUBNET_ID \
60-
--service-principal $SP_ID \
61-
--client-secret $SP_PASSWORD
57+
--service-principal $CLIENT_ID \
58+
--client-secret $PASSWORD
6259

6360
# Recuperar el contexto para este clúster
6461
az aks get-credentials -n $AKS_NAME -g $RESOURCE_GROUP

04-cloud/00-aks/03-virtual-kubelet/manifests/nginx-deployment.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ metadata:
55
labels:
66
app: nginx
77
spec:
8-
replicas: 3
8+
replicas: 20
99
selector:
1010
matchLabels:
1111
app: nginx

04-cloud/00-aks/04-keda/03-keda.sh

Lines changed: 21 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,22 @@
1+
#KEDA
2+
# https://www.returngis.net/2020/06/autoescalar-tus-aplicaciones-en-kubernetes-con-keda/
3+
4+
#Variables
5+
RESOURCE_GROUP="KEDA"
6+
AKS_NAME="lemoncode-keda"
7+
8+
#Creamos el grupo de recursos
9+
az group create -n ${RESOURCE_GROUP} -l ${LOCATION}
10+
11+
#Creamos un cluster
12+
az aks create -g ${RESOURCE_GROUP} \
13+
-n ${AKS_NAME} \
14+
--node-count 1 \
15+
--generate-ssh-keys
16+
17+
#Configuramos kubectl para comunicarnos con nuestro nuevo clúster
18+
az aks get-credentials -g ${RESOURCE_GROUP} -n ${AKS_NAME}
19+
120
#Para entender KEDA primero necesitas saber cómo autoescalan los pods dentro de un clúster
221

322
### Ejemplo de autoescalado sin KEDA
@@ -6,34 +25,17 @@ kubectl apply -f 04-cloud/00-aks/04-keda/manifests/autoscale-with-hpa.yml
625
kubectl autoscale deployment web --cpu-percent=30 --min=1 --max=5
726
kubectl get hpa --watch
827

9-
ab -n 50000 -c 200 http://51.104.177.27/
28+
ab -n 50000 -c 200 http://20.82.253.97/
1029

1130
kubectl describe hpa web
1231

13-
#KEDA
14-
# https://www.returngis.net/2020/06/autoescalar-tus-aplicaciones-en-kubernetes-con-keda/
15-
16-
#Variables
17-
RESOURCE_GROUP="KEDA"
18-
AKS_NAME="lemoncode-keda"
19-
2032
#Instalar Helm
2133
brew install helm
2234

2335
#Añadir el repo de KEDA
2436
helm repo add kedacore https://kedacore.github.io/charts
2537
helm repo update
2638

27-
#Creamos el grupo de recursos
28-
az group create -n ${RESOURCE_GROUP} -l ${LOCATION}
29-
30-
#Creamos un cluster
31-
az aks create -g ${RESOURCE_GROUP} -n ${AKS_NAME} \
32-
--node-count 1 --generate-ssh-keys
33-
34-
#Configuramos kubectl para comunicarnos con nuestro nuevo clúster
35-
az aks get-credentials -g ${RESOURCE_GROUP} -n ${AKS_NAME}
36-
3739
#Creamos un namespace llamado keda
3840
kubectl create namespace keda
3941

@@ -47,7 +49,7 @@ kubectl get pods -n keda --watch
4749
#Para ello nos apoyamos en un servicio llamado Azure Storage
4850

4951
#Creamos una cuenta de almacenamiento
50-
STORAGE_NAME="boxoftasks"
52+
STORAGE_NAME="lemonboxoftasks"
5153
az storage account create --name $STORAGE_NAME --resource-group $RESOURCE_GROUP
5254
ACCOUNT_KEY=$(az storage account keys list --resource-group $RESOURCE_GROUP --account-name $STORAGE_NAME --query "[0].value" --output tsv)
5355

04-cloud/00-aks/05-azure-active-directory/azure-ad-aks.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ az aks get-credentials --resource-group $RESOURCE_GROUP --name $AKS_NAME --overw
5757

5858
#Vamos a probar the north remembers creando un pod dentro de este namespace
5959
#Lanzar este comando te pedirá autenticación
60-
kubectl run --generator=run-pod/v1 nginx-north --image=nginx --namespace the-north-remembers
60+
kubectl run nginx-north --image=nginx --namespace the-north-remembers
6161
kubectl get po -n the-north-remembers
6262

6363
#Si intentamos acceder al namespace de los Lanister o el default nos dará error
@@ -69,7 +69,7 @@ kubectl get po
6969
az aks get-credentials --resource-group $RESOURCE_GROUP --name $AKS_NAME --overwrite-existing
7070

7171
#Intentamos la misma operación pero en el namespace de los Lanister
72-
kubectl run --generator=run-pod/v1 nginx-kings-landing --image=nginx --namespace kings-landing
72+
kubectl run nginx-kings-landing --image=nginx --namespace kings-landing
7373
#Si intentamos entrar en The North Remembers da un error
7474
kubectl get po -n the-north-remembers
7575

0 commit comments

Comments
 (0)