@@ -50,7 +50,7 @@ deploy_pmm_server() {
 			--set platform="${platform}" \
 			"https://percona-charts.storage.googleapis.com/pmm-server-${PMM_SERVER_VERSION}.tgz"
 	fi
-	SERVICE="postgres"
+	local SERVICE="postgres"
 	until kubectl -n "${NAMESPACE}" exec monitoring-0 -- bash -c "pgrep -x $SERVICE >/dev/null"; do
 		echo "Retry $retry"
 		sleep 5
@@ -63,13 +63,13 @@ deploy_pmm_server() {
 }
 
 get_pmm_api_key() {
-	ADMIN_PASSWORD=$(kubectl -n "${NAMESPACE}" exec monitoring-0 -- bash -c "printenv | grep ADMIN_PASSWORD | cut -d '=' -f2")
+	local ADMIN_PASSWORD=$(kubectl -n "${NAMESPACE}" exec monitoring-0 -- bash -c "printenv | grep ADMIN_PASSWORD | cut -d '=' -f2")
 	echo $(curl --insecure -X POST -H "Content-Type: application/json" -d '{"name":"operator", "role": "Admin"}' "https://admin:$ADMIN_PASSWORD@"$(get_service_ip monitoring-service)"/graph/api/auth/keys" | jq .key)
 }
 
 deploy_minio() {
-	accessKey="$(kubectl -n "${NAMESPACE}" get secret minio-secret -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 -d)"
-	secretKey="$(kubectl -n "${NAMESPACE}" get secret minio-secret -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 -d)"
+	local accessKey="$(kubectl -n "${NAMESPACE}" get secret minio-secret -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 -d)"
+	local secretKey="$(kubectl -n "${NAMESPACE}" get secret minio-secret -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 -d)"
 
 	helm uninstall -n "${NAMESPACE}" minio-service || :
 	helm repo remove minio || :
@@ -299,6 +299,7 @@ get_mysql_users() {
 
 get_service_ip() {
 	local service=$1
+
 	while (kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}' 2>&1 || :) | grep -q NotFound; do
 		sleep 1
 	done
@@ -379,16 +380,43 @@ wait_pod() {
 	set -o xtrace
 }
 
+wait_deployment() {
+	local name=$1
+	local target_namespace=${2:-"$namespace"}
+
+	sleep 10
+	set +o xtrace
+	retry=0
+	echo -n $name
+	until [ -n "$(kubectl -n ${target_namespace} get deployment $name -o jsonpath='{.status.replicas}')" \
+		-a "$(kubectl -n ${target_namespace} get deployment $name -o jsonpath='{.status.replicas}')" \
+		== "$(kubectl -n ${target_namespace} get deployment $name -o jsonpath='{.status.readyReplicas}')" ]; do
+		sleep 1
+		echo -n .
+		let retry+=1
+		if [ $retry -ge 360 ]; then
+			kubectl logs $(get_operator_pod) -c operator \
+				| grep -v 'level=info' \
+				| grep -v 'level=debug' \
+				| tail -100
+			echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
+			exit 1
+		fi
+	done
+	echo
+	set -o xtrace
+}
+
 check_auto_tuning() {
-	RAM_SIZE=$1
-	RDS_MEM_INSTANCE=12582880
-	CUSTOM_INNODB_SIZE=$2
-	CUSTOM_CONNECTIONS=$3
+	local RAM_SIZE=$1
+	local RDS_MEM_INSTANCE=12582880
+	local CUSTOM_INNODB_SIZE=$2
+	local CUSTOM_CONNECTIONS=$3
 
-	INNODB_SIZE=$(run_mysql \
+	local INNODB_SIZE=$(run_mysql \
 		'SELECT @@innodb_buffer_pool_size;' \
 		"-h $(get_haproxy_svc "$(get_cluster_name)") -uroot -proot_password")
-	CONNECTIONS=$(run_mysql \
+	local CONNECTIONS=$(run_mysql \
 		'SELECT @@max_connections;' \
 		"-h $(get_haproxy_svc "$(get_cluster_name)") -uroot -proot_password")
 
@@ -451,9 +479,8 @@ get_primary_from_haproxy() {
 verify_certificate_sans() {
 	local certificate=$1
 	local expected_sans=$2
-
-	have=$(mktemp)
-	want=$(mktemp)
+	local have=$(mktemp)
+	local want=$(mktemp)
 
 	kubectl -n "${NAMESPACE}" get certificate "${certificate}" -o jsonpath='{.spec.dnsNames}' | jq '.' > "${have}"
 	echo "${expected_sans}" | jq '.' > "${want}"
@@ -462,21 +489,19 @@ verify_certificate_sans() {
 }
 
 check_passwords_leak() {
-
-	secrets=$(kubectl get secrets -o json | jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12")) | not) | .value')
-
-	passwords="$(for i in $secrets; do base64 -d <<< $i; echo; done) $secrets"
-	pods=$(kubectl -n "${NAMESPACE}" get pods -o name | awk -F "/" '{print $2}')
+	local secrets=$(kubectl get secrets -o json | jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12")) | not) | .value')
+	local passwords="$(for i in $secrets; do base64 -d <<< $i; echo; done) $secrets"
+	local pods=$(kubectl -n "${NAMESPACE}" get pods -o name | awk -F "/" '{print $2}')
 
 	collect_logs() {
 		NS=$1
 		for p in $pods; do
-			containers=$(kubectl -n "$NS" get pod $p -o jsonpath='{.spec.containers[*].name}')
+			local containers=$(kubectl -n "$NS" get pod $p -o jsonpath='{.spec.containers[*].name}')
 			for c in $containers; do
 				kubectl -n "$NS" logs $p -c $c > ${TEMP_DIR}/logs_output-$p-$c.txt
 				echo logs saved in: ${TEMP_DIR}/logs_output-$p-$c.txt
 				for pass in $passwords; do
-					count=$(grep -c --fixed-strings -- "$pass" ${TEMP_DIR}/logs_output-$p-$c.txt || :)
+					local count=$(grep -c --fixed-strings -- "$pass" ${TEMP_DIR}/logs_output-$p-$c.txt || :)
 					if [[ $count != 0 ]]; then
 						echo leaked passwords are found in log ${TEMP_DIR}/logs_output-$p-$c.txt
 						false
@@ -489,7 +514,77 @@ check_passwords_leak() {
 
 	collect_logs $NAMESPACE
 	if [ -n "$OPERATOR_NS" ]; then
-		pods=$(kubectl -n "${OPERATOR_NS}" get pods -o name | awk -F "/" '{print $2}')
+		local pods=$(kubectl -n "${OPERATOR_NS}" get pods -o name | awk -F "/" '{print $2}')
 		collect_logs $OPERATOR_NS
 	fi
 }
+
+deploy_chaos_mesh() {
+	destroy_chaos_mesh
+
+	helm repo add chaos-mesh https://charts.chaos-mesh.org
+	helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${NAMESPACE} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
+	sleep 10
+}
+
+destroy_chaos_mesh() {
+	local chaos_mesh_ns=$(helm list --all-namespaces --filter chaos-mesh | tail -n1 | awk -F' ' '{print $2}' | sed 's/NAMESPACE//')
+
+	for i in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do timeout 30 kubectl delete ${i} --all --all-namespaces || :; done
+	if [ -n "${chaos_mesh_ns}" ]; then
+		helm uninstall chaos-mesh --namespace ${chaos_mesh_ns} || :
+	fi
+	timeout 30 kubectl delete crd $(kubectl get crd | grep 'chaos-mesh.org' | awk '{print $1}') || :
+	timeout 30 kubectl delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'chaos-mesh' | awk '{print $1}') || :
+	timeout 30 kubectl delete clusterrole $(kubectl get clusterrole | grep 'chaos-mesh' | awk '{print $1}') || :
+	timeout 30 kubectl delete MutatingWebhookConfiguration $(kubectl get MutatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
+	timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
+	timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'validate-auth' | awk '{print $1}') || :
+}
+
+kill_pods() {
+	local ns=$1
+	local selector=$2
+	local pod_label=$3
+	local label_value=$4
+
+	if [ "${selector}" == "pod" ]; then
+		yq eval '
+			.metadata.name = "chaos-pod-kill-'${RANDOM}'" |
+			del(.spec.selector.pods.test-namespace) |
+			.spec.selector.pods.'${ns}'[0] = "'${pod_label}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml \
+			| kubectl apply --namespace ${ns} -f -
+	elif [ "${selector}" == "label" ]; then
+		yq eval '
+			.metadata.name = "chaos-kill-label-'${RANDOM}'" |
+			.spec.mode = "all" |
+			del(.spec.selector.pods) |
+			.spec.selector.labelSelectors."'${pod_label}'" = "'${label_value}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml \
+			| kubectl apply --namespace ${ns} -f -
+	fi
+	sleep 5
+}
+
+failure_pod() {
+	local ns=$1
+	local pod=$2
+
+	yq eval '
+		.metadata.name = "chaos-pod-failure-'${RANDOM}'" |
+		del(.spec.selector.pods.test-namespace) |
+		.spec.selector.pods.'${ns}'[0] = "'${pod}'"' ${TESTS_CONFIG_DIR}/chaos-pod-failure.yml \
+		| kubectl apply --namespace ${ns} -f -
+	sleep 5
+}
+
+network_loss() {
+	local ns=$1
+	local pod=$2
+
+	yq eval '
+		.metadata.name = "chaos-pod-network-loss-'${RANDOM}'" |
+		del(.spec.selector.pods.test-namespace) |
+		.spec.selector.pods.'${ns}'[0] = "'${pod}'"' ${TESTS_CONFIG_DIR}/chaos-network-loss.yml \
+		| kubectl apply --namespace ${ns} -f -
+	sleep 5
+}