diff --git a/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json b/e2e-tests/demand-backup-physical-aws/compare/find-sharded.json similarity index 100% rename from e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json rename to e2e-tests/demand-backup-physical-aws/compare/find-sharded.json diff --git a/e2e-tests/demand-backup-physical-sharded/compare/find.json b/e2e-tests/demand-backup-physical-aws/compare/find.json similarity index 100% rename from e2e-tests/demand-backup-physical-sharded/compare/find.json rename to e2e-tests/demand-backup-physical-aws/compare/find.json diff --git a/e2e-tests/demand-backup-physical/compare/statefulset_some-name-rs0_restore-oc.yml b/e2e-tests/demand-backup-physical-aws/compare/statefulset_some-name-rs0_restore-oc.yml similarity index 100% rename from e2e-tests/demand-backup-physical/compare/statefulset_some-name-rs0_restore-oc.yml rename to e2e-tests/demand-backup-physical-aws/compare/statefulset_some-name-rs0_restore-oc.yml diff --git a/e2e-tests/demand-backup-physical/compare/statefulset_some-name-rs0_restore.yml b/e2e-tests/demand-backup-physical-aws/compare/statefulset_some-name-rs0_restore.yml similarity index 100% rename from e2e-tests/demand-backup-physical/compare/statefulset_some-name-rs0_restore.yml rename to e2e-tests/demand-backup-physical-aws/compare/statefulset_some-name-rs0_restore.yml diff --git a/e2e-tests/demand-backup-physical-sharded/conf/backup.yml b/e2e-tests/demand-backup-physical-aws/conf/backup-aws-s3.yml similarity index 100% rename from e2e-tests/demand-backup-physical-sharded/conf/backup.yml rename to e2e-tests/demand-backup-physical-aws/conf/backup-aws-s3.yml diff --git a/e2e-tests/demand-backup-physical-sharded/conf/restore.yml b/e2e-tests/demand-backup-physical-aws/conf/restore.yml similarity index 100% rename from e2e-tests/demand-backup-physical-sharded/conf/restore.yml rename to e2e-tests/demand-backup-physical-aws/conf/restore.yml diff --git 
a/e2e-tests/demand-backup-physical/conf/secrets.yml b/e2e-tests/demand-backup-physical-aws/conf/secrets.yml similarity index 100% rename from e2e-tests/demand-backup-physical/conf/secrets.yml rename to e2e-tests/demand-backup-physical-aws/conf/secrets.yml diff --git a/e2e-tests/demand-backup-physical-aws/conf/some-name.yml b/e2e-tests/demand-backup-physical-aws/conf/some-name.yml new file mode 100644 index 0000000000..14880f6ab7 --- /dev/null +++ b/e2e-tests/demand-backup-physical-aws/conf/some-name.yml @@ -0,0 +1,67 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + finalizers: + - percona.com/delete-psmdb-pvc + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + updateStrategy: SmartUpdate + backup: + enabled: true + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + aws-s3: + main: true + type: s3 + s3: + credentialsSecret: aws-s3-secret + region: us-east-1 + bucket: operator-testing + prefix: psmdb-demand-backup-physical + insecureSkipTLSVerify: false + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + secrets: + users: some-users diff --git a/e2e-tests/demand-backup-physical-aws/run b/e2e-tests/demand-backup-physical-aws/run new file mode 100755 index 
0000000000..0dd0767545 --- /dev/null +++ b/e2e-tests/demand-backup-physical-aws/run @@ -0,0 +1,63 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. "${test_dir}/../functions" +set_debug + +if [ -n "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then + desc 'Skip tests related to AWS S3 Cloud Storage' + exit 0 +fi + +create_infra "${namespace}" + +apply_s3_storage_secrets + +desc 'Testing on not sharded cluster' + +echo "Creating PSMDB cluster" +cluster="some-name" +kubectl_bin apply -f "${test_dir}/conf/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo "Check if all pods started" +wait_for_running ${cluster}-rs0 3 +wait_cluster_consistency ${cluster} + +sleep 60 # give time for resync to start + +wait_for_pbm_operations ${cluster} + +echo 'Writing test data' +run_mongo \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-rs0.${namespace}" +sleep 1 +run_mongo \ + 'use myApp\n db.test.insert({ x: 100500 })' \ + "myApp:myPass@${cluster}-rs0.${namespace}" +sleep 5 +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" + +echo 'Running backups' +backup_name_aws="backup-aws-s3" + +run_backup aws-s3 ${backup_name_aws} 'physical' +wait_backup "${backup_name_aws}" +check_backup_in_storage ${backup_name_aws} s3 rs0 + +echo 'Drop collection' +run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" + +echo 'Check backup and restore -- aws-s3' +run_restore ${backup_name_aws} +run_recovery_check ${backup_name_aws} + +destroy "$namespace" + +desc 'test passed' diff --git a/e2e-tests/demand-backup-physical/compare/find-sharded.json 
b/e2e-tests/demand-backup-physical-azure/compare/find-sharded.json similarity index 100% rename from e2e-tests/demand-backup-physical/compare/find-sharded.json rename to e2e-tests/demand-backup-physical-azure/compare/find-sharded.json diff --git a/e2e-tests/demand-backup-physical/compare/find.json b/e2e-tests/demand-backup-physical-azure/compare/find.json similarity index 100% rename from e2e-tests/demand-backup-physical/compare/find.json rename to e2e-tests/demand-backup-physical-azure/compare/find.json diff --git a/e2e-tests/demand-backup-physical-azure/compare/statefulset_some-name-rs0_restore-oc.yml b/e2e-tests/demand-backup-physical-azure/compare/statefulset_some-name-rs0_restore-oc.yml new file mode 100644 index 0000000000..274ba54ba5 --- /dev/null +++ b/e2e-tests/demand-backup-physical-azure/compare/statefulset_some-name-rs0_restore-oc.yml @@ -0,0 +1,259 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: 
percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + 
command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: 
some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-azure/compare/statefulset_some-name-rs0_restore.yml b/e2e-tests/demand-backup-physical-azure/compare/statefulset_some-name-rs0_restore.yml new file mode 100644 index 0000000000..08273fb3df --- /dev/null +++ b/e2e-tests/demand-backup-physical-azure/compare/statefulset_some-name-rs0_restore.yml @@ -0,0 +1,261 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + 
app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + 
- /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + 
imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical/conf/backup.yml b/e2e-tests/demand-backup-physical-azure/conf/backup-azure-blob.yml similarity index 100% rename from e2e-tests/demand-backup-physical/conf/backup.yml rename to e2e-tests/demand-backup-physical-azure/conf/backup-azure-blob.yml diff --git a/e2e-tests/demand-backup-physical/conf/restore.yml b/e2e-tests/demand-backup-physical-azure/conf/restore.yml similarity index 100% rename from e2e-tests/demand-backup-physical/conf/restore.yml rename to e2e-tests/demand-backup-physical-azure/conf/restore.yml diff --git 
a/e2e-tests/demand-backup-physical-azure/conf/secrets.yml b/e2e-tests/demand-backup-physical-azure/conf/secrets.yml new file mode 100644 index 0000000000..df4f20924b --- /dev/null +++ b/e2e-tests/demand-backup-physical-azure/conf/secrets.yml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: some-users +type: Opaque +data: + MONGODB_BACKUP_USER: YmFja3VwJCMl + MONGODB_BACKUP_PASSWORD: YmFja3VwMTIzNDU2Iw== + MONGODB_DATABASE_ADMIN_USER: ZGF0YWJhc2VBZG1pbg== + MONGODB_DATABASE_ADMIN_PASSWORD: ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== + MONGODB_USER_ADMIN_USER: dXNlckFkbWlu + MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu + MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I= + MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY= diff --git a/e2e-tests/demand-backup-physical-azure/conf/some-name.yml b/e2e-tests/demand-backup-physical-azure/conf/some-name.yml new file mode 100644 index 0000000000..9cf6240fb4 --- /dev/null +++ b/e2e-tests/demand-backup-physical-azure/conf/some-name.yml @@ -0,0 +1,64 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + finalizers: + - percona.com/delete-psmdb-pvc + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + updateStrategy: SmartUpdate + backup: + enabled: true + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + azure-blob: + type: azure + azure: + container: operator-testing + prefix: psmdb-demand-backup-physical + credentialsSecret: azure-secret + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + 
enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + secrets: + users: some-users diff --git a/e2e-tests/demand-backup-physical-azure/run b/e2e-tests/demand-backup-physical-azure/run new file mode 100755 index 0000000000..9d7bec3204 --- /dev/null +++ b/e2e-tests/demand-backup-physical-azure/run @@ -0,0 +1,62 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. "${test_dir}/../functions" +set_debug + +if [ -n "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then + desc 'Skip tests related to AZURE Cloud Storage' + exit 0 +fi + +create_infra "${namespace}" + +apply_s3_storage_secrets + +desc 'Testing on not sharded cluster' + +echo 'Creating PSMDB cluster' +cluster="some-name" +kubectl_bin apply -f "${test_dir}/conf/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo "check if all pods started" +wait_for_running ${cluster}-rs0 3 +wait_cluster_consistency ${cluster} + +sleep 60 # give time for resync to start + +wait_for_pbm_operations ${cluster} + +echo 'Writing test data' +run_mongo \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-rs0.${namespace}" +sleep 1 +run_mongo \ + 'use myApp\n db.test.insert({ x: 100500 })' \ + "myApp:myPass@${cluster}-rs0.${namespace}" +sleep 5 +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" + +echo 'Running AZURE backup' 
+backup_name_azure="backup-azure-blob" + +run_backup azure-blob ${backup_name_azure} 'physical' +wait_backup "${backup_name_azure}" +check_backup_in_storage ${backup_name_azure} azure rs0 + +echo 'Drop collection' +run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" +echo 'check backup and restore -- azure-blob' +run_restore ${backup_name_azure} +run_recovery_check ${backup_name_azure} + +destroy "$namespace" + +desc 'test passed' diff --git a/e2e-tests/demand-backup-physical-gcp/compare/find-sharded.json b/e2e-tests/demand-backup-physical-gcp/compare/find-sharded.json new file mode 100644 index 0000000000..117b05ec2b --- /dev/null +++ b/e2e-tests/demand-backup-physical-gcp/compare/find-sharded.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100501 } +bye diff --git a/e2e-tests/demand-backup-physical-gcp/compare/find.json b/e2e-tests/demand-backup-physical-gcp/compare/find.json new file mode 100644 index 0000000000..74495091bf --- /dev/null +++ b/e2e-tests/demand-backup-physical-gcp/compare/find.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100500 } +bye diff --git a/e2e-tests/demand-backup-physical-gcp/compare/statefulset_some-name-rs0_restore-oc.yml b/e2e-tests/demand-backup-physical-gcp/compare/statefulset_some-name-rs0_restore-oc.yml new file mode 100644 index 0000000000..274ba54ba5 --- /dev/null +++ b/e2e-tests/demand-backup-physical-gcp/compare/statefulset_some-name-rs0_restore-oc.yml @@ -0,0 +1,259 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + 
name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: 
mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: 
/data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-gcp/compare/statefulset_some-name-rs0_restore.yml b/e2e-tests/demand-backup-physical-gcp/compare/statefulset_some-name-rs0_restore.yml new file mode 100644 index 0000000000..08273fb3df --- /dev/null +++ b/e2e-tests/demand-backup-physical-gcp/compare/statefulset_some-name-rs0_restore.yml @@ -0,0 +1,261 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + 
annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: 
internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + 
name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 
3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-gcp/conf/backup-gcp-cs.yml b/e2e-tests/demand-backup-physical-gcp/conf/backup-gcp-cs.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-gcp/conf/backup-gcp-cs.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-gcp/conf/restore.yml b/e2e-tests/demand-backup-physical-gcp/conf/restore.yml new file mode 100644 index 0000000000..32ab3c4b9a --- /dev/null +++ b/e2e-tests/demand-backup-physical-gcp/conf/restore.yml @@ -0,0 +1,7 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBRestore +metadata: + name: +spec: + clusterName: some-name + backupName: diff --git a/e2e-tests/demand-backup-physical-gcp/conf/secrets.yml b/e2e-tests/demand-backup-physical-gcp/conf/secrets.yml new file mode 100644 index 0000000000..df4f20924b --- /dev/null +++ b/e2e-tests/demand-backup-physical-gcp/conf/secrets.yml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: some-users +type: Opaque +data: + MONGODB_BACKUP_USER: YmFja3VwJCMl + MONGODB_BACKUP_PASSWORD: YmFja3VwMTIzNDU2Iw== + MONGODB_DATABASE_ADMIN_USER: ZGF0YWJhc2VBZG1pbg== + MONGODB_DATABASE_ADMIN_PASSWORD: ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== + MONGODB_USER_ADMIN_USER: dXNlckFkbWlu + MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu + MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I= + MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY= diff --git a/e2e-tests/demand-backup-physical-gcp/conf/some-name.yml b/e2e-tests/demand-backup-physical-gcp/conf/some-name.yml new file mode 100644 index 0000000000..148b0b41c6 --- /dev/null +++ 
b/e2e-tests/demand-backup-physical-gcp/conf/some-name.yml @@ -0,0 +1,67 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + finalizers: + - percona.com/delete-psmdb-pvc + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + updateStrategy: SmartUpdate + backup: + enabled: true + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + gcp-cs: + type: s3 + s3: + credentialsSecret: gcp-cs-secret + region: us-east-1 + bucket: operator-testing + prefix: psmdb-demand-backup-physical + endpointUrl: https://storage.googleapis.com + insecureSkipTLSVerify: false + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + secrets: + users: some-users diff --git a/e2e-tests/demand-backup-physical-gcp/run b/e2e-tests/demand-backup-physical-gcp/run new file mode 100755 index 0000000000..6ad9305d5c --- /dev/null +++ b/e2e-tests/demand-backup-physical-gcp/run @@ -0,0 +1,62 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. 
"${test_dir}/../functions" +set_debug + +if [ -n "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then + desc 'Skip tests related to GCP Cloud Storage' + exit 0 +fi + +create_infra "${namespace}" + +apply_s3_storage_secrets + +desc 'Testing on not sharded cluster' + +echo "Creating PSMDB cluster" +cluster="some-name" +kubectl_bin apply -f "${test_dir}/conf/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo 'Check if all pods started' +wait_for_running ${cluster}-rs0 3 +wait_cluster_consistency ${cluster} + +sleep 60 # give time for resync to start + +wait_for_pbm_operations ${cluster} + +echo 'Writing test data' +run_mongo \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-rs0.${namespace}" +sleep 1 +run_mongo \ + 'use myApp\n db.test.insert({ x: 100500 })' \ + "myApp:myPass@${cluster}-rs0.${namespace}" +sleep 5 +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" + +echo 'Running GCP backups' +backup_name_gcp="backup-gcp-cs" + +run_backup gcp-cs ${backup_name_gcp} 'physical' +wait_backup "${backup_name_gcp}" +check_backup_in_storage ${backup_name_gcp} gcs rs0 + +echo 'Drop collection' +run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" +echo 'check backup and restore -- gcp-cs' +run_restore ${backup_name_gcp} +run_recovery_check ${backup_name_gcp} + +destroy "$namespace" + +desc 'test passed' diff --git a/e2e-tests/demand-backup-physical-minio/compare/find-sharded.json b/e2e-tests/demand-backup-physical-minio/compare/find-sharded.json new file mode 100644 index 0000000000..117b05ec2b --- /dev/null +++ b/e2e-tests/demand-backup-physical-minio/compare/find-sharded.json @@ -0,0 +1,3 @@ +switched to 
db myApp +{ "_id" : , "x" : 100501 } +bye diff --git a/e2e-tests/demand-backup-physical-minio/compare/find.json b/e2e-tests/demand-backup-physical-minio/compare/find.json new file mode 100644 index 0000000000..74495091bf --- /dev/null +++ b/e2e-tests/demand-backup-physical-minio/compare/find.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100500 } +bye diff --git a/e2e-tests/demand-backup-physical/compare/statefulset_some-name-rs0_restore-arbiter-nv-oc.yml b/e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore-arbiter-nv-oc.yml similarity index 100% rename from e2e-tests/demand-backup-physical/compare/statefulset_some-name-rs0_restore-arbiter-nv-oc.yml rename to e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore-arbiter-nv-oc.yml diff --git a/e2e-tests/demand-backup-physical/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml b/e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml similarity index 100% rename from e2e-tests/demand-backup-physical/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml rename to e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml diff --git a/e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore-oc.yml b/e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore-oc.yml new file mode 100644 index 0000000000..274ba54ba5 --- /dev/null +++ b/e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore-oc.yml @@ -0,0 +1,259 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + 
app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + 
apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: 
/dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore.yml b/e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore.yml new file mode 100644 index 0000000000..08273fb3df --- /dev/null +++ 
b/e2e-tests/demand-backup-physical-minio/compare/statefulset_some-name-rs0_restore.yml @@ -0,0 +1,261 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: 
MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: 
bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + 
volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-minio/conf/backup-minio.yml b/e2e-tests/demand-backup-physical-minio/conf/backup-minio.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-minio/conf/backup-minio.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-minio/conf/restore.yml b/e2e-tests/demand-backup-physical-minio/conf/restore.yml new file mode 100644 index 0000000000..32ab3c4b9a --- /dev/null +++ b/e2e-tests/demand-backup-physical-minio/conf/restore.yml @@ -0,0 +1,7 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBRestore +metadata: + name: +spec: + clusterName: some-name + backupName: diff --git a/e2e-tests/demand-backup-physical-minio/conf/secrets.yml b/e2e-tests/demand-backup-physical-minio/conf/secrets.yml new file mode 100644 index 0000000000..df4f20924b --- /dev/null +++ b/e2e-tests/demand-backup-physical-minio/conf/secrets.yml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: some-users +type: Opaque +data: + MONGODB_BACKUP_USER: YmFja3VwJCMl + MONGODB_BACKUP_PASSWORD: YmFja3VwMTIzNDU2Iw== + MONGODB_DATABASE_ADMIN_USER: ZGF0YWJhc2VBZG1pbg== + MONGODB_DATABASE_ADMIN_PASSWORD: ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== + MONGODB_USER_ADMIN_USER: dXNlckFkbWlu + MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu + MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I= + MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY= diff --git a/e2e-tests/demand-backup-physical/conf/some-name-arbiter-nv.yml 
b/e2e-tests/demand-backup-physical-minio/conf/some-name-arbiter-nv.yml similarity index 72% rename from e2e-tests/demand-backup-physical/conf/some-name-arbiter-nv.yml rename to e2e-tests/demand-backup-physical-minio/conf/some-name-arbiter-nv.yml index 5b4eeebc1c..efddd14ab0 100644 --- a/e2e-tests/demand-backup-physical/conf/some-name-arbiter-nv.yml +++ b/e2e-tests/demand-backup-physical-minio/conf/some-name-arbiter-nv.yml @@ -13,15 +13,6 @@ spec: enabled: true image: perconalab/percona-server-mongodb-operator:1.1.0-backup storages: - aws-s3: - main: true - type: s3 - s3: - credentialsSecret: aws-s3-secret - region: us-east-1 - bucket: operator-testing - prefix: psmdb-demand-backup-physical - insecureSkipTLSVerify: false minio: type: s3 s3: @@ -30,28 +21,6 @@ spec: bucket: operator-testing endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false - gcp-cs: - type: s3 - s3: - credentialsSecret: gcp-cs-secret - region: us-east-1 - bucket: operator-testing - prefix: psmdb-demand-backup-physical - endpointUrl: https://storage.googleapis.com - insecureSkipTLSVerify: false - azure-blob: - type: azure - azure: - container: operator-testing - prefix: psmdb-demand-backup-physical - credentialsSecret: azure-secret - - tasks: - - name: weekly - enabled: true - schedule: "0 0 * * 0" - compressionType: gzip - storageName: aws-s3 replsets: - name: rs0 affinity: diff --git a/e2e-tests/demand-backup-physical-minio/conf/some-name.yml b/e2e-tests/demand-backup-physical-minio/conf/some-name.yml new file mode 100644 index 0000000000..a4cc48eea5 --- /dev/null +++ b/e2e-tests/demand-backup-physical-minio/conf/some-name.yml @@ -0,0 +1,66 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + finalizers: + - percona.com/delete-psmdb-pvc + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + updateStrategy: SmartUpdate + backup: + enabled: true + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + 
minio: + type: s3 + s3: + credentialsSecret: minio-secret + region: us-east-1 + bucket: operator-testing + endpointUrl: http://minio-service:9000/ + insecureSkipTLSVerify: false + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + secrets: + users: some-users diff --git a/e2e-tests/demand-backup-physical-minio/run b/e2e-tests/demand-backup-physical-minio/run new file mode 100755 index 0000000000..8e2d310210 --- /dev/null +++ b/e2e-tests/demand-backup-physical-minio/run @@ -0,0 +1,75 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. 
"${test_dir}/../functions" +set_debug + +create_infra "${namespace}" +deploy_minio +apply_s3_storage_secrets + +desc 'Testing on not sharded cluster' + +echo 'Creating PSMDB cluster' +cluster="some-name" +kubectl_bin apply -f "${test_dir}/conf/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo "check if all pods started" +wait_for_running ${cluster}-rs0 3 +wait_cluster_consistency ${cluster} + +sleep 60 # give time for resync to start + +wait_for_pbm_operations ${cluster} + +echo 'Writing test data' +run_mongo \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-rs0.${namespace}" +sleep 1 +run_mongo \ + 'use myApp\n db.test.insert({ x: 100500 })' \ + "myApp:myPass@${cluster}-rs0.${namespace}" +sleep 5 +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" + +echo 'Running backups' +backup_name_minio="backup-minio" +run_backup minio ${backup_name_minio} 'physical' +wait_backup "${backup_name_minio}" + +echo 'Drop collection' +run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" +echo 'check backup and restore -- minio' +backup_dest_minio=$(get_backup_dest "${backup_name_minio}") +run_restore ${backup_name_minio} +run_recovery_check ${backup_name_minio} + +desc 'Testing with arbiter and non-voting nodes' + +apply_cluster "${test_dir}/conf/${cluster}-arbiter-nv.yml" +echo "check if all pods started" +wait_for_running ${cluster}-rs0 3 +wait_cluster_consistency ${cluster} + +echo 'Running backups' +backup_name_minio="backup-minio-arbiter-nv" +run_backup minio ${backup_name_minio} 'physical' +wait_backup "${backup_name_minio}" + +echo 'Drop collection' +run_mongo 'use myApp\n db.test.drop()' 
"myApp:myPass@${cluster}-rs0.${namespace}" +echo 'check backup and restore -- minio' +backup_dest_minio=$(get_backup_dest "${backup_name_minio}") +run_restore ${backup_name_minio} +run_recovery_check ${backup_name_minio} "_restore-arbiter-nv" + +destroy "$namespace" + +desc 'test passed' diff --git a/e2e-tests/demand-backup-physical-parallel/compare/find-sharded.json b/e2e-tests/demand-backup-physical-parallel/compare/find-sharded.json new file mode 100644 index 0000000000..117b05ec2b --- /dev/null +++ b/e2e-tests/demand-backup-physical-parallel/compare/find-sharded.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100501 } +bye diff --git a/e2e-tests/demand-backup-physical-parallel/compare/find.json b/e2e-tests/demand-backup-physical-parallel/compare/find.json new file mode 100644 index 0000000000..74495091bf --- /dev/null +++ b/e2e-tests/demand-backup-physical-parallel/compare/find.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100500 } +bye diff --git a/e2e-tests/demand-backup-physical-parallel/conf/backup-aws-s3.yml b/e2e-tests/demand-backup-physical-parallel/conf/backup-aws-s3.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-parallel/conf/backup-aws-s3.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-parallel/conf/backup-azure-blob.yml b/e2e-tests/demand-backup-physical-parallel/conf/backup-azure-blob.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-parallel/conf/backup-azure-blob.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git 
a/e2e-tests/demand-backup-physical-parallel/conf/backup-gcp-cs.yml b/e2e-tests/demand-backup-physical-parallel/conf/backup-gcp-cs.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-parallel/conf/backup-gcp-cs.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-parallel/conf/backup-minio.yml b/e2e-tests/demand-backup-physical-parallel/conf/backup-minio.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-parallel/conf/backup-minio.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-parallel/conf/secrets.yml b/e2e-tests/demand-backup-physical-parallel/conf/secrets.yml new file mode 100644 index 0000000000..df4f20924b --- /dev/null +++ b/e2e-tests/demand-backup-physical-parallel/conf/secrets.yml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: some-users +type: Opaque +data: + MONGODB_BACKUP_USER: YmFja3VwJCMl + MONGODB_BACKUP_PASSWORD: YmFja3VwMTIzNDU2Iw== + MONGODB_DATABASE_ADMIN_USER: ZGF0YWJhc2VBZG1pbg== + MONGODB_DATABASE_ADMIN_PASSWORD: ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== + MONGODB_USER_ADMIN_USER: dXNlckFkbWlu + MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu + MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I= + MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY= diff --git a/e2e-tests/demand-backup-physical/conf/some-name.yml b/e2e-tests/demand-backup-physical-parallel/conf/some-name.yml similarity index 100% 
rename from e2e-tests/demand-backup-physical/conf/some-name.yml rename to e2e-tests/demand-backup-physical-parallel/conf/some-name.yml diff --git a/e2e-tests/demand-backup-physical-parallel/run b/e2e-tests/demand-backup-physical-parallel/run new file mode 100755 index 0000000000..87acc78d96 --- /dev/null +++ b/e2e-tests/demand-backup-physical-parallel/run @@ -0,0 +1,68 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. "${test_dir}/../functions" +set_debug + +create_infra "${namespace}" + +deploy_minio +apply_s3_storage_secrets + +desc 'Testing on not sharded cluster' + +echo 'Creating PSMDB cluster' +cluster="some-name" +kubectl_bin apply -f "${test_dir}/conf/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo 'Check if all pods started' +wait_for_running ${cluster}-rs0 3 +wait_cluster_consistency ${cluster} + +sleep 60 # give time for resync to start + +wait_for_pbm_operations ${cluster} + +echo 'Writing test data' +run_mongo \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-rs0.${namespace}" +sleep 1 +run_mongo \ + 'use myApp\n db.test.insert({ x: 100500 })' \ + "myApp:myPass@${cluster}-rs0.${namespace}" +sleep 5 +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" +compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" + +echo 'Running backups' +backup_name_minio="backup-minio" +run_backup minio ${backup_name_minio} 'physical' +if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then + backup_name_aws="backup-aws-s3" + backup_name_gcp="backup-gcp-cs" + backup_name_azure="backup-azure-blob" + + run_backup aws-s3 ${backup_name_aws} 'physical' + run_backup gcp-cs ${backup_name_gcp} 'physical' + run_backup azure-blob ${backup_name_azure} 'physical' 
+ + wait_backup "${backup_name_aws}" + check_backup_in_storage ${backup_name_aws} s3 rs0 + + wait_backup "${backup_name_gcp}" + check_backup_in_storage ${backup_name_gcp} gcs rs0 + + wait_backup "${backup_name_azure}" + check_backup_in_storage ${backup_name_azure} azure rs0 +fi +wait_backup "${backup_name_minio}" + +destroy "$namespace" + +desc 'test passed' diff --git a/e2e-tests/demand-backup-physical-sharded-aws/compare/find-sharded.json b/e2e-tests/demand-backup-physical-sharded-aws/compare/find-sharded.json new file mode 100644 index 0000000000..117b05ec2b --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-aws/compare/find-sharded.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100501 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded-aws/compare/find.json b/e2e-tests/demand-backup-physical-sharded-aws/compare/find.json new file mode 100644 index 0000000000..74495091bf --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-aws/compare/find.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100500 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml b/e2e-tests/demand-backup-physical-sharded-aws/compare/statefulset_some-name-rs0_restore_sharded-oc.yml similarity index 100% rename from e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml rename to e2e-tests/demand-backup-physical-sharded-aws/compare/statefulset_some-name-rs0_restore_sharded-oc.yml diff --git a/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml b/e2e-tests/demand-backup-physical-sharded-aws/compare/statefulset_some-name-rs0_restore_sharded.yml similarity index 100% rename from e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml rename to e2e-tests/demand-backup-physical-sharded-aws/compare/statefulset_some-name-rs0_restore_sharded.yml diff --git 
a/e2e-tests/demand-backup-physical-sharded-aws/conf/backup-aws-s3.yml b/e2e-tests/demand-backup-physical-sharded-aws/conf/backup-aws-s3.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-aws/conf/backup-aws-s3.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-sharded-aws/conf/restore.yml b/e2e-tests/demand-backup-physical-sharded-aws/conf/restore.yml new file mode 100644 index 0000000000..32ab3c4b9a --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-aws/conf/restore.yml @@ -0,0 +1,7 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBRestore +metadata: + name: +spec: + clusterName: some-name + backupName: diff --git a/e2e-tests/demand-backup-physical-sharded-aws/conf/secrets.yml b/e2e-tests/demand-backup-physical-sharded-aws/conf/secrets.yml new file mode 100644 index 0000000000..61fc9675b0 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-aws/conf/secrets.yml @@ -0,0 +1,16 @@ +kind: Secret +apiVersion: v1 +metadata: + name: some-users +data: + MONGODB_BACKUP_PASSWORD: YmFja3VwMTIzNDU2 + MONGODB_BACKUP_USER: YmFja3Vw + MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu + MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY= + MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I= + MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2 + MONGODB_USER_ADMIN_USER: dXNlckFkbWlu + MONGODB_DATABASE_ADMIN_USER: ZGF0YWJhc2VBZG1pbg== + MONGODB_DATABASE_ADMIN_PASSWORD: ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== +type: Opaque diff --git a/e2e-tests/demand-backup-physical-sharded-aws/conf/some-name-sharded.yml b/e2e-tests/demand-backup-physical-sharded-aws/conf/some-name-sharded.yml new file mode 100644 index 0000000000..f798b0ac97 
--- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-aws/conf/some-name-sharded.yml @@ -0,0 +1,120 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + updateStrategy: SmartUpdate + backup: + enabled: true + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + aws-s3: + main: true + type: s3 + s3: + credentialsSecret: aws-s3-secret + region: us-east-1 + bucket: operator-testing + prefix: psmdb-demand-backup-physical-sharded + insecureSkipTLSVerify: false + sharding: + enabled: true + mongos: + size: 3 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + expose: + type: LoadBalancer + configsvrReplSet: + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + 
redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + secrets: + users: some-users diff --git a/e2e-tests/demand-backup-physical-sharded-aws/run b/e2e-tests/demand-backup-physical-sharded-aws/run new file mode 100755 index 0000000000..38772c8d38 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-aws/run @@ -0,0 +1,72 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. "${test_dir}/../functions" +set_debug + +if [ -n "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then + desc 'Skip tests related to AWS S3 Cloud Storage' + exit 0 +fi + +create_infra "${namespace}" + +apply_s3_storage_secrets + +### Case 1: Backup and restore on sharded cluster +desc 'Testing on sharded cluster' + +echo "Creating PSMDB cluster" +cluster="some-name" +kubectl_bin apply -f "${conf_dir}/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}-sharded.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo "Check if all pods started" +wait_for_running ${cluster}-rs0 3 +wait_for_running ${cluster}-cfg 3 +wait_for_running ${cluster}-mongos 3 +wait_cluster_consistency ${cluster} + +lbEndpoint=$(kubectl_bin get svc $cluster-mongos -o=jsonpath='{.status}' \ + | jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0] | if .ip then .ip else .hostname end') +if [ -z $lbEndpoint ]; then + echo "mongos service not exported correctly" + exit 1 +fi + +run_mongos \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-mongos.${namespace}" +sleep 1 +run_mongos \ + 'use myApp\n db.test.insert({ x: 100501 })' \ + 
"myApp:myPass@${cluster}-mongos.${namespace}" +sleep 5 +compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-sharded" + +# wait for stable timestamp in wiredtiger +echo 'waiting 60 seconds for stable timestamp in wiredtiger' +sleep 80 + +echo 'Running AWS storage backups' +backup_name_aws="backup-aws-s3-sharded" + +run_backup aws-s3 ${backup_name_aws} 'physical' + +wait_backup "${backup_name_aws}" +check_backup_in_storage ${backup_name_aws} s3 rs0 +check_backup_in_storage ${backup_name_aws} s3 cfg + +echo "Drop collection" +run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" +echo 'Check backup and restore -- aws-s3' +run_restore ${backup_name_aws} "_restore_sharded" +run_recovery_check ${backup_name_aws} "_restore_sharded" 'sharded' +check_exported_mongos_service_endpoint "$lbEndpoint" + +destroy "$namespace" + +desc 'test passed' diff --git a/e2e-tests/demand-backup-physical-sharded-azure/compare/find-sharded.json b/e2e-tests/demand-backup-physical-sharded-azure/compare/find-sharded.json new file mode 100644 index 0000000000..117b05ec2b --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-azure/compare/find-sharded.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100501 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded-azure/compare/find.json b/e2e-tests/demand-backup-physical-sharded-azure/compare/find.json new file mode 100644 index 0000000000..74495091bf --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-azure/compare/find.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100500 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded-azure/compare/statefulset_some-name-rs0_restore_sharded-oc.yml b/e2e-tests/demand-backup-physical-sharded-azure/compare/statefulset_some-name-rs0_restore_sharded-oc.yml new file mode 100644 index 0000000000..cdd5647ea3 --- /dev/null +++ 
b/e2e-tests/demand-backup-physical-sharded-azure/compare/statefulset_some-name-rs0_restore_sharded-oc.yml @@ -0,0 +1,260 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: 
MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: 
/opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: 
OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-sharded-azure/compare/statefulset_some-name-rs0_restore_sharded.yml b/e2e-tests/demand-backup-physical-sharded-azure/compare/statefulset_some-name-rs0_restore_sharded.yml new file mode 100644 index 0000000000..8ce5f50874 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-azure/compare/statefulset_some-name-rs0_restore_sharded.yml @@ -0,0 +1,262 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - 
--relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: 
+ runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + 
defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-sharded-azure/conf/backup-azure-blob.yml b/e2e-tests/demand-backup-physical-sharded-azure/conf/backup-azure-blob.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-azure/conf/backup-azure-blob.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-sharded-azure/conf/restore.yml b/e2e-tests/demand-backup-physical-sharded-azure/conf/restore.yml new file mode 100644 index 0000000000..32ab3c4b9a --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-azure/conf/restore.yml @@ -0,0 +1,7 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBRestore +metadata: + name: +spec: + clusterName: some-name + backupName: diff --git a/e2e-tests/demand-backup-physical-sharded-azure/conf/secrets.yml b/e2e-tests/demand-backup-physical-sharded-azure/conf/secrets.yml new file mode 100644 index 0000000000..61fc9675b0 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-azure/conf/secrets.yml @@ -0,0 +1,16 @@ +kind: Secret +apiVersion: v1 +metadata: + name: some-users +data: + MONGODB_BACKUP_PASSWORD: 
YmFja3VwMTIzNDU2 + MONGODB_BACKUP_USER: YmFja3Vw + MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu + MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY= + MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I= + MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2 + MONGODB_USER_ADMIN_USER: dXNlckFkbWlu + MONGODB_DATABASE_ADMIN_USER: ZGF0YWJhc2VBZG1pbg== + MONGODB_DATABASE_ADMIN_PASSWORD: ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== +type: Opaque diff --git a/e2e-tests/demand-backup-physical-sharded-azure/conf/some-name-sharded.yml b/e2e-tests/demand-backup-physical-sharded-azure/conf/some-name-sharded.yml new file mode 100644 index 0000000000..70bf32c9a7 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-azure/conf/some-name-sharded.yml @@ -0,0 +1,117 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + updateStrategy: SmartUpdate + backup: + enabled: true + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + azure-blob: + type: azure + azure: + container: operator-testing + prefix: psmdb-demand-backup-physical-sharded + credentialsSecret: azure-secret + sharding: + enabled: true + mongos: + size: 3 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + expose: + type: LoadBalancer + configsvrReplSet: + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + 
wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + secrets: + users: some-users diff --git a/e2e-tests/demand-backup-physical-sharded-azure/run b/e2e-tests/demand-backup-physical-sharded-azure/run new file mode 100755 index 0000000000..60ec83af3e --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-azure/run @@ -0,0 +1,71 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. 
"${test_dir}/../functions" +set_debug + +if [ -n "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then + desc 'Skip tests related to AZURE Cloud Storage' + exit 0 +fi + +create_infra "${namespace}" + +apply_s3_storage_secrets + +### Case 1: Backup and restore on sharded cluster +desc 'Testing on sharded cluster' + +echo "Creating PSMDB cluster" +cluster="some-name" +kubectl_bin apply -f "${conf_dir}/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}-sharded.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo "Check if all pods started" +wait_for_running ${cluster}-rs0 3 +wait_for_running ${cluster}-cfg 3 +wait_for_running ${cluster}-mongos 3 +wait_cluster_consistency ${cluster} + +lbEndpoint=$(kubectl_bin get svc $cluster-mongos -o=jsonpath='{.status}' \ + | jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0] | if .ip then .ip else .hostname end') +if [ -z $lbEndpoint ]; then + echo "Mongos service not exported correctly" + exit 1 +fi + +run_mongos \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-mongos.${namespace}" +sleep 1 +run_mongos \ + 'use myApp\n db.test.insert({ x: 100501 })' \ + "myApp:myPass@${cluster}-mongos.${namespace}" +sleep 5 +compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-sharded" + +# wait for stable timestamp in wiredtiger +echo 'Waiting 60 seconds for stable timestamp in wiredtiger' +sleep 80 + +echo 'Running AZURE storage backups' +backup_name_azure="backup-azure-blob-sharded" +run_backup azure-blob ${backup_name_azure} 'physical' + +wait_backup "${backup_name_azure}" +check_backup_in_storage ${backup_name_azure} azure rs0 +check_backup_in_storage ${backup_name_azure} azure cfg + +echo "Drop collection" +run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" +echo 'Check backup and restore -- azure-blob' +run_restore 
${backup_name_azure} "_restore_sharded" +run_recovery_check ${backup_name_azure} "_restore_sharded" 'sharded' +check_exported_mongos_service_endpoint "$lbEndpoint" + +destroy "$namespace" + +desc 'test passed' diff --git a/e2e-tests/demand-backup-physical-sharded-gcp/compare/find-sharded.json b/e2e-tests/demand-backup-physical-sharded-gcp/compare/find-sharded.json new file mode 100644 index 0000000000..117b05ec2b --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-gcp/compare/find-sharded.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100501 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded-gcp/compare/find.json b/e2e-tests/demand-backup-physical-sharded-gcp/compare/find.json new file mode 100644 index 0000000000..74495091bf --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-gcp/compare/find.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100500 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded-gcp/compare/statefulset_some-name-rs0_restore_sharded-oc.yml b/e2e-tests/demand-backup-physical-sharded-gcp/compare/statefulset_some-name-rs0_restore_sharded-oc.yml new file mode 100644 index 0000000000..cdd5647ea3 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-gcp/compare/statefulset_some-name-rs0_restore_sharded-oc.yml @@ -0,0 +1,260 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: 
mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + 
livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent 
/opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-sharded-gcp/compare/statefulset_some-name-rs0_restore_sharded.yml b/e2e-tests/demand-backup-physical-sharded-gcp/compare/statefulset_some-name-rs0_restore_sharded.yml new file mode 100644 index 0000000000..8ce5f50874 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-gcp/compare/statefulset_some-name-rs0_restore_sharded.yml @@ -0,0 +1,262 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + 
app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: 
MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + 
dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-sharded-gcp/conf/backup-gcp-cs.yml 
b/e2e-tests/demand-backup-physical-sharded-gcp/conf/backup-gcp-cs.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-gcp/conf/backup-gcp-cs.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-sharded-gcp/conf/restore.yml b/e2e-tests/demand-backup-physical-sharded-gcp/conf/restore.yml new file mode 100644 index 0000000000..32ab3c4b9a --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-gcp/conf/restore.yml @@ -0,0 +1,7 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBRestore +metadata: + name: +spec: + clusterName: some-name + backupName: diff --git a/e2e-tests/demand-backup-physical-sharded-gcp/conf/secrets.yml b/e2e-tests/demand-backup-physical-sharded-gcp/conf/secrets.yml new file mode 100644 index 0000000000..61fc9675b0 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-gcp/conf/secrets.yml @@ -0,0 +1,16 @@ +kind: Secret +apiVersion: v1 +metadata: + name: some-users +data: + MONGODB_BACKUP_PASSWORD: YmFja3VwMTIzNDU2 + MONGODB_BACKUP_USER: YmFja3Vw + MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu + MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY= + MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I= + MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2 + MONGODB_USER_ADMIN_USER: dXNlckFkbWlu + MONGODB_DATABASE_ADMIN_USER: ZGF0YWJhc2VBZG1pbg== + MONGODB_DATABASE_ADMIN_PASSWORD: ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== +type: Opaque diff --git a/e2e-tests/demand-backup-physical-sharded-gcp/conf/some-name-sharded.yml b/e2e-tests/demand-backup-physical-sharded-gcp/conf/some-name-sharded.yml new file mode 100644 index 0000000000..4a8caf5d53 --- /dev/null +++ 
b/e2e-tests/demand-backup-physical-sharded-gcp/conf/some-name-sharded.yml @@ -0,0 +1,120 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + updateStrategy: SmartUpdate + backup: + enabled: true + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + gcp-cs: + type: s3 + s3: + credentialsSecret: gcp-cs-secret + region: us-east-1 + bucket: operator-testing + prefix: psmdb-demand-backup-physical-sharded + endpointUrl: https://storage.googleapis.com + insecureSkipTLSVerify: false + sharding: + enabled: true + mongos: + size: 3 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + expose: + type: LoadBalancer + configsvrReplSet: + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: 
true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + secrets: + users: some-users diff --git a/e2e-tests/demand-backup-physical-sharded-gcp/run b/e2e-tests/demand-backup-physical-sharded-gcp/run new file mode 100755 index 0000000000..c1d1808861 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-gcp/run @@ -0,0 +1,72 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. "${test_dir}/../functions" +set_debug + +if [ -n "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then + desc 'Skip tests related to GCP Cloud Storage' + exit 0 +fi + +create_infra "${namespace}" + +apply_s3_storage_secrets + +### Case 1: Backup and restore on sharded cluster +desc 'Testing on sharded cluster' + +echo "Creating PSMDB cluster" +cluster="some-name" +kubectl_bin apply -f "${conf_dir}/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}-sharded.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo "Check if all pods started" +wait_for_running ${cluster}-rs0 3 +wait_for_running ${cluster}-cfg 3 +wait_for_running ${cluster}-mongos 3 +wait_cluster_consistency ${cluster} + +lbEndpoint=$(kubectl_bin get svc $cluster-mongos -o=jsonpath='{.status}' \ + | jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0] | if .ip then .ip else .hostname end') +if [ -z $lbEndpoint ]; then + echo "Mongos service not exported correctly" + exit 1 +fi + +run_mongos \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-mongos.${namespace}" +sleep 1 +run_mongos \ + 'use myApp\n db.test.insert({ x: 100501 })' \ + 
"myApp:myPass@${cluster}-mongos.${namespace}" +sleep 5 +compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-sharded" + +# wait for stable timestamp in wiredtiger +echo 'Waiting 80 seconds for stable timestamp in wiredtiger' +sleep 80 + +echo 'Running GCP storage backups' +backup_name_gcp="backup-gcp-cs-sharded" + +run_backup gcp-cs ${backup_name_gcp} 'physical' + +wait_backup "${backup_name_gcp}" +check_backup_in_storage ${backup_name_gcp} gcs rs0 +check_backup_in_storage ${backup_name_gcp} gcs cfg + +echo "Drop collection" +run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" +echo 'Check backup and restore -- gcp-cs' +run_restore ${backup_name_gcp} "_restore_sharded" +run_recovery_check ${backup_name_gcp} "_restore_sharded" 'sharded' +check_exported_mongos_service_endpoint "$lbEndpoint" + +destroy "$namespace" + +desc 'test passed' diff --git a/e2e-tests/demand-backup-physical-sharded-minio/compare/find-sharded.json b/e2e-tests/demand-backup-physical-sharded-minio/compare/find-sharded.json new file mode 100644 index 0000000000..117b05ec2b --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-minio/compare/find-sharded.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100501 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded-minio/compare/find.json b/e2e-tests/demand-backup-physical-sharded-minio/compare/find.json new file mode 100644 index 0000000000..74495091bf --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-minio/compare/find.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100500 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded-minio/compare/statefulset_some-name-rs0_restore_sharded-oc.yml b/e2e-tests/demand-backup-physical-sharded-minio/compare/statefulset_some-name-rs0_restore_sharded-oc.yml new file mode 100644 index 0000000000..cdd5647ea3 --- /dev/null +++ 
b/e2e-tests/demand-backup-physical-sharded-minio/compare/statefulset_some-name-rs0_restore_sharded-oc.yml @@ -0,0 +1,260 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: 
MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: 
/opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: 
OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-sharded-minio/compare/statefulset_some-name-rs0_restore_sharded.yml b/e2e-tests/demand-backup-physical-sharded-minio/compare/statefulset_some-name-rs0_restore_sharded.yml new file mode 100644 index 0000000000..8ce5f50874 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-minio/compare/statefulset_some-name-rs0_restore_sharded.yml @@ -0,0 +1,262 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - 
--relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: 
+ runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + 
defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-sharded-minio/conf/backup-minio.yml b/e2e-tests/demand-backup-physical-sharded-minio/conf/backup-minio.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-minio/conf/backup-minio.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-sharded-minio/conf/restore.yml b/e2e-tests/demand-backup-physical-sharded-minio/conf/restore.yml new file mode 100644 index 0000000000..32ab3c4b9a --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-minio/conf/restore.yml @@ -0,0 +1,7 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBRestore +metadata: + name: +spec: + clusterName: some-name + backupName: diff --git a/e2e-tests/demand-backup-physical-sharded-minio/conf/secrets.yml b/e2e-tests/demand-backup-physical-sharded-minio/conf/secrets.yml new file mode 100644 index 0000000000..61fc9675b0 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-minio/conf/secrets.yml @@ -0,0 +1,16 @@ +kind: Secret +apiVersion: v1 +metadata: + name: some-users +data: + MONGODB_BACKUP_PASSWORD: YmFja3VwMTIzNDU2 + 
MONGODB_BACKUP_USER: YmFja3Vw + MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu + MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY= + MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I= + MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2 + MONGODB_USER_ADMIN_USER: dXNlckFkbWlu + MONGODB_DATABASE_ADMIN_USER: ZGF0YWJhc2VBZG1pbg== + MONGODB_DATABASE_ADMIN_PASSWORD: ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== +type: Opaque diff --git a/e2e-tests/demand-backup-physical-sharded-minio/conf/some-name-sharded.yml b/e2e-tests/demand-backup-physical-sharded-minio/conf/some-name-sharded.yml new file mode 100644 index 0000000000..78ffea08a5 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-minio/conf/some-name-sharded.yml @@ -0,0 +1,119 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + updateStrategy: SmartUpdate + backup: + enabled: true + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + minio: + type: s3 + s3: + credentialsSecret: minio-secret + region: us-east-1 + bucket: operator-testing + endpointUrl: http://minio-service:9000/ + insecureSkipTLSVerify: false + sharding: + enabled: true + mongos: + size: 3 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + expose: + type: LoadBalancer + configsvrReplSet: + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + 
wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + type: ClusterIP + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + secrets: + users: some-users diff --git a/e2e-tests/demand-backup-physical-sharded-minio/run b/e2e-tests/demand-backup-physical-sharded-minio/run new file mode 100755 index 0000000000..5327b46e1a --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-minio/run @@ -0,0 +1,64 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. 
"${test_dir}/../functions" +set_debug + +create_infra "${namespace}" + +deploy_minio +apply_s3_storage_secrets + +### Case 1: Backup and restore on sharded cluster +desc 'Testing on sharded cluster' + +echo "Creating PSMDB cluster" +cluster="some-name" +kubectl_bin apply -f "${conf_dir}/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}-sharded.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo "check if all pods started" +wait_for_running ${cluster}-rs0 3 +wait_for_running ${cluster}-cfg 3 +wait_for_running ${cluster}-mongos 3 +wait_cluster_consistency ${cluster} + +lbEndpoint=$(kubectl_bin get svc $cluster-mongos -o=jsonpath='{.status}' \ + | jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0] | if .ip then .ip else .hostname end') +if [ -z $lbEndpoint ]; then + echo "Mongos service not exported correctly" + exit 1 +fi + +run_mongos \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-mongos.${namespace}" +sleep 1 +run_mongos \ + 'use myApp\n db.test.insert({ x: 100501 })' \ + "myApp:myPass@${cluster}-mongos.${namespace}" +sleep 5 +compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-sharded" + +# wait for stable timestamp in wiredtiger +echo 'Waiting 60 seconds for stable timestamp in wiredtiger' +sleep 80 + +echo 'Running minio backups' +backup_name_minio="backup-minio-sharded" +run_backup minio ${backup_name_minio} 'physical' +wait_backup "${backup_name_minio}" + +echo "Drop collection" +run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" +echo 'Check backup and restore -- minio' +backup_dest_minio=$(get_backup_dest "${backup_name_minio}") +run_restore ${backup_name_minio} "_restore_sharded" +run_recovery_check ${backup_name_minio} "_restore_sharded" 'sharded' + +destroy "$namespace" + +desc 'test passed' diff --git 
a/e2e-tests/demand-backup-physical-sharded-parallel/compare/find-sharded.json b/e2e-tests/demand-backup-physical-sharded-parallel/compare/find-sharded.json new file mode 100644 index 0000000000..117b05ec2b --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-parallel/compare/find-sharded.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100501 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded-parallel/compare/find.json b/e2e-tests/demand-backup-physical-sharded-parallel/compare/find.json new file mode 100644 index 0000000000..74495091bf --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-parallel/compare/find.json @@ -0,0 +1,3 @@ +switched to db myApp +{ "_id" : , "x" : 100500 } +bye diff --git a/e2e-tests/demand-backup-physical-sharded-parallel/compare/statefulset_some-name-rs0_restore_sharded-oc.yml b/e2e-tests/demand-backup-physical-sharded-parallel/compare/statefulset_some-name-rs0_restore_sharded-oc.yml new file mode 100644 index 0000000000..cdd5647ea3 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-parallel/compare/statefulset_some-name-rs0_restore_sharded-oc.yml @@ -0,0 +1,260 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + 
app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - 
--sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - 
mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-sharded-parallel/compare/statefulset_some-name-rs0_restore_sharded.yml b/e2e-tests/demand-backup-physical-sharded-parallel/compare/statefulset_some-name-rs0_restore_sharded.yml new file mode 100644 index 0000000000..8ce5f50874 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-parallel/compare/statefulset_some-name-rs0_restore_sharded.yml @@ -0,0 +1,262 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + percona.com/restore-in-progress: "true" + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: 
percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/physical-restore-ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + - name: PBM_AGENT_MONGODB_USERNAME + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_USER_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + key: MONGODB_BACKUP_PASSWORD_ESCAPED + name: internal-some-name-users + optional: false + - name: PBM_AGENT_SIDECAR + value: "true" + - name: PBM_AGENT_SIDECAR_SLEEP + value: "5" + - name: 
POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PBM_MONGODB_URI + value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME) + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + - mountPath: /etc/pbm/ + name: pbm-config + readOnly: true + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 
100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + - command: + - bash + - -c + - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent + imagePullPolicy: Always + name: pbm-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - name: pbm-config + secret: + defaultMode: 420 + secretName: some-name-pbm-config + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-aws-s3.yml b/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-aws-s3.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ 
b/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-aws-s3.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-azure-blob.yml b/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-azure-blob.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-azure-blob.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-gcp-cs.yml b/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-gcp-cs.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-gcp-cs.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-minio.yml b/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-minio.yml new file mode 100644 index 0000000000..13c039b842 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-parallel/conf/backup-minio.yml @@ -0,0 +1,10 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + finalizers: + - percona.com/delete-backup + name: +spec: + type: physical + clusterName: some-name + storageName: diff --git a/e2e-tests/demand-backup-physical-sharded-parallel/conf/secrets.yml b/e2e-tests/demand-backup-physical-sharded-parallel/conf/secrets.yml new 
file mode 100644 index 0000000000..61fc9675b0 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-parallel/conf/secrets.yml @@ -0,0 +1,16 @@ +kind: Secret +apiVersion: v1 +metadata: + name: some-users +data: + MONGODB_BACKUP_PASSWORD: YmFja3VwMTIzNDU2 + MONGODB_BACKUP_USER: YmFja3Vw + MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2 + MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu + MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY= + MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I= + MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2 + MONGODB_USER_ADMIN_USER: dXNlckFkbWlu + MONGODB_DATABASE_ADMIN_USER: ZGF0YWJhc2VBZG1pbg== + MONGODB_DATABASE_ADMIN_PASSWORD: ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== +type: Opaque diff --git a/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml b/e2e-tests/demand-backup-physical-sharded-parallel/conf/some-name-sharded.yml similarity index 100% rename from e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml rename to e2e-tests/demand-backup-physical-sharded-parallel/conf/some-name-sharded.yml diff --git a/e2e-tests/demand-backup-physical-sharded-parallel/run b/e2e-tests/demand-backup-physical-sharded-parallel/run new file mode 100755 index 0000000000..7e0fd7cf96 --- /dev/null +++ b/e2e-tests/demand-backup-physical-sharded-parallel/run @@ -0,0 +1,81 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath "$(dirname "$0")") +. 
"${test_dir}/../functions" +set_debug + +create_infra "${namespace}" + +deploy_minio +apply_s3_storage_secrets + +### Case 1: Backup on sharded cluster +desc 'Testing on sharded cluster' + +echo "Creating PSMDB cluster" +cluster="some-name" +kubectl_bin apply -f "${conf_dir}/secrets.yml" +apply_cluster "${test_dir}/conf/${cluster}-sharded.yml" +kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" + +echo "check if all pods started" +wait_for_running ${cluster}-rs0 3 +wait_for_running ${cluster}-cfg 3 +wait_for_running ${cluster}-mongos 3 +wait_cluster_consistency ${cluster} + +lbEndpoint=$(kubectl_bin get svc $cluster-mongos -o=jsonpath='{.status}' \ + | jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0] | if .ip then .ip else .hostname end') +if [ -z $lbEndpoint ]; then + echo "Mongos service not exported correctly" + exit 1 +fi + +run_mongos \ + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@${cluster}-mongos.${namespace}" +sleep 1 +run_mongos \ + 'use myApp\n db.test.insert({ x: 100501 })' \ + "myApp:myPass@${cluster}-mongos.${namespace}" +sleep 5 +compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-sharded" + +# wait for stable timestamp in wiredtiger +echo 'Waiting 60 seconds for stable timestamp in wiredtiger' +sleep 80 + +echo 'Running backups' +backup_name_minio="backup-minio-sharded" +run_backup minio ${backup_name_minio} 'physical' +if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then + backup_name_aws="backup-aws-s3-sharded" + backup_name_gcp="backup-gcp-cs-sharded" + backup_name_azure="backup-azure-blob-sharded" + + run_backup aws-s3 ${backup_name_aws} 'physical' + run_backup gcp-cs ${backup_name_gcp} 'physical' + run_backup azure-blob ${backup_name_azure} 'physical' + + wait_backup "${backup_name_aws}" + check_backup_in_storage ${backup_name_aws} s3 rs0 + check_backup_in_storage 
${backup_name_aws} s3 cfg + + wait_backup "${backup_name_gcp}" + check_backup_in_storage ${backup_name_gcp} gcs rs0 + check_backup_in_storage ${backup_name_gcp} gcs cfg + + wait_backup "${backup_name_azure}" + check_backup_in_storage ${backup_name_azure} azure rs0 + check_backup_in_storage ${backup_name_azure} azure cfg +fi +wait_backup "${backup_name_minio}" + +echo "Drop collection" +run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" + +destroy "$namespace" + +desc 'test passed' diff --git a/e2e-tests/demand-backup-physical-sharded/run b/e2e-tests/demand-backup-physical-sharded/run deleted file mode 100755 index c9acee8299..0000000000 --- a/e2e-tests/demand-backup-physical-sharded/run +++ /dev/null @@ -1,158 +0,0 @@ -#!/bin/bash - -set -o errexit - -test_dir=$(realpath "$(dirname "$0")") -. "${test_dir}/../functions" -set_debug - -run_backup() { - local storage=$1 - local backup_name=$2 - - cat $test_dir/conf/backup.yml | - $sed -e "s/name:/name: ${backup_name}/" | - $sed -e "s/storageName:/storageName: ${storage}/" | - kubectl_bin apply -f - -} - -run_restore() { - local backup_name=$1 - - cat $test_dir/conf/restore.yml | - $sed -e "s/name:/name: restore-${backup_name}/" | - $sed -e "s/backupName:/backupName: ${backup_name}/" | - kubectl_bin apply -f - -} - -run_recovery_check() { - local backup_name=$1 - local compare_suffix=${2:-"_restore"} - - wait_restore "${backup_name}" "${cluster}" "requested" "0" "3000" - echo - - compare_kubectl "statefulset/${cluster}-rs0" ${compare_suffix} - - # we don't wait for cluster readiness here because the annotation gets removed then - wait_restore "${backup_name}" "${cluster}" "ready" "0" "3000" - - if [ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == null ]; then - echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore" - exit 1 - fi - echo - - wait_cluster_consistency ${cluster} 42 - 
wait_for_pbm_operations ${cluster} - - compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-sharded" -} - -check_exported_mongos_service_endpoint() { - local host=$1 - - if [ "$host" != "$(kubectl_bin get psmdb $cluster -o=jsonpath='{.status.host}')" ]; then - echo "Exported host is not correct after the restore" - exit 1 - fi -} - -create_infra "${namespace}" - -deploy_minio -apply_s3_storage_secrets - -### Case 1: Backup and restore on sharded cluster -desc 'Testing on sharded cluster' - -echo "Creating PSMDB cluster" -cluster="some-name" -kubectl_bin apply -f "${conf_dir}/secrets.yml" -apply_cluster "${test_dir}/conf/${cluster}-sharded.yml" -kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" - -echo "check if all pods started" -wait_for_running ${cluster}-rs0 3 -wait_for_running ${cluster}-cfg 3 -wait_for_running ${cluster}-mongos 3 -wait_cluster_consistency ${cluster} - -lbEndpoint=$(kubectl_bin get svc $cluster-mongos -o=jsonpath='{.status}' | - jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0] | if .ip then .ip else .hostname end') -if [ -z $lbEndpoint ]; then - echo "mongos service not exported correctly" - exit 1 -fi - -run_mongos \ - 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ - "userAdmin:userAdmin123456@${cluster}-mongos.${namespace}" -sleep 1 -run_mongos \ - 'use myApp\n db.test.insert({ x: 100501 })' \ - "myApp:myPass@${cluster}-mongos.${namespace}" -sleep 5 -compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-sharded" - -# wait for stable timestamp in wiredtiger -echo 'waiting 60 seconds for stable timestamp in wiredtiger' -sleep 80 - -echo 'running backups' -backup_name_minio="backup-minio-sharded" -run_backup minio ${backup_name_minio} -if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - backup_name_aws="backup-aws-s3-sharded" - backup_name_gcp="backup-gcp-cs-sharded" - 
backup_name_azure="backup-azure-blob-sharded" - - run_backup aws-s3 ${backup_name_aws} - run_backup gcp-cs ${backup_name_gcp} - run_backup azure-blob ${backup_name_azure} - - wait_backup "${backup_name_aws}" - check_backup_in_storage ${backup_name_aws} s3 rs0 - check_backup_in_storage ${backup_name_aws} s3 cfg - - wait_backup "${backup_name_gcp}" - check_backup_in_storage ${backup_name_gcp} gcs rs0 - check_backup_in_storage ${backup_name_gcp} gcs cfg - - wait_backup "${backup_name_azure}" - check_backup_in_storage ${backup_name_azure} azure rs0 - check_backup_in_storage ${backup_name_azure} azure cfg -fi -wait_backup "${backup_name_minio}" - -if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - echo "drop collection" - run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" - echo 'check backup and restore -- aws-s3' - run_restore ${backup_name_aws} "_restore_sharded" - run_recovery_check ${backup_name_aws} "_restore_sharded" - check_exported_mongos_service_endpoint "$lbEndpoint" - - echo "drop collection" - run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" - echo 'check backup and restore -- gcp-cs' - run_restore ${backup_name_gcp} "_restore_sharded" - run_recovery_check ${backup_name_gcp} "_restore_sharded" - check_exported_mongos_service_endpoint "$lbEndpoint" - - echo "drop collection" - run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" - echo 'check backup and restore -- azure-blob' - run_restore ${backup_name_azure} "_restore_sharded" - run_recovery_check ${backup_name_azure} "_restore_sharded" - check_exported_mongos_service_endpoint "$lbEndpoint" -fi - -echo "drop collection" -run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" -echo 'check backup and restore -- minio' -backup_dest_minio=$(get_backup_dest "${backup_name_minio}") -run_restore ${backup_name_minio} "_restore_sharded" -run_recovery_check ${backup_name_minio} 
"_restore_sharded" - -destroy "$namespace" diff --git a/e2e-tests/demand-backup-physical/run b/e2e-tests/demand-backup-physical/run deleted file mode 100755 index b135e58225..0000000000 --- a/e2e-tests/demand-backup-physical/run +++ /dev/null @@ -1,157 +0,0 @@ -#!/bin/bash - -set -o errexit - -test_dir=$(realpath "$(dirname "$0")") -. "${test_dir}/../functions" -set_debug - -run_backup() { - local storage=$1 - local backup_name=$2 - - cat $test_dir/conf/backup.yml \ - | $sed -e "s/name:/name: ${backup_name}/" \ - | $sed -e "s/storageName:/storageName: ${storage}/" \ - | kubectl_bin apply -f - -} - -run_restore() { - local backup_name=$1 - - cat $test_dir/conf/restore.yml \ - | $sed -e "s/name:/name: restore-${backup_name}/" \ - | $sed -e "s/backupName:/backupName: ${backup_name}/" \ - | kubectl_bin apply -f - -} - -run_recovery_check() { - local backup_name=$1 - local compare_suffix=${2:-"_restore"} - - wait_restore "${backup_name}" "${cluster}" "requested" "0" "3000" - echo - - compare_kubectl "statefulset/${cluster}-rs0" ${compare_suffix} - - # we don't wait for cluster readiness here because the annotation gets removed then - wait_restore "${backup_name}" "${cluster}" "ready" "0" "1800" - - if [ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == null ]; then - echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore" - exit 1 - fi - echo - - wait_cluster_consistency ${cluster} - wait_for_pbm_operations ${cluster} - - compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" - compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" - compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" -} - -create_infra "${namespace}" - -deploy_minio -apply_s3_storage_secrets - -desc 'Testing on not sharded cluster' - -echo "Creating PSMDB cluster" -cluster="some-name" -kubectl_bin apply -f 
"${test_dir}/conf/secrets.yml" -apply_cluster "${test_dir}/conf/${cluster}.yml" -kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" - -echo "check if all pods started" -wait_for_running ${cluster}-rs0 3 -wait_cluster_consistency ${cluster} - -sleep 60 # give time for resync to start - -wait_for_pbm_operations ${cluster} - -echo 'writing test data' -run_mongo \ - 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ - "userAdmin:userAdmin123456@${cluster}-rs0.${namespace}" -sleep 1 -run_mongo \ - 'use myApp\n db.test.insert({ x: 100500 })' \ - "myApp:myPass@${cluster}-rs0.${namespace}" -sleep 5 -compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" -compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" -compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" - -echo 'running backups' -backup_name_minio="backup-minio" -run_backup minio ${backup_name_minio} -if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - backup_name_aws="backup-aws-s3" - backup_name_gcp="backup-gcp-cs" - backup_name_azure="backup-azure-blob" - - run_backup aws-s3 ${backup_name_aws} - run_backup gcp-cs ${backup_name_gcp} - run_backup azure-blob ${backup_name_azure} - - wait_backup "${backup_name_aws}" - check_backup_in_storage ${backup_name_aws} s3 rs0 - - wait_backup "${backup_name_gcp}" - check_backup_in_storage ${backup_name_gcp} gcs rs0 - - wait_backup "${backup_name_azure}" - check_backup_in_storage ${backup_name_azure} azure rs0 -fi -wait_backup "${backup_name_minio}" - -if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - echo "drop collection" - run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" - echo 'check backup and restore -- aws-s3' - run_restore ${backup_name_aws} - run_recovery_check ${backup_name_aws} - - echo "drop collection" - run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" - echo 'check 
backup and restore -- gcp-cs' - run_restore ${backup_name_gcp} - run_recovery_check ${backup_name_gcp} - - echo "drop collection" - run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" - echo 'check backup and restore -- azure-blob' - run_restore ${backup_name_azure} - run_recovery_check ${backup_name_azure} -fi - -echo "drop collection" -run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" -echo 'check backup and restore -- minio' -backup_dest_minio=$(get_backup_dest "${backup_name_minio}") -run_restore ${backup_name_minio} -run_recovery_check ${backup_name_minio} - -desc 'Testing with arbiter and non-voting nodes' - -apply_cluster "${test_dir}/conf/${cluster}-arbiter-nv.yml" -echo "check if all pods started" -wait_for_running ${cluster}-rs0 3 -wait_cluster_consistency ${cluster} - -echo 'running backups' -backup_name_minio="backup-minio-arbiter-nv" -run_backup minio ${backup_name_minio} -wait_backup "${backup_name_minio}" - -echo "drop collection" -run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" -echo 'check backup and restore -- minio' -backup_dest_minio=$(get_backup_dest "${backup_name_minio}") -run_restore ${backup_name_minio} -run_recovery_check ${backup_name_minio} "_restore-arbiter-nv" - -destroy "$namespace" diff --git a/e2e-tests/functions b/e2e-tests/functions index 615f8fbcb3..2b055f4fc9 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -766,6 +766,37 @@ compare_kubectl() { fi } +run_recovery_check() { + local backup_name=$1 + local compare_suffix=${2:-"_restore"} + local is_sharded=${3:-""} + + wait_restore "${backup_name}" "${cluster}" "requested" "0" "3000" + echo + + compare_kubectl "statefulset/${cluster}-rs0" ${compare_suffix} + + # we don't wait for cluster readiness here because the annotation gets removed then + wait_restore "${backup_name}" "${cluster}" "ready" "0" "3000" + + if [ $(kubectl_bin get psmdb ${cluster} -o yaml | yq 
'.metadata.annotations."percona.com/resync-pbm"') == null ]; then + echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore" + exit 1 + fi + echo + + wait_cluster_consistency ${cluster} + wait_for_pbm_operations ${cluster} + + if [ -n "$is_sharded" ]; then + compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-sharded" + else + compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" + compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" + compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" + fi +} + run_mongo() { local command="$1" local uri="$2" @@ -958,6 +989,15 @@ get_mongo_primary() { fi } +check_exported_mongos_service_endpoint() { + local host=$1 + + if [ "$host" != "$(kubectl_bin get psmdb $cluster -o=jsonpath='{.status.host}')" ]; then + echo "Exported host is not correct after the restore" + exit 1 + fi +} + compare_mongo_user() { local uri="$1" local user="$2" diff --git a/e2e-tests/run b/e2e-tests/run index e90e6b3ff1..2d83b3f8a1 100755 --- a/e2e-tests/run +++ b/e2e-tests/run @@ -20,8 +20,16 @@ fail() { "$dir/demand-backup-eks-credentials-irsa/run" || fail "demand-backup-eks-credentials-irsa" "$dir/demand-backup-sharded/run" || fail "demand-backup-sharded" "$dir/demand-backup/run" || fail "demand-backup" -"$dir/demand-backup-physical/run" || fail "demand-backup-physical" -"$dir/demand-backup-physical-sharded/run" || fail "demand-backup-physical-sharded" +"$dir/demand-backup-physical-parallel/run" || fail "demand-backup-physical-parallel" +"$dir/demand-backup-physical-aws/run" || fail "demand-backup-physical-aws" +"$dir/demand-backup-physical-azure/run" || fail "demand-backup-physical-azure" +"$dir/demand-backup-physical-gcp/run" || fail "demand-backup-physical-gcp" +"$dir/demand-backup-physical-minio/run" || fail "demand-backup-physical-minio" +"$dir/demand-backup-physical-sharded-parallel/run" || fail 
"demand-backup-physical-sharded-parallel" +"$dir/demand-backup-physical-sharded-aws/run" || fail "demand-backup-physical-sharded-aws" +"$dir/demand-backup-physical-sharded-azure/run" || fail "demand-backup-physical-sharded-azure" +"$dir/demand-backup-physical-sharded-gcp/run" || fail "demand-backup-physical-sharded-gcp" +"$dir/demand-backup-physical-sharded-minio/run" || fail "demand-backup-physical-sharded-minio" "$dir/expose-sharded/run" || fail "expose-sharded" "$dir/ignore-labels-annotations/run" || fail "ignore-labels-annotations" "$dir/init-deploy/run" || fail "init-deploy" diff --git a/e2e-tests/run-distro.csv b/e2e-tests/run-distro.csv index 0511d0f970..6f9d77c50a 100644 --- a/e2e-tests/run-distro.csv +++ b/e2e-tests/run-distro.csv @@ -10,8 +10,16 @@ default-cr demand-backup demand-backup-incremental demand-backup-incremental-sharded -demand-backup-physical -demand-backup-physical-sharded +demand-backup-physical-parallel +demand-backup-physical-aws +demand-backup-physical-azure +demand-backup-physical-gcp +demand-backup-physical-minio +demand-backup-physical-sharded-parallel +demand-backup-physical-sharded-aws +demand-backup-physical-sharded-azure +demand-backup-physical-sharded-gcp +demand-backup-physical-sharded-minio demand-backup-sharded init-deploy ldap diff --git a/e2e-tests/run-minikube.csv b/e2e-tests/run-minikube.csv index 8a8a7a3f19..3b58bf6ccf 100644 --- a/e2e-tests/run-minikube.csv +++ b/e2e-tests/run-minikube.csv @@ -2,7 +2,11 @@ arbiter default-cr demand-backup demand-backup-incremental -demand-backup-physical +demand-backup-physical-parallel +demand-backup-physical-aws +demand-backup-physical-azure +demand-backup-physical-gcp +demand-backup-physical-minio limits liveness mongod-major-upgrade diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index d605749e28..2c8864d5a8 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -12,8 +12,16 @@ demand-backup-eks-credentials-irsa demand-backup-fs demand-backup-incremental 
demand-backup-incremental-sharded -demand-backup-physical -demand-backup-physical-sharded +demand-backup-physical-parallel +demand-backup-physical-aws +demand-backup-physical-azure +demand-backup-physical-gcp +demand-backup-physical-minio +demand-backup-physical-sharded-parallel +demand-backup-physical-sharded-aws +demand-backup-physical-sharded-azure +demand-backup-physical-sharded-gcp +demand-backup-physical-sharded-minio demand-backup-sharded expose-sharded finalizer diff --git a/e2e-tests/run-release.csv b/e2e-tests/run-release.csv index 6f4b9e4fff..ccf0a2d63d 100644 --- a/e2e-tests/run-release.csv +++ b/e2e-tests/run-release.csv @@ -13,8 +13,16 @@ demand-backup-eks-credentials-irsa demand-backup-fs demand-backup-incremental demand-backup-incremental-sharded -demand-backup-physical -demand-backup-physical-sharded +demand-backup-physical-parallel +demand-backup-physical-aws +demand-backup-physical-azure +demand-backup-physical-gcp +demand-backup-physical-minio +demand-backup-physical-sharded-parallel +demand-backup-physical-sharded-aws +demand-backup-physical-sharded-azure +demand-backup-physical-sharded-gcp +demand-backup-physical-sharded-minio demand-backup-sharded expose-sharded finalizer