diff --git a/.envrc b/.envrc
new file mode 100644
index 000000000..369154dde
--- /dev/null
+++ b/.envrc
@@ -0,0 +1,13 @@
+nix-build $PWD/default.nix -A env --out-link .nix-env
+
+PATH_add ".nix-env/bin"
+
+export LOCALHOST_PYTHON="$PWD/.nix-env/bin/python"
+
+# source .profile from `$env`.
+# This is only used to set things interpolated by nix.
+# All *static* things should live inside .envrc.
+[[ -f ".nix-env/.profile" ]] && source_env ".nix-env/.profile"
+
+# allow local .envrc overrides
+[[ -f .envrc.local ]] && source_env .envrc.local
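Since `.envrc` sources an optional `.envrc.local` (and `.gitignore` below excludes it from version control), per-machine overrides can live there. A minimal sketch of such a file; the values are hypothetical:

```bash
# .envrc.local -- hypothetical local overrides, never committed
# Point the Makefiles at a pre-existing environment checkout:
export ENV_DIR="$HOME/src/cailleach/environments/my-env"
# Any other machine-specific exports can go here as well.
```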
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000..fab9cb2c6
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,4 @@
+# This file designates code owners for different parts of the repository
+
+# Define code owners for all files in the repository
+* @wireapp/customerops @julialongtin
diff --git a/.github/ISSUE_TEMPLATE/bug-repport.md b/.github/ISSUE_TEMPLATE/bug-repport.md
new file mode 100644
index 000000000..2eda5656d
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-repport.md
@@ -0,0 +1,44 @@
+---
+name: Bug Report
+about: template to report a bug
+title: "Bug: [BUG TITLE]"
+labels: ''
+assignees: ''
+---
+
+### Basic information
+
+* On-premises:
+* Cloud-Provider:
+* Installation type:
+* Kubernetes version:
+* Helm version:
+* Installed with Kubespray:
+* (Helm) Charts version:
+* List of installed top-level charts:
+* Other related technologies + version:
+
+### What is the expected result?
+
+### What is the actual result?
+
+### How to reproduce the issue?
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..3ba13e0ce
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: false
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
new file mode 100644
index 000000000..1bb785913
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.md
@@ -0,0 +1,40 @@
+---
+name: Feature Request
+about: Template for a feature request around the topic of deploying wire-server
+title: "Feature: [FEATURE TITLE]"
+labels: ''
+assignees: ''
+---
+
+### What kind of feature are you looking for?
+
+### In which scenario would this feature be helpful?
+
+### How do you imagine that this feature is being used?
+
+### How did you install Wire?
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md
new file mode 100644
index 000000000..da4415b47
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/question.md
@@ -0,0 +1,39 @@
+---
+name: Question
+about: template for a question around the topic of deploying wire-server
+title: "Question: [QUESTION TITLE]"
+labels: ''
+assignees: ''
+---
+
+### Basic information
+
+* On-premises:
+* Cloud-Provider:
+* Installation type:
+* Kubernetes version:
+* Helm version:
+* Installed with Kubespray:
+* (Helm) Charts version:
+* List of installed top-level charts:
+* Other related technologies + version:
+
+### How did you install Wire?
+
+### Question
diff --git a/.github/PULL_REQUEST_TEMPLATE/config.yml b/.github/PULL_REQUEST_TEMPLATE/config.yml
new file mode 100644
index 000000000..3ba13e0ce
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: false
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 000000000..d3d3e8760
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,61 @@
+---
+name: Change (PR)
+about: template for a PR that introduces a change
+title: "[TITLE]"
+labels:
+assignees:
+---
+
+### Change type
+
+* [ ] Fix
+* [ ] Feature
+
+### Basic information
+
+Technology:
+
+Version:
+
+* [ ] I ran/applied the changes myself
+* [ ] I can confirm that it was successful
+
+### Motivation
+
+### Objective
+
+### Reason
+
+### Use case
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 000000000..83a8c9ce9
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,36 @@
+on:
+  push:
+    branches: [master, develop]
+  pull_request:
+    branches: [master, develop]
+jobs:
+  build:
+    name: build
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os:
+          - ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - uses: cachix/install-nix-action@v27
+      - uses: cachix/cachix-action@v15
+        with:
+          name: wire-server
+          signingKey: "${{ secrets.CACHIX_SIGNING_KEY }}"
+
+      - name: Build the environment
+        run: nix-build -A env
+      - name: Install the environment
+        run: nix-env -f . -A env -i
+      - name: Install terraform
+        uses: hashicorp/setup-terraform@v3
+        with:
+          terraform_version: "^1.3.7"
+          terraform_wrapper: false
+      - name: Check terraform init
+        run: |
+          cd terraform/environment
+          terraform init --backend=false
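The CI workflow above can be approximated locally before pushing; a sketch using the same commands, assuming nix and terraform are already installed:

```bash
# Build the nix environment the same way CI does
nix-build -A env
# Install it into the user profile
nix-env -f . -A env -i
# Validate the terraform configuration without touching any backend
cd terraform/environment
terraform init --backend=false
```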
diff --git a/.github/workflows/custom-artifact.yml b/.github/workflows/custom-artifact.yml
new file mode 100644
index 000000000..293033cab
--- /dev/null
+++ b/.github/workflows/custom-artifact.yml
@@ -0,0 +1,72 @@
+on:
+  push:
+    branches: [master, develop]
+    tags: [ v* ]
+  workflow_dispatch:
+jobs:
+  offline:
+    name: Prepare custom offline package # Do not change this name, it is used to trigger deploy-wiab workflow
+    # Useful to skip expensive CI when writing docs
+    if: "!contains(github.event.head_commit.message, 'skip ci')"
+    runs-on:
+      group: wire-server-deploy
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - uses: cachix/install-nix-action@v27
+      - uses: cachix/cachix-action@v15
+        with:
+          name: wire-server
+          signingKey: "${{ secrets.CACHIX_SIGNING_KEY }}"
+
+      - name: Install nix environment
+        run: nix-env -f default.nix -iA env
+
+      - name: Run offline build
+        run: ./offline/ci.sh HELM_CHART_EXCLUDE_LIST=elasticsearch-curator,fluent-bit,kibana,redis-cluster,inbucket,aws-ingress,backoffice,calling-test,nginx-ingress-controller
+        env:
+          GPG_PRIVATE_KEY: '${{ secrets.GPG_PRIVATE_KEY }}'
+          DOCKER_LOGIN: '${{ secrets.DOCKER_LOGIN }}'
+
+      - name: Get upload name
+        id: upload_name
+        run: |
+          # FIXME: Tag with a nice release name using the github tag...
+          # SOURCE_TAG=${GITHUB_REF#refs/tags/}
+          echo ::set-output name=UPLOAD_NAME::$GITHUB_SHA-custom
+          # echo ::set-output name=UPLOAD_NAME::${SOURCE_TAG:-$GITHUB_SHA}
+      - name: Copy assets tarball to S3
+        run: |
+          aws s3 cp assets.tgz s3://public.wire.com/artifacts/wire-server-deploy-static-${{ steps.upload_name.outputs.UPLOAD_NAME }}.tgz
+          echo "Uploaded to: https://s3-$AWS_REGION.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-${{ steps.upload_name.outputs.UPLOAD_NAME }}.tgz"
+        env:
+          AWS_ACCESS_KEY_ID: '${{ secrets.AWS_ACCESS_KEY_ID }}'
+          AWS_SECRET_ACCESS_KEY: '${{ secrets.AWS_SECRET_ACCESS_KEY }}'
+          AWS_REGION: "eu-west-1"
+
+      - name: Build and upload wire-server-deploy container
+        run: |
+          container_image=$(nix-build --no-out-link -A container)
+          skopeo copy --retry-times 10 --dest-creds "$DOCKER_LOGIN" \
+            docker-archive:"$container_image" \
+            "docker://quay.io/wire/wire-server-deploy:${{ steps.upload_name.outputs.UPLOAD_NAME }}"
+        env:
+          DOCKER_LOGIN: '${{ secrets.DOCKER_LOGIN }}'
+
+      # Set output for deploy-wiab workflow to start
+      - name: Set output to trigger dependent workflow
+        if: success()
+        run: echo "::set-output name=trigger_next_workflow::true"
+
+      - name: Deploy offline environment to hetzner
+        run: |
+          ./offline/cd.sh
+        env:
+          HCLOUD_TOKEN: '${{ secrets.HCLOUD_TOKEN }}'
+
+      - name: Clean up hetzner environment; just in case
+        if: always()
+        run: (cd terraform/examples/wire-server-deploy-offline-hetzner ; terraform init && terraform destroy -auto-approve)
+        env:
+          HCLOUD_TOKEN: '${{ secrets.HCLOUD_TOKEN }}'
diff --git a/.github/workflows/deploy-wiab.yml b/.github/workflows/deploy-wiab.yml
new file mode 100644
index 000000000..a7c74e289
--- /dev/null
+++ b/.github/workflows/deploy-wiab.yml
@@ -0,0 +1,37 @@
+name: Deploy on Hetzner WIAB setup
+on:
+  workflow_run:
+    workflows: ["Prepare custom offline package"]
+    types:
+      - completed
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    concurrency:
+      group: autodeploy-script
+      cancel-in-progress: false
+
+    steps:
+      # Step 1: Checkout the repository code
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      # Step 2: Set up SSH key for remote access
+      - name: Set up SSH key
+        uses: webfactory/ssh-agent@v0.5.3
+        with:
+          ssh-private-key: ${{ secrets.WIAB_PRIVATE_SSH_KEY }}
+
+      # Step 3: Get the latest commit SHA, for the artifact
+      # (the key must be spelled COMMIT_SHA so that env.COMMIT_SHA resolves below)
+      - name: Get latest commit SHA
+        id: get_commit_sha
+        run: |
+          COMMIT_SHA=$(git rev-parse HEAD)
+          echo "COMMIT_SHA=$COMMIT_SHA" >> $GITHUB_ENV
+
+      # Step 4: Run the autodeploy script
+      - name: Run Auto Deploy Script
+        run: |
+          cd bin
+          ./autodeploy.sh --artifact-hash ${{ env.COMMIT_SHA }} --target-domain wiab-test-box.wire.link --force-redeploy
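Both offline workflows echo the S3 location they upload to; a hedged sketch for checking that an artifact for a given commit actually landed there (the SHA is a placeholder, and the `-custom` suffix matches the custom-artifact workflow only):

```bash
# Hypothetical commit SHA of the build to verify
SHA=0123456789abcdef0123456789abcdef01234567
curl -fsI "https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-${SHA}-custom.tgz" \
  && echo "artifact exists"
```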
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 000000000..81e4115ae
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,25 @@
+on:
+  push:
+    branches: [master, develop]
+  pull_request:
+    branches: [master, develop]
+jobs:
+  build:
+    name: build
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os:
+          - ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - uses: cachix/install-nix-action@v27
+      - uses: cachix/cachix-action@v15
+        with:
+          name: wire-server
+          signingKey: "${{ secrets.CACHIX_SIGNING_KEY }}"
+
+      - name: Lint
+        run: make shellcheck ENV="no-env"
diff --git a/.github/workflows/offline.yml b/.github/workflows/offline.yml
new file mode 100644
index 000000000..e7f9268b3
--- /dev/null
+++ b/.github/workflows/offline.yml
@@ -0,0 +1,76 @@
+on:
+  push:
+    branches: [master, develop]
+    tags: [ v* ]
+  pull_request:
+    branches: [master, develop]
+jobs:
+  offline:
+    name: Prepare offline package
+    # Useful to skip expensive CI when writing docs
+    if: "!contains(github.event.head_commit.message, 'skip ci')"
+    runs-on:
+      group: wire-server-deploy
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - uses: cachix/install-nix-action@v27
+      - uses: cachix/cachix-action@v15
+        with:
+          name: wire-server
+          signingKey: "${{ secrets.CACHIX_SIGNING_KEY }}"
+
+      - name: Install nix environment
+        run: nix-env -f default.nix -iA env
+
+      - name: Run offline build
+        run: ./offline/ci.sh
+        env:
+          GPG_PRIVATE_KEY: '${{ secrets.GPG_PRIVATE_KEY }}'
+          DOCKER_LOGIN: '${{ secrets.DOCKER_LOGIN }}'
+
+      - name: Get upload name
+        id: upload_name
+        run: |
+          # FIXME: Tag with a nice release name using the github tag...
+          # SOURCE_TAG=${GITHUB_REF#refs/tags/}
+          echo ::set-output name=UPLOAD_NAME::$GITHUB_SHA
+          # echo ::set-output name=UPLOAD_NAME::${SOURCE_TAG:-$GITHUB_SHA}
+
+      - name: Copy assets tarball to S3
+        run: |
+          aws s3 cp assets.tgz s3://public.wire.com/artifacts/wire-server-deploy-static-${{ steps.upload_name.outputs.UPLOAD_NAME }}.tgz
+          echo "Uploaded to: https://s3-$AWS_REGION.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-${{ steps.upload_name.outputs.UPLOAD_NAME }}.tgz"
+        env:
+          AWS_ACCESS_KEY_ID: '${{ secrets.AWS_ACCESS_KEY_ID }}'
+          AWS_SECRET_ACCESS_KEY: '${{ secrets.AWS_SECRET_ACCESS_KEY }}'
+          AWS_REGION: "eu-west-1"
+
+      - name: Build and upload wire-server-deploy container
+        run: |
+          container_image=$(nix-build --no-out-link -A container)
+
+          skopeo copy --retry-times 10 --dest-creds "$DOCKER_LOGIN" \
+            docker-archive:"$container_image" \
+            "docker://quay.io/wire/wire-server-deploy:${{ steps.upload_name.outputs.UPLOAD_NAME }}"
+        env:
+          DOCKER_LOGIN: '${{ secrets.DOCKER_LOGIN }}'
+
+      - name: Install terraform
+        uses: hashicorp/setup-terraform@v3
+        with:
+          terraform_version: "^1.3.7"
+          terraform_wrapper: false
+
+      - name: Deploy offline environment to hetzner
+        run: |
+          ./offline/cd.sh
+        env:
+          HCLOUD_TOKEN: '${{ secrets.HCLOUD_TOKEN }}'
+
+      - name: Clean up hetzner environment; just in case
+        if: always()
+        run: (cd terraform/examples/wire-server-deploy-offline-hetzner ; terraform init && terraform destroy -auto-approve)
+        env:
+          HCLOUD_TOKEN: '${{ secrets.HCLOUD_TOKEN }}'
diff --git a/.gitignore b/.gitignore
index e2aa0b738..641b57d70 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,19 @@ values-init-done
 *~
 # Emacs autosave files
 \#*\#
+
+# Envrc local overrides
+.envrc.local
+
+# Nix-created result symlinks
+result
+result-*
+
+.nix-env
+
+# for bin/secrets.sh
+secrets_cache/
+
+terraform.tfstate
+terraform.tfstate.backup
+kubeconfig.new
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..6a2fe1249
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,45 @@
+[submodule "ansible/roles-external/kubespray"]
+	path = ansible/roles-external/kubespray
+	url = https://github.com/kubernetes-sigs/kubespray.git
+[submodule "ansible/roles-external/elasticsearch"]
+	path = ansible/roles-external/elasticsearch
+	url = https://github.com/elastic/ansible-elasticsearch.git
+[submodule "ansible/roles-external/hostname"]
+	path = ansible/roles-external/hostname
+	url = https://github.com/ANXS/hostname.git
+[submodule "ansible/roles-external/ANXS.apt"]
+	path = ansible/roles-external/ANXS.apt
+	url = https://github.com/ANXS/apt.git
+[submodule "ansible/roles-external/ansible-role-java"]
+	path = ansible/roles-external/ansible-role-java
+	url = https://github.com/geerlingguy/ansible-role-java.git
+[submodule "ansible/roles-external/ansible-role-ntp"]
+	path = ansible/roles-external/ansible-role-ntp
+	url = https://github.com/geerlingguy/ansible-role-ntp.git
+[submodule "ansible/roles-external/ansible-cassandra"]
+	path = ansible/roles-external/ansible-cassandra
+	url = https://github.com/wireapp/ansible-cassandra.git
+[submodule "ansible/roles-external/ansible-minio"]
+	path = ansible/roles-external/ansible-minio
+	url = https://github.com/wireapp/ansible-minio.git
+[submodule "ansible/roles-external/ansible-tinc"]
+	path = ansible/roles-external/ansible-tinc
+	url = https://github.com/wireapp/ansible-tinc.git
+[submodule "ansible/roles-external/admin_users"]
+	path = ansible/roles-external/admin_users
+	url = https://github.com/cchurch/ansible-role-admin-users.git
+[submodule "ansible/roles-external/ansible-ntp-verify"]
+	path = ansible/roles-external/ansible-ntp-verify
+	url = https://github.com/wireapp/ansible-ntp-verify.git
+[submodule "ansible/roles-external/logrotate"]
+	path = ansible/roles-external/logrotate
+	url = https://github.com/nickhammond/ansible-logrotate.git
+[submodule "ansible/roles-external/sft"]
+	path = ansible/roles-external/sft
+	url = https://github.com/wireapp/ansible-sft.git
+[submodule "ansible/roles-external/andrewrothstein.unarchive-deps"]
+	path = ansible/roles-external/andrewrothstein.unarchive-deps
+	url = https://github.com/andrewrothstein/ansible-unarchive-deps
+[submodule "ansible/roles-external/cloudalchemy.node-exporter"]
+	path = ansible/roles-external/cloudalchemy.node-exporter
+	url = https://github.com/cloudalchemy/ansible-node-exporter
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a95da70b8..ccdba0a54 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,357 @@
+
+
+
+# 2021-08-27
+
+## Fixes
+
+* [Documentation] Fix offline deploy redis installation instructions, and SFT node tagging.
+* [Wire-Server-Metrics] Fix spacing.
+
+## Features
+
+* [Operations] Add a custom terraform rule to the base Makefile, to improve deployment flexibility with terraform.
+
+
+# 2021-06-16
+
+## Fixes
+
+* [Ansible] Prevent Minio installation from breaking when access or secret key contains `$`
+* [CI] Ensure that the right version of wire-server is built into the air-gap bundle
+
+
+# 2021-06-10
+
+## Fixes
+
+* update Cassandra role (#455)
+* fix automated Ansible deployment (#468)
+
+
+# 2021-05-10
+
+## Features
+
+* Airgap installer is available. See [./offline/docs.md] for rudimentary
+  instructions. We will integrate this into https://docs.wire.com/ over time
+* Switched to nix+direnv for installing all the required dependencies for wire-server-deploy. If you do not want to use these tools you can use the [`quay.io/wire/wire-server-deploy`](https://quay.io/wire/wire-server-deploy) container image and mount wire-server-deploy into it.
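For the container route mentioned above, a minimal sketch of mounting the repository into the published image (the mount path and entrypoint are assumptions, not documented here):

```bash
# Run the tooling image with the local checkout mounted into it
docker run --rm -it \
  -v "$PWD:/wire-server-deploy" \
  -w /wire-server-deploy \
  quay.io/wire/wire-server-deploy bash
```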
+
+## Versions
+
+* wire version 2.106.0 when using the offline installer. However airgap
+  bundles for charts might be moved to wire-server repository in the future; to
+  decouple wire-server releases from the base platform.
+* kubespray 2.15.0 (kubernetes 1.19.7)
+* ansible-restund v0.2.6 (restund version v0.4.16b1.0.53)
+* ansible-minio v2.1.0
+* ansible-cassandra version v0.1.3
+* ansible-elasticsearch 6.6.0
+
+
+## Breaking changes
+
+* Nix and direnv are used for installing all required tooling.
+
+* charts have been moved to wire-server. Chart lifecycle is now tied to
+  wire-server instead and is decoupled from the underlying platform. Charts in wire-server
+  should be installed with helm 3.
+
+* Our kubespray reference implementation has been bumped to kubespray 2.15.0
+  and kubernetes 1.19.7. This allows us to use Kubespray's support for offline deployments
+  and new Kubernetes API features.
+
+  If you were using our reference playbooks for setting up kubernetes, there is
+  no direct upgrade path. Instead you should set up a new cluster, migrate the
+  deployments there, and then point to the new cluster. This is rather easy at
+  the moment as we only run stateless services in Kubernetes at this point.
+
+* Restund role was bumped and uses `docker` instead of `rkt` now.
+  We advise bringing up a fresh `restund` server, so that `rkt` is not installed.
+  See https://github.com/wireapp/ansible-restund/commit/4db0bc066ded89cf0ae061e3ccac59f3738b33d9
+
+  If you want to re-use your existing server we recommend:
+
+  1. ssh into your `restund` server.
+  2. `systemctl stop restund.service`
+  3. now outside again, run the `restund.yml` playbook.
+
+
+# 2020-12-21
+
+* brig: Add setExpiredUserCleanupTimeout to configmap (#399) see also: https://github.com/wireapp/wire-server/pull/1264
+* [helm] Remove duplicate fields from brig section in the example value files (#398)
+* Add spar to the integration tests for brig (#397)
+
+# 2020-12-17
+
+## Update instructions
+
+A new mandatory option has been introduced to
+`brig` and `galley` which in the future will be used for Wire federation. This domain name
+is *not* optional even if federation is not used.
+
+Please update your `values/wire-server/values.yaml` to set `brig.optSettings.setFederationDomain`
+and `galley.settings.federationDomain` (Note the slightly different option name).
+
+Because federation is not enabled yet the value of this option does not really
+matter at this point, but we advise you to set it to the base domain of your
+wire installation.
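A hedged sketch of what that addition to `values/wire-server/values.yaml` could look like; the option names are quoted from the notes above, and the domain is a placeholder:

```yaml
# values/wire-server/values.yaml -- example.com is a placeholder domain
brig:
  optSettings:
    setFederationDomain: example.com
galley:
  settings:
    federationDomain: example.com
```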
+
+**NOTE**: These changes apply to chart version **0.129.0** and later, even though
+this release was made later than the **0.129.0** chart was published. We're sorry for the
+inconvenience.
+
+## Features
+
+* A chart has been added for setting up a single-node conferencing server (Also known as *SFT*) (#382)
+
+# 2020-12-07
+
+## Update instructions
+
+The redis chart that we updated to exposes the redis service as
+`redis-ephemeral-master` instead of `redis-ephemeral`.
+
+**You should update your `values/wire-server/values.yaml` to point gundeck to the new service name**
+```diff
+ redis:
+-  host: redis-ephemeral
++  host: redis-ephemeral-master
+```
+
+If a gundeck crashes whilst deploying this release, it might not be able to
+reconnect to redis until the release is fully rolled out. However, this risk is
+small.
+
+### If you installed the `wire/redis-ephemeral` chart directly:
+
+```
+helm upgrade redis-ephemeral wire/redis-ephemeral -f <values-file>
+helm upgrade wire-server wire/wire-server -f <values-file>
+```
+
+### If you installed the `wire/databases-ephemeral` chart:
+
+```
+helm upgrade databases-ephemeral wire/databases-ephemeral -f <values-file>
+helm upgrade wire-server wire/wire-server -f <values-file>
+```
+
+## Features
+
+* The redis chart is now backed by https://github.com/bitnami/charts/tree/master/bitnami/redis (#380)
+* Bump versions for webapp to latest production (#375, #386)
+* Introduce helm chart for legalhold (#378)
+* Add features endpoint to galley (#381)
+* Add tracestate header to nginz logs (#376)
+* Allow configuring customer extensions in brig (#279)
+* Remove cookie domain configuration from brig (#239)
+
+## Bug fixes
+
+* Fix invalid ObjectMeta in nginx-ingress-services chart (#385)
+* Fix fake-aws chart on Helm 3 (#379)
+
+## Internal Changes
+
+* New config parameters for federation (#384)
+  NOTE: This is not used yet.
+* Update to newer version of helm s3 plugin (#373)
+* Pin image version in cassandra-migrations and demo-smtp charts (#374)
+* Ansible: Allow custom log dir when pulling logs from an instance (#372)
+
+# 2020-10-28
+
+## Features
+
+* ansible/requirements.yml: Bump SFT for new checksum format (#361)
+* Create SFT servers in two groups (#356)
+* Skip creating SFT monitoring certs if there are no SFT servers (#357)
+* Delete the SFT SRV record after provisioning (#368)
+* Update message stats dashboard (#208)
+
+## Bug fixes / work-arounds
+
+* add support for cargohold s3Compatibility option (#364)
+
+## Documentation
+
+* Comment on email visibility feature flag (#276)
+
+## Internal
+
+* Better nix support (#362, #358, #367, #369)
+* ansible/Makefile: Print errors correctly when ENV is not in order (#359)
+* Makefile target to get logs (#355)
+* Makefile target to decrypt sops containers (#354)
+* [tf-module:push-notifications] Allow to define multiple apps per client platform (#347)
+
+# 2020-10-06
+
+## Internal
+
+* Ansible & Terraform for bootstrapping Kubernetes (#343)
+* Ansible & Terraform SFT improvements (#344, #346, #348)
+
+# 2020-09-28
+
+## Features
+
+* Documentation: Add galley feature flags and default AWS region to example values files (#328, #335)
+* Privacy: Add logrotation of 3 days to all pod logs (#329)
+* Security: Update TLS config: Drop CBC cipher suites (#323, #324)
+
+## Bug Fixes
+
+* fix sanitized_request parsing on nginx (#330)
+
+## Internal
+
+* Add automation for deploying SFT servers (#337, #341, #322)
+* Add account number to output of terraform gundeck module (#326)
+* remove issuance of a default search domain via the AWS dhcp servers. breaks dns lookup inside of k8s. (#338)
+* [terraform-module:cargohold] Replace subnet IDs input with route table IDs (#331)
+* [terraform-module] Introduce network load balancer (#299)
+
+# 2020-07-29
+
+## Features
+
+* [tf-module:dns-records] Add output for FQDNs (#315)
+* README.md: stop explicitly referring to the "develop" branch (#318)
+* nginz redirect /teams/invitations/by-email to brig (#317)
+* S3 support (#311, #316)
+* Provide AWS_REGION variable to cargohold (#314)
+
+# 2020-07-13
+
+## Features
+
+* Brig: Allow overriding optSettings.setRestrictUserCreation (#313)
+* add a bash script for talking to s3 with AWS authentication V4. for testing s3 connection during installation. (#305)
+
+# 2020-07-07
+
+## Notes
+
+This release contains a staging version of the webapp.
+So, you might want to be a bit more cautious or
+even skip this one entirely.
+
+## Features
+
+None
+
+## Bug Fixes
+
+* [charts] Update frontend apps version: webapp (#308)
+* removed unused replicaCount settings (#304)
+
+## Internal Changes
+
+* team-settings: Set default of `FEATURE_ENABLE_PAYMENT` to false (#294)
+* [terraform modules] Add a module to create some DNS records (#298)
+
+
+# 2020-06-26
+
+## Features
+
+* [charts] introduce cert-manager support in `nginx-ingress-services` to automate TLS certificate
+  issuing. Please refer to the [docs](https://docs.wire.com/how-to/install/helm.html#how-to-direct-traffic-to-your-cluster)
+  or the issue [#280](https://github.com/wireapp/wire-server-deploy/pull/280) for more details.
+
+## Bug Fixes
+
+* [charts] Update frontend apps version: webapp, team-settings, due to a broken team-settings version (#300)
+
+## Internal Changes
+
+* cleanup scripts used in automation (#295)
+* ongoing work in several Terraform modules: ingress, CORS, cargohold
+
+For more information, please refer to the [diff](https://github.com/wireapp/wire-server-deploy/compare/v2020-06-19...v2020-06-26)
+
+# 2020-06-19
+
+## Features
+
+* Update all three frontends (webapp, team-settings, account-pages) to latest production release (#286)
+
+## Bug Fixes
+
+* Quote smsSender (#287)
+
+## Internal Changes
+
+* Add Github templates for Issues and PRs (#259, d5b16a99f0aa)
+
+# 2020-06-03
+
+## Features
+
+- Add .elasticsearch.additionalWriteIndex to brig config (#277)
+- Upgrade restund to include fix from wireapp/restund#3 (#278)
+
+## Internal Changes
+
+- TF modules: Ensure uniqueness of cargohold bucket name (#272)
+
+# 2020-05-25
+
+- Fix typo in default galley helm values: teamSearchVisibility (#271)
+- Make field brig.config.aws.sesQueue to be required if being used (#268)
+
+# 2020-05-15
+
+## Upgrade Notes
+
+Deployment steps:
+1. Deploy new version of all services as usual, make sure `galley.config.settings.enableIndexedBillingTeamMembers` is `false`.
+1. Make sure `galley-migrate-data` job completes.
+1. Set `galley.config.settings.enableIndexedBillingTeamMembers` to `true` and re-deploy the same version.
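A rough sketch of that two-phase rollout with helm; the chart and job names are taken from the notes above, while the values file and wait flags are illustrative:

```bash
# Phase 1: roll out with the flag still disabled
helm upgrade wire-server wire/wire-server -f values.yaml \
  --set galley.config.settings.enableIndexedBillingTeamMembers=false
# Wait for the data-migration job to finish
kubectl wait --for=condition=complete job/galley-migrate-data --timeout=1h
# Phase 2: enable the flag and re-deploy the same version
helm upgrade wire-server wire/wire-server -f values.yaml \
  --set galley.config.settings.enableIndexedBillingTeamMembers=true
```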
+
+## Features
+
+- Add aws region in brig and galley in prod values example file (#229)
+- Add job to migrate galley data post-install/upgrade (#263)
+- Add customSearchVisibility for galley chart (#252)
+- Add indexedBillingTeamMember feature flag for galley (#251)
+- Add maxFanoutSize to galley's options (#231)
+- Add missing galley route to nginz (#223)
+- Move to helm 3 (#236)
+- Allow to set HTTP proxy environment vars for brig, cargohold, galley, gundeck, proxy, spar (#217)
+- Add possibility to specify proxy env vars in Ansible inventory (#249)
+- Add example for declaration of turns servers (#235)
+- Skip memorizing the IPs of redis nodes if there are not any. (#224)
+- Add a commented out block for specifying a non-default elasticsearch apt mirror (#225)
+
+## Bug Fixes
+
+- Fix helm --wait for cassandra (#253)
+- Fix node_labels declaration example in inventory (#226)
+- Fix smtpCredentials to match EmailSMTPCredentials in brig Options.hs (#265)
+
+## Internal Changes
+
+- Deploy instances (#238)
+- Remove unused table (#222)
+- Add TF module for brig to provide prekey locking, an event queue and (optionally) email sending services (#244)
+- Add module to enable mobile push notification for Gundeck (#241)
+- Add module to set up object storage (S3) on AWS for Cargohold (#243)
+- Add terraform configuration from the offline environment. (#230)
+- Add module to initialize state sharing on AWS (#234)
+- Add missing cassandra host value for elasticsearch-index chart (#227)
+- Ensure that no provider is defined in any of the modules (#257)
+
 # 2020-04-24
 
 ## Features
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 000000000..7953b48f1
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,13 @@
+FROM nixos/nix
+
+COPY . /wire-server-deploy
+
+RUN nix-env -iA nixpkgs.bash nixpkgs.git
+
+RUN nix-build /wire-server-deploy/default.nix -A env --out-link /.nix-env
+
+RUN rm -rf /wire-server-deploy
+
+ENV PATH="/.nix-env/bin:$PATH"
+ENV LOCALHOST_PYTHON="/.nix-env/bin/python"
+
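A hedged sketch for building and entering this image locally; the tag is arbitrary:

```bash
# Build the tooling image from the repository root
docker build -t wire-server-deploy-env .
# Open a shell with the nix-provided tools on PATH
docker run --rm -it wire-server-deploy-env bash
```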
+######################################## ANSIBLE ############################### +ANSIBLE_DIR = $(MKFILE_DIR)/ansible +export ANSIBLE_CONFIG = $(ANSIBLE_DIR)/ansible.cfg + + +.PHONY: bootstrap +bootstrap: check-inputs-ansible + ansible-playbook $(ANSIBLE_DIR)/bootstrap.yml \ + -i $(ENV_DIR)/gen/terraform-inventory.yml \ + -i $(ENV_DIR)/inventory \ + --private-key $(ENV_DIR)/operator-ssh.dec \ + -vv + +# Usage: ENV=bella make create-inventory renew-certs +# Then encrypt the new kubeconfig with sops +.PHONY: renew-certs +renew-certs: check-inputs-ansible + ansible-playbook ${ANSIBLE_DIR}/kubernetes-renew-certs.yml \ + -i ${ENV_DIR}/gen/terraform-inventory.yml \ + -i ${ENV_DIR}/inventory \ + --private-key ${ENV_DIR}/operator-ssh.dec \ + -vv + mv $(ANSIBLE_DIR)/kubeconfig.new ${ENV_DIR}/ + echo "Now run:" + echo "cd ${ENV_DIR} && sops -e kubeconfig.new > kubeconfig && mv kubeconfig.new kubeconfig.dec && git add kubeconfig" + +# Usage: ENV=bella make create-inventory fetch-kubeconfig +# Then encrypt the new kubeconfig with sops +.PHONY: fetch-kubeconfig +fetch-kubeconfig: check-inputs-ansible + ansible-playbook ${ANSIBLE_DIR}/kubernetes-fetch-kubeconfig.yml \ + -i ${ENV_DIR}/gen/terraform-inventory.yml \ + -i ${ENV_DIR}/inventory \ + --private-key ${ENV_DIR}/operator-ssh.dec \ + -vv + mv $(ANSIBLE_DIR)/kubeconfig.new ${ENV_DIR}/ + +.PHONY: provision-sft +provision-sft: check-inputs-ansible + ansible-playbook $(ANSIBLE_DIR)/provision-sft.yml \ + -i $(ENV_DIR)/gen/terraform-inventory.yml \ + -i $(ENV_DIR)/inventory \ + --private-key $(ENV_DIR)/operator-ssh.dec \ + -vv + +# FUTUREWORK: https://github.com/zinfra/backend-issues/issues/1763 +.PHONY: kube-minio-static-files +kube-minio-static-files: check-inputs-ansible check-inputs-helm + ansible-playbook $(ANSIBLE_DIR)/kube-minio-static-files.yml \ + -i $(ENV_DIR)/gen/terraform-inventory.yml \ + -i $(ENV_DIR)/inventory \ + --private-key $(ENV_DIR)/operator-ssh.dec \ + --extra-vars "service_cluster_ip=$$(KUBECONFIG=$(ENV_DIR)/kubeconfig.dec kubectl get service fake-aws-s3 -o json | jq -r .spec.clusterIP)" \ + -vv + +.PHONY: get-logs +get-logs: LOG_DIR ?= $(ENV_DIR) +get-logs: check-inputs-ansible + ansible-playbook $(ANSIBLE_DIR)/get-logs.yml \ + -i $(ENV_DIR)/gen/terraform-inventory.yml \ + -i $(ENV_DIR)/inventory \ + --private-key $(ENV_DIR)/operator-ssh.dec \ + --extra-vars "log_host=$(LOG_HOST)" \ + --extra-vars "log_service=$(LOG_SERVICE)" \ + --extra-vars "log_since='$(LOG_SINCE)'" \ + --extra-vars "log_until='$(LOG_UNTIL)'" \ + --extra-vars "log_dir=$(LOG_DIR)" + + + +######################################## HELM ################################## +.PHONY: deploy +deploy: check-inputs-helm + KUBECONFIG=$(ENV_DIR)/kubeconfig.dec \ + helmfile \ + --file $(ENV_DIR)/helmfile.yaml \ + sync \ + --concurrency 1 + + + +######################################## CREDENTIALS ########################### + +.PHONY: decrypt +decrypt: hcloud-token.dec operator-ssh.dec kubeconfig.dec + +.PHONY: hcloud-token.dec +.SILENT: hcloud-token.dec +hcloud-token.dec: check-env-dir + [ ! -e $(ENV_DIR)/$(basename $(@)) ] && exit 0 \ + || ( \ + sops -d $(ENV_DIR)/$(basename $(@)) > $(ENV_DIR)/$(@) || rm $(ENV_DIR)/$(@); \ + test -s $(ENV_DIR)/$(@) || (echo "[ERR] Decryption failed: $(basename $(@))" && exit 1) \ + ) + +.PHONY: operator-ssh.dec +.SILENT: operator-ssh.dec +operator-ssh.dec: check-env-dir + [ ! 
+
+
+
+######################################## HELM ##################################
+.PHONY: deploy
+deploy: check-inputs-helm
+	KUBECONFIG=$(ENV_DIR)/kubeconfig.dec \
+	helmfile \
+		--file $(ENV_DIR)/helmfile.yaml \
+		sync \
+		--concurrency 1
+
+
+
+######################################## CREDENTIALS ###########################
+
+.PHONY: decrypt
+decrypt: hcloud-token.dec operator-ssh.dec kubeconfig.dec
+
+.PHONY: hcloud-token.dec
+.SILENT: hcloud-token.dec
+hcloud-token.dec: check-env-dir
+	[ ! -e $(ENV_DIR)/$(basename $(@)) ] && exit 0 \
+	|| ( \
+		sops -d $(ENV_DIR)/$(basename $(@)) > $(ENV_DIR)/$(@) || rm $(ENV_DIR)/$(@); \
+		test -s $(ENV_DIR)/$(@) || (echo "[ERR] Decryption failed: $(basename $(@))" && exit 1) \
+	)
+
+.PHONY: operator-ssh.dec
+.SILENT: operator-ssh.dec
+operator-ssh.dec: check-env-dir
+	[ ! -e $(ENV_DIR)/$(basename $(@)) ] && exit 0 \
+	|| ( \
+		sops -d $(ENV_DIR)/$(basename $(@)) > $(ENV_DIR)/$(@) || rm $(ENV_DIR)/$(@); \
+		test -s $(ENV_DIR)/$(@) || (echo "[ERR] Decryption failed: $(basename $(@))" && exit 1); \
+		chmod 0600 $(ENV_DIR)/$(@) \
+	)
+
+.PHONY: kubeconfig.dec
+.SILENT: kubeconfig.dec
+kubeconfig.dec: check-env-dir
+	[ ! -e $(ENV_DIR)/$(basename $(@)) ] && exit 0 \
+	|| ( \
+		sops -d $(ENV_DIR)/$(basename $(@)) > $(ENV_DIR)/$(@) || rm $(ENV_DIR)/$(@); \
+		test -s $(ENV_DIR)/$(@) || (echo "[ERR] Decryption failed: $(basename $(@))" && exit 1) \
+	)
+
+.PHONY: clean-decrypt
+clean-decrypt: check-env-dir
+	rm $(ENV_DIR)/*.dec
+
+
+
+######################################## Fail-safes ############################
+
+.PHONY: check-env-dir
+check-env-dir: $(ENV_DIR)
+$(ENV_DIR):
+	$(error "[ERR] Directory does not exist: $(@)")
+
+
+.PHONY: check-inputs-terraform
+check-inputs-terraform: $(ENV_DIR)/hcloud-token.dec
+
+$(ENV_DIR)/hcloud-token.dec:
+	$(error "[ERR] File does not exist: $(@)")
+
+
+.PHONY: check-inputs-ansible
+check-inputs-ansible: $(ENV_DIR)/inventory $(ENV_DIR)/gen/terraform-inventory.yml $(ENV_DIR)/operator-ssh.dec
+
+$(ENV_DIR)/inventory:
+	$(error "[ERR] Directory does not exist: $(@)")
+
+$(ENV_DIR)/gen/terraform-inventory.yml:
+	$(error "[ERR] File does not exist: $(@) - It's generated from Terraform output")
+
+$(ENV_DIR)/operator-ssh.dec:
+	$(error "[ERR] File does not exist: $(@) - It must contain the private key to ssh into servers")
+
+
+.PHONY: check-inputs-helm
+check-inputs-helm: $(ENV_DIR)/kubeconfig.dec
+
+$(ENV_DIR)/kubeconfig.dec:
+	$(error "[ERR] File does not exist: $(@) - Ensure that Kubernetes is installed")
+
+######################################## Lint ##################################
+
+.PHONY: shellcheck
+shellcheck:
+	./bin/shellcheck.sh
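Putting the targets above together, a typical end-to-end flow might look like this; the environment name is a placeholder, and the exact order is a sketch rather than a prescribed procedure:

```bash
# Point the Makefile at an environment definition
export ENV=staging            # resolves to ../cailleach/environments/staging
make decrypt                  # sops-decrypt hcloud token, ssh key, kubeconfig
make re-init apply            # terraform init + apply
make create-inventory         # render the ansible inventory from terraform output
make bootstrap                # run the ansible playbooks against the servers
make deploy                   # helmfile sync against the new cluster
```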
diff --git a/README.md b/README.md
index 8f58f4628..54f2fd9b5 100644
--- a/README.md
+++ b/README.md
@@ -20,27 +20,7 @@ All the documentation on how to make use of this repository is hosted on https:/
 
 ## Contents
 
-* `ansible/` contains ansible roles and playbooks to install kuberentes, cassandra, etc. See the [Administrator's Guide](https://docs.wire.com) for more info.
+* `ansible/` contains Ansible roles and playbooks to install Kubernetes, Cassandra, etc. See the [Administrator's Guide](https://docs.wire.com) for more info.
 * `charts/` contains helm charts that can be installed on kubernetes. The charts are mirrored to S3 and can be used with `helm repo add wire https://s3-eu-west-1.amazonaws.com/public.wire.com/charts`. See the [Administrator's Guide](https://docs.wire.com) for more info.
 * `terraform/` contains some examples for provisioning servers. See the [Administrator's Guide](https://docs.wire.com) for more info.
 * `bin/` contains some helper bash scripts. Some are used in the [Administrator's Guide](https://docs.wire.com) when installing wire-server, and some are used for developers/maintainers of this repository.
-
-## For Maintainers of wire-server-deploy
-
-### git branches
-
-* `master` branch is the production branch and the one where helm charts are mirrored to S3, and recommended for use. The helm chart mirror can be added as follows: `helm repo add wire https://s3-eu-west-1.amazonaws.com/public.wire.com/charts`
-* `develop` is bleeding-edge, your PRs should branch from here. There is a mirror to S3 you can use if you need to use bleeding edge helm charts: `helm repo add wire-develop https://s3-eu-west-1.amazonaws.com/public.wire.com/charts-develop`. Note that versioning here is done with git tags, not actual git commits, in order not to pollute the git history.
-
-### developing charts
-
-For local development, instead of `helm install wire/<chart-name>`, use
-
-```bash
-./bin/update.sh ./charts/<chart-name> # this will clean and re-package subcharts
-helm install charts/<chart-name> # specify a local file path
-```
-
-### ./bin/sync.sh
-
-This script is used to mirror the contents of this github repository with S3 to make it easier for us and external people to use helm charts. Usually CI will make use of this automatically on merge to master/develop, but you can also run that manually after bumping versions.
diff --git a/ansible/.gitignore b/ansible/.gitignore
index 066c8eb83..345d68016 100644
--- a/ansible/.gitignore
+++ b/ansible/.gitignore
@@ -1,6 +1,6 @@
-roles-external/*
 hosts.ini
 artifacts
 secrets
 *.retry
 output
+roles-override
diff --git a/ansible/Makefile b/ansible/Makefile
index 6e0608d7a..c1213bc7a 100644
--- a/ansible/Makefile
+++ b/ansible/Makefile
@@ -1,23 +1,87 @@
+ANSIBLE_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+# Please ignore this if you're not a wire employee
+CAILLEACH_DIR:=$(abspath ${ANSIBLE_DIR}/../../cailleach)
+SHELL:=/usr/bin/env bash -eo pipefail
 
-default: download
+.PHONY: provision-sft
+provision-sft: check-env
+	ansible-playbook ${ANSIBLE_DIR}/provision-sft.yml \
+	-i ${ENV_DIR}/gen/terraform-inventory.yml \
+	-i ${ENV_DIR}/inventory \
+	--private-key ${ENV_DIR}/operator-ssh.dec \
+	-vv
 
-.PHONY: download
-download: download-ansible-roles download-kubespray download-cli-binaries
+.PHONY: roles-override
+.SILENT: roles-override
+roles-override:
+	[ ! -e $(ENV_DIR)/kubespray.ref ] \
+	|| ( \
+		rm -rf ./roles-override; \
+		git clone https://github.com/kubernetes-sigs/kubespray ./roles-override/kubespray; \
+		cd ./roles-override/kubespray; \
+		git config advice.detachedHead false; \
+		git checkout "$$(cat $(ENV_DIR)/kubespray.ref)"; \
+	)
 
-.PHONY: download-kubespray
-download-kubespray:
-	poetry run ansible-playbook -i localhost, -c local download_kubespray.yml
+.PHONY: bootstrap
+bootstrap: check-env roles-override
+	ansible-playbook ${ANSIBLE_DIR}/bootstrap.yml \
+	-i ${ENV_DIR}/gen/terraform-inventory.yml \
+	-i ${ENV_DIR}/inventory \
+	--private-key ${ENV_DIR}/operator-ssh.dec \
+	-vv
 
-.PHONY: download-cli-binaries
-download-cli-binaries: download-ansible-roles
-	# assumes /usr/local/bin is writable and on your PATH
-	# assumes amd64 (change variables if needed)
-	poetry run ansible-playbook -i localhost, -c local download_cli_binaries.yml
+# FUTUREWORK: https://github.com/zinfra/backend-issues/issues/1763
+.PHONY: kube-minio-static-files
+kube-minio-static-files: check-env
+	ansible-playbook ${ANSIBLE_DIR}/kube-minio-static-files.yml \
+	-i ${ENV_DIR}/gen/terraform-inventory.yml \
+	-i ${ENV_DIR}/inventory \
+	--private-key ${ENV_DIR}/operator-ssh.dec \
+	--extra-vars "service_cluster_ip=$$(KUBECONFIG=${ENV_DIR}/gen/artifacts/admin.conf kubectl get service fake-aws-s3 -o json | jq -r .spec.clusterIP)" \
+	-vv
 
-.PHONY: download-ansible-roles
-download-ansible-roles:
-	poetry run ansible-galaxy install -r requirements.yml
+LOG_UNTIL ?= "now"
+.PHONY: get-logs
+get-logs: check-env
+ifndef LOG_HOST
+	$(error please define LOG_HOST)
+endif
+ifndef LOG_SERVICE
+	$(error please define LOG_SERVICE)
+endif
+ifndef LOG_SINCE
+	$(error please define LOG_SINCE)
+endif
+	ansible-playbook ${ANSIBLE_DIR}/get-logs.yml \
+	-i ${ENV_DIR}/gen/terraform-inventory.yml \
+	-i ${ENV_DIR}/inventory.yml \
+	--private-key ${ENV_DIR}/operator-ssh.dec \
+	--extra-vars "log_host=${LOG_HOST}" \
+	--extra-vars "log_service=${LOG_SERVICE}" \
+	--extra-vars "log_since='${LOG_SINCE}'" \
+	--extra-vars "log_until='${LOG_UNTIL}'" \
+	--extra-vars "log_dir=${LOG_DIR}"
+
+.PHONY: ensure-env-dir
+ensure-env-dir:
+ifndef ENV_DIR
+ifndef ENV
+	$(error please define either ENV or ENV_DIR)
+else
+ENV_DIR=${CAILLEACH_DIR}/environments/${ENV}
+endif
+endif
+
+${ENV_DIR}/inventory:
+	$(error please make sure ${ENV_DIR}/inventory exists)
+
+${ENV_DIR}/gen/terraform-inventory.yml:
+	$(error please make sure you have applied terraform for ${ENV_DIR})
+
+${ENV_DIR}/operator-ssh.dec:
+	$(error please make sure ${ENV_DIR}/operator-ssh.dec exists and contains the private key to ssh into servers)
+
+.PHONY: check-env
+check-env: ensure-env-dir ${ENV_DIR}/operator-ssh.dec ${ENV_DIR}/gen/terraform-inventory.yml ${ENV_DIR}/inventory
-
-.PHONY: download-ansible-roles-force
-download-ansible-roles-force:
-	poetry run ansible-galaxy install -r requirements.yml --force
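A hedged invocation of the `get-logs` target above; the environment, host, and service names are placeholders:

```bash
# Fetch two hours of logs for one service from one host
ENV=prod \
LOG_HOST=cassandra01 \
LOG_SERVICE=cassandra \
LOG_SINCE="2 hours ago" \
make -C ansible get-logs
```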
diff --git a/ansible/README.md b/ansible/README.md
index 0dde82f54..916ebd3d6 100644
--- a/ansible/README.md
+++ b/ansible/README.md
@@ -3,3 +3,221 @@
 In a production environment, some parts of the wire-server infrastructure (such as e.g. cassandra databases) are best configured outside kubernetes. Additionally, kubernetes can be rapidly set up with a project called kubespray, via ansible.
 
 This directory hosts a range of ansible playbooks to install kubernetes and databases necessary for wire-server. For documentation on usage, please refer to the [Administrator's Guide](https://docs.wire.com), notably the production installation.
+
+
+## Bootstrap environment created by `terraform/environment`
+
+An 'environment' is supposed to represent all the setup required for the Wire
+backend to function.
+
+'Bootstrapping' an environment means running a range of idempotent ansible
+playbooks against servers specified in an inventory, resulting in a fully
+functional environment. This action can be re-run as often as you want (e.g. in
+case you change some variables or upgrade to new versions).
+
+At the moment, the environment can have SFT servers as well as machines on which
+Kubernetes can be deployed; more will be added.
+
+1. Please ensure `ENV_DIR` or `ENV` are exported as specified in the [docs in
+   the terraform folder](../terraform/README.md)
+1. Ensure `$ENV_DIR/operator-ssh.dec` exists and contains an ssh key for the
+   environment.
+1. Ensure that `make apply` and `make create-inventory` have been run for the
+   environment. Please refer to the [docs in the terraform
+   folder](../terraform/README.md) for details about how to run this.
+1. Ensure all required variables are set in `$ENV_DIR/inventory/*.yml`
+1. Running `make bootstrap` from this directory will bootstrap the
+   environment.
+
+## Bootstrap a Kubernetes cluster with Kubespray
+
+* while necessary Inventory hosts & groups are being defined/generated with Terraform
+  (see `terraform/environment/kubernetes.inventory.tf`), Kubespray Inventory variables
+  are supposed to be defined in `${ENV_DIR}/inventory/inventory.yml`
+* after bootstrapping Kubernetes, a plain-text version of the kubeconfig file should
+  exist under `$ENV_DIR/kubeconfig.dec`[1]
+
+[1] For Wire employees: Encrypt this file using the `sops` toolchain in
+*cailleach*.
+
+## Operating SFT Servers
+
+There are a few things to consider while running SFT servers.
+
+1. Restarting SFT servers while a call is going on will drop the call. To avoid
+   this, we must provide 6 hours of grace period after stopping SRV record
+   announcements.
+1. Let's Encrypt will not issue more than 50 certificates per registered domain
+   per week.
+1. Let's Encrypt will not do more than 5 renewals per set of domains.
+
+To deal with these issues, we create 2 groups (blue and green) of the SFT
+servers. These groups are configured like this in terraform:
+```tfvars
+sft_server_names_blue = ["1", "2"] # defaults to []
+sft_server_type_blue = "cx21" # defaults to "cx11"
+sft_server_names_green = ["3", "4"] # defaults to []
+sft_server_type_green = "cx21" # defaults to "cx11"
+```
+
+Terraform will put all the SFT servers (blue and green) in a group called
+`sft_servers` and additionally, it will put the blue servers in
+`sft_servers_blue` group and green servers in `sft_servers_green` group. This
+allows putting common variables in the `sft_servers` group and uncommon ones
+(like `sft_artifact_file_url`) in the respective groups.
+
+To maintain uptime, at least one of the groups should be active. The size of the
+groups should ideally be equal and one group must be able to support peak
+traffic.
+
+### Deployment
+
+Assuming blue servers are serving version 42, green servers are serving version 43 and we want to upgrade to version 44.
+
+Note: The releases/artifacts for SFT can be found at: https://github.com/wearezeta/avs-service/releases
+
+We are going to be working on the `group_vars` files in the cailleach (https://github.com/zinfra/cailleach) repository, located under `environments/prod/inventory/group_vars/`
+
+In this case the initial group vars for the `sft_servers_blue` group would look
+like this:
+
+```yaml
+sft_servers_blue:
+  vars:
+    sft_artifact_file_url: "https://example.com/path/to/sftd_42.tar.gz"
+    sft_artifact_checksum: somechecksum_42
+    srv_announcer_active: true
+```
+
+For `sft_servers_green`, `srv_announcer_active` must be `false`.
+
+1. Make sure all env variables like `ENV`, `ENV_DIR` are set. Here we are working on the `prod` environment, so we do `ENV='prod'`
+2. Create terraform inventory (This section assumes all commands are executed
+   from the root of this repository)
+   ```bash
+   make -C terraform/environment create-inventory
+   ```
+3. Setup green servers to have version 44 and become active:
+   ```yaml
+   sft_servers_green:
+     vars:
+       sft_artifact_file_url: "https://example.com/path/to/sftd_44.tar.gz"
+       sft_artifact_checksum: somechecksum_44
+       srv_announcer_active: true
+   ```
+4. At this point, you should create a Pull Request for the changes, and have it merged with `cailleach` once approved by the team.
+   The CI will now run ansible automatically, and the changes will take effect. The following lines are for reference only, and represent what the CI does, and what used to be done by hand at this point:
+
+   Run ansible in Wire Server Deploy
+   ```bash
+   make -C ansible provision-sft
+   ```
+
+   This will make sure that green SFT servers will have version 44 of sftd and
+   they are available. At this point we will have both blue and green servers
+   active.
+5. Ensure that new servers function properly. If they don't, you can set
+   `srv_announcer_active` to `false` for the green group, and make a PR against `cailleach`.
+6. If the servers are working properly, setup the old servers to be deactivated:
+   ```yaml
+   sft_servers_blue:
+     vars:
+       sft_artifact_file_url: "https://example.com/path/to/sftd_42.tar.gz"
+       sft_artifact_checksum: somechecksum_42
+       srv_announcer_active: false
+   ```
+7. At this point again, you should make and merge a Pull Request against `cailleach` with these changes; the following line represents what CI then does, and used to be done by hand:
+
+   Run ansible again
+   ```bash
+   make -C ansible provision-sft
+   ```
+8. There is a race condition in stopping SRV announcers, which will mean that
+   sometimes a server will not get removed from the list. This can be found by
+   running this command:
+   ```bash
+   dig SRV _sft._tcp.<domain>
+   ```
+
+   If an old server is found even after TTL for the record has expired, it must
+   be taken care of manually. It is safe to delete all the SRV records, they
+   should get re-populated within 20 seconds.
+
+### Decommission one specific server
+
+Assuming the Terraform variables look like this and we have to take down server
+`"1"`.
+```tfvars
+sft_server_names_blue = ["1", "2"] # defaults to []
+sft_server_type_blue = "cx21" # defaults to "cx11"
+sft_server_names_green = ["3", "4"] # defaults to []
+sft_server_type_green = "cx21" # defaults to "cx11"
+environment = "staging"
+```
+
+#### When the server is active
+
+1. Add one more server to the blue group by replacing the first line with:
+   ```tfvars
+   sft_server_names_blue = ["1", "2", "5"] # These shouldn't overlap with the green ones
+   ```
+1. Run terraform (this will wait for approval)
+   ```bash
+   make -C terraform/environment init apply create-inventory
+   ```
+1. Set `srv_announcer_active` to `false` only for the host which is to be taken
+   down. Here the ansible host name would be `staging-sft-1`
+1. Run ansible
+   ```bash
+   make -C ansible provision-sft
+   ```
+1. Ensure that the SRV records don't contain `sft1`, same as last step of deployment procedure.
+1. Monitor `sft_calls` metric to make sure that there are no calls left.
+1. Setup instance for deletion by removing it from `sft_server_names_blue`:
+   ```tfvars
+   sft_server_names_blue = ["2", "5"]
+   ```
+1. Run terraform (this will again wait for approval)
+   ```bash
+   make -C terraform/environment apply
+   ```
+
+#### When the server is not active
+
+1. Remove the server from `sft_server_names_blue` and add a new name by
+   replacing the first line like this:
+   ```tfvars
+   sft_server_names_blue = ["2", "5"]
+   ```
+1. Run terraform (this will wait for approval)
+   ```bash
+   make -C terraform/environment init apply
+   ```
+
+### Change server type of all servers
+
+Assuming:
+1. Initial tfvars has these variables:
+   ```tfvars
+   sft_server_names_blue = ["1", "2"] # defaults to []
+   sft_server_type_blue = "cx21" # defaults to "cx11"
+   sft_server_names_green = ["3", "4"] # defaults to []
+   sft_server_type_green = "cx21" # defaults to "cx11"
+   environment = "staging"
+   ```
+1. We want to make all the servers "cx31".
+1. The blue group is active, green is not.
+
+We can do it like this:
+
+1. Replace all the green servers by changing `server_type`:
+   ```tfvars
+   sft_server_type_green = "cx31"
+   ```
+1. Run terraform (will wait for approval)
+   ```bash
+   make -C terraform/environment init apply create-inventory
+   ```
+1. Deploy the same version as blue to green by following steps in the deployment
+   procedure.
+1. Once the blue servers are inactive and all the calls have finished, replace
+   them the same way as green servers. No need to make them active again.
diff --git a/ansible/admin_users.yml b/ansible/admin_users.yml
index 3977782c4..1871b0213 100644
--- a/ansible/admin_users.yml
+++ b/ansible/admin_users.yml
@@ -5,6 +5,7 @@
 # See the `-k` and `-K` flags when running ansible-playbook.
 #
 - hosts: all
+  environment: "{{ proxy_env | default({}) }}"
   vars:
     admin_users:
       - username: admin
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
index e969c3ec2..c9d74e2da 100644
--- a/ansible/ansible.cfg
+++ b/ansible/ansible.cfg
@@ -4,11 +4,14 @@ control_path = /tmp/ansible-%%r@%%h:%%p
 
 [defaults]
 retry_files_enabled = False
-roles_path = ./roles-external:./roles
+roles_path = ./roles-external:./roles:./roles-external/sft/roles:./roles-external/kubespray/roles
 gathering = smart
 host_key_checking = no
 
+interpreter_python = /usr/bin/python3
+
+
 [privilege_escalation]
 become = yes
diff --git a/ansible/backup_rabbitmq.yml b/ansible/backup_rabbitmq.yml
new file mode 100644
index 000000000..242622f1c
--- /dev/null
+++ b/ansible/backup_rabbitmq.yml
@@ -0,0 +1,41 @@
+---
+- hosts: rmq-cluster
+  vars:
+    backup_dir: "/path/to/backup" # need to pass this as an extra var
+  tasks:
+    - name: Enable RabbitMQ management plugin
+      command: rabbitmq-plugins enable rabbitmq_management
+      become: true
+
+    - name: Ensure backup directory exists
+      file:
+        path: "{{ backup_dir }}"
+        state: directory
+
+    - name: Export RabbitMQ configurations
+      command: rabbitmqadmin export "{{ backup_dir }}/definitions.json"
+
+    - name: Get RabbitMQ node directory
+      command: rabbitmqctl eval 'rabbit_mnesia:dir().'
+      register: node_directory
+
+    - name: Set RabbitMQ node directory path
+      set_fact:
+        rabbitmq_node_dir: "{{ node_directory.stdout | regex_replace('\"', '') }}"
+
+    - name: Stop RabbitMQ service
+      service:
+        name: rabbitmq-server
+        state: stopped
+
+    - name: List contents of RabbitMQ node directory
+      command: ls {{ rabbitmq_node_dir }}
+      register: dir_contents
+
+    - name: Back up RabbitMQ data directory
+      command: tar cvf "{{ backup_dir }}/rabbitmq-backup.tgz" {{ rabbitmq_node_dir }}
+
+    - name: Start RabbitMQ service
+      service:
+        name: rabbitmq-server
+        state: started
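The playbook above only covers the backup direction. A hedged sketch of the matching restore steps, run directly on the broker host; the paths mirror the playbook's variables, and everything else is an assumption:

```bash
# Stop the broker before touching its data directory
systemctl stop rabbitmq-server
# Restore the node data directory captured by the playbook
# (the archive was created from an absolute path, so extract relative to /)
tar xvf /path/to/backup/rabbitmq-backup.tgz -C /
systemctl start rabbitmq-server
# Re-import the exported definitions (users, vhosts, queues, ...)
rabbitmqadmin import /path/to/backup/definitions.json
```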
diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml
new file mode 100644
index 000000000..9351499ff
--- /dev/null
+++ b/ansible/bootstrap.yml
@@ -0,0 +1,2 @@
+- import_playbook: ./provision-sft.yml
+- import_playbook: ./kubernetes.yml
diff --git a/ansible/cassandra-verify-ntp.yml b/ansible/cassandra-verify-ntp.yml
index 4c7ca6ebd..21934ab54 100644
--- a/ansible/cassandra-verify-ntp.yml
+++ b/ansible/cassandra-verify-ntp.yml
@@ -8,5 +8,6 @@
 #
 - hosts: cassandra
   any_errors_fatal: true
+  environment: "{{ proxy_env | default({}) }}"
   roles:
     - role: ansible-ntp-verify
diff --git a/ansible/cassandra.yml b/ansible/cassandra.yml
index 034532c5c..a67fb5435 100644
--- a/ansible/cassandra.yml
+++ b/ansible/cassandra.yml
@@ -1,5 +1,6 @@
 - hosts: cassandra
   any_errors_fatal: true
+  environment: "{{ proxy_env | default({}) }}"
   vars:
     cassandra_keyspaces:
       - brig
@@ -8,15 +9,13 @@
       - spar
     # cassandra 3.11 doesn't support java 11 yet, use openjdk-8.
     java_packages:
-      - openjdk-8-jdk
+      - openjdk-8-jre-headless
 
   roles:
-    - role: hostname
-      tags:
-        - hostname
     - role: ansible-role-ntp
       tags:
         - ntp
+      when: not (offline|default(false))
 
     - role: ansible-role-java
       tags:
@@ -29,9 +28,10 @@
     - role: ansible-ntp-verify
       tags:
         - ntp
+      when: not (offline|default(false))
   tasks:
     # these are optional debug tasks to see that the cluster has come up successfully
-    - shell: nodetool status
+    - shell: nodetool -Dcom.sun.jndi.rmiURLParsing=legacy status
       register: nodetool_status
       tags:
         - cassandra
diff --git a/ansible/db-operations/README.md b/ansible/db-operations/README.md
new file mode 100644
index 000000000..f58513a5f
--- /dev/null
+++ b/ansible/db-operations/README.md
@@ -0,0 +1,23 @@
+# db-operations
+
+Some of these playbooks might be useful to run or serve as operational documentation in environments in which cassandra and elasticsearch are installed on bare VMs (not inside kubernetes). They can be used when:
+
+* you installed cassandra and/or elasticsearch using ansible playbooks contained within wire-server-deploy
+* you need to perform some specific maintenance work
+* you are comfortable writing or changing ansible playbooks to fit your needs
+* you are using ansible version 2.7.x (or possibly 2.9.x). They may no longer work in more recent versions of ansible.
+
+:warning: The playbooks here *were* in use by Wire for our production systems in the past; however we no longer make active use of them, and won't be able to provide much support for them. They are rather intended as a useful starting point by an operator able to write or change their own ansible playbooks and understand what they do.
+
+Here be dragons!
+
+That said, playbooks here can serve (but may not be sufficient, and still require you to understand what you're doing) in the following cases:
+
+- `cassandra_rolling_repair`: cassandra repairs were misconfigured, and you wish to repair all of them before re-activating the repair cron jobs
+- `cassandra_restart`: Restart cassandra nodes one by one in a controlled, graceful fashion.
+- `cassandra_alter_keyspace`: you'd like to perform a cassandra migration to a new datacenter (this is a complicated topic, this playbook is not sufficient)
+- `cassandra_cleanup`: you have scaled up the number of cassandra nodes (say from 3 to 6); then this allows you to free some disk space from the original cassandra nodes. Has no effect if there was no cluster topology change.
+- `cassandra_(pre|post)_upgrade`: Useful when upgrading cassandra version (e.g. from 3.11 to 4.0)
+- `elasticsearch_...` See the name of playbook or comment in the files for their purpose.
+
+While the original playbooks were in use and worked as expected, porting them over to wire-server-deploy causes some changes which have not been tested, so there might be small issues. If you find an issue, feel free to send a PR.
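A hedged example of running one of these playbooks against an existing inventory; the inventory file name is an assumption:

```bash
# Check cluster health first, then kick off the rolling repair
ansible-playbook -i hosts.ini ansible/db-operations/cassandra_healthy.yml
ansible-playbook -i hosts.ini ansible/db-operations/cassandra_rolling_repair.yml
```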
+# See e.g. https://docs.k8ssandra.io/tasks/migrate/ +- name: 'alter keyspace replication' + vars_prompt: + - name: old + prompt: "name of old datacenter" + private: no + - name: k8ssandra_dc_name + prompt: "name of new datacenter in k8ssandra" + private: no + - name: phase + prompt: "enter phase: BEFORE (before connecting to k8ssandra), TWO (for replicating to two datacentres once they are already connected)" + private: no + hosts: cassandra + any_errors_fatal: yes + tasks: + - name: phase check + fail: + msg: "phase must be one of [BEFORE, TWO]" + when: phase not in ["BEFORE", "TWO"] + + - action: ec2_metadata_facts + + - when: phase == "BEFORE" + name: alter keyspace BEFORE + shell: > + /opt/cassandra/bin/cqlsh $(hostname) -e "ALTER KEYSPACE {{ item }} WITH replication = {'class': 'NetworkTopologyStrategy', '{{ old }}': 3}" + loop: + - "system_auth" + - "system_traces" + - "system_distributed" + - "spar" + - "brig" + - "gundeck" + - "galley" + + - when: phase == "BEFORE" + debug: + msg: Run a repair now using cassandra_rolling_repair.yml! + + - when: phase == "TWO" + name: alter keyspace to replicate to TWO datacentres + shell: > + /opt/cassandra/bin/cqlsh $(hostname) -e "ALTER KEYSPACE {{ item }} WITH replication = {'class': 'NetworkTopologyStrategy', '{{ old }}': 3, '{{ k8ssandra_dc_name }}': 3}" + loop: + - "system_auth" + - "system_traces" + - "system_distributed" + - "spar" + - "brig" + - "gundeck" + - "galley" diff --git a/ansible/db-operations/cassandra_cleanup.yml b/ansible/db-operations/cassandra_cleanup.yml new file mode 100644 index 000000000..364614e8e --- /dev/null +++ b/ansible/db-operations/cassandra_cleanup.yml @@ -0,0 +1,10 @@ +- name: 'Run "nodetool cleanup" serially (only necessary once after adding nodes to a cluster)' + hosts: "cassandra" + any_errors_fatal: yes + serial: 1 + tasks: + - include: tasks/cassandra_cluster_healthy.yml + - name: Run nodetool cleanup - wait for up to 1h, poll every 10 sec + shell: nodetool cleanup + async: 3600 + poll: 10 diff --git a/ansible/db-operations/cassandra_healthy.yml b/ansible/db-operations/cassandra_healthy.yml new file mode 100644 index 000000000..662656676 --- /dev/null +++ b/ansible/db-operations/cassandra_healthy.yml @@ -0,0 +1,6 @@ +- name: Check cluster is healthy + hosts: cassandra + any_errors_fatal: yes + gather_facts: no + tasks: + - include: tasks/cassandra_cluster_healthy.yml diff --git a/ansible/db-operations/cassandra_post_upgrade.yml b/ansible/db-operations/cassandra_post_upgrade.yml new file mode 100644 index 000000000..6a12db3f8 --- /dev/null +++ b/ansible/db-operations/cassandra_post_upgrade.yml @@ -0,0 +1,25 @@ +# +# Follow the guidelines from DataStax for upgrades. +# +- hosts: "cassandra" + any_errors_fatal: yes + gather_facts: no + serial: 1 + vars: + cluster_name: default + vars_files: + - roles-external/ansible-cassandra/defaults/main.yml + tasks: + - action: ec2_metadata_facts + - include: tasks/cassandra_cluster_healthy.yml + vars: + cassandra_role: "cassandra_{{ cluster_name }}" + # TODO: Adjust this value accordingly!
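+      # For instance (an assumption, verify against `nodetool describecluster` output): during a rolling upgrade two schema versions may temporarily coexist, in which case `expected_num_schemas: 2` would be appropriate.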
+ expected_num_schemas: 1 + + - name: 'Cassandra: upgrade sstables' + shell: nodetool upgradesstables + + - include: roles-external/ansible-cassandra/tasks/repairs_backups.yml + vars: + cassandra_cluster_name: "{{ cluster_name }}" diff --git a/ansible/db-operations/cassandra_pre_upgrade.yml b/ansible/db-operations/cassandra_pre_upgrade.yml new file mode 100644 index 000000000..7cac3770c --- /dev/null +++ b/ansible/db-operations/cassandra_pre_upgrade.yml @@ -0,0 +1,29 @@ +- name: Ensure no ongoing repairs on any node and stop cronjobs + hosts: cassandra + gather_facts: yes + vars_files: + - roles/cassandra/defaults/main.yml + + tasks: + # First let's ensure that there are no repairs on _any_ nodes + - include: tasks/cassandra_remove_cron.yml + vars: + cassandra_cluster_name: default + - include: tasks/cassandra_wait_ongoing_repair.yml + +- name: Prepare the nodes + hosts: cassandra + any_errors_fatal: yes + gather_facts: no + serial: 1 + tasks: + - name: 'Cassandra: first upgrade sstables' + shell: nodetool upgradesstables + + - name: 'Cassandra: run repairs' + shell: nodetool repair -full -pr 2>&1 | systemd-cat -t cassandra_repair + + - include: tasks/cassandra_cluster_healthy.yml + + - name: 'Cassandra: backup the data' + shell: /usr/local/bin/cassandra_backup_{{ cluster_name }} 2>&1 | systemd-cat -t cassandra_daily_backup diff --git a/ansible/db-operations/cassandra_restart.yml b/ansible/db-operations/cassandra_restart.yml new file mode 100644 index 000000000..84b41d76d --- /dev/null +++ b/ansible/db-operations/cassandra_restart.yml @@ -0,0 +1,9 @@ +- name: restart cassandra nodes + hosts: "cassandra" + any_errors_fatal: yes + gather_facts: no + serial: 1 + tasks: + - include: tasks/cassandra_cluster_healthy.yml + - include: tasks/cassandra_down.yml + - include: tasks/cassandra_up.yml diff --git a/ansible/db-operations/cassandra_rolling_repair.yml b/ansible/db-operations/cassandra_rolling_repair.yml new file mode 100644 index 000000000..e03bdffbd --- /dev/null +++ b/ansible/db-operations/cassandra_rolling_repair.yml @@ -0,0 +1,21 @@ +# Remove repair crons on all nodes at once +- name: 'Rolling repair' + hosts: cassandra + any_errors_fatal: yes + tasks: + # First let's ensure that there are no repairs on _any_ nodes + - include: tasks/cassandra_remove_repair_and_daily_backup_cron.yml + vars: + cassandra_cluster_name: default + - include: tasks/cassandra_wait_ongoing_repair.yml + +# do a rolling repair +- name: 'Rolling repair' + hosts: cassandra + any_errors_fatal: yes + serial: 1 + tasks: + - include: tasks/cassandra_manual_repair.yml + +# run the actual playbook again to re-enable cron jobs. +- import_playbook: "cassandra.yml" diff --git a/ansible/db-operations/elasticsearch_joined.yml b/ansible/db-operations/elasticsearch_joined.yml new file mode 100644 index 000000000..5cda74a99 --- /dev/null +++ b/ansible/db-operations/elasticsearch_joined.yml @@ -0,0 +1,45 @@ +# +# ansible-playbook elasticsearch_joined.yml -e "ip_to_check=1.2.3.4" +# +- name: Wait for a given new node to join the cluster and shard relocations to settle + hosts: elasticsearch + gather_facts: no + any_errors_fatal: yes + tasks: + - fail: + msg: You need to specify ip_to_check. See comment at the top of the playbook for usage.
+ when: not ip_to_check + + - name: 'Elasticsearch: Wait for HTTP port' + wait_for: port={{ routing_table[elasticsearch_role].http.exposed }} + + - name: 'Elasticsearch: Wait for node discovery' + shell: > + set -o pipefail; + curl -sSf http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_nodes|jq ".nodes|keys|length" + args: + executable: /bin/bash + register: num_nodes + until: 'num_nodes.stdout|int == groups[elasticsearch_role]|length' + retries: 60 + delay: 5 + + - name: 'check ip_to_check={{ ip_to_check }} is part of the cluster' + shell: > + set -o pipefail; + curl -sSf 'http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_cat/nodes?v&h=ip' | grep "{{ ip_to_check }}" + args: + executable: /bin/bash + + - include: tasks/elasticsearch_cluster_healthy.yml + + - name: 'Elasticsearch: Wait for shard relocation to finish' + shell: > + set -o pipefail; + curl -sSf http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_cluster/health | jq .relocating_shards + args: + executable: /bin/bash + register: num_shards + until: 'num_shards.stdout|int == 0' + retries: 60 + delay: 15 diff --git a/ansible/db-operations/elasticsearch_restart.yml b/ansible/db-operations/elasticsearch_restart.yml new file mode 100644 index 000000000..0573e5418 --- /dev/null +++ b/ansible/db-operations/elasticsearch_restart.yml @@ -0,0 +1,7 @@ +- hosts: elasticsearch + serial: 1 + any_errors_fatal: yes + tasks: + - include: tasks/elasticsearch_cluster_healthy.yml + - include: tasks/elasticsearch_down.yml + - include: tasks/elasticsearch_up.yml diff --git a/ansible/db-operations/elasticsearch_stop.yml b/ansible/db-operations/elasticsearch_stop.yml new file mode 100644 index 000000000..4ec3c6e01 --- /dev/null +++ b/ansible/db-operations/elasticsearch_stop.yml @@ -0,0 +1,35 @@ +# +# Example: +# ansible-playbook elasticsearch_stop.yml -e "ip_to_stop=1.2.3.4" +# +- name: checks + hosts: localhost + tasks: + - fail: + msg: "You need to specify ip_to_stop, it needs to be a valid ipv4. invalid:[{{ ip_to_stop }}] See comment at the top of the playbook for usage." + when: not ip_to_stop | ipaddr + +- name: Stop elasticsearch node + hosts: '{{ ip_to_stop }}' + any_errors_fatal: yes + serial: 1 + tasks: + - include: tasks/elasticsearch_shard_allocation.yml + vars: + exclude: "{{ ansible_default_ipv4.address }}" + + - pause: + seconds: 5 + + - name: 'Elasticsearch: Wait for shard relocation to finish' + shell: > + set -o pipefail; + curl -sSf http://localhost:9200/_cluster/health | jq .relocating_shards + args: + executable: /bin/bash + register: num_shards + until: 'num_shards.stdout|int == 0' + retries: 200 + delay: 15 + + - command: systemctl stop elasticsearch diff --git a/ansible/db-operations/tasks/cassandra_cluster_healthy.yml b/ansible/db-operations/tasks/cassandra_cluster_healthy.yml new file mode 100644 index 000000000..406f0f6fc --- /dev/null +++ b/ansible/db-operations/tasks/cassandra_cluster_healthy.yml @@ -0,0 +1,29 @@ +- name: 'Cassandra: gather number of schemas' + shell: nodetool describecluster | grep '[0-9a-f]\{8\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{12\}' | wc -l + register: num_schemas + +- name: 'Cassandra: check for schema disagreements' + fail: msg="Schema disagreements. Please fix first." + when: '(expected_num_schemas is defined and num_schemas.stdout|int > expected_num_schemas|int) or + (expected_num_schemas is not defined and num_schemas.stdout|int > 1)' + +- name: 'Cassandra: check more for schema disagreements' + fail: msg="Schema disagreements. Please fix first."
+ when: 'num_schemas.stdout.find("UNREACHABLE") != -1' + +# can't use 'var:' in a task +- set_fact: + desired_nodes: "{{ groups[cassandra_role|replace('_seed','')]|default([])|length + groups[cassandra_role|replace('_seed','') + '_seed']|length }}" + +- debug: var=desired_nodes + +- name: 'Cassandra: check that the desired number of nodes is up' + shell: nodetool status | grep ^UN | wc -l + register: num_nodes + until: num_nodes.stdout|int == desired_nodes|int + retries: 10 + delay: 15 + when: dry_run is not defined + +- debug: var=num_nodes.stdout + when: dry_run is not defined diff --git a/ansible/db-operations/tasks/cassandra_down.yml b/ansible/db-operations/tasks/cassandra_down.yml new file mode 100644 index 000000000..4a933f5bd --- /dev/null +++ b/ansible/db-operations/tasks/cassandra_down.yml @@ -0,0 +1,6 @@ +- name: 'Cassandra: draining node...' + shell: nodetool drain + +- name: 'Cassandra: stopping the daemon' + shell: systemctl stop cassandra + ignore_errors: true diff --git a/ansible/db-operations/tasks/cassandra_manual_repair.yml b/ansible/db-operations/tasks/cassandra_manual_repair.yml new file mode 100644 index 000000000..1b5652250 --- /dev/null +++ b/ansible/db-operations/tasks/cassandra_manual_repair.yml @@ -0,0 +1,5 @@ +- name: Run repair - wait for up to 2h, poll every 10 sec + # This is copied from the crontab in cassandra/tasks/cron + shell: "flock -n /tmp/backup_repair_mutex /usr/local/bin/cassandra_repair_default | systemd-cat -t cassandra_repair/" + async: 7200 + poll: 10 diff --git a/ansible/db-operations/tasks/cassandra_remove_backup.yml b/ansible/db-operations/tasks/cassandra_remove_backup.yml new file mode 100644 index 000000000..195171d75 --- /dev/null +++ b/ansible/db-operations/tasks/cassandra_remove_backup.yml @@ -0,0 +1,47 @@ +# +# You will need to define: +# - path_to_backup_folders (as defined by http://man7.org/linux/man-pages/man1/find.1.html) +# +- name: Show disk size before at /mnt + shell: df -h /mnt + register: df_size + +- name: Show current disk state + debug: + msg: "{{ df_size.stdout_lines }}" + +- name: Gather size before + # -c simply gives you a grand total, just to have an idea of the difference + # in size. + # The last line will look like: "<size> total" + shell: du -c {{ path_to_backup_folders }} | tail -n 1 | awk '{ print $1 }' + register: backup_folders_size + +- debug: + msg: "Size of backup folders = {{ backup_folders_size.stdout }}" + +- name: Find all files in the backup folder + shell: > + find {{ path_to_backup_folders }} -type f + register: files_to_delete + ignore_errors: yes + # If there are no files/directories, this fails but that's OK.
`stdout_lines` is then simply `[]`. + # On error handling in ansible, see: https://docs.ansible.com/ansible/2.7/user_guide/playbooks_error_handling.html + +- name: Show files to be deleted + debug: + msg: "{{ files_to_delete.stdout_lines }}" + +- name: Delete all files + file: + path: "{{ item }}" + state: absent + with_items: "{{ files_to_delete.stdout_lines }}" + +- name: Gather size after + # -c simply gives you a grand total, just to have an idea of the difference + shell: du -c {{ path_to_backup_folders }} | tail -n 1 | awk '{ print $1 }' + register: backup_folders_size + +- debug: + msg: "Size of backup folders = {{ backup_folders_size.stdout }}" diff --git a/ansible/db-operations/tasks/cassandra_remove_cron.yml b/ansible/db-operations/tasks/cassandra_remove_cron.yml new file mode 100644 index 000000000..c54ff3b0a --- /dev/null +++ b/ansible/db-operations/tasks/cassandra_remove_cron.yml @@ -0,0 +1,11 @@ +--- +- debug: var=cassandra_cluster_name +# Note that these should match the job names at roles/cassandra/tasks/cron.yml +- name: 'Remove cassandra cronjobs' + cron: + name: "{{ item }}" + state: absent + with_items: + - "cassandra_incremental_backup_{{ cassandra_cluster_name }}" + - "cassandra_backup_{{ cassandra_cluster_name }}" + - "cassandra_repair_{{ cassandra_cluster_name }}" diff --git a/ansible/db-operations/tasks/cassandra_remove_repair_and_daily_backup_cron.yml b/ansible/db-operations/tasks/cassandra_remove_repair_and_daily_backup_cron.yml new file mode 100644 index 000000000..ee7614127 --- /dev/null +++ b/ansible/db-operations/tasks/cassandra_remove_repair_and_daily_backup_cron.yml @@ -0,0 +1,8 @@ +--- +- name: 'Remove cassandra cronjobs' + cron: + name: "{{ item }}" + state: absent + with_items: + - "cassandra_repair_{{ cassandra_cluster_name }}" + - "cassandra_backup_{{ cassandra_cluster_name }}" diff --git a/ansible/db-operations/tasks/cassandra_up.yml b/ansible/db-operations/tasks/cassandra_up.yml new file mode 100644 index 000000000..e817fa63c --- /dev/null +++ b/ansible/db-operations/tasks/cassandra_up.yml @@ -0,0 +1,15 @@ +- include: runit_up.yml service=cassandra + +- name: 'Cassandra: Waiting for thrift port' + wait_for: > + port={{ routing_table[cassandra_role].rpc.exposed }} + host="{{ ansible_ec2_local_ipv4 }}" + +- name: 'Cassandra: Waiting for CQL port' + wait_for: > + port={{ routing_table[cassandra_role].native_transport.exposed }} + host="{{ ansible_ec2_local_ipv4 }}" + +- include: cassandra_cluster_healthy.yml + +- pause: seconds={{ cassandra_wait_after_restart|default(120) }} diff --git a/ansible/db-operations/tasks/cassandra_wait_ongoing_repair.yml b/ansible/db-operations/tasks/cassandra_wait_ongoing_repair.yml new file mode 100644 index 000000000..802169a03 --- /dev/null +++ b/ansible/db-operations/tasks/cassandra_wait_ongoing_repair.yml @@ -0,0 +1,19 @@ +# Note that HintsDispatcher is cassandra 3.x only while HintedHandoff is cassandra 2.x, +# which is why the output of these will always be just 3 lines +# c.f.: +# http://docs.datastax.com/en/cassandra/latest/cassandra/operations/opsRepairNodesTOC.html +# http://docs.datastax.com/en/cassandra/latest/cassandra/tools/toolsTPstats.html +- name: 'Cassandra: ensure that there are no repair operations by thread pool (cassandra 2.x and 3.x)' + shell: nodetool tpstats | grep -E 'HintedHandoff|HintsDispatcher|ReadRepairStage|AntiEntropyStage' | awk '{print $2,$3}' + register: repair_operations + until: repair_operations.stdout == "0 0\n0 0\n0 0" + retries: 30 + delay: 30 + +# Fail if there are still ongoing repairs
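+# (A still-running repair shows up as a `Repair#<n>` thread pool in the `nodetool tpstats` output, which is what the grep below matches.)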
+- name: 'Cassandra: ensure that there are no ongoing repairs (cassandra 2.x and 3.x)' + shell: '! (nodetool tpstats | grep Repair#)' + register: ongoing_repair + until: ongoing_repair.rc == 0 and ongoing_repair.stdout == "" + retries: 30 + delay: 30 diff --git a/ansible/db-operations/tasks/elasticsearch_cluster_healthy.yml b/ansible/db-operations/tasks/elasticsearch_cluster_healthy.yml new file mode 100644 index 000000000..c2aa3af80 --- /dev/null +++ b/ansible/db-operations/tasks/elasticsearch_cluster_healthy.yml @@ -0,0 +1,10 @@ +--- +- name: 'Elasticsearch: wait for the cluster to become green' + shell: > + curl -sSf http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_cluster/health \ + | jq ".status" + register: health + until: '"green" in health.stdout' + retries: 120 + delay: 10 + when: dry_run is not defined diff --git a/ansible/db-operations/tasks/elasticsearch_down.yml b/ansible/db-operations/tasks/elasticsearch_down.yml new file mode 100644 index 000000000..6c3b8efe8 --- /dev/null +++ b/ansible/db-operations/tasks/elasticsearch_down.yml @@ -0,0 +1,2 @@ +- include: elasticsearch_shard_allocation.yml exclude={{ ansible_default_ipv4.address }} +- command: systemctl stop elasticsearch diff --git a/ansible/db-operations/tasks/elasticsearch_shard_allocation.yml b/ansible/db-operations/tasks/elasticsearch_shard_allocation.yml new file mode 100644 index 000000000..edc7916fe --- /dev/null +++ b/ansible/db-operations/tasks/elasticsearch_shard_allocation.yml @@ -0,0 +1,8 @@ +--- +- name: 'Elasticsearch: toggle shard allocation' + shell: > + curl -sSf -XPUT http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_cluster/settings -d '{ + "transient" : { + "cluster.routing.allocation.exclude._ip": {% if exclude is defined %}"{{ exclude }}"{% else %}null{% endif %} + } + }' diff --git a/ansible/db-operations/tasks/elasticsearch_up.yml b/ansible/db-operations/tasks/elasticsearch_up.yml new file mode 100644 index 000000000..fc4f9fa2d --- /dev/null +++ b/ansible/db-operations/tasks/elasticsearch_up.yml @@ -0,0 +1,20 @@ +--- +- include: runit_up.yml service=elasticsearch + +- name: 'Elasticsearch: Wait for HTTP port' + wait_for: port={{ routing_table[elasticsearch_role].http.exposed }} + +- name: 'Elasticsearch: Wait for node discovery' + shell: > + set -o pipefail; + curl -sSf http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_nodes|jq ".nodes|keys|length" + args: + executable: /bin/bash + register: num_nodes + until: 'num_nodes.stdout|int == groups[elasticsearch_role]|length' + retries: 60 + delay: 5 + when: dry_run is not defined + +- include: elasticsearch_shard_allocation.yml +- include: elasticsearch_cluster_healthy.yml diff --git a/ansible/download_cli_binaries.yml b/ansible/download_cli_binaries.yml deleted file mode 100644 index e5d726c0c..000000000 --- a/ansible/download_cli_binaries.yml +++ /dev/null @@ -1,51 +0,0 @@ -# to install kubectl and helm globally on your path, run -# -# ansible-playbook -i localhost, -c local download_cli_binaries.yml -# -# to install to a local directory 'bin', run -# -# ansible-playbook -i localhost, -c local download_cli_binaries.yml -e local_installation=True -# -- name: download binaries - hosts: localhost - vars: - # Ensure these versions match the server-side ones (or are compatible) - # Also see download_kubespray.yml - kubernetes_helm_ver: v3.1.1 - kubectl_version: "1.14.2" - # default directories for a global installation - kubectl_bin_directory: "/usr/local/bin" - kubernetes_helm_bin_dir: "/usr/local/bin" - # Operarting system on
which "kubectl" should run on - kubectl_os: "{{ os.stdout }}" # to override, use "linux" for Linux, "darwin" for MacOS X, "windows" for Windows - # Processor architecture "kubectl" should run on - kubectl_arch: "amd64" # other possible values: "386","arm64","arm","ppc64le","s390x" - pre_tasks: - - name: figure out OS - shell: if [[ "$OSTYPE" == "darwin"* ]]; then echo darwin; else echo linux; fi; - register: os - - - name: fix for kubectl role when on debian - set_fact: - unarchive_deps_xz_pkg: xz-utils - when: ansible_os_family == 'Debian' - - - name: Set local directory variables - set_fact: - kubectl_bin_directory: "{{ playbook_dir }}/bin" - kubernetes_helm_bin_dir: "{{ playbook_dir }}/bin" - kubectl_owner: "{{ lookup('env', 'USER') }}" - kubectl_group: "{{ lookup('env', 'USER') }}" - when: local_installation is defined and local_installation - - name: ensure dir exists - file: - path: "{{ kubectl_bin_directory }}" - state: directory - owner: "{{ kubectl_owner }}" - group: "{{ kubectl_owner }}" - when: local_installation is defined and local_installation - - roles: - - ansible-kubectl - - ansible-helm - diff --git a/ansible/download_kubespray.yml b/ansible/download_kubespray.yml deleted file mode 100644 index 1d9d4dfe3..000000000 --- a/ansible/download_kubespray.yml +++ /dev/null @@ -1,24 +0,0 @@ -# download a specific version of kubespray -# Note: installing it via ansible-galaxy would re-arrange its content (roles/etcd is misplaced) -# the reason for this behaviour is yet to be known -# Usage: see Makefile -- name: download kubespray - hosts: localhost - gather_facts: no - become: false - vars: - # kubespray release 2.10 branch from 2019-05-20 - # uses - # kube_version = "v1.14.2" - # helm_version = "v2.13.1" - # (if needed, these can be overridden in the hosts.ini under the [k8s-cluster:vars] section) - # also see download_cli_binaries.yml to see client-side versions of `kubectl` and `helm`. - kubespray_version: e2f5a9748e4dbfe2fdba7931198b0b5f1f4bdc7e - tasks: - - name: "sync kubespray to {{ kubespray_version }}" - git: - repo: 'https://github.com/kubernetes-sigs/kubespray.git' - dest: roles-external/kubespray - version: "{{ kubespray_version }}" - force: yes - depth: 1 diff --git a/ansible/elasticsearch.yml b/ansible/elasticsearch.yml index 5d846b52e..129c1075e 100644 --- a/ansible/elasticsearch.yml +++ b/ansible/elasticsearch.yml @@ -1,19 +1,36 @@ - name: elasticsearch hosts: elasticsearch_master gather_facts: true + environment: "{{ proxy_env | default({}) }}" vars: # The following sets java heap size to 1GB (default is 2GB) # comment that line when deploying on machines with >= 4GB memory. es_heap_size: "1g" # Put a hold on the ES package. - # Updating ES to a different version than 6.6 currently breaks its integration with Wire. + # Updating ES to a different version 7.x currently breaks its integration with Wire. + # + # Keep this version aligned with that of wire-server/charts/elasticsearch-ephemeral. + # Otherwise, the usage of this version would be completely untested! + es_version: "6.8.23" es_version_lock: true es_enable_xpack: false es_xpack_features: [] # disable features es_instance_name: "{{ ansible_hostname }}" + # Protect against potential information leak. + # For details see https://docs.wire.com/security-responses/log4shell.html + # + # CVE-2021-44228 + # CVE-2021-45046 + # + # FUTUREWORK: if we eventually upgrade to newer version of this role + # containing log4j >= 2.16 the following JVM parameter won't be necessary + # anymore. 
+ es_jvm_custom_parameters: + - "-Dlog4j2.formatMsgNoLookups=True" + # bind to both site IP and localhost, but publish only site IP: # netstat -antlp | grep LISTEN # tcp 0 0 172.17.0.4:9200 0.0.0.0:* LISTEN 8663/java diff --git a/ansible/files/hetzner_server_libvirt_default_net.xml b/ansible/files/hetzner_server_libvirt_default_net.xml new file mode 100644 index 000000000..92dfd7e83 --- /dev/null +++ b/ansible/files/hetzner_server_libvirt_default_net.xml @@ -0,0 +1,10 @@ + + wirebox + + + + + + + + diff --git a/ansible/files/hetzner_server_nftables.conf.j2 b/ansible/files/hetzner_server_nftables.conf.j2 new file mode 100644 index 000000000..6cccc10b9 --- /dev/null +++ b/ansible/files/hetzner_server_nftables.conf.j2 @@ -0,0 +1,66 @@ +#!/usr/sbin/nft -f + +flush ruleset + +define KUBENODEIP = 192.168.122.21 +define COTURNIP = 192.168.122.23 +define INF_WAN = {{ ansible_default_ipv4.interface }} + +table inet filter { + chain block_definitions { + ct state established,related accept + ct state invalid drop + tcp flags != syn ct state new counter drop + counter drop +# log prefix "DROP " counter drop + } + chain INPUT { + type filter hook input priority 0; + ip protocol icmp icmp type echo-request counter accept + ip6 nexthdr ipv6-icmp icmpv6 type echo-request counter accept + ip6 nexthdr ipv6-icmp ip6 hoplimit 1 icmpv6 type { nd-neighbor-advert, nd-neighbor-solicit, nd-router-advert } counter accept + ip6 nexthdr ipv6-icmp ip6 hoplimit 255 icmpv6 type { nd-neighbor-advert, nd-neighbor-solicit, nd-router-advert } counter accept + iifname { lo, virbr0 } counter accept + tcp dport 22 counter accept comment "SSH incoming" + jump block_definitions + } + chain FORWARD { + type filter hook forward priority 0; + iifname virbr0 oifname $INF_WAN counter accept comment "allow internet for internal VMs, needed for things like Let's Encrypt cert issuance" + iifname virbr0 oifname virbr0 counter accept comment "allow traffic between VMs" + iifname $INF_WAN oifname virbr0 ct status dnat counter accept comment "allow DNAT forward from external interface to virbr0" + iifname docker0 oifname virbr0 counter accept + jump block_definitions + } + chain OUTPUT { + type filter hook output priority 0; + policy accept; + } +} +table ip nat { + chain PREROUTING { + type nat hook prerouting priority -100; + + iifname { $INF_WAN, virbr0 } tcp dport 80 fib daddr type local dnat to $KUBENODEIP:31772 comment "HTTP ingress" + iifname { $INF_WAN, virbr0 } tcp dport 443 fib daddr type local dnat to $KUBENODEIP:31773 comment "HTTPS ingress" + + iifname { $INF_WAN, virbr0 } tcp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control TCP" + iifname { $INF_WAN, virbr0 } udp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control UDP" + + iifname { $INF_WAN, virbr0 } udp dport 32768-61000 fib daddr type local dnat to $COTURNIP comment "COTURN UDP range" + + fib daddr type local counter jump DOCKER + } + chain POSTROUTING { + type nat hook postrouting priority 100; + oifname != docker0 ip saddr 172.17.0.0/16 counter masquerade + oifname $INF_WAN counter masquerade comment "masquerade outgoing traffic" + } + chain DOCKER { + iifname docker0 counter return + } + chain OUTPUT { + type nat hook output priority -100; policy accept; + ip daddr != 127.0.0.0/8 fib daddr type local counter jump DOCKER + } +} diff --git a/ansible/files/hetzner_server_sshd_config b/ansible/files/hetzner_server_sshd_config new file mode 100644 index 000000000..59c66cac3 --- /dev/null +++ b/ansible/files/hetzner_server_sshd_config
@@ -0,0 +1,25 @@ +Port 22 + +AcceptEnv LANG LC_* +LogLevel verbose +PrintMotd no + +# Hardened algorithm configuration based on the output of 'ssh-audit' (https://github.com/jtesta/ssh-audit). + +KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512 +Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com +MACs hmac-sha2-512-etm@openssh.com + +HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512 +CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512 +GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- +HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512 +PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,ecdsa-sha2-nistp521 + +PasswordAuthentication no +PubkeyAuthentication yes +ChallengeResponseAuthentication no + +Subsystem sftp /usr/lib/openssh/sftp-server +UsePAM yes +X11Forwarding no diff --git a/ansible/files/registry/images.sh b/ansible/files/registry/images.sh index 56adbe8f1..bc7ad0514 100755 --- a/ansible/files/registry/images.sh +++ b/ansible/files/registry/images.sh @@ -9,7 +9,7 @@ registry_name="localhost" images=$(cat $SCRIPT_DIR/list_of_docker_images.txt) quay=$(cat $SCRIPT_DIR/list_of_docker_images.txt | grep "^quay.io" | awk -F quay.io/ '{print $2}' | grep -v '^$' ) gcr=$(cat $SCRIPT_DIR/list_of_docker_images.txt | grep "^gcr.io" | awk -F gcr.io/ '{print $2}' | grep -v '^$') -k8sgcr=$(cat $SCRIPT_DIR/list_of_docker_images.txt | grep "^k8s.gcr.io" | awk -F k8s.gcr.io/ '{print $2}' | grep -v '^$') +registryk8s=$(cat $SCRIPT_DIR/list_of_docker_images.txt | grep "^registry.k8s.io" | awk -F registry.k8s.io/ '{print $2}' | grep -v '^$') hub=$(cat $SCRIPT_DIR/list_of_docker_images.txt | grep -v gcr.io | grep -v quay.io) @@ -26,8 +26,8 @@ for image in ${quay[@]}; do mirror done; -prefix=k8s.gcr.io/ -for image in ${k8sgcr[@]}; do +prefix=registry.k8s.io/ +for image in ${registryk8s[@]}; do mirror done; diff --git a/ansible/files/registry/list_of_docker_images.txt b/ansible/files/registry/list_of_docker_images.txt index 1d97ce187..849ad826c 100644 --- a/ansible/files/registry/list_of_docker_images.txt +++ b/ansible/files/registry/list_of_docker_images.txt @@ -33,16 +33,16 @@ gcr.io/google_containers/pause-amd64:3.1 gcr.io/kubernetes-helm/tiller:v2.11.0 gcr.io/kubernetes-helm/tiller:v2.13.1 grafana/grafana:5.0.0 -k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0 -k8s.gcr.io/defaultbackend:1.4 -k8s.gcr.io/k8s-dns-node-cache:1.15.1 +registry.k8s.io/cluster-proportional-autoscaler-amd64:1.4.0 +registry.k8s.io/defaultbackend:1.4 +registry.k8s.io/k8s-dns-node-cache:1.15.1 lachlanevenson/k8s-helm:v2.13.1 localstack/localstack:0.8.7 mesosphere/aws-cli:1.14.5 metallb/controller:v0.7.3 metallb/speaker:v0.7.3 -minio/minio:RELEASE.2019-04-09T01-22-30Z -namshi/smtp:latest +minio/minio:RELEASE.2023-07-07T07-13-57Z +ixdotai/smtp:v0.5.2 library/nginx:1.15 quay.io/coreos/configmap-reload:v0.0.1 quay.io/coreos/etcd:v3.2.26 @@ -53,7 +53,6 @@ quay.io/coreos/grafana-watcher:v0.0.8 quay.io/coreos/prometheus-config-reloader:v0.20.0 
quay.io/coreos/prometheus-operator:v0.20.0 quay.io/external_storage/local-volume-provisioner:v2.1.0 -quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.22.0 quay.io/prometheus/alertmanager:v0.15.1 quay.io/prometheus/node-exporter:v0.15.2 quay.io/prometheus/prometheus:v2.2.1 diff --git a/ansible/files/registry/mk-sub-certificate b/ansible/files/registry/mk-sub-certificate index 1a60f37ae..021d14349 100755 --- a/ansible/files/registry/mk-sub-certificate +++ b/ansible/files/registry/mk-sub-certificate @@ -19,7 +19,7 @@ set -ex echo $cn > dns_name ALLDOMAINS="" -for ONEREGISTRYIN in quay.io k8s.gcr.io gcr.io docker.caching.proxy.internal registry-1.docker.io auth.docker.io ${REGISTRIES}; do +for ONEREGISTRYIN in quay.io registry.k8s.io gcr.io docker.caching.proxy.internal registry-1.docker.io auth.docker.io ${REGISTRIES}; do ONEREGISTRY=$(echo ${ONEREGISTRYIN} | xargs) # Remove whitespace echo "Adding certificate for registry: $ONEREGISTRY" ALLDOMAINS="${ALLDOMAINS},DNS:${ONEREGISTRY}" diff --git a/ansible/files/serve-assets.service b/ansible/files/serve-assets.service new file mode 100644 index 000000000..cf866e908 --- /dev/null +++ b/ansible/files/serve-assets.service @@ -0,0 +1,3 @@ +[Service] +ExecStart=/usr/bin/python3 -m http.server 8080 +WorkingDirectory=/opt/assets/ diff --git a/ansible/get-logs.yml b/ansible/get-logs.yml new file mode 100644 index 000000000..1d1163636 --- /dev/null +++ b/ansible/get-logs.yml @@ -0,0 +1,33 @@ +- hosts: "{{ log_host }}" + tasks: + - assert: + msg: "'log_host' must be set and not empty" + that: + - log_host is defined + - log_host | length > 0 + - assert: + msg: "'log_service' must be set and not empty" + that: + - log_service is defined + - log_service | length > 0 + - assert: + msg: "'log_since' must be set and not empty" + that: + - log_since is defined + - log_since | length > 0 + + - name: get logs + shell: journalctl -u {{ log_service }} --since '{{ log_since }}' --until '{{ log_until | default('now', true) }}' + register: the_logs + - name: create logs directory + delegate_to: localhost + become: no + file: + state: directory + path: "{{ log_dir | default('./', true) }}" + - name: save logs + delegate_to: localhost + become: no + copy: + dest: "{{ log_dir | default('./', true) }}/{{log_host}}-{{ log_service }}-{{ log_since }}-{{ log_until | default('now', true) }}.log" + content: "{{ the_logs.stdout }}" diff --git a/ansible/helm_external.yml b/ansible/helm_external.yml index 610c80b35..aed173a47 100644 --- a/ansible/helm_external.yml +++ b/ansible/helm_external.yml @@ -4,17 +4,9 @@ # # After any change to IPs/servers: # 1. run this playbook: -# poetry run ansible-playbook -i hosts.ini helm_external.yml -vv --diff +# ansible-playbook -i hosts.ini helm_external.yml -vv --diff # 2. re-run the helm upgrade specifying the override files.
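+#    For example (a sketch only; actual release names, chart locations and override files depend on your environment): +#      helm upgrade --install cassandra-external charts/cassandra-external -f values/cassandra-external/values.yaml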
- -- hosts: - - elasticsearch - - cassandra - - minio - - redis - gather_facts: true - -- hosts: localhost +- hosts: "elasticsearch" become: false tasks: - name: Generate elasticsearch IPs for helm @@ -22,29 +14,35 @@ vars: external_dir_name: elasticsearch-external server_type: elasticsearch - network_interface: "{{ elasticsearch_network_interface | default('') }}" - when: '"elasticsearch" in groups' + network_interface: "{{ elasticsearch_network_interface }}" +- hosts: "minio" + become: false + tasks: - name: Generate minio IPs for helm include_tasks: tasks/helm_external.yml vars: external_dir_name: minio-external server_type: minio - network_interface: "{{ minio_network_interface | default('') }}" - when: '"minio" in groups' + network_interface: "{{ minio_network_interface }}" +- hosts: "cassandra" + become: false + tasks: - name: Generate cassandra IPs for helm include_tasks: tasks/helm_external.yml vars: external_dir_name: cassandra-external server_type: cassandra - network_interface: "{{ cassandra_network_interface | default('') }}" - when: '"cassandra" in groups' + network_interface: "{{ cassandra_network_interface }}" - - name: Generate redis IPs for helm +- hosts: "rmq-cluster" + become: false + tasks: + - name: Generate rabbitmq IPs for helm include_tasks: tasks/helm_external.yml vars: - external_dir_name: redis-external - server_type: redis - network_interface: "{{ redis_network_interface | default('') }}" - when: '"redis" in groups' + external_dir_name: rabbitmq-external + server_type: rmq-cluster + network_interface: "{{ rabbitmq_network_interface }}" + tags: rabbitmq-external diff --git a/ansible/hetzner-single-deploy.yml b/ansible/hetzner-single-deploy.yml new file mode 100644 index 000000000..4d086fe32 --- /dev/null +++ b/ansible/hetzner-single-deploy.yml @@ -0,0 +1,215 @@ +- hosts: all + become: true + vars: + artifact_hash: d8fe36747614968ea73ebd43d47b99364c52f9c1 + ubuntu_version: 22.04.5 + ssh_pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDPTGTo1lTqd3Ym/75MRyQvj8xZINO/GI6FzfIadSe5c backend+hetzner-dedicated-operator@wire.com" + tasks: + - name: set ipv4 forward + sysctl: + name: net.ipv4.ip_forward + value: '1' + sysctl_set: true + state: present + reload: true + - name: apt update + apt: update_cache=yes force_apt_get=yes + - name: apt upgrade + apt: upgrade=dist force_apt_get=yes + - name: install default packages + apt: + install_recommends: no + pkg: + - aptitude + - apt-transport-https + - bind9-host + - curl + - debian-goodies + - dnsutils + - git + - dnsmasq + - less + - lsof + - net-tools + - rsyslog + - screen + - sudo + - vim + - wget + - whois + - docker.io + - telnet + - python3-lxml + - qemu + - qemu-kvm + - qemu-utils + - libvirt-clients + - libvirt-daemon-system + - virtinst + - bridge-utils + - name: generate german locales + locale_gen: + name: de_DE.UTF-8 + state: present + - name: generate us locales + locale_gen: + name: en_US.UTF-8 + state: present + - name: set system language + lineinfile: + path: /etc/default/locale + regexp: '^#?LANG=' + line: 'LANG="en_US.UTF-8"' + - name: set keyboard layout + lineinfile: + path: /etc/default/keyboard + regexp: '^#?XKBLAYOUT=' + line: 'XKBLAYOUT="us"' + - name: set keyboard variant + lineinfile: + path: /etc/default/keyboard + regexp: '^#?XKBVARIANT=' + line: 'XKBVARIANT="de"' + - name: add default user accounts + user: + name: demo + groups: sudo, kvm, docker + uid: 900 + state: present + shell: /bin/bash + password: "!"
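+      # ("!" locks the account's password in /etc/shadow, so the demo user can log in only with the SSH key authorized in the next task.)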
+ - name: Adding SSH pubkey for user demo + authorized_key: + user: demo + state: present + key: "{{ ssh_pubkey }}" + - name: passwordless sudo + lineinfile: + dest: /etc/sudoers + regexp: '^%sudo' + line: "%sudo ALL=(ALL) NOPASSWD:ALL" + - name: set proper ACLs for libvirt and demo user + acl: + path: /home/demo + entity: libvirt-qemu + etype: user + permissions: x + state: present + - name: deploy sshd config + copy: + src: files/hetzner_server_sshd_config + dest: /etc/ssh/sshd_config + mode: 0644 + owner: root + group: root + notify: sshd | restart + - name: stop and disable dnsmasq service + service: + name: dnsmasq + state: stopped + enabled: false + - name: collect libvirt network facts + virt_net: + command: facts + - name: remove & stop libvirt default network + when: ansible_libvirt_networks["default"] is defined + virt_net: + command: "{{ item }}" + name: default + with_items: + - destroy + - undefine + - name: create new libvirt network with appropriate defaults (no iptables hook) + when: ansible_libvirt_networks["wirebox"] is not defined + virt_net: + name: wirebox + command: define + xml: '{{ lookup("file", "files/hetzner_server_libvirt_default_net.xml") }}' + - name: collect libvirt network facts after defining new network + virt_net: + command: facts + - name: start new default libvirt net + when: ansible_libvirt_networks["wirebox"].state != 'active' + virt_net: + name: wirebox + command: create + autostart: yes + - name: start new default libvirt net on boot + when: ansible_libvirt_networks["wirebox"].autostart != 'yes' + virt_net: + name: wirebox + autostart: yes + - name: check if nftables.conf is deployed already + stat: + path: /root/.nftables_deployed + register: nft_deployed + - name: deploy /etc/nftables.conf + template: + src: files/hetzner_server_nftables.conf.j2 + dest: /etc/nftables.conf + mode: 0750 + owner: root + group: root + notify: nftables | restart + when: not nft_deployed.stat.exists + - name: add local file flag after nftables deployment + file: + path: /root/.nftables_deployed + state: touch + modification_time: preserve + access_time: preserve + - name: deploy wire artifact, ubuntu iso + block: + - name: create wire-server-deploy directory for demo user + file: + path: /home/demo/wire-server-deploy + state: directory + owner: demo + group: demo + mode: 0775 + - name: check if wire-server-deploy-static-{{ artifact_hash }}.tgz exists + stat: + path: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + get_checksum: False + register: artifact_archive_file_check + - name: download wire-server-deploy archive + shell: + cmd: curl -fsSLo /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-{{ artifact_hash }}.tgz + creates: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + when: not artifact_archive_file_check.stat.exists + - name: check if wire-server-deploy folder contents exist + stat: + path: /home/demo/wire-server-deploy/containers-helm.tar + get_checksum: False + register: artifact_folder_content_check + - name: unpack wire-server-deploy archive + unarchive: + src: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + dest: /home/demo/wire-server-deploy + remote_src: yes + when: not artifact_folder_content_check.stat.exists + - name: check if ubuntu iso exists + stat: + path: /home/demo/wire-server-deploy/ubuntu.iso + get_checksum: False + register: iso_file_check + - name: download ubuntu {{ ubuntu_version }} iso + shell: + 
cmd: curl -fsSLo /home/demo/wire-server-deploy/ubuntu.iso https://releases.ubuntu.com/jammy/ubuntu-{{ ubuntu_version }}-live-server-amd64.iso + creates: /home/demo/wire-server-deploy/ubuntu.iso + when: not iso_file_check.stat.exists + - name: set permissions inside wire-server-deploy via shell command (fails when using ansible directive) + shell: + cmd: sudo chmod -R 0775 /home/demo/wire-server-deploy; sudo chown -R demo:demo /home/demo + become_user: demo + + handlers: + - name: sshd | restart + service: + name: sshd + state: restarted + - name: nftables | restart + service: + name: nftables + enabled: true + state: restarted diff --git a/ansible/host_vars/localhost/python.yml b/ansible/host_vars/localhost/python.yml new file mode 100644 index 000000000..6c53b4b1b --- /dev/null +++ b/ansible/host_vars/localhost/python.yml @@ -0,0 +1 @@ +ansible_python_interpreter: "{{ lookup('env','LOCALHOST_PYTHON') }}" diff --git a/ansible/hosts.example-demo.ini b/ansible/inventory/demo/hosts.example.ini similarity index 91% rename from ansible/hosts.example-demo.ini rename to ansible/inventory/demo/hosts.example.ini index 0cb27f089..f4402b49d 100644 --- a/ansible/hosts.example-demo.ini +++ b/ansible/inventory/demo/hosts.example.ini @@ -42,3 +42,9 @@ docker_dns_servers_strict = False [k8s-cluster:vars] kube_network_plugin = flannel kubeconfig_localhost = True + +[minio:vars] +prefix = "example-" +domain = "example.com" +deeplink_title = "example.com environment" + diff --git a/ansible/inventory/offline/99-static b/ansible/inventory/offline/99-static new file mode 100644 index 000000000..c661ba4da --- /dev/null +++ b/ansible/inventory/offline/99-static @@ -0,0 +1,170 @@ +# In this section, add all machines in this installation. +# +# Ansible connects to the machine on `ansible_host` +# +# The machines talk to each other on `ip` +# +# !!! if `ip` is not provided, ansible will default to the IP of the default +# interface, which is probably not what you want +# +# ansible_host= +[all] +# kubenode1 ansible_host=100.89.110.8 ip=10.114.0.10 +# kubenode2 ansible_host=100.154.219.107 ip=10.114.0.8 +# kubenode3 ansible_host=100.227.143.169 ip=10.114.0.2 +# You could add more if capacity is needed +# kubenode4 .... + +# cassandra1 ansible_host=XXXX +# cassandra2 ansible_host=XXXX +# cassandra3 ansible_host=XXXX + +# elasticsearch1 ansible_host=XXXX +# elasticsearch2 ansible_host=XXXX +# elasticsearch3 ansible_host=XXXX +# +# minio1 ansible_host=XXXX +# minio2 ansible_host=XXXX +# minio3 ansible_host=XXXX +# +# rabbitmq1 ansible_host=XXXX +# rabbitmq2 ansible_host=XXXX +# rabbitmq3 ansible_host=XXXX +# +# If you are in an offline environment, add an assethost here, from which +# artifacts are served +# assethost ansible_host=100.89.14.74 ip=10.114.0.9 + +# If you need to tunnel ssh connections through a bastion host (because your +# nodes are not directly reachable from the machine running ansible), define a +# bastion host as well, and uncomment the [bastion] section below. +# +# bastion ansible_host=XXXX ansible_user=some_user + + +# Below variables are set for all machines in the inventory. +[all:vars] +# If you need to ssh as a user that's not the same user as the one running ansible +# ansible_user= +# ansible_password= +# ansible_sudo_pass= +# Keep in mind this user needs to be able to sudo passwordless. +# ansible_user = root +# +# Usually, you want to have a separate keypair to ssh to these boxes, +# and tell ansible where it is by setting `ansible_ssh_private_key_file`.
+# ansible_ssh_private_key_file = ./dot_ssh/id_ed25519 +# Note adding it to the ssh agent won't work in scenarios where ansible +# execution is wrapped through a container, as the ssh-agent socket isn't +# accessible there. + +## In the next four sections, Cassandra, elasticsearch, rabbitmq, and MinIO each +# need the name of the interface on which database services should run. +# While this can be used to put the databases on a private network, +# you must specify the interface name even in the case that it is +# the only interface on the box. + +# Note: for offline configurations, this is required. The impact is that DNS +# on the physical kubenodes does not perform requests against the kubernetes +# cluster, preferring the network-provided DNS settings. +# resolvconf_mode: none + +[cassandra:vars] +# cassandra_network_interface = enp1s0 +# setting either cassandra backup directive to 'True' below requires a valid s3 bucket name as well +# also, enabling backups will install `awscli` via pip, which requires an internet connection +# cassandra_backup_enabled = False +# cassandra_incremental_backup_enabled = False +# cassandra_backup_s3_bucket = + + +[elasticsearch:vars] +# elasticsearch_network_interface = enp1s0 + +[minio:vars] +# minio_network_interface = enp1s0 + +### No longer used; generated by the nginz section of values/wire-server/values.yaml instead. +#prefix = "example-" +#domain = "example.com" +#deeplink_title = "example.com environment" + +# Rabbitmq specific variables +[rmq-cluster:vars] +# rabbitmq_network_interface = enp1s0 + +# For the following groups, add all nodes defined above to the sections below. +# Define any additional variables that should be set for these nodes. + +# Uncomment this if you use the bastion host +# [bastion] +# bastion + +# Add all nodes that should be the master +[kube-master] +# kubenode1 +# kubenode2 +# kubenode3 + +[etcd] +# !!! There MUST be an ODD number of etcd servers +# +# Uncomment if etcd and kubernetes are colocated +# +# kubenode1 etcd_member_name=etcd1 +# kubenode2 etcd_member_name=etcd2 +# kubenode3 etcd_member_name=etcd3 +# +# Uncomment if etcd cluster is separately deployed from kubernetes masters +# etcd1 etcd_member_name=etcd1 +# etcd2 etcd_member_name=etcd2 +# etcd3 etcd_member_name=etcd3 + +# Add all worker nodes here +[kube-node] +# kubenode1 +# kubenode2 +# kubenode3 + +# Additional worker nodes can be added +# You can label and annotate nodes. E.g. when deploying SFT you might want to +# deploy it only on certain nodes due to the public IP requirement.
+# kubenode4 node_labels="{'wire.com/role': 'sftd'}" node_annotations="{'wire.com/external-ip': 'XXXX'}" +# kubenode5 node_labels="{'wire.com/role': 'sftd'}" node_annotations="{'wire.com/external-ip': 'XXXX'}" + +# leave this group as is +[k8s-cluster:children] +kube-master +kube-node + +# Add all cassandra nodes here +[cassandra] +# cassandra1 +# cassandra2 +# cassandra3 + +# add a cassandra seed +[cassandra_seed] +# cassandra1 + +# Add all elasticsearch nodes here +[elasticsearch] +# elasticsearch1 +# elasticsearch2 +# elasticsearch3 + +# leave this as is +[elasticsearch_master:children] +elasticsearch + +# Add all minio nodes here +[minio] +# minio1 +# minio2 +# minio3 + +# Add all rabbitmq nodes here +[rmq-cluster] +# rabbitmq1 +# rabbitmq2 +# rabbitmq3 diff --git a/ansible/inventory/offline/group_vars/all/offline.yml b/ansible/inventory/offline/group_vars/all/offline.yml new file mode 100644 index 000000000..1bc992613 --- /dev/null +++ b/ansible/inventory/offline/group_vars/all/offline.yml @@ -0,0 +1,74 @@ +# The assethost will host assets other machines will download +assethost_host: "{{ hostvars['assethost'].ansible_host }}:8080" +# When set to true; will set up all the repos below before continuing +# to bootstrap; such that no network access is needed +offline: true + +# This is copied from kubespray. We need it here too, as we run commands on hosts via the bastion too +ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}" + +# This is used nowhere inside kubespray, only inside this file +# and our own playbooks +ubuntu_repos: "http://{{ assethost_host }}/debs-{{ ansible_distribution_release }}/public" +ubuntu_repo_base_url: "{{ ubuntu_repos }}" +ubuntu_repo_gpgkey: "{{ ubuntu_repos }}/gpg" + +docker_ubuntu_repo_base_url: "{{ ubuntu_repos }}" +docker_ubuntu_repo_gpgkey: "{{ ubuntu_repos }}/gpg" +# docker_ubuntu_repo_repokey: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88" + +binaries_url: "http://{{ assethost_host }}/binaries" +nodelocaldns_ip: 10.233.0.10 + +kube_version: "v1.28.2" +etcd_version: "v3.5.10" + +kubeadm_download_url: "{{ binaries_url }}/kubeadm" +kubectl_download_url: "{{ binaries_url }}/kubectl" +kubelet_download_url: "{{ binaries_url }}/kubelet" +cni_download_url: "{{ binaries_url }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" +crictl_download_url: "{{ binaries_url }}/crictl-{{ crictl_version }}-linux-{{ image_arch }}.tar.gz" +calicoctl_download_url: "{{ binaries_url }}/calicoctl-linux-{{ image_arch }}" +calicoctl_alternate_download_url: "{{ binaries_url }}/calicoctl-linux-{{ image_arch }}" +calico_crds_download_url: "{{ binaries_url }}/{{ calico_version }}.tar.gz" +containerd_download_url: "{{ binaries_url }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +runc_download_url: "{{ binaries_url }}/runc.{{ image_arch }}" +etcd_download_url: "{{ binaries_url }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" +nerdctl_download_url: "{{ binaries_url }}/nerdctl-{{ nerdctl_version }}-linux-{{ image_arch }}.tar.gz" +cassandra_url: "{{ binaries_url }}/apache-cassandra-3.11.16-bin.tar.gz" +prometheus_jmx_url: "{{ binaries_url }}/jmx_prometheus_javaagent-0.10.jar" + +# The elasticsearch role is turing-complete in determining the right repo, +# and 
elastic.co doesn't seem to advertise their -oss repo anymore either +# Just provide the .deb from the asset host +es_install_java: false +es_version: "6.8.23" +es_use_repository: false +es_custom_package_url: "{{ binaries_url }}/elasticsearch-oss-6.8.23.deb" + +minio_server_artifact_url: "{{ binaries_url }}/minio.RELEASE.2023-07-07T07-13-57Z" +minio_server_artifact_checksum: sha256:f6d1aadf4baec1556880e659748d7fbc6bc8d2dac3554f816e95492d3881660a +minio_server_upgrade_from_checksums : [ "sha256:2c7e6774a9befbba6a126791f363550f8f14e34008e100d0e0e57e2ad9b2ab8c" ] + +minio_client_artifact_url: "{{ binaries_url }}/mc.RELEASE.2023-10-24T05-18-28Z" +minio_client_artifact_checksum: sha256:5f12926b646b533cdea1a548c54460a4dad78f27b8b17d399ba02ff4ee368e4d +minio_client_upgrade_from_checksums : [ "sha256:59e184bd4e2c3a8a19837b0f0da3977bd4e301495a24e4a5d50e291728a1de51", "sha256:205a2dc5a49dc467f78228c43c7d368e376c6cc14492597a7c4fe195c291f074" ] + +kubeconfig_localhost: true +#resolvconf_mode: none + +# This defaults to true if http://169.254.169.254/latest/meta-data exists; which +# is also available in non-AWS. e.g. in Hetzner. Lets not let this autodetect in offline +is_aws_environment: False + +# IP address for the logging (for example QRadar) server +syslog_target_ip: 12.34.56.78 + +# OVERRIDES + +coredns_version: "v1.11.4" +ingress_nginx_version: "v1.10.6" +metrics_server_version: "v0.7.2" +ingress_nginx_kube_webhook_certgen_image_tag: "v20231011-8b53cabe0" +cert_manager_version: "v1.16.3" +nginx_image_tag: "1.25.4-alpine" \ No newline at end of file diff --git a/ansible/hosts.example.ini b/ansible/inventory/prod/hosts.example.ini similarity index 72% rename from ansible/hosts.example.ini rename to ansible/inventory/prod/hosts.example.ini index 0cde30f52..b095547f7 100644 --- a/ansible/hosts.example.ini +++ b/ansible/inventory/prod/hosts.example.ini @@ -9,13 +9,6 @@ minio01 ansible_host=X.X.X.X minio02 ansible_host=X.X.X.X minio03 ansible_host=X.X.X.X -# * 'ansible_host' is the IP to ssh into -# * set restund_network_interface to the interface that you want the process to bind to in the [all:vars] section -# * Optional: 'restund_peer_udp_advertise_addr' is the public IP to advertise for other turn servers if different than the ip on the 'restund_network_interface' -# If using 'restund_peer_udp_advertise_addr', make sure that UDP (!) traffic from any restund server (including itself) -# can reach that IP (for restund->restund communication) -restund01 ansible_host=X.X.X.X -restund02 ansible_host=X.X.X.X # * 'ansible_host' is the IP to ssh into # * 'ip' is the IP to bind to (if multiple network interfaces are in use) @@ -61,15 +54,10 @@ minio03 [minio:vars] minio_access_key = "REPLACE_THIS_WITH_THE_DESIRED_ACCESS_KEY" minio_secret_key = "REPLACE_THIS_WITH_THE_DESIRED_SECRET_KEY" +prefix = "example-" +domain = "example.com" +deeplink_title = "example.com environment" -[restund] -restund01 -restund02 - -[restund:vars] -## Set the network interface name for restund to bind to if you have more than one network interface -## If unset, defaults to the ansible_default_ipv4 (if defined) otherwise to eth0 -# restund_network_interface = eth0 ### KUBERNETES ### @@ -114,6 +102,9 @@ ansible_python_interpreter = /usr/bin/python3 # ansible_ssh_pass = ... # ansible_become_pass = ... 
+## If the servers need a proxy to access the Internet, configure it here: +# proxy_env = "{'http_proxy': 'http://proxy.example.com:8080', 'https_proxy': 'http://proxy.example.com:8080'}" + ### CASSANDRA section ### is_aws_environment = False @@ -128,15 +119,13 @@ ## Set these in order to use an APT mirror other than the default. # es_apt_key = "https:///linux/ubuntu/gpg" -# es_apt_url = "deb [trusted=yes] https:///apt bionic stable" +# es_apt_url = "deb [trusted=yes] https:///apt jammy stable" ### MINIO section ### ## Set this to a name of a network interface (e.g. 'eth0'), on which you wish minio processes to talk to each other. # minio_network_interface = "ens123" -### RESTUND section ### -# restund_network_interface = "..." ### KUBERNETES section (see kubespray documentation for details) ### @@ -148,11 +137,5 @@ bootstrap_os = ubuntu # 'flannel' is preferred on bare-metal setups, in case you wish to use `metallb` kube_network_plugin = flannel -## You can update server-side versions for helm/kubernetes here on new releases. -## These server-side versions should match the client-side versions: -## See download_kubespray.yml for default server-side versions -## See download_cli_binaries.yml for default client-side versions -# kube_version = "v1.14.2" -# helm_version = "v2.13.1" ## download the kubeconfig after installing to localhost kubeconfig_localhost = true diff --git a/ansible/kube-minio-static-files.yml b/ansible/kube-minio-static-files.yml new file mode 100644 index 000000000..c7604c774 --- /dev/null +++ b/ansible/kube-minio-static-files.yml @@ -0,0 +1,54 @@ +# WARNING: This is not recommended for production use. +# +# FUTUREWORK: https://github.com/zinfra/backend-issues/issues/1763 +- hosts: minio + any_errors_fatal: true + become: true + gather_facts: true + vars: + minio_access_key: 'dummykey' + minio_secret_key: 'dummysecret' + tasks: + - name: "install minio client CLI" + import_role: + name: ansible-minio + tasks_from: install-client + + - name: "add 'local' mc config alias with correct credentials" + shell: "mc config host add local http://{{ service_cluster_ip }}:9000 '{{ minio_access_key }}' '{{ minio_secret_key }}'" + + - name: "create 'public' bucket" + shell: "mc mb --ignore-existing local/public" + + - name: "make the 'public' bucket world-accessible" + shell: "mc policy set public local/public" + run_once: true + + - name: "remove unneeded config aliases added by default" + shell: "mc config host rm {{ item }}" + with_items: + - gcs + - s3 + - play + + - name: "add static files to minio" + import_role: + name: minio-static-files + vars: + prefix: "" + domain: "${environment_name}.${root_domain}" + deeplink_title: "${environment_name}.${root_domain}" + +- hosts: minio + any_errors_fatal: true + become: true + gather_facts: true + tags: static-files + roles: + - role: minio-static-files + # Override these variables! + # FUTUREWORK: parse them from a configuration file shared with helm + # (as the domain needs to be known in helm override values.yaml) + prefix: "{{ minio_deeplink_prefix | default('example-') }}" + domain: "{{ minio_deeplink_domain | default('example.com') }}" + deeplink_title: "{{ minio_deeplink_domain | default('example.com environment') }}" diff --git a/ansible/kubernetes-fetch-kubeconfig.yml b/ansible/kubernetes-fetch-kubeconfig.yml new file mode 100644 index 000000000..7c63118da --- /dev/null +++ b/ansible/kubernetes-fetch-kubeconfig.yml @@ -0,0 +1,17 @@ +# Fetch the `kubeconfig` file.
This is useful when the original `kubeconfig` has +# been lost. +# Run it with e.g. `ENV=bella make create-inventory fetch-kubeconfig`. + +- name: 'Fetch kubeconfig' + hosts: kube-master + tasks: + - name: download kubeconfig + ansible.builtin.fetch: + src: /etc/kubernetes/admin.conf + dest: ./kubeconfig.new + flat: true + + - name: notify user about kubeconfig + ansible.builtin.debug: + msg: + - "./kubeconfig.new has been downloaded to your machine" diff --git a/ansible/kubernetes-renew-certs.yml b/ansible/kubernetes-renew-certs.yml new file mode 100644 index 000000000..41fb1d600 --- /dev/null +++ b/ansible/kubernetes-renew-certs.yml @@ -0,0 +1,81 @@ +# See https://docs.wire.com/how-to/administrate/kubernetes/certificate-renewal/scenario-1_k8s-v1.14-kubespray.html +# +# Run "make renew-certs" to execute this playbook. +# +# Beware! This script only works if there is a single control plane node! +# +# Comments: +# +# > Step 6. Make kubelet aware of the new certificate +# Restarting kubelet doesn't seem to be necessary +# +# > Step 7. Copy certificates over to all the other nodes +# This can be skipped in our case, too. It seems that non-cps nodes don't have these certificates. +# +# Also see https://github.com/kubernetes-sigs/kubespray/issues/5464#issuecomment-647022647 + +- name: 'Renew certificates' + hosts: kube-master + tasks: + - name: fail if there is more than 1 master + fail: + msg: This playbook only works when there is one master node. + when: groups['kube-master'] | length > 1 + - name: create backup dir + file: + dest: "/etc/kubernetes/backup-before-cert-renew/" + state: directory + + - name: create backups + copy: + remote_src: true + src: "/etc/kubernetes/{{ item }}" + dest: "/etc/kubernetes/backup-before-cert-renew/{{ item }}" + with_items: + - ssl + - admin.conf + - controller-manager.conf + - kubelet.conf + - scheduler.conf + + - name: renew certificates + register: command_output + args: + executable: /bin/bash + ansible.builtin.shell: | + set -eo pipefail + + kubeadm alpha certs renew apiserver-kubelet-client + kubeadm alpha certs renew apiserver + kubeadm alpha certs renew front-proxy-client + kubeadm alpha kubeconfig user --client-name system:kube-controller-manager > /etc/kubernetes/controller-manager.conf + kubeadm alpha kubeconfig user --client-name system:kube-scheduler > /etc/kubernetes/scheduler.conf + # note: if apiserver_loadbalancer_domain_name is not defined it might be that you talk to the cps directly + # in that case replace {{ apiserver_loadbalancer_domain_name }} with the public ip / domain of the cps + kubeadm alpha kubeconfig user --client-name system:node:$(hostname) --org system:nodes --apiserver-advertise-address={{ apiserver_loadbalancer_domain_name }} > /etc/kubernetes/kubelet.conf + + kubeadm alpha kubeconfig user --client-name kubernetes-admin --org system:masters > /etc/kubernetes/admin.conf + + - debug: + var: command_output.stdout_lines + + - name: restart processes + args: + executable: /bin/bash + ansible.builtin.shell: | + set -eo pipefail + + kill -s SIGHUP $(pidof kube-apiserver) + kill -s SIGHUP $(pidof kube-controller-manager) + kill -s SIGHUP $(pidof kube-scheduler) + + - name: download kubeconfig + ansible.builtin.fetch: + src: /etc/kubernetes/admin.conf + dest: ./kubeconfig.new + flat: true + + - name: notify user about kubeconfig + ansible.builtin.debug: + msg: + - "./kubeconfig.new has been downloaded to your machine" diff --git a/ansible/kubernetes.yml b/ansible/kubernetes.yml index d1651e7fd..a59fa2ef4 100644 ---
a/ansible/kubernetes.yml +++ b/ansible/kubernetes.yml @@ -1,8 +1,47 @@ ---- -# This assumes you ran "make download" to download the kubespray files -# -- import_playbook: roles-external/kubespray/cluster.yml +- hosts: k8s-cluster + tasks: + - set_fact: + kubeconfig_localhost: true + # NOTE: stick with the default but expose it outside of Kubespray + artifacts_dir: "{{ inventory_dir }}/artifacts" + +- import_playbook: "{{ lookup('first_found', ['roles-override/kubespray/cluster.yml', 'roles-external/kubespray/cluster.yml']) }}" + +- hosts: k8s-cluster + tasks: + - name: Annotate nodes + command: "kubectl annotate node --overwrite {{ inventory_hostname }} {{ item.key }}={{ item.value }}" + with_dict: "{{ node_annotations | default({}) }}" + +- import_playbook: kubernetes_logging.yml + +- name: 'Bringing kubeconfig in place' + hosts: k8s-cluster + become: no + tasks: + - delegate_to: localhost + block: + - name: "Checking if 'kubeconfig' file already exists" + when: skip_kubeconfig_copying is undefined or skip_kubeconfig_copying == false + stat: + path: "{{ inventory_dir }}/../kubeconfig" + register: file_kubeconfig + - when: (skip_kubeconfig_copying is undefined or skip_kubeconfig_copying == false) and (not file_kubeconfig.stat.exists) + block: + - name: 'Renaming kubeconfig file provided by Kubespray' + copy: + src: "{{ artifacts_dir }}/admin.conf" + dest: "{{ inventory_dir }}/../kubeconfig.dec" + - debug: + msg: "TODO: Encrypt {{ inventory_dir }}/../kubeconfig.dec with sops" + +# Install systemd-coredump on all k8s-cluster nodes, if +# install_systemd_coredump is set to true in inventory (role defaults to false) +- hosts: k8s-cluster + roles: + - systemd-coredump - hosts: etcd + environment: "{{ proxy_env | default({}) }}" roles: - etcd-helpers diff --git a/ansible/kubernetes_logging.yml b/ansible/kubernetes_logging.yml new file mode 100644 index 000000000..e8ab2ada9 --- /dev/null +++ b/ansible/kubernetes_logging.yml @@ -0,0 +1,23 @@ +- hosts: k8s-cluster + environment: "{{ proxy_env | default({}) }}" + roles: + - role: logrotate + logrotate_scripts: + # The following will rotate pod logs once per day to keep no more than + # 3 days (maxage 1, rotate 2) of logs for data minimization/protection + # reasons. 
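+        # As a rough sketch, the options below should render to an
+        # /etc/logrotate.d entry along these lines (the exact file name and
+        # layout depend on the logrotate role):
+        #
+        #   /var/lib/docker/containers/*/*.log {
+        #     daily
+        #     missingok
+        #     rotate 2
+        #     maxage 1
+        #     copytruncate
+        #     nocreate
+        #     nocompress
+        #   }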
+ # + # NOTE for wire-server-deploy maintainers: if you change the following + # options, ensure to also keep the documentation up-to-date, see the + # documentation introduced in + # https://github.com/wireapp/wire-docs/pull/79 + - name: podlogs + path: "/var/lib/docker/containers/*/*.log" + options: + - daily + - missingok + - rotate 2 + - maxage 1 + - copytruncate + - nocreate + - nocompress diff --git a/ansible/logging.yml b/ansible/logging.yml new file mode 100644 index 000000000..10e8a8c40 --- /dev/null +++ b/ansible/logging.yml @@ -0,0 +1,37 @@ +--- +# Configure using Rsyslog to monitor and forward log files to Qradar/remote logging +# ------------------------------------------------------------------ +# https://qradarinsights.com/2018/12/20/using-rsyslog-to-monitor-and-forward-log-files-to-qradar/ +# See https://wearezeta.atlassian.net/browse/JCT-62 + +- name: Change log file permission only on elasticsearch hosts + hosts: 'elasticsearch' + tasks: + # sudo chmod 775 /var/log/elasticsearch + - name: Change permissions of elasticsearch log file + become: true + file: + path: /var/log/elasticsearch + mode: '0775' + +- name: Using Rsyslog to monitor and forward log files + hosts: all + tasks: + - name: Copy the elasticsearch config + become: true + template: + src: templates/elasticsearch.conf.j2 + dest: /etc/rsyslog.d/55-elasticsearch.conf + + - name: Copy the logging config + become: true + template: + src: templates/logging.conf.j2 + dest: /etc/rsyslog.d/rfc5424-remote.conf + + # service syslog restart + - name: Restart the syslog service + become: true + service: + name: syslog + state: restarted diff --git a/ansible/minio.yml b/ansible/minio.yml index 4f07ac2ba..ea7ae0de5 100644 --- a/ansible/minio.yml +++ b/ansible/minio.yml @@ -9,39 +9,35 @@ any_errors_fatal: true become: true gather_facts: true + environment: "{{ proxy_env | default({}) }}" vars: minio_server_env_extra: "MINIO_BROWSER=off" bucket_names: - "dummy-bucket" - "assets" - "public" - layouts: + - "k8ssandra-backups" + minio_layouts: # The first minio instance on this server. - layout1: - servicename: "minio-server1" - minio_server_addr: ":9000" - minio_server_datadirs: "/var/lib/minio-server1" - minio_server_envfile: "/etc/default/minio-server1" + server1: + server_addr: ":9000" # The second minio instance on this server. 
- layout2: - servicename: "minio-server2" - minio_server_addr: ":9092" - minio_server_datadirs: "/var/lib/minio-server2" - minio_server_envfile: "/etc/default/minio-server2" + server2: + server_addr: ":9092" roles: - role: ansible-minio - layout: layout1 + minio_layout: server1 tags: - minio - role: ansible-minio - layout: layout2 + minio_layout: server2 tags: - minio tasks: - name: "check which buckets exists" shell: "mc ls def" environment: - MC_HOST_def: "http://{{ minio_access_key }}:{{ minio_secret_key }}@localhost{{ layouts.layout1.minio_server_addr }}" + MC_HOST_def: "http://{{ minio_access_key }}:{{ minio_secret_key }}@localhost{{ minio_layouts.server1.server_addr }}" run_once: true register: check_bucket tags: bucket-create @@ -49,14 +45,14 @@ - name: create bucket shell: "mc mb def/{{ item }}" environment: - MC_HOST_def: "http://{{ minio_access_key }}:{{ minio_secret_key }}@localhost{{ layouts.layout1.minio_server_addr }}" + MC_HOST_def: "http://{{ minio_access_key }}:{{ minio_secret_key }}@localhost{{ minio_layouts.server1.server_addr }}" run_once: true with_items: "{{ bucket_names }}" when: item not in check_bucket.stdout tags: bucket-create - name: "add 'local' mc config alias with correct credentials" - shell: "mc config host add local http://localhost{{ layouts.layout1.minio_server_addr }} {{ minio_access_key }} {{ minio_secret_key }}" + shell: "mc config host add local http://localhost{{ minio_layouts.server1.server_addr }} '{{ minio_access_key }}' '{{ minio_secret_key }}'" tags: mc-config - name: "make the 'public' bucket world-accessible" @@ -78,6 +74,7 @@ become: true gather_facts: true tags: static-files + environment: "{{ proxy_env | default({}) }}" roles: - role: minio-static-files # Override these variables! diff --git a/ansible/ntp.yml b/ansible/ntp.yml new file mode 100644 index 000000000..4c2f4c981 --- /dev/null +++ b/ansible/ntp.yml @@ -0,0 +1,28 @@ +- hosts: cassandra + any_errors_fatal: true + become: true + vars: + ntp_server: ntp.ubuntu.com # specify NTP server you wish to use here + tasks: + - name: Install NTP + apt: + name: ntp + state: present + + - name: Deploy ntp.conf + template: + src=ntp.conf.j2 + dest=/etc/ntp.conf + owner=root + mode=0644 + + - name: Restart ntp service + service: + name=ntp + state=restarted + + - name: Make sure NTP is started + service: + name=ntp + state=started + enabled=yes diff --git a/ansible/poetry.lock b/ansible/poetry.lock deleted file mode 100644 index 5ab717fbf..000000000 --- a/ansible/poetry.lock +++ /dev/null @@ -1,283 +0,0 @@ -[[package]] -category = "main" -description = "Radically simple IT automation" -name = "ansible" -optional = false -python-versions = "*" -version = "2.7.11" - -[[package]] -category = "main" -description = "Ansible Modules for Hashicorp Vault" -name = "ansible-modules-hashivault" -optional = false -python-versions = "*" -version = "3.17.7" - -[package.dependencies] -ansible = ">=2.0.0" -hvac = ">=0.7.0" -requests = "*" - -[[package]] -category = "main" -description = "Amazon Web Services Library" -name = "boto" -optional = false -python-versions = "*" -version = "2.49.0" - -[[package]] -category = "main" -description = "The AWS SDK for Python" -name = "boto3" -optional = false -python-versions = "*" -version = "1.9.181" - -[package.dependencies] -botocore = ">=1.12.181,<1.13.0" -jmespath = ">=0.7.1,<1.0.0" -s3transfer = ">=0.2.0,<0.3.0" - -[[package]] -category = "main" -description = "Low-level, data-driven core of boto 3." 
-name = "botocore" -optional = false -python-versions = "*" -version = "1.12.181" - -[package.dependencies] -docutils = ">=0.10" -jmespath = ">=0.7.1,<1.0.0" - -[package.dependencies.python-dateutil] -python = ">=2.7" -version = ">=2.1,<3.0.0" - -[package.dependencies.urllib3] -python = ">=2.7,<2.8 || >=3.4" -version = ">=1.20,<1.26" - -[[package]] -category = "main" -description = "Python package for providing Mozilla's CA Bundle." -name = "certifi" -optional = false -python-versions = "*" -version = "2019.6.16" - -[[package]] -category = "main" -description = "Universal encoding detector for Python 2 and 3" -name = "chardet" -optional = false -python-versions = "*" -version = "3.0.4" - -[[package]] -category = "main" -description = "DNS toolkit" -name = "dnspython" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "1.16.0" - -[[package]] -category = "main" -description = "Docutils -- Python Documentation Utilities" -name = "docutils" -optional = false -python-versions = "*" -version = "0.14" - -[[package]] -category = "main" -description = "Backport of the concurrent.futures package from Python 3" -marker = "python_version == \"2.6\" or python_version == \"2.7\"" -name = "futures" -optional = false -python-versions = ">=2.6, <3" -version = "3.2.0" - -[[package]] -category = "main" -description = "HashiCorp Vault API client" -name = "hvac" -optional = false -python-versions = "*" -version = "0.9.2" - -[package.dependencies] -requests = ">=2.21.0" - -[[package]] -category = "main" -description = "Internationalized Domain Names in Applications (IDNA)" -name = "idna" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "2.8" - -[[package]] -category = "main" -description = "A small but fast and easy to use stand-alone template engine written in pure python." -name = "jinja2" -optional = false -python-versions = "*" -version = "2.10.1" - -[package.dependencies] -MarkupSafe = ">=0.23" - -[[package]] -category = "main" -description = "JSON Matching Expressions" -name = "jmespath" -optional = false -python-versions = "*" -version = "0.9.4" - -[[package]] -category = "main" -description = "Safely add untrusted strings to HTML/XML markup." -name = "markupsafe" -optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" -version = "1.1.1" - -[[package]] -category = "main" -description = "A network address manipulation library for Python" -name = "netaddr" -optional = false -python-versions = "*" -version = "0.7.19" - -[[package]] -category = "main" -description = "Python Build Reasonableness" -name = "pbr" -optional = false -python-versions = "*" -version = "5.3.1" - -[[package]] -category = "main" -description = "Extensions to the standard Python datetime module" -marker = "python_version >= \"2.7\"" -name = "python-dateutil" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -version = "2.8.0" - -[package.dependencies] -six = ">=1.5" - -[[package]] -category = "main" -description = "YAML parser and emitter for Python" -name = "pyyaml" -optional = false -python-versions = "*" -version = "5.1.1" - -[[package]] -category = "main" -description = "Python HTTP for Humans." 
-name = "requests" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "2.22.0" - -[package.dependencies] -certifi = ">=2017.4.17" -chardet = ">=3.0.2,<3.1.0" -idna = ">=2.5,<2.9" -urllib3 = ">=1.21.1,<1.25.0 || >1.25.0,<1.25.1 || >1.25.1,<1.26" - -[[package]] -category = "main" -description = "a version of dict that keeps keys in insertion resp. sorted order" -marker = "platform_python_implementation == \"CPython\" and python_version <= \"2.7\"" -name = "ruamel.ordereddict" -optional = false -python-versions = "*" -version = "0.4.13" - -[[package]] -category = "main" -description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" -name = "ruamel.yaml" -optional = false -python-versions = "*" -version = "0.15.97" - -[package.dependencies] -[package.dependencies."ruamel.ordereddict"] -python = "<=2.7" -version = "*" - -[[package]] -category = "main" -description = "An Amazon S3 Transfer Manager" -name = "s3transfer" -optional = false -python-versions = "*" -version = "0.2.1" - -[package.dependencies] -botocore = ">=1.12.36,<2.0.0" - -[package.dependencies.futures] -python = ">=2.6,<2.8" -version = ">=2.2.0,<4.0.0" - -[[package]] -category = "main" -description = "Python 2 and 3 compatibility utilities" -marker = "python_version >= \"2.7\"" -name = "six" -optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*" -version = "1.12.0" - -[[package]] -category = "main" -description = "HTTP library with thread-safe connection pooling, file post, and more." -name = "urllib3" -optional = false -python-versions = "*" -version = "1.22" - -[metadata] -content-hash = "d523c92dd42556edd2d4bcaa8aeb4701507225f5e492169e7d5f0af854e89253" -python-versions = "^2.7 || >=3.5" - -[metadata.hashes] -ansible = ["e7e6de461b7d07cb4d8b2dd2a32b231af7c56e6bf39b851024671aaa52fd377e"] -ansible-modules-hashivault = ["1d323ba19e2b6459dfb5f14dee2cb79af8103a691c32fd65dc642bbf7eb82f2b"] -boto = ["147758d41ae7240dc989f0039f27da8ca0d53734be0eb869ef16e3adcfa462e8", "ea0d3b40a2d852767be77ca343b58a9e3a4b00d9db440efb8da74b4e58025e5a"] -boto3 = ["5e0e3e2a520cc289a36c053974afdd416292e0e8d2a7220a575cf992d6a890cd", "93625ceb73aa0eaab4fefba733562a6dfa1366b38a056edf5e53aa4fb97e0b3a"] -botocore = ["1a1594258b503adeb3d54b77c0b79151e546d76e8ebc62aa9258693e6cdd0f9b", "6dc20c4766cb0a4ff8b0993effe2550e3b4e4606265da37bcf8949610980da40"] -certifi = ["046832c04d4e752f37383b628bc601a7ea7211496b4638f6514d0e5b9acc4939", "945e3ba63a0b9f577b1395204e13c3a231f9bc0223888be653286534e5873695"] -chardet = ["84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", "fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"] -dnspython = ["36c5e8e38d4369a08b6780b7f27d790a292b2b08eea01607865bf0936c558e01", "f69c21288a962f4da86e56c4905b49d11aba7938d3d740e80d9e366ee4f1632d"] -docutils = ["02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6", "51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274", "7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6"] -futures = ["9ec02aa7d674acb8618afb127e27fde7fc68994c0437ad759fa094a574adb265", "ec0a6cb848cc212002b9828c3e34c675e0c9ff6741dc445cab6fdd4e1085d1f1"] -hvac = ["0e558949d55c81550b03f1bef08981d7d3d4be6346f80d587a94eb6bf59b19f1", "bc853edddf6e2bf4f771eeb534e70ff550aa7f1d2976afd52f767e60afbd4679"] -idna = ["c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407", "ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c"] 
-jinja2 = ["065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013", "14dd6caf1527abb21f08f86c784eac40853ba93edb79552aa1e4b8aef1b61c7b"] -jmespath = ["3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6", "bde2aef6f44302dfb30320115b17d030798de8c4110e28d5cf6cf91a7a31074c"] -markupsafe = ["00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", "09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161", "09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235", "1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5", "24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff", "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b", "43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1", "46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e", "500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183", "535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66", "62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1", "6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1", "717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e", "79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b", "7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905", "88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735", "8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d", "98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e", "9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d", "9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c", "ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21", "b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2", "b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5", "b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b", "ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6", "c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f", "cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f", "e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7"] -netaddr = ["38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd", "56b3558bd71f3f6999e4c52e349f38660e54a7a8a9943335f73dfc96883e08ca"] -pbr = ["9181e2a34d80f07a359ff1d0504fad3a47e00e1cf2c475b0aa7dcb030af54c40", "94bdc84da376b3dd5061aa0c3b6faffe943ee2e56fa4ff9bd63e1643932f34fc"] -python-dateutil = ["7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb", "c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e"] -pyyaml = ["57acc1d8533cbe51f6662a55434f0dbecfa2b9eaf115bede8f6fd00115a0c0d3", "588c94b3d16b76cfed8e0be54932e5729cc185caffaa5a451e7ad2f7ed8b4043", "68c8dd247f29f9a0d09375c9c6b8fdc64b60810ebf07ba4cdd64ceee3a58c7b7", "70d9818f1c9cd5c48bb87804f2efc8692f1023dac7f1a1a5c61d454043c1d265", "86a93cccd50f8c125286e637328ff4eef108400dd7089b46a7be3445eecfa391", "a0f329125a926876f647c9fa0ef32801587a12328b4a3c741270464e3e4fa778", "a3c252ab0fa1bb0d5a3f6449a4826732f3eb6c0270925548cac342bc9b22c225", "b4bb4d3f5e232425e25dda21c070ce05168a786ac9eda43768ab7f3ac2770955", "cd0618c5ba5bda5f4039b9398bb7fb6a317bb8298218c3de25c47c4740e4b95e", "ceacb9e5f8474dcf45b940578591c7f3d960e82f926c707788a570b51ba59190", "fe6a88094b64132c4bb3b631412e90032e8cfe9745a58370462240b8cb7553cd"] -requests = ["11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4", 
"9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31"] -"ruamel.ordereddict" = ["08b4b19fe518d32251a5338e039c4dc9eb0876f2919f94c9b8d2f9446ea80806", "150ce8e6c514a2a2b62753622a75874962561f8e5eeec81a3172ab952807bf0b", "45541836cbfdde630033cae7bbbe35acbac87a0ceec79f944b7a3bedd940fe78", "854dd4a524811b16111b1107d8a751e4ca064d2bb103d3d91deab75de36b6620", "aee2fa23e884249b4284b728888c553d551e5bfd4de2731f10153fd7813ec55f", "bf0a198c8ce5d973c24e5dba12d3abc254996788ca6ad8448eabc6aa710db149"] -"ruamel.yaml" = ["17dbf6b7362e7aee8494f7a0f5cffd44902a6331fe89ef0853b855a7930ab845", "23731c9efb79f3f5609dedffeb6c5c47a68125fd3d4b157d9fc71b1cd49076a9", "2bbdd598ae57bac20968cf9028cc67d37d83bdb7942a94b9478110bc72193148", "34586084cdd60845a3e1bece2b58f0a889be25450db8cc0ea143ddf0f40557a2", "35957fedbb287b01313bb5c556ffdc70c0277c3500213b5e73dfd8716f748d77", "414cb87a40974a575830b406ffab4ab8c6cbd82eeb73abd2a9d1397c1f0223e1", "428775be75db68d908b17e4e8dda424c410222f170dc173246aa63e972d094b3", "514f670f7d36519bda504d507edfe63e3c20489f86c86d42bc4d9a6dbdf82c7b", "5cb962c1ac6887c5da29138fbbe3b4b7705372eb54e599907fa63d4cd743246d", "5f6e30282cf70fb7754e1a5f101e27b5240009766376e131b31ab49f14fe81be", "86f8e010af6af0b4f42de2d0d9b19cb441e61d3416082186f9dd03c8552d13ad", "8d47ed1e557d546bd2dfe54f504d7274274602ff7a0652cde84c258ad6c2d96d", "98668876720bce1ac08562d8b93a564a80e3397e442c7ea19cebdcdf73da7f74", "9e1f0ddc18d8355dcf5586a5d90417df56074f237812b8682a93b62cca9d2043", "a7bc812a72a79d6b7dbb96fa5bee3950464b65ec055d3abc4db6572f2373a95c", "b72e13f9f206ee103247b07afd5a39c8b1aa98e8eba80ddba184d030337220ba", "bcff8ea9d916789e85e24beed8830c157fb8bc7c313e554733a8151540e66c01", "c76e78b3bab652069b8d6f7889b0e72f3455c2b854b2e0a8818393d149ad0a0d"] -s3transfer = ["6efc926738a3cd576c2a79725fed9afde92378aa5c6a957e3af010cb019fac9d", "b780f2411b824cb541dbcd2c713d0cb61c7d1bcadae204cdddda2b35cef493ba"] -six = ["3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c", "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73"] -urllib3 = ["06330f386d6e4b195fbfc736b297f58c5a892e4440e54d294d7004e3a9bbea1b", "cc44da8e1145637334317feebd728bd869a35285b93cbb4cca2577da7e62db4f"] diff --git a/ansible/provision-sft.yml b/ansible/provision-sft.yml new file mode 100644 index 000000000..9f6a900fe --- /dev/null +++ b/ansible/provision-sft.yml @@ -0,0 +1,47 @@ +# This role requires access to s3 buckets and has a few variables that need to +# be set. When run with any variables missing, it will complain about those +# variables. +- hosts: localhost + become: false + roles: + - role: sft-monitoring-certs + when: "{{ (groups['sft_servers'] | length) > 0 }}" + +- hosts: sft_servers + roles: + - role: sft-server + - role: srv-announcer + tasks: + # The Ubuntu images provided by hetzner have systemd-resolved enabled, + # but don't use the nss module, and direct all traffic through the + # 127.0.0.53 stub resolver + # This one seems to be flaky. 
+  # Instead, configure it to use /run/systemd/resolve/resolv.conf, which points to
+  # the DNS servers retrieved via DHCP directly.
+  - name: Workaround systemd-resolved being flaky
+    file:
+      src: /run/systemd/resolve/resolv.conf
+      dest: /etc/resolv.conf
+      owner: root
+      group: root
+      state: link
+
+- hosts: localhost
+  tasks:
+    - when: "{{ (groups['sft_servers'] | length) > 0 }}"
+      block:
+        - name: Get all SRV records
+          route53:
+            zone: "{{ root_domain }}"
+            type: "SRV"
+            record: "_sft._tcp.{{ environment_name }}.{{ root_domain }}"
+            state: get
+          register: srv_records
+        - name: Delete all SRV records
+          route53:
+            zone: "{{ root_domain }}"
+            type: "SRV"
+            record: "_sft._tcp.{{ environment_name }}.{{ root_domain }}"
+            state: "delete"
+            value: "{{ srv_records.set.value }}"
+            ttl: "{{ srv_records.set.ttl }}"
diff --git a/ansible/pyproject.toml b/ansible/pyproject.toml
deleted file mode 100644
index cf2e46100..000000000
--- a/ansible/pyproject.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-[tool.poetry]
-name = "wire-server-deploy"
-version = "0.1.0"
-description = ""
-authors = ["jschaul "]
-
-[tool.poetry.dependencies]
-python = "^2.7 || >=3.5"
-ansible = "2.7.*"
-ansible-modules-hashivault = "*"
-boto = "*"
-dnspython = "*"
-netaddr = ">= 0.7"
-boto3 = "*"
-jinja2 = ">= 2.10.1"
-pbr = "^5.2"
-hvac = ">= 0.8"
-jmespath = ">= 0.9.4"
-"ruamel.yaml" = ">= 0.15.96"
-PyYAML = "*"
-
-[tool.poetry.dev-dependencies]
-
-[build-system]
-requires = ["poetry>=0.12"]
-build-backend = "poetry.masonry.api"
diff --git a/ansible/rabbitmq.yml b/ansible/rabbitmq.yml
new file mode 100644
index 000000000..db49201d0
--- /dev/null
+++ b/ansible/rabbitmq.yml
@@ -0,0 +1,5 @@
+---
+- hosts: rmq-cluster
+  become: yes
+  roles:
+    - rabbitmq-cluster
diff --git a/ansible/registry.yml b/ansible/registry.yml
index 8616d3776..91646d4a1 100644
--- a/ansible/registry.yml
+++ b/ansible/registry.yml
@@ -90,7 +90,7 @@
     with_items:
       - "{{ registry_dns_name }}"
       - quay.io
-      - k8s.gcr.io
+      - registry.k8s.io
       - gcr.io
       - docker.caching.proxy.internal
       - registry-1.docker.io
diff --git a/ansible/requirements.yml b/ansible/requirements.yml
deleted file mode 100644
index b4bcbff27..000000000
--- a/ansible/requirements.yml
+++ /dev/null
@@ -1,61 +0,0 @@
-# Requirements file is used to keep track of ansible role origins and versions.
-# Files are downloaded to `./roles-external`. Do not edit those directly.
-# -# See https://docs.ansible.com/ansible/galaxy.html#installing-multiple-roles-from-a-file -# -# To update existing roles: ( --force overrides existing folders) -# -# ansible-galaxy install -r requirements.yml --force - -- src: git+https://github.com/elastic/ansible-elasticsearch.git - name: elasticsearch - version: "6.6.0" # 2019-01-29 - -- src: git+https://github.com/ANXS/hostname.git - name: hostname - version: da6f329b2984e84d2248d4251e0c679c53dfbb30 # 2019-01-16 - -- src: git+https://github.com/ANXS/apt.git - name: ANXS.apt - version: v2.0.0 # 2018-11-03 - -- src: git+https://github.com/geerlingguy/ansible-role-java.git - name: ansible-role-java - version: 1.9.5 - -- src: git+https://github.com/geerlingguy/ansible-role-ntp.git - name: ansible-role-ntp - version: 1.6.2 - -- src: git+https://github.com/wireapp/ansible-cassandra.git - name: ansible-cassandra - version: v0.1.3 - -- src: git+https://github.com/wireapp/ansible-minio.git - name: ansible-minio - version: v1.0.2-wire - -- src: git+https://github.com/wireapp/ansible-restund.git - name: ansible-restund - version: v0.1.4 - -- src: git+https://github.com/wireapp/ansible-tinc.git - name: ansible-tinc - version: v0.1.0 - -- src: https://github.com/githubixx/ansible-role-kubectl.git - name: ansible-kubectl - # keep the kubectl version in sync with kubespray (see download_kubespray.yml) - version: "8.0.1+1.14.2" - -- src: https://github.com/andrewrothstein/ansible-kubernetes-helm.git - name: ansible-helm - version: "v1.3.7" # 2020-04-02 - -- src: https://github.com/cchurch/ansible-role-admin-users.git - name: admin_users - version: "0.7.2" - -- src: git+https://github.com/wireapp/ansible-ntp-verify.git - name: ansible-ntp-verify - version: v0.0.1 diff --git a/ansible/restund.yml b/ansible/restund.yml deleted file mode 100644 index 9b144b153..000000000 --- a/ansible/restund.yml +++ /dev/null @@ -1,38 +0,0 @@ -# Reminder that the pem file should look like: -# -----BEGIN CERTIFICATE----- -# --- ... CERT CONTENT ... -- -# -----END CERTIFICATE----- -# -----BEGIN CERTIFICATE----- -# --- ... INTERMEDIATE ..---- -# -----END CERTIFICATE---- -# -----BEGIN PRIVATE KEY----- -# --- .... PRIV KEY ----- -# -----END PRIVATE KEY----- -- name: provision - hosts: restund - gather_facts: yes - become: yes - any_errors_fatal: True - vars_prompt: - # More info to be found at the [demo-secrets.yaml](https://github.com/wireapp/wire-server-deploy/blob/master/values/wire-server/demo-secrets.example.yaml#L9-L12)" - - name: restund_zrest_secret - prompt: "Enter the restund_zrest_secret, which must match the brig.secrets.turn.secret in the helm configuration." 
- vars: - # This config will make restund run as root and listen on ports 80 and 443 - - restund_user: root - - restund_tls_certificate: "{{ lookup('file', '/tmp/tls_cert_and_priv_key.pem') }}" - - restund_udp_listen_port: 80 - - restund_tcp_listen_port: 80 - - restund_tls_listen_port: 443 - roles: - - role: hostname - tags: - - hostname - - - role: ansible-role-ntp - tags: - - ntp - - - role: ansible-restund - tags: - - restund diff --git a/ansible/roles-external/ANXS.apt b/ansible/roles-external/ANXS.apt new file mode 160000 index 000000000..f602ba7e8 --- /dev/null +++ b/ansible/roles-external/ANXS.apt @@ -0,0 +1 @@ +Subproject commit f602ba7e88abfbb3af6679a8ca47207dc3e9d9c4 diff --git a/ansible/roles-external/admin_users b/ansible/roles-external/admin_users new file mode 160000 index 000000000..d5bcef7e9 --- /dev/null +++ b/ansible/roles-external/admin_users @@ -0,0 +1 @@ +Subproject commit d5bcef7e925ee1acf4e42359f0a95ed788eea58f diff --git a/ansible/roles-external/andrewrothstein.unarchive-deps b/ansible/roles-external/andrewrothstein.unarchive-deps new file mode 160000 index 000000000..448554326 --- /dev/null +++ b/ansible/roles-external/andrewrothstein.unarchive-deps @@ -0,0 +1 @@ +Subproject commit 4485543262cfe04170d1ec02c8ccb95c44a7a222 diff --git a/ansible/roles-external/ansible-cassandra b/ansible/roles-external/ansible-cassandra new file mode 160000 index 000000000..f5c2467f5 --- /dev/null +++ b/ansible/roles-external/ansible-cassandra @@ -0,0 +1 @@ +Subproject commit f5c2467f5df08361769603e4571bbf65b1267e53 diff --git a/ansible/roles-external/ansible-minio b/ansible/roles-external/ansible-minio new file mode 160000 index 000000000..89803b5d0 --- /dev/null +++ b/ansible/roles-external/ansible-minio @@ -0,0 +1 @@ +Subproject commit 89803b5d0ed638ccf8e99f46f0dd4cc4f4bf5dcb diff --git a/ansible/roles-external/ansible-ntp-verify b/ansible/roles-external/ansible-ntp-verify new file mode 160000 index 000000000..4c3d0c67d --- /dev/null +++ b/ansible/roles-external/ansible-ntp-verify @@ -0,0 +1 @@ +Subproject commit 4c3d0c67d32d2d74444f4db45b2a4d2efdc7d590 diff --git a/ansible/roles-external/ansible-role-java b/ansible/roles-external/ansible-role-java new file mode 160000 index 000000000..e715e3c4b --- /dev/null +++ b/ansible/roles-external/ansible-role-java @@ -0,0 +1 @@ +Subproject commit e715e3c4b9bef3fc7716b7787daf95eafd8205fb diff --git a/ansible/roles-external/ansible-role-ntp b/ansible/roles-external/ansible-role-ntp new file mode 160000 index 000000000..af1ec6238 --- /dev/null +++ b/ansible/roles-external/ansible-role-ntp @@ -0,0 +1 @@ +Subproject commit af1ec62385c899a3e3f24407d8417adcdc9eea60 diff --git a/ansible/roles-external/ansible-tinc b/ansible/roles-external/ansible-tinc new file mode 160000 index 000000000..42951a951 --- /dev/null +++ b/ansible/roles-external/ansible-tinc @@ -0,0 +1 @@ +Subproject commit 42951a951f6381e387174178bf3bff228b6a5dc5 diff --git a/ansible/roles-external/cloudalchemy.node-exporter b/ansible/roles-external/cloudalchemy.node-exporter new file mode 160000 index 000000000..8dc13ae07 --- /dev/null +++ b/ansible/roles-external/cloudalchemy.node-exporter @@ -0,0 +1 @@ +Subproject commit 8dc13ae077e3da1a71c268b114cd4fb8103ced80 diff --git a/ansible/roles-external/elasticsearch b/ansible/roles-external/elasticsearch new file mode 160000 index 000000000..389a3ff45 --- /dev/null +++ b/ansible/roles-external/elasticsearch @@ -0,0 +1 @@ +Subproject commit 389a3ff45f8f51de95313ca0354cedcdc92b16f4 diff --git a/ansible/roles-external/hostname 
b/ansible/roles-external/hostname new file mode 160000 index 000000000..da6f329b2 --- /dev/null +++ b/ansible/roles-external/hostname @@ -0,0 +1 @@ +Subproject commit da6f329b2984e84d2248d4251e0c679c53dfbb30 diff --git a/ansible/roles-external/kubespray b/ansible/roles-external/kubespray new file mode 160000 index 000000000..64447e745 --- /dev/null +++ b/ansible/roles-external/kubespray @@ -0,0 +1 @@ +Subproject commit 64447e745e53d3f486356d03ecb195729a302ea1 diff --git a/ansible/roles-external/logrotate b/ansible/roles-external/logrotate new file mode 160000 index 000000000..91d570f68 --- /dev/null +++ b/ansible/roles-external/logrotate @@ -0,0 +1 @@ +Subproject commit 91d570f68c44261d2051a99a2b3c7d736306bf0d diff --git a/ansible/roles-external/sft b/ansible/roles-external/sft new file mode 160000 index 000000000..a11e1d918 --- /dev/null +++ b/ansible/roles-external/sft @@ -0,0 +1 @@ +Subproject commit a11e1d91826ea3d8ffee2e1ba23eb0dfe7c333b5 diff --git a/ansible/roles/etcd-helpers/templates/etcd-health.sh.j2 b/ansible/roles/etcd-helpers/templates/etcd-health.sh.j2 index 5cc87acf2..695c60b89 100755 --- a/ansible/roles/etcd-helpers/templates/etcd-health.sh.j2 +++ b/ansible/roles/etcd-helpers/templates/etcd-health.sh.j2 @@ -2,4 +2,4 @@ HOST={{ ansible_hostname }} -etcdctl --endpoints https://127.0.0.1:2379 --ca-file=/etc/ssl/etcd/ssl/ca.pem --cert-file=/etc/ssl/etcd/ssl/member-$HOST.pem --key-file=/etc/ssl/etcd/ssl/member-$HOST-key.pem --debug cluster-health +etcdctl --endpoints https://127.0.0.1:2379 --cacert=/etc/ssl/etcd/ssl/ca.pem --cert=/etc/ssl/etcd/ssl/member-$HOST.pem --key=/etc/ssl/etcd/ssl/member-$HOST-key.pem endpoint --cluster health diff --git a/ansible/roles/etcd-helpers/templates/etcdctl3.sh.j2 b/ansible/roles/etcd-helpers/templates/etcdctl3.sh.j2 index ba9307520..a2cc1e0e1 100755 --- a/ansible/roles/etcd-helpers/templates/etcdctl3.sh.j2 +++ b/ansible/roles/etcd-helpers/templates/etcdctl3.sh.j2 @@ -7,4 +7,7 @@ export ETCDCTL_CA_FILE=/etc/ssl/etcd/ssl/ca.pem export ETCDCTL_CERT=/etc/ssl/etcd/ssl/member-$HOST.pem export ETCDCTL_KEY=/etc/ssl/etcd/ssl/member-$HOST-key.pem +#to support etcdctl 3.14 +export ETCDCTL_CACERT=/etc/ssl/etcd/ssl/ca.pem + /usr/local/bin/etcdctl "$@" diff --git a/ansible/roles/minio-static-files/defaults/main.yml b/ansible/roles/minio-static-files/defaults/main.yml index 282717145..d549d1653 100644 --- a/ansible/roles/minio-static-files/defaults/main.yml +++ b/ansible/roles/minio-static-files/defaults/main.yml @@ -8,8 +8,8 @@ assetsURL: "https://{{ prefix }}assets.{{ domain }}" deeplink_config_json: "{{ assetsURL }}/public/deeplink.json" -backendURL: "https://{{ prefix }}https.{{ domain }}" -backendWSURL: "https://{{ prefix }}ssl.{{ domain }}" +backendURL: "https://{{ prefix }}nginz-https.{{ domain }}" +backendWSURL: "https://{{ prefix }}nginz-ssl.{{ domain }}" teamsURL: "https://{{ prefix }}teams.{{ domain }}" accountsURL: "https://{{ prefix }}account.{{ domain }}" diff --git a/ansible/roles/minio-static-files/tasks/main.yml b/ansible/roles/minio-static-files/tasks/main.yml index a96b39e98..5bccf9f90 100644 --- a/ansible/roles/minio-static-files/tasks/main.yml +++ b/ansible/roles/minio-static-files/tasks/main.yml @@ -1,3 +1,5 @@ +# FUTUREWORK: https://github.com/zinfra/backend-issues/issues/1763 +# - name: "create deeplink template files" template: src: "{{ item }}.j2" diff --git a/ansible/roles/rabbitmq-cluster/defaults/main.yml b/ansible/roles/rabbitmq-cluster/defaults/main.yml new file mode 100644 index 000000000..9c4dd9a82 --- /dev/null +++ 
b/ansible/roles/rabbitmq-cluster/defaults/main.yml
@@ -0,0 +1,85 @@
+---
+# Whether to update the hosts file; defaults to false.
+# This is useful when you are using an AWS EC2 instance, whose default hostname is too long and not meaningful,
+# like "ip-10-101-50-12.eu-central-1.compute.internal", but you want to use something shorter and meaningful as hostname.
+# In this case you need to set this variable to true in order to update the hosts file, and you need to define a variable named "rabbitmq_hosts",
+# with the following format:
+#
+# rabbitmq_hosts: |
+#   node-1-ip node-1-FQDN
+#   node-2-ip node-2-FQDN
+#
+# example:
+#
+# rabbitmq_hosts: |
+#   10.0.0.10 eu-central-1-mq-master (whatever the command `hostname -f` outputs on this host)
+#   10.0.0.11 eu-central-1-mq-slave-01 (whatever the command `hostname -f` outputs on this host)
+#
+update_hosts: false
+
+rabbitmq_cluster_master: ansnode1
+rabbitmq_hosts: |
+  172.16.0.132 ansnode1
+  172.16.0.133 ansnode2
+  172.16.0.134 ansnode3
+
+# erlang
+# erlang_version: "1:20.2.2"
+# erlang_download_url: "http://packages.erlang-solutions.com/site/esl/esl-erlang/FLAVOUR_1_general/esl-erlang_20.2.2-1~ubuntu~xenial_amd64.deb"
+# erlang_pkg_name: "esl-erlang_20.2.2-1~ubuntu~xenial_amd64.deb"
+
+# cluster
+rabbitmq_create_cluster: yes
+rabbitmq_erlang_cookie: WKRBTTEQRYPTQOPUKSVF
+# https://www.rabbitmq.com/configure.html#define-environment-variables
+# When set to true, this will cause RabbitMQ to use fully qualified names to identify nodes.
+# This may prove useful on EC2.
+# Note that it is not possible to switch between using short and long names without resetting the node.
+rabbitmq_use_longname: 'false'
+
+# log rotate
+rabbitmq_logrotate_period: weekly
+rabbitmq_logrotate_amount: 20
+
+# https://www.rabbitmq.com/install-debian.html
+# The main setting that needs adjustment is the max number of open files, also known as ulimit -n.
+# The default value on many operating systems is too low for a messaging broker (e.g. 1024 on several Linux distributions).
+# We recommend allowing for at least 65536 file descriptors for user rabbitmq in production environments.
+# 4096 should be sufficient for most development workloads.
+rabbitmq_ulimit_open_files: 65536
+
+# default ports
+rabbitmq_tls_port: 5671
+rabbitmq_amqp_port: 5672
+rabbitmq_epmd_port: 4369
+rabbitmq_node_port: 25672
+
+# plugins for the HTTP management API
+rabbitmq_plugins:
+  - rabbitmq_management
+  # - rabbitmq_management_agent
+  # - rabbitmq_shovel
+  # - rabbitmq_shovel_management
+
+# TLS/SSL support
+enable_tls: false
+# If true, only TLS is supported, which means the default AMQP port 5672 is no longer open.
+tls_only: false
+
+tls_verify: "verify_none"
+tls_fail_if_no_peer_cert: false
+
+cacertfile: ""
+cacertfile_dest: "/etc/rabbitmq/cacert.pem"
+
+certfile: ""
+certfile_dest: "/etc/rabbitmq/cert.pem"
+
+keyfile: ""
+keyfile_dest: "/etc/rabbitmq/key.pem"
+
+# By default, queues within a RabbitMQ cluster are located on a single node (the node on which they were first declared).
+# Queues can optionally be mirrored across all nodes, or across exactly N nodes.
+# By setting this variable to true, there will be one queue master and one queue mirror.
+# If the node running the queue master becomes unavailable, the queue mirror will be automatically promoted to master.
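+#
+# When enabled, tasks/enable_ha_queues.yml (below) applies this via:
+#
+#   rabbitmqctl set_policy ha-exactly-two ".*" '{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic"}'
+#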
+backup_queues_in_two_nodes: true diff --git a/ansible/roles/rabbitmq-cluster/handlers/main.yml b/ansible/roles/rabbitmq-cluster/handlers/main.yml new file mode 100644 index 000000000..e7533dfc3 --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/handlers/main.yml @@ -0,0 +1,10 @@ +--- +# Handlers for RabbitMQ +- name: restart rabbitmq-server + service: name=rabbitmq-server state=restarted + +- name: start rabbitmq-server + service: name=rabbitmq-server state=started + +- name: stop rabbitmq-server + service: name=rabbitmq-server state=stopped diff --git a/ansible/roles/rabbitmq-cluster/tasks/cluster.yml b/ansible/roles/rabbitmq-cluster/tasks/cluster.yml new file mode 100644 index 000000000..447ccc2bb --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/cluster.yml @@ -0,0 +1,16 @@ +--- +- name: make sure rabbitmq server is up + service: + name: rabbitmq-server + state: started + +- name: make sure rabbitmq app is up + command: rabbitmqctl start_app + +- name: check if already in cluster + command: rabbitmqctl cluster_status + register: cluster_status + changed_when: false + +- include_tasks: join_cluster.yml + when: cluster_status.stdout.find("rabbit@{{ rabbitmq_cluster_master }}") == -1 and (ansible_fqdn != rabbitmq_cluster_master) diff --git a/ansible/roles/rabbitmq-cluster/tasks/config.yml b/ansible/roles/rabbitmq-cluster/tasks/config.yml new file mode 100644 index 000000000..8abdbeebd --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/config.yml @@ -0,0 +1,34 @@ +--- +- name: rabbitmq default file + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: root + group: root + mode: 0644 + with_items: + - { src: etc/default/rabbitmq-server.j2 , dest: /etc/default/rabbitmq-server } + - { src: etc/rabbitmq/rabbitmq.config.j2, dest: /etc/rabbitmq/rabbitmq.config } + # - { src: etc/rabbitmq/rabbitmq-env.conf.j2, dest: /etc/rabbitmq/rabbitmq-env.conf } + notify: + restart rabbitmq-server + +- name: restart rabbitmq-server + service: + name: rabbitmq-server + state: restarted + +# - name: Enable the plugins is installed +# rabbitmq_plugin: +# names: "{{ item }}" +# prefix: /usr/lib/rabbitmq +# state: enabled +# new_only: yes +# with_items: "{{ rabbitmq_plugins }}" +# notify: +# restart rabbitmq-server + +- name: restart rabbitmq-server + service: + name: rabbitmq-server + state: restarted diff --git a/ansible/roles/rabbitmq-cluster/tasks/configure_dns.yml b/ansible/roles/rabbitmq-cluster/tasks/configure_dns.yml new file mode 100644 index 000000000..41c0ca06e --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/configure_dns.yml @@ -0,0 +1,14 @@ +--- +- name: Update /etc/hosts on rmq-cluster nodes + hosts: rmq-cluster + become: yes # This allows the playbook to run with elevated privileges + + tasks: + - name: Add entries to /etc/hosts + lineinfile: + path: /etc/hosts + regexp: "^{{ hostvars[item].ansible_default_ipv4.address }}\\s+{{ item }}\\s+rabbit@{{ item }}$" + line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }} rabbit@{{ item }}" + insertbefore: EOF + with_items: "{{ groups['rmq-cluster'] }}" + when: item != inventory_hostname diff --git a/ansible/roles/rabbitmq-cluster/tasks/create_users.yml b/ansible/roles/rabbitmq-cluster/tasks/create_users.yml new file mode 100644 index 000000000..ce2222080 --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/create_users.yml @@ -0,0 +1,16 @@ +--- +# rc 70: user already exists +- name: create test user + shell: rabbitmqctl add_user test test + register: res + failed_when: res.rc != 70 and res.rc != 0 + 
changed_when: res.rc != 70 + +- name: list permissions for test user + shell: rabbitmqctl list_permissions + register: list_permissions + changed_when: false + +- name: set permissions on / vhost + shell: rabbitmqctl set_permissions test ".*" ".*" ".*" + when: list_permissions.stdout.find("test") == -1 diff --git a/ansible/roles/rabbitmq-cluster/tasks/enable_ha_queues.yml b/ansible/roles/rabbitmq-cluster/tasks/enable_ha_queues.yml new file mode 100644 index 000000000..517b758da --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/enable_ha_queues.yml @@ -0,0 +1,11 @@ +--- +- name: check if ha-mode is already enabled + shell: rabbitmqctl list_policies + register: list_policies + changed_when: false + +- name: set ha-mode to exactly two nodes for all queues for backup + shell: rabbitmqctl set_policy ha-exactly-two ".*" '{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic"}' + register: res + failed_when: res.rc != 0 + when: list_policies.stdout.find("ha-exactly-two") == -1 diff --git a/ansible/roles/rabbitmq-cluster/tasks/erlang_cookie.yml b/ansible/roles/rabbitmq-cluster/tasks/erlang_cookie.yml new file mode 100644 index 000000000..be5385ef7 --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/erlang_cookie.yml @@ -0,0 +1,23 @@ +--- +# Configure RabbitMQ for cluster +- name: backup old erlang cookie + shell: cp -a /var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie.old + changed_when: false + +- name: updating rabbitmq erlang cookie + template: + src: erlang.cookie.j2 + dest: /var/lib/rabbitmq/.erlang.cookie + owner: rabbitmq + group: rabbitmq + mode: 0400 + notify: + stop rabbitmq-server + +- meta: flush_handlers + +- name: remove old erlang cookie + file: + path: /var/lib/rabbitmq/.erlang.cookie.old + state: absent + changed_when: false diff --git a/ansible/roles/rabbitmq-cluster/tasks/hosts.yml b/ansible/roles/rabbitmq-cluster/tasks/hosts.yml new file mode 100644 index 000000000..94ebffd9a --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/hosts.yml @@ -0,0 +1,6 @@ +- blockinfile: + path: /etc/hosts + block: "{{ rabbitmq_hosts }}" + owner: root + group: root + mode: 0644 diff --git a/ansible/roles/rabbitmq-cluster/tasks/install.yml b/ansible/roles/rabbitmq-cluster/tasks/install.yml new file mode 100644 index 000000000..7ee79ebc3 --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/install.yml @@ -0,0 +1,8 @@ +- name: install rabbitmq-server + apt: + update_cache: yes + force: yes + pkg: "{{ item }}" + state: present + with_items: + - rabbitmq-server diff --git a/ansible/roles/rabbitmq-cluster/tasks/join_cluster.yml b/ansible/roles/rabbitmq-cluster/tasks/join_cluster.yml new file mode 100644 index 000000000..73f6039dc --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/join_cluster.yml @@ -0,0 +1,9 @@ +--- +- name: stop rabbitmq app + command: rabbitmqctl stop_app + +- name: add this node to cluster + command: rabbitmqctl join_cluster rabbit@{{ rabbitmq_cluster_master }} + +- name: start rabbitmq app + command: rabbitmqctl start_app diff --git a/ansible/roles/rabbitmq-cluster/tasks/main.yml b/ansible/roles/rabbitmq-cluster/tasks/main.yml new file mode 100644 index 000000000..a097a80f1 --- /dev/null +++ b/ansible/roles/rabbitmq-cluster/tasks/main.yml @@ -0,0 +1,23 @@ +--- +- include_tasks: hosts.yml + when: update_hosts + +- include_tasks: install.yml + +- include_tasks: erlang_cookie.yml + when: rabbitmq_create_cluster + +- include_tasks: tls.yml + when: enable_tls + +- include_tasks: config.yml + +#- include_tasks: service.yml + +- 
include_tasks: cluster.yml
+  when: rabbitmq_create_cluster == true
+
+- include_tasks: create_users.yml
+
+- include_tasks: enable_ha_queues.yml
+  when: backup_queues_in_two_nodes
diff --git a/ansible/roles/rabbitmq-cluster/tasks/service.yaml b/ansible/roles/rabbitmq-cluster/tasks/service.yaml
new file mode 100644
index 000000000..5fa988d4f
--- /dev/null
+++ b/ansible/roles/rabbitmq-cluster/tasks/service.yaml
@@ -0,0 +1,6 @@
+---
+- name: rabbitmq service started and enabled
+  service:
+    name: rabbitmq-server
+    enabled: yes
+    state: started
diff --git a/ansible/roles/rabbitmq-cluster/tasks/tls.yml b/ansible/roles/rabbitmq-cluster/tasks/tls.yml
new file mode 100644
index 000000000..07713fc46
--- /dev/null
+++ b/ansible/roles/rabbitmq-cluster/tasks/tls.yml
@@ -0,0 +1,13 @@
+---
+# Copy the cacert, cert and key files for TLS/SSL
+- name: make sure TLS/SSL certificates exist
+  template:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+    owner: root
+    group: rabbitmq
+    mode: 0644
+  with_items:
+    - { src: "{{ cacertfile }}", dest: "{{ cacertfile_dest }}" }
+    - { src: "{{ certfile }}", dest: "{{ certfile_dest }}" }
+    - { src: "{{ keyfile }}", dest: "{{ keyfile_dest }}" }
diff --git a/ansible/roles/rabbitmq-cluster/templates/erlang.cookie.j2 b/ansible/roles/rabbitmq-cluster/templates/erlang.cookie.j2
new file mode 100644
index 000000000..27fce6322
--- /dev/null
+++ b/ansible/roles/rabbitmq-cluster/templates/erlang.cookie.j2
@@ -0,0 +1 @@
+{{ rabbitmq_erlang_cookie }}
\ No newline at end of file
diff --git a/ansible/roles/rabbitmq-cluster/templates/etc/default/rabbitmq-server.j2 b/ansible/roles/rabbitmq-cluster/templates/etc/default/rabbitmq-server.j2
new file mode 100644
index 000000000..c29664bc9
--- /dev/null
+++ b/ansible/roles/rabbitmq-cluster/templates/etc/default/rabbitmq-server.j2
@@ -0,0 +1,13 @@
+# This file is sourced by /etc/init.d/rabbitmq-server. Its primary
+# reason for existing is to allow adjustment of system limits for the
+# rabbitmq-server process.
+#
+# Maximum number of open file handles. This will need to be increased
+# to handle many simultaneous connections. Refer to the system
+# documentation for ulimit (in man bash) for more information.
+#
+
+ulimit -n {{ rabbitmq_ulimit_open_files }}
+
+ERL_EPMD_PORT={{ rabbitmq_epmd_port }}
+RABBITMQ_NODE_PORT={{ rabbitmq_node_port }}
\ No newline at end of file
diff --git a/ansible/roles/rabbitmq-cluster/templates/etc/rabbitmq/rabbitmq.config.j2 b/ansible/roles/rabbitmq-cluster/templates/etc/rabbitmq/rabbitmq.config.j2
new file mode 100644
index 000000000..73dbdb785
--- /dev/null
+++ b/ansible/roles/rabbitmq-cluster/templates/etc/rabbitmq/rabbitmq.config.j2
@@ -0,0 +1,16 @@
+[
+  {rabbit, [
+{% if tls_only %}
+    {tcp_listeners, []},
+{% endif %}
+{% if enable_tls %}
+    {ssl_listeners, [{{ rabbitmq_tls_port }}]},
+    {ssl_options, [{cacertfile,"{{ cacertfile_dest }}"},
+                   {certfile,"{{ certfile_dest }}"},
+                   {keyfile,"{{ keyfile_dest }}"},
+                   {verify,{{ tls_verify }}},
+                   {fail_if_no_peer_cert,{{tls_fail_if_no_peer_cert|lower}}}]},
+{% endif %}
+    {loopback_users, []}
+  ]}
+].
\ No newline at end of file
diff --git a/ansible/roles/systemd-coredump/defaults/main.yml b/ansible/roles/systemd-coredump/defaults/main.yml
new file mode 100644
index 000000000..635151403
--- /dev/null
+++ b/ansible/roles/systemd-coredump/defaults/main.yml
@@ -0,0 +1,5 @@
+# Default install_systemd_coredump to False, as Debian makes it unnecessarily
+# hard to ship this in an aptly offline bundle.
+#
+# Environments can explicitly set this to true in their inventory.
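+#
+# For example, in a host's or group's inventory vars:
+#
+#   install_systemd_coredump: true
+#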
+install_systemd_coredump: no diff --git a/ansible/roles/systemd-coredump/tasks/main.yml b/ansible/roles/systemd-coredump/tasks/main.yml new file mode 100644 index 000000000..f78a2cdcd --- /dev/null +++ b/ansible/roles/systemd-coredump/tasks/main.yml @@ -0,0 +1,5 @@ +- name: "Install systemd-coredump" + when: install_systemd_coredump | bool + apt: + name: systemd-coredump + state: latest diff --git a/ansible/seed-offline-containerd.yml b/ansible/seed-offline-containerd.yml new file mode 100644 index 000000000..8cfc7a9b0 --- /dev/null +++ b/ansible/seed-offline-containerd.yml @@ -0,0 +1,32 @@ +- name: Seed system containers + # Add etcd group here if you are deploying separate worker and master clusters + hosts: k8s-cluster + tags: system-containers + tasks: + - name: load containers + shell: | + for container in $(curl -q {{ assethost_host }}/containers-system/index.txt);do + curl -q "{{ assethost_host }}/containers-system/$container" | ctr -n=k8s.io images import - + done + +- name: Download helm containers + hosts: k8s-cluster + tags: containers-helm + tasks: + - name: load helm containers + shell: | + for container in $(curl -q {{ assethost_host }}/containers-helm/index.txt);do + curl -q "{{ assethost_host }}/containers-helm/$container" | ctr -n=k8s.io images import - + done + + +################################### Hack to tag the ingress-nginx container images ############### +#- name: Load ingress-controller containers +# hosts: k8s-cluster +# tags: containers-helm +# tasks: +# - name: load ingress-nginx containers +# shell: | +# sudo ctr -n=k8s.io images tag registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f +# sudo ctr -n=k8s.io images tag registry.k8s.io/ingress-nginx/controller:v1.6.4 registry.k8s.io/ingress-nginx/controller:v1.6.4@sha256:15be4666c53052484dd2992efacf2f50ea77a78ae8aa21ccd91af6baaa7ea22f +#################################################################################################### diff --git a/ansible/seed-offline-docker.yml b/ansible/seed-offline-docker.yml new file mode 100644 index 000000000..7109b059a --- /dev/null +++ b/ansible/seed-offline-docker.yml @@ -0,0 +1,19 @@ +- name: Seed system containers + hosts: k8s-cluster:etcd + tags: system-containers + tasks: + - name: load containers + shell: | + for container in $(curl -q {{ assethost_host }}/containers-system/index.txt);do + curl -q "{{ assethost_host }}/containers-system/$container" | docker load + done + +- name: Download helm containers + hosts: k8s-cluster + tags: containers-helm + tasks: + - name: load containers + shell: | + for container in $(curl -q {{ assethost_host }}/containers-helm/index.txt);do + curl -q "{{ assethost_host }}/containers-helm/$container" | docker load + done diff --git a/ansible/setup-offline-sources.yml b/ansible/setup-offline-sources.yml new file mode 100644 index 000000000..9fafa7844 --- /dev/null +++ b/ansible/setup-offline-sources.yml @@ -0,0 +1,90 @@ +- name: Copy over binaries, debs and container images to the asset host and host them + hosts: assethost + tasks: + - file: + path: /opt/assets + state: directory + - name: Copy debs jammy + unarchive: + src: ../debs-jammy.tar + dest: /opt/assets + tags: + - debs + - name: Copy binaries + unarchive: + src: ../binaries.tar + dest: /opt/assets + tags: + - binaries + - name: Copy system containers + unarchive: + src: ../containers-system.tar + dest: /opt/assets + tags: + - 
containers-system + - containers + - name: Copy helm containers + unarchive: + src: ../containers-helm.tar + dest: /opt/assets + tags: + - containers-helm + - containers + - copy: + src: files/serve-assets.service + dest: /etc/systemd/system/serve-assets.service + - systemd: + name: serve-assets + state: restarted + enabled: yes + daemon-reload: yes + +- name: Set up offline repositories and remove online ones + hosts: k8s-cluster:etcd:cassandra:elasticsearch:minio:rmq-cluster + tasks: + - name: Bail if GPG is not installed or installable. + apt: + name: gpg + state: present + - name: Remove /etc/apt/sources.list to remove all online debian package repos + file: + path: /etc/apt/sources.list + state: absent + - name: Remove /etc/apt/sources.list.d/ to remove all online debian package repos + file: + path: /etc/apt/sources.list.d/ + state: absent + +####################################################################### +# If your offline repo's debian key has expired, uncomment this block. +############# +# If you had to uncomment this block, comment out the docker-ce repo logic in +# that starts at line 56 of +# roles-external/kubespray/roles/container-engine/docker/tasks/main.yml +# . comment out the 'ensure docker-ce repository public key is installed', and +# the 'ensure docker-ce repository is enabled' blocks. +####################################################################### +# - name: trust anything +# copy: +# dest: /etc/apt/apt.conf.d/90-baroque +# content: | +# Acquire::Check-Valid-Until false; +# Acquire::AllowInsecureRepositories true; +# Apt::Get::AllowUnauthenticated true; +# Acquire::AllowDowngradeToInsecureRepositories true; +############################# +# Otherwise, trust the repo. +############################# + - name: Register offline repo key + apt_key: + url: "{{ ubuntu_repo_gpgkey }}" + state: present + + - name: Register offline repo + apt_repository: + repo: "deb {{ ubuntu_repo_base_url }} {{ ansible_distribution_release }} main" + state: present + - name: Apt update + apt: + update_cache: yes + diff --git a/ansible/sync_time.yml b/ansible/sync_time.yml new file mode 100644 index 000000000..e12576888 --- /dev/null +++ b/ansible/sync_time.yml @@ -0,0 +1,27 @@ +--- +- name: Configure NTP on Cassandra nodes + hosts: cassandra + become: true + vars: + authoritative_node: "{{ groups['cassandra'][0] }}" + + tasks: + - name: Install NTP package + apt: + name: ntp + state: present + + - name: Configure NTP servers + lineinfile: + path: /etc/ntp.conf + line: "server {{ hostvars[authoritative_node].ansible_host }} prefer" + state: present + when: inventory_hostname != authoritative_node + + - name: Restart NTP service + service: + name: ntp + state: restarted + + - name: Print current date + command: date diff --git a/ansible/tasks/helm_external.yml b/ansible/tasks/helm_external.yml index 66374fe5a..f3ecc11ea 100644 --- a/ansible/tasks/helm_external.yml +++ b/ansible/tasks/helm_external.yml @@ -3,8 +3,10 @@ file: state: directory path: "{{ playbook_dir }}/../values/{{ external_dir_name }}" + delegate_to: localhost - name: write IPs for helm template: src: templates/helm_external.yaml.j2 dest: "{{ playbook_dir }}/../values/{{ external_dir_name }}/values.yaml" + delegate_to: localhost diff --git a/ansible/templates/elasticsearch.conf.j2 b/ansible/templates/elasticsearch.conf.j2 new file mode 100644 index 000000000..eb08db8d1 --- /dev/null +++ b/ansible/templates/elasticsearch.conf.j2 @@ -0,0 +1,2 @@ +module(load="imfile" PollingInterval="10") +input(type="imfile" 
File="/var/log/elasticsearch/elasticsearch-directory.log" Tag="elasticsearch") \ No newline at end of file diff --git a/ansible/templates/ntp.conf.j2 b/ansible/templates/ntp.conf.j2 new file mode 100644 index 000000000..0285c5054 --- /dev/null +++ b/ansible/templates/ntp.conf.j2 @@ -0,0 +1,9 @@ + +template# NTP Config + +driftfile /var/lib/ntp/drift +restrict default nomodify notrap nopeer noquery +restrict 127.0.0.1 +restrict ::1 + +server {{ ntp_server }} \ No newline at end of file diff --git a/ansible/templates/qradar.conf.j2 b/ansible/templates/qradar.conf.j2 new file mode 100644 index 000000000..a72393271 --- /dev/null +++ b/ansible/templates/qradar.conf.j2 @@ -0,0 +1 @@ +action(type="omfwd" Target="{{ syslog_target_ip }}" Port="514" Protocol="udp") \ No newline at end of file diff --git a/ansible/tinc.yml b/ansible/tinc.yml index 579d68ab7..cae84c42b 100644 --- a/ansible/tinc.yml +++ b/ansible/tinc.yml @@ -22,6 +22,7 @@ - name: Provision tinc server hosts: vpn + environment: "{{ proxy_env | default({}) }}" vars: tinc_build_root: "{{ hostvars['localhost']['build_dir'] }}" # CHANGE THIS if your default network interface is not eth0 diff --git a/bin/autodeploy.sh b/bin/autodeploy.sh new file mode 100755 index 000000000..4f0930c8a --- /dev/null +++ b/bin/autodeploy.sh @@ -0,0 +1,442 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2087 +set -Eeuo pipefail + +msg() { + echo >&2 -e "${1-}" +} + +trap cleanup SIGINT SIGTERM ERR EXIT + +usage() { + cat </dev/null 2>&1 ; then + msg "INFO: DNS A record exists: $SUBDOMAIN.$TARGET_SYSTEM" + else + die "ERROR: DNS A record for $SUBDOMAIN.$TARGET_SYSTEM does not exist. Exiting. Please check DNS record set." + fi +done + +if ssh -q -o StrictHostKeyChecking=no -o ConnectTimeout=5 -p "$SSH_PORT" "$SSH_USER"@webapp."$TARGET_SYSTEM" id | grep -q "$SSH_USER"; then + msg "" + msg "INFO: Successfully logged into $TARGET_SYSTEM as $SSH_USER" +else + die "ERROR: Can't log into $TARGET_SYSTEM via SSH, please check SSH connectivity." +fi + + +if curl --head --silent --fail https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-"$ARTIFACT_HASH".tgz >/dev/null 2>&1 ; then + msg "INFO: Artifact exists https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-$ARTIFACT_HASH.tgz" +else + die "ERROR: No artifact found via https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-$ARTIFACT_HASH.tgz" +fi + +system_cleanup_meta() { + msg "" + msg "INFO: Cleaning up all VMs, docker resources and wire-server-deploy files on $TARGET_SYSTEM." + msg "" + sleep 5 + ssh -p "$SSH_PORT" -o StrictHostKeyChecking=no "$SSH_USER"@webapp."$TARGET_SYSTEM" "bash -s" < /dev/null; then + for VM in $(virsh list --all --name); do virsh destroy "$VM"; virsh undefine "$VM" --remove-all-storage; done + fi + if which docker > /dev/null; then + docker system prune -a -f + fi + rm -f /home/$DEMO_USER/.ssh/known_hosts + rm -rf /home/$DEMO_USER/wire-server-deploy + rm -f /home/$DEMO_USER/wire-server-deploy-static-*.tgz +} + +preprovision_hetzner() { + msg "" + msg "INFO: running local ansible playbook for inital server deployment." + msg "INFO: This will setup up the Hetzner system with basic defaults, download and unpack the wire-server-deploy artifact." 
+ sleep 5 + # on Mac devices C.UTF-8 is not available + if [[ $(uname) == "Darwin" ]]; then + export LC_ALL=en_US.UTF-8 + else + export LC_ALL=C.UTF-8 + fi + ansible-playbook ../ansible/hetzner-single-deploy.yml -e "artifact_hash=$ARTIFACT_HASH" -e "ansible_ssh_common_args='-o ServerAliveInterval=30 -o ServerAliveCountMax=10 -o ControlMaster=auto -o ControlPersist=180m'" -i $SSH_USER@webapp."$TARGET_SYSTEM", --diff +} + +remote_deployment() { + msg() { + echo >&2 -e "${1-}" + } + cd $SCRIPT_DIR &>/dev/null || exit 1 + + bash bin/offline-vm-setup.sh + msg "" + while sudo virsh list --all | grep -Fq running; do + sleep 20 + msg "INFO: VM deployment still in progress ..." + done + sleep 20 + msg "" + msg "INFO: VM deployment done. Starting all VMs:" + msg "" + for VM in $(sudo virsh list --all --name); do sudo virsh start "$VM"; done + sleep 60 + + msg "" + msg "INFO: Setting up offline environment (this will take a while)." + msg "" + # Rather than sourcing wire-server-deploy/bin/offline-env.sh, we invoke + # the relevant commands below, declaring "d" as a function instead of an alias. + ZAUTH_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}') + export ZAUTH_CONTAINER + WSD_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}') + d() { + sudo docker run --network=host -v "${SSH_AUTH_SOCK:-nonexistent}":/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent -v "$HOME"/.ssh:/root/.ssh -v "$PWD":/wire-server-deploy "$WSD_CONTAINER" "$@" + } + export -f d + + bash bin/offline-secrets.sh + + HOST_IP=$(dig @resolver4.opendns.com myip.opendns.com +short) + + cat >ansible/inventory/offline/hosts.ini</dev/null) + if [[ $? -eq 0 && -n "$podCIDR" ]]; then + sed -i "s|RELAY_NETWORKS: \".*\"|RELAY_NETWORKS: \":${podCIDR}\"|" $SMTP_VALUES_FILE + else + echo "Failed to fetch podSubnet. 
Falling back to the default value: $(grep -i RELAY_NETWORKS $SMTP_VALUES_FILE)" + fi + d helm install demo-smtp ./charts/demo-smtp --values $SMTP_VALUES_FILE + + d helm install reaper ./charts/reaper + + cp values/wire-server/prod-values.example.yaml values/wire-server/values.yaml + sed -i "s/example.com/$TARGET_SYSTEM/g" values/wire-server/values.yaml + sed -i "s/# - \"turn::3478\"/- \"turn:$HOST_IP:3478\"/g" values/wire-server/values.yaml + sed -i "s/# - \"turn::3478?transport=tcp\"/- \"turn:$HOST_IP:3478?transport=tcp\"/g" values/wire-server/values.yaml + + d helm install wire-server ./charts/wire-server --timeout=15m0s --values ./values/wire-server/values.yaml --values ./values/wire-server/secrets.yaml + + sed -i "s/example.com/$TARGET_SYSTEM/g" values/webapp/prod-values.example.yaml + d helm install webapp ./charts/webapp --values ./values/webapp/prod-values.example.yaml + + sed -i "s/example.com/$TARGET_SYSTEM/g" values/team-settings/prod-values.example.yaml + d helm install team-settings ./charts/team-settings --values ./values/team-settings/prod-values.example.yaml --values ./values/team-settings/prod-secrets.example.yaml + + sed -i "s/example.com/$TARGET_SYSTEM/g" values/account-pages/prod-values.example.yaml + d helm install account-pages ./charts/account-pages --values ./values/account-pages/prod-values.example.yaml + + cp values/ingress-nginx-controller/prod-values.example.yaml ./values/ingress-nginx-controller/values.yaml + d helm install ingress-nginx-controller ./charts/ingress-nginx-controller --values ./values/ingress-nginx-controller/values.yaml + + KUBENODEIP=$(d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=IP:.status.hostIP --no-headers) + sudo sed -i "s/define KUBENODEIP.*/define KUBENODEIP = $KUBENODEIP/" /etc/nftables.conf + sudo systemctl restart nftables + + INGRESSNODE=$(d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=NODE:.spec.nodeName --no-headers) + d kubectl cordon "$INGRESSNODE" + + wget https://charts.jetstack.io/charts/cert-manager-v1.13.2.tgz + tar -C ./charts -xzf cert-manager-v1.13.2.tgz + + cp ./values/nginx-ingress-services/prod-values.example.yaml ./values/nginx-ingress-services/values.yaml + cp ./values/nginx-ingress-services/prod-secrets.example.yaml ./values/nginx-ingress-services/secrets.yaml + sed -i 's/useCertManager: false/useCertManager: true/g' values/nginx-ingress-services/values.yaml + sed -i 's/certmasterEmail:/certmasterEmail: backend+wiabautodeploy@wire.com/g' values/nginx-ingress-services/values.yaml + sed -i "s/example.com/$TARGET_SYSTEM/" values/nginx-ingress-services/values.yaml + + d kubectl create namespace cert-manager-ns + d helm upgrade --install -n cert-manager-ns --set 'installCRDs=true' cert-manager charts/cert-manager + + d kubectl uncordon "$INGRESSNODE" + + d helm upgrade --install nginx-ingress-services charts/nginx-ingress-services -f values/nginx-ingress-services/values.yaml + + d kubectl get certificate + + cp values/sftd/prod-values.example.yaml values/sftd/values.yaml + sed -i "s/webapp.example.com/webapp.$TARGET_SYSTEM/" values/sftd/values.yaml + sed -i "s/sftd.example.com/sftd.$TARGET_SYSTEM/" values/sftd/values.yaml + sed -i 's/name: letsencrypt-prod/name: letsencrypt-http01/' values/sftd/values.yaml + sed -i "s/replicaCount: 3/replicaCount: 1/" values/sftd/values.yaml + d kubectl label node kubenode1 wire.com/role=sftd + d helm upgrade --install sftd ./charts/sftd --set 'nodeSelector.wire\.com/role=sftd' --set 'node_annotations="{'wire\.com/external-ip':
'"$HOST_IP"'}"' --values values/sftd/values.yaml + + ZREST_SECRET=$(grep -A1 turn values/wire-server/secrets.yaml | grep secret | tr -d '"' | awk '{print $NF}') + + cat >values/coturn/values.yaml<values/coturn/secrets.yaml</dev/null" || echo "false") +EXISTING_VMS=$(ssh -p "$SSH_PORT" -o StrictHostKeyChecking=no "$SSH_USER"@webapp."$TARGET_SYSTEM" "virsh list --all --name" || echo "false") +EXISTING_CONTAINERS=$(ssh -p "$SSH_PORT" -o StrictHostKeyChecking=no "$SSH_USER"@webapp."$TARGET_SYSTEM" "docker ps -q --all" || echo "false") + +if [[ "$EXISTING_INSTALL" != "false" && -n "$EXISTING_INSTALL" ]]; then + msg "" + msg "WARNING: existing wire-server-deploy installation found: $EXISTING_INSTALL" + DO_SYSTEM_CLEANUP=true +fi +if [[ "$EXISTING_VMS" != "false" && -n "$EXISTING_VMS" ]]; then + msg "" + msg "WARNING: existing libvirt VMs found: $EXISTING_VMS" + DO_SYSTEM_CLEANUP=true +fi +if [[ "$EXISTING_CONTAINERS" != "false" && -n "$EXISTING_CONTAINERS" ]]; then + echo "$EXISTING_CONTAINERS" + msg "" + msg "WARNING: existing Docker containers found." + DO_SYSTEM_CLEANUP=true +fi + +if [ "$DO_SYSTEM_CLEANUP" = false ]; then + msg "" + msg "INFO: Target system clean, no previous wire-server-deploy installation found." +fi +if [ "$DO_SYSTEM_CLEANUP" = true ] && [ "$FORCE_REDEPLOY" = 0 ]; then + msg "" + IFS= read -r -p "Do you want to wipe all wire-server-deploy components from $TARGET_SYSTEM? (y/n) " PROMPT_CLEANUP + if [[ $PROMPT_CLEANUP == "n" || $PROMPT_CLEANUP == "N" ]]; then + msg "" + die "Aborting, not cleaning up $TARGET_SYSTEM" + fi + system_cleanup_meta +fi +if [ "$DO_SYSTEM_CLEANUP" = true ] && [ "$FORCE_REDEPLOY" = 1 ]; then + system_cleanup_meta +fi + +msg "INFO: Commencing Wire-in-a-box deployment on $TARGET_SYSTEM." +preprovision_hetzner +ssh -p "$SSH_PORT" -o StrictHostKeyChecking=no -o ServerAliveInterval=30 -o ServerAliveCountMax=10 "$DEMO_USER"@webapp."$TARGET_SYSTEM" "bash -s" < coredns_config.yaml +sed -i coredns_config.yaml -e '/^[ ]*forward.*/{N;N;N;d;}' -e "s/^\([ ]*\)cache/\1forward . 127.0.0.53:9999 {\n\1 max_fails 0\n\1}\n\1cache/" +kubectl apply -f coredns_config.yaml +echo "Printing kubectl get configmap coredns -n kube-system --output yaml after updating" +kubectl get configmap coredns -n kube-system --output yaml +sleep 10 diff --git a/bin/increment_version.sh b/bin/increment_version.sh deleted file mode 100755 index 9ada42bd9..000000000 --- a/bin/increment_version.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -# taken from https://github.com/TeslaGov/shell-semver/blob/master/semver/increment_version.sh (MIT license) - -# Increment a version string using Semantic Versioning (SemVer) terminology. - -# Parse command line options. - -while getopts ":Mmp" Option -do - case $Option in - M ) major=true;; - m ) minor=true;; - p ) patch=true;; - esac -done - -shift $(($OPTIND - 1)) - -begins_with_v=false -original_version=$1 - -# Check if tag begins with 'v' -if [[ $original_version == v* ]] ; -then - begins_with_v=true - version=$(echo -n $original_version | cut -d'v' -f 2) -else - version=$original_version -fi - - -# Build array from version string. - -a=( ${version//./ } ) - -# If version string is missing or has the wrong number of members, show usage message. - -if [ ${#a[@]} -ne 3 ] -then - echo "usage: $(basename $0) [-Mmp] major.minor.patch" - exit 1 -fi - -# Increment version numbers as requested. - -if [ ! -z $major ] -then - ((a[0]++)) - a[1]=0 - a[2]=0 -fi - -if [ ! -z $minor ] -then - ((a[1]++)) - a[2]=0 -fi - -if [ ! 
-z $patch ] -then - ((a[2]++)) -fi - -if [ "$begins_with_v" = true ] ; -then - echo "v${a[0]}.${a[1]}.${a[2]}" -else - echo "${a[0]}.${a[1]}.${a[2]}" -fi - - diff --git a/bin/information-gathering.sh b/bin/information-gathering.sh new file mode 100644 index 000000000..037f3d403 --- /dev/null +++ b/bin/information-gathering.sh @@ -0,0 +1,256 @@ +#!/bin/bash + +# This script is intended for use by on-premise users of the Wire (wire.com) backend at the request of the Wire support team. +# The script gathers information on the installation and system, and packages that information for easy transmission to the wire Support team, +# in order to assist with debugging issues. + +# Hello. +echo "# Begin Wire information gathering" + +# Ensure we are running in sudo mode. +echo "# Ensuring we are in sudo mode" + +# Check if the script is running with sudo +if [ "$EUID" -ne 0 ]; then + # If not, re-run the script with sudo + sudo "$0" "$@" + # Exit the original script, or we'll run it twice. + exit +fi + +# Now we are running as sudo. + +# Installing the required packages. +apt-get update +apt-get install -y sysbench hardinfo inxi virt-what lshw net-tools ubuntu-report + +# Setup +WORK_FOLDER="/tmp/wire-information-gathering/" +FINAL_FILE="/tmp/wire-information-gathering.tar.gz" +URL="http://distro.ibiblio.org/damnsmall/current/current.iso" + +# Clean work folder if we already ran this +rm -rf $WORK_FOLDER + +# Make a folder we will work in +mkdir -p $WORK_FOLDER + +# Gather the OS issue +ISSUE=$(tr -d '\\n\\l' < /etc/issue | head -n 1) + +# Display and save +echo "# 01. Issue is «$ISSUE»" +echo "$ISSUE" > $WORK_FOLDER/01-issue.txt + +# Utility to save files to our work folder +save_file(){ + NUMBER=$1 + NAME=$2 + FILE=$3 + echo "# $NUMBER. Saving $NAME" + echo "# This file contains the contents of the file «$FILE», starting here:" > $WORK_FOLDER/"$NUMBER"-"$NAME".txt + cat "$FILE" >> $WORK_FOLDER/"$NUMBER"-"$NAME".txt 2>/dev/null +} + +# Utility to run a command and save it to our work folder +save_command(){ + NUMBER=$1 + NAME=$2 + shift; shift; + COMMAND="$*" + echo "# $NUMBER. Saving $NAME" + echo "# This file contains the results of the command «$COMMAND», starting here:" > $WORK_FOLDER/"$NUMBER"-"$NAME".txt + $COMMAND >> $WORK_FOLDER/"$NUMBER"-"$NAME".txt 2>&1 +} + +# Save log files +save_file 02 dmesg /var/log/dmesg +save_file 03 kern.log /var/log/kern.log +save_file 04 boot.log /var/log/boot.log +save_file 05 auth.log /var/log/auth.log +save_file 06 dpkg.log /var/log/dpkg.log +save_file 07 faillog /var/log/faillog +save_file 08 syslog /var/log/syslog +save_file 09 ufw.log /var/log/ufw.log + +# Save a list of all installed packages +save_command 10 installed-packages apt list --installed + +# Save host file +save_file 11 etc-hosts /etc/hosts + +# Save the network hostname +save_command 12 network-hostname uname -n + +# Save the uname kernel info +save_command 13 kernel-info uname -a + +# Save hardware information +save_command 14 hardware-info lshw + +# Save CPU information +save_command 15 cpu-info lscpu + +# Save block devices information +save_command 16 block-devices lsblk -a + +# Save USB controller information +save_command 17 usb-controller lsusb -v + +# Save PCI information +save_command 18 pci-info lspci -v + +# Save partition table +save_command 19 partition fdisk -l + +# Save /proc/ information +save_file 20 proc-cmdline /proc/cmdline # Kernel command line information. +save_file 21 proc-console /proc/console # Information about current consoles including tty. 
+save_file 22 proc-devices /proc/devices # Device drivers currently configured for the running kernel. +save_file 23 proc-dma /proc/dma # Info about current DMA channels. +save_file 24 proc-fb /proc/fb # Kernel framebuffer devices. +save_file 25 proc-filesystems /proc/filesystems # Current filesystems supported by the kernel. +save_file 26 proc-iomem /proc/iomem # Current system memory map for devices. +save_file 27 proc-ioports /proc/ioports # Registered port regions for input output communication with device. +save_file 28 proc-loadavg /proc/loadavg # System load average. +save_file 29 proc-locks /proc/locks # Files currently locked by kernel. +save_file 30 proc-meminfo /proc/meminfo # Info about system memory (see above example). +save_file 31 proc-misc /proc/misc # Miscellaneous drivers registered for miscellaneous major device. +save_file 32 proc-modules /proc/modules # Currently loaded kernel modules. +save_file 33 proc-mounts /proc/mounts # List of all mounts in use by system. +save_file 34 proc-partitions /proc/partitions # Detailed info about partitions available to the system. +save_file 35 proc-pci /proc/pci # Information about every PCI device. +save_file 36 proc-stat /proc/stat # Record of various statistics kept since last reboot. +save_file 37 proc-swap /proc/swaps # Information about swap space. +save_file 38 proc-uptime /proc/uptime # Uptime information (in seconds). +save_file 39 proc-version /proc/version # Kernel version, gcc version, and Linux distribution installed. + +# Save mounted filesystems +save_command 40 mount mount + +# Test DNS resolution +save_command 41 dns ping -c 3 google.com + +# Test ping/internet connectivity +save_command 42 ping ping -c 3 8.8.8.8 + +# Check disk space usage +save_command 43 disk-usage df -h + +# Check the current language +save_command 44 current-language set | grep -E '^(LANG|LC_)' + +# Save network information +save_command 45 network-info ifconfig -a + +# Save IP addresses +save_command 46 ip-addresses ip addr + +# Save network interfaces +save_command 47 network-interfaces netplan status + +# Save routing information +save_command 48 routing-info route -n + +# Save all open ports +save_command 49 open-ports netstat -tulpn + +# Save who is logged in +save_command 50 who-is-logged-in who + +# Save list of all running processes +save_command 51 running-processes ps faux + +# Save current user +save_command 52 current-user id + +# Save current date +save_command 53 current-date date + +# Save current UTC date +save_command 54 current-utc-date date --utc + +# Save firewall rules +save_command 55 routing-tables iptables-save + +# Save uptime +save_command 56 uptime uptime + +# Run ubuntu-report and copy the output file +ubuntu-report --non-interactive 2>/dev/null +cp -f ~/.cache/ubuntu-report/* $WORK_FOLDER/57-ubuntu-report.txt + +# Save timezone +save_command 58 timezone timedatectl + +# Save locale +save_command 59 locale locale + +# Save APT sources +save_file 60 apt-sources /etc/apt/sources.list + +# Save APT sources.list.d files +save_command 61 apt-files ls -l /etc/apt/sources.list.d/* + +# Save APT sources.list.d contents +save_command 62 apt-contents cat -n /etc/apt/sources.list.d/* + +# Save crontab +save_file 63 crontab /etc/crontab + +# Save Cron files +save_command 64 cron-files ls -l /etc/cron.d/* + +# Save Cron contents +save_command 65 cron-contents cat -n /etc/cron.d/* + +# CPU Benchmark. +save_command 66 cpu-benchmark sysbench --test=cpu --cpu-max-prime=20000 run + +# File i/o benchmark command 1.
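+# (sysbench's fileio test runs in three phases: "prepare" creates the 2G of test files, "run" executes the random read/write workload against them, and "cleanup" deletes them again; hence the three commands below.)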
+save_command 67 io-benchmark-1 sysbench --test=fileio --file-total-size=2G prepare + +# File i/o benchmark command 2. +save_command 68 io-benchmark-2 sysbench --test=fileio --file-total-size=2G --file-test-mode=rndrw run + +# File i/o benchmark command 3. +save_command 69 io-benchmark-3 sysbench --test=fileio --file-total-size=2G cleanup + +# Memory benchmark. +save_command 70 ram-benchmark sysbench --test=memory run + +# Hardinfo command. +save_command 71 hardinfo hardinfo + +# Inxi basic system information +save_command 72 inxi-basic inxi -F + +# Inxi full system information +save_command 73 inxi-full inxi -Fxz + +# Inxi hardware information +save_command 74 inxi-hardware inxi -xxx + +# Detect if we are running inside a virtual machine. +save_command 75 virt-what virt-what + +# Download a Linux ISO so we can see the network speed. +save_command 76 internet-speed wget --progress=bar:force "$URL" -O "/tmp/test-file.iso" 2>&1 + +# Save the disk space usage (`du`) of the entire disk: +save_command 77 disk-usage du -hc / + +# Log. +echo "# Clean up temporary files" + +# Remove the file. +rm -f /tmp/test-file.iso + +# Log. +echo "# Compressing into a single file" + +# Compress everything into a single file. +tar -czvf $FINAL_FILE $WORK_FOLDER + +# Log. +echo "# Your information package has been saved to « $FINAL_FILE », please send it to the Wire support team." diff --git a/bin/offline-cluster.sh b/bin/offline-cluster.sh new file mode 100755 index 000000000..882224018 --- /dev/null +++ b/bin/offline-cluster.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +set -eou pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ANSIBLE_DIR="$(cd "$SCRIPT_DIR/../ansible" && pwd)" + +set -x + +ls $ANSIBLE_DIR/inventory/offline + +if [ -f "$ANSIBLE_DIR/inventory/offline/hosts.ini" ]; then + INVENTORY_FILE="$ANSIBLE_DIR/inventory/offline/hosts.ini" +elif [ -f "$ANSIBLE_DIR/inventory/offline/inventory.yml" ]; then + INVENTORY_FILE="$ANSIBLE_DIR/inventory/offline/inventory.yml" +else + echo "No inventory file in ansible/inventory/offline/. Please supply an $ANSIBLE_DIR/inventory/offline/inventory.yml or $ANSIBLE_DIR/inventory/offline/hosts.ini" + exit -1 +fi + +if [ -f "$ANSIBLE_DIR/inventory/offline/hosts.ini" ] && [ -f "$ANSIBLE_DIR/inventory/offline/inventory.yml" ]; then + echo "Both hosts.ini and inventory.yml provided in ansible/inventory/offline! Pick only one." + exit -1 +fi + +echo "using ansible inventory: $INVENTORY_FILE" + +# Populate the assethost, and prepare to install images from it. +# +# Copy over binaries and debs, serve assets from the asset host, and configure +# other hosts to fetch debs from it. +# +# If this step fails partway, and you know that parts of it completed, the `--skip-tags debs,binaries,containers,containers-helm,containers-other` tags may come in handy. +ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/setup-offline-sources.yml + +# Run kubespray until docker is installed and runs. This allows us to preseed the docker containers that +# are part of the offline bundle +ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/kubernetes.yml --tags bastion,bootstrap-os,preinstall,container-engine + +# With ctr being installed on all nodes that need it, seed all container images: +ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/seed-offline-containerd.yml + +# Install NTP +ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/sync_time.yml -v + +# Run the rest of kubespray.
This should bootstrap a kubernetes cluster successfully: +ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/kubernetes.yml --skip-tags bootstrap-os,preinstall,container-engine,multus + +# Deploy all other services which don't run in kubernetes. +ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/cassandra.yml +ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/elasticsearch.yml +ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/minio.yml + +# create helm values that tell our helm charts what the IP addresses of cassandra, elasticsearch and minio are: +ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/helm_external.yml --skip-tags=rabbitmq-external diff --git a/bin/offline-deploy.sh b/bin/offline-deploy.sh new file mode 100755 index 000000000..bc38950c7 --- /dev/null +++ b/bin/offline-deploy.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -euo pipefail + + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# HACK: hack to stop ssh from idling the connection. Which it will do if there is no output. And ansible is not verbose enough +(while true; do echo "Still deploying..."; sleep 10; done) & +loop_pid=$! + +trap 'kill "$loop_pid"' EXIT + +ZAUTH_CONTAINER=$(sudo docker load -i $SCRIPT_DIR/../containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}') +export ZAUTH_CONTAINER + +WSD_CONTAINER=$(sudo docker load -i $SCRIPT_DIR/../containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}') + +./bin/offline-secrets.sh + +sudo docker run --network=host -v $SSH_AUTH_SOCK:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent -v $PWD:/wire-server-deploy $WSD_CONTAINER ./bin/offline-cluster.sh +sudo docker run --network=host -v $PWD:/wire-server-deploy $WSD_CONTAINER ./bin/offline-helm.sh diff --git a/bin/offline-env.sh b/bin/offline-env.sh new file mode 100755 index 000000000..670407d4d --- /dev/null +++ b/bin/offline-env.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +ZAUTH_CONTAINER=$(sudo docker load -i $SCRIPT_DIR/../containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}') +export ZAUTH_CONTAINER + +WSD_CONTAINER=$(sudo docker load -i $SCRIPT_DIR/../containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}') + +alias d="sudo docker run -it --network=host -v ${SSH_AUTH_SOCK:-nonexistent}:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent -v $HOME/.ssh:/root/.ssh -v $PWD:/wire-server-deploy $WSD_CONTAINER" diff --git a/bin/offline-helm.sh b/bin/offline-helm.sh new file mode 100755 index 000000000..a7aa4ce15 --- /dev/null +++ b/bin/offline-helm.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +set -euo pipefail +set -x + +helm upgrade --install --wait cassandra-external ./charts/cassandra-external --values ./values/cassandra-external/values.yaml +helm upgrade --install --wait elasticsearch-external ./charts/elasticsearch-external --values ./values/elasticsearch-external/values.yaml +helm upgrade --install --wait minio-external ./charts/minio-external --values ./values/minio-external/values.yaml +helm upgrade --install --wait fake-aws ./charts/fake-aws --values ./values/fake-aws/prod-values.example.yaml + +# ensure that the RELAY_NETWORKS value is set to the podCIDR +SMTP_VALUES_FILE="./values/demo-smtp/prod-values.example.yaml" +podCIDR=$(kubectl get configmap -n kube-system kubeadm-config -o yaml | grep -i 'podSubnet' | awk '{print $2}' 2>/dev/null) + +if [[ $? 
-eq 0 && -n "$podCIDR" ]]; then + sed -i "s|RELAY_NETWORKS: \".*\"|RELAY_NETWORKS: \":${podCIDR}\"|" $SMTP_VALUES_FILE +else + echo "Failed to fetch podSubnet. Attention using the default value: $(grep -i RELAY_NETWORKS $SMTP_VALUES_FILE)" +fi +helm upgrade --install --wait demo-smtp ./charts/demo-smtp --values $SMTP_VALUES_FILE + +helm upgrade --install --wait rabbitmq ./charts/rabbitmq --values ./values/rabbitmq/prod-values.example.yaml --values ./values/rabbitmq/prod-secrets.example.yaml +helm upgrade --install --wait databases-ephemeral ./charts/databases-ephemeral --values ./values/databases-ephemeral/prod-values.example.yaml +helm upgrade --install --wait reaper ./charts/reaper +helm upgrade --install --wait --timeout=30m0s wire-server ./charts/wire-server --values ./values/wire-server/prod-values.example.yaml --values ./values/wire-server/secrets.yaml + +# if charts/webapp directory exists +if [ -d "./charts/webapp" ]; then + helm upgrade --install --wait --timeout=15m0s webapp ./charts/webapp --values ./values/webapp/prod-values.example.yaml +fi + +if [ -d "./charts/account-pages" ]; then + helm upgrade --install --wait --timeout=15m0s account-pages ./charts/account-pages --values ./values/account-pages/prod-values.example.yaml +fi + +if [ -d "./charts/team-settings" ]; then + helm upgrade --install --wait --timeout=15m0s team-settings ./charts/team-settings --values ./values/team-settings/prod-values.example.yaml --values ./values/team-settings/prod-secrets.example.yaml +fi + +helm upgrade --install --wait --timeout=15m0s smallstep-accomp ./charts/smallstep-accomp --values ./values/smallstep-accomp/prod-values.example.yaml +helm upgrade --install --wait --timeout=15m0s ingress-nginx-controller ./charts/ingress-nginx-controller --values ./values/ingress-nginx-controller/hetzner-ci.example.yaml + +echo "Printing all pods status: " +kubectl get pods --all-namespaces -o wide +./bin/debug_logs.sh +# TODO: Requires certs; which we do not have in CI/CD at this point. future work =) (Would need cert-manager in offline package. 
That'd be neat) +# helm upgrade --install --wait nginx-ingress-services ./charts/nginx-ingress-services --values ./values/nginx-ingress-services/values.yaml --values ./values/nginx-ingress-services/secrets.yaml diff --git a/bin/offline-secrets.sh b/bin/offline-secrets.sh new file mode 100755 index 000000000..fb1b926f1 --- /dev/null +++ b/bin/offline-secrets.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash +set -eu + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +ANSIBLE_DIR="$( cd "$SCRIPT_DIR/../ansible" && pwd )" +VALUES_DIR="$(cd "$SCRIPT_DIR/../values" && pwd)" + +ZAUTH_CONTAINER="${ZAUTH_CONTAINER:-quay.io/wire/zauth:latest}" + +zrest="$(tr -dc A-Za-z0-9 $VALUES_DIR/wire-server/secrets.yaml +brig: + secrets: + smtpPassword: dummyPassword + zAuth: + publicKeys: "$zauth_public" + privateKeys: "$zauth_private" + turn: + secret: "$zrest" + awsKeyId: dummykey + awsSecretKey: dummysecret + rabbitmq: + username: wire-server + password: verysecurepassword + # These are only necessary if you wish to support sign up via SMS/calls + # And require accounts at twilio.com / nexmo.com + setTwilio: |- + sid: "dummy" + token: "dummy" + setNexmo: |- + key: "dummy" + secret: "dummy" +cargohold: + secrets: + awsKeyId: "$minio_access_key" + awsSecretKey: "$minio_secret_key" + rabbitmq: + username: wire-server + password: verysecurepassword +cannon: + secrets: + rabbitmq: + username: wire-server + password: verysecurepassword +galley: + secrets: + awsKeyId: dummykey + awsSecretKey: dummysecret +gundeck: + secrets: + awsKeyId: dummykey + awsSecretKey: dummysecret + rabbitmq: + username: wire-server + password: verysecurepassword +nginz: + secrets: + zAuth: + publicKeys: "$zauth_public" + # only necessary in test environments (env="staging"). See charts/nginz/README.md + basicAuth: ":" +team-settings: + secrets: + # NOTE: This setting doesn't have to be changed for offline deploys as the team-settings + # container is pre-seeded + # It is just the empty "{}" json hashmap + configJson: "e30K" +background-worker: + secrets: + rabbitmq: + username: wire-server + password: verysecurepassword +EOF + +fi + +if [[ ! -f $ANSIBLE_DIR/inventory/offline/group_vars/all/secrets.yaml ]]; then + echo "Writing $ANSIBLE_DIR/inventory/offline/group_vars/all/secrets.yaml" + cat << EOT > $ANSIBLE_DIR/inventory/offline/group_vars/all/secrets.yaml +minio_access_key: "$minio_access_key" +minio_secret_key: "$minio_secret_key" +EOT +fi diff --git a/bin/offline-vm-setup.sh b/bin/offline-vm-setup.sh new file mode 100755 index 000000000..50ad78434 --- /dev/null +++ b/bin/offline-vm-setup.sh @@ -0,0 +1,230 @@ +#!/usr/bin/env bash + +set -Eeuo pipefail + +msg() { + echo >&2 -e "${1-}" +} + +if [[ $EUID -eq 0 ]]; then + msg "Please don't run me as root" 1>&2 + exit 1 +fi + +trap cleanup SIGINT SIGTERM ERR EXIT + +usage() { + cat </dev/null +} + +die() { + local msg=$1 + local code=${2-1} # default exit status 1 + msg "$msg" + exit "$code" +} + +parse_params() { + while :; do + case "${1-}" in + -h | --help) usage ;; + -v | --verbose) set -x ;; + --deploy-vm) DEPLOY_SINGLE_VM=1 ;; + -?*) die "Unknown option: $1" ;; + *) break ;; + esac + shift + done + return 0 +} + +parse_params "$@" + +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd -P) +DEPLOY_DIR="$(cd "$SCRIPT_DIR/../" && pwd)" +NOCLOUD_DIR=$DEPLOY_DIR/nocloud + +if [ ! 
-d "$NOCLOUD_DIR" ]; then + mkdir -p "$NOCLOUD_DIR" +fi + +if [[ -n "${DEPLOY_SINGLE_VM-}" ]]; then + VM_NAME=("$2") + VM_IP=("192.168.122.$(shuf -i100-240 -n1)") + VM_VCPU=(4) + VM_RAM=(8192) + VM_DISK=(100) + while grep -Fq "${VM_IP[0]}" /etc/hosts; do + VM_IP=("192.168.122.$(shuf -i100-240 -n1)") + done +else + VM_NAME=(assethost kubenode1 kubenode2 kubenode3 ansnode1 ansnode2 ansnode3) + VM_IP=(192.168.122.10 192.168.122.21 192.168.122.22 192.168.122.23 192.168.122.31 192.168.122.32 192.168.122.33) + VM_VCPU=(2 6 6 6 4 4 4) + VM_RAM=(4096 8192 8192 8192 8192 8192 8192) + VM_DISK=(100 100 100 100 350 350 350) +fi + +if [[ -f "$HOME"/.ssh/authorized_keys && -s "$HOME"/.ssh/authorized_keys ]]; then + SSHKEY_HUMAN=$(head -n 1 ~/.ssh/authorized_keys) +else + read -r -p "No local SSH keys for current user ""$USER"" found; please enter a vaild key now: " SSHKEY_HUMAN +fi + +if [[ -f "$HOME"/.ssh/id_ed25519 ]]; then + SSHKEY_DEMO=$(cat "$HOME"/.ssh/id_ed25519.pub) +else + ssh-keygen -t ed25519 -q -N '' -f "$HOME"/.ssh/id_ed25519 + SSHKEY_DEMO=$(cat "$HOME"/.ssh/id_ed25519.pub) +fi + +msg "" +msg "Including the following SSH Keys for VM deployment:" +msg "" +msg "Existing key from ~/.ssh/authorized_keys: ""$SSHKEY_HUMAN""" +msg "Local keypair key from ~/.ssh/id_ed25519: ""$SSHKEY_DEMO""" +msg "" + +nohup python3 -m http.server 3003 -d "$NOCLOUD_DIR" /dev/null 2>&1 & + +prepare_config() { + VM_DIR=$NOCLOUD_DIR/${VM_NAME[i]} + mkdir -p "$VM_DIR" + touch "$VM_DIR"/{vendor-data,meta-data} + cat >"$VM_DIR/user-data"< "$LOAD_SCRIPT" -chmod +x "$LOAD_SCRIPT" - -function download() { - local NAME=$1 - local VERSION=$2 - docker pull "quay.io/wire/$NAME:$VERSION" - docker save "quay.io/wire/$NAME:$VERSION" > "$FOLDER/$NAME-$VERSION.tar" - echo "load $NAME $VERSION" >> "$LOAD_SCRIPT" -} - -images=( brig galley gundeck cannon proxy spar cargohold nginz nginz_disco galley-schema gundeck-schema brig-schema spar-schema stern ) -for image in "${images[@]}"; do - download "$image" "$BACKEND_VERSION" -done - -download backoffice-frontend "$BACKOFFICE_FRONTEND_VERSION" - -download webapp "$WEBAPP_VERSION" -download account "$ACCOUNT_VERSION" - -# requires authentication! -download team-settings "$TEAM_VERSION" - -set +x - -echo "Done downloading docker images." -echo "You now have a folder $FOLDER" -echo "You can transfer that folder, or optionally create a tar file first that you then transfer, e.g." -echo " tar -czvf $FOLDER.tgz $FOLDER" -echo "" -echo "On the target machine:" -echo "1. (optionally:) uncompress using: tar -xzvf $FOLDER.tgz" -echo "2. cd $FOLDER" -echo "3. 
./$(basename "$LOAD_SCRIPT")" diff --git a/bin/prod-init.sh b/bin/prod-init.sh index 86ea2e26f..0f1a75c78 100755 --- a/bin/prod-init.sh +++ b/bin/prod-init.sh @@ -14,7 +14,7 @@ cp -v $VALUES_DIR/wire-server/{prod-values.example,values}.yaml cp -v $VALUES_DIR/wire-server/{prod-secrets.example,secrets}.yaml cp -v $VALUES_DIR/databases-ephemeral/{prod-values.example,values}.yaml cp -v $VALUES_DIR/fake-aws/{prod-values.example,values}.yaml -cp -v $VALUES_DIR/nginx-ingress-controller/{prod-values.example,values}.yaml +cp -v $VALUES_DIR/ingress-nginx-controller/{prod-values.example,values}.yaml cp -v $VALUES_DIR/nginx-ingress-services/{prod-values.example,values}.yaml cp -v $VALUES_DIR/nginx-ingress-services/{prod-secrets.example,secrets}.yaml cp -v $VALUES_DIR/demo-smtp/{prod-values.example,values}.yaml diff --git a/bin/secrets.sh b/bin/secrets.sh index 219629707..744e65115 100755 --- a/bin/secrets.sh +++ b/bin/secrets.sh @@ -1,27 +1,53 @@ #!/usr/bin/env bash -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -ANSIBLE_DIR="$( cd "$SCRIPT_DIR/../ansible" && pwd )" +set -eu -mkdir -p "$ANSIBLE_DIR/secrets" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +TOPLEVEL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" -zrest="${ANSIBLE_DIR}/secrets/restund_zrest_secret.txt" -zpub="${ANSIBLE_DIR}/secrets/zauth_public.txt" -zpriv="${ANSIBLE_DIR}/secrets/zauth_private.txt" -miniopub="${ANSIBLE_DIR}/secrets/minio_public.txt" -miniopriv="${ANSIBLE_DIR}/secrets/minio_private.txt" +# Generates fresh zauth, TURN/restund, nginx/basic-auth and minio secrets as one-secret-per file. This can be useful in ansible-based deployments. +# Then templates those secrets together in a secrets.yaml file for use in helm deployments. +# USAGE: +# ./bin/secrets.sh [ path-to-new-directory-for-secrets-output | default ./secrets_cache ] + +OUTPUT_DIR="${1:-"$TOPLEVEL_DIR/secrets_cache"}" +ZAUTH_CONTAINER="${ZAUTH_CONTAINER:-quay.io/wire/zauth:latest}" + +mkdir -p "$OUTPUT_DIR" + +zrest="${OUTPUT_DIR}/restund_zrest_secret.txt" +zpub="${OUTPUT_DIR}/zauth_public.txt" +zpriv="${OUTPUT_DIR}/zauth_private.txt" +miniopub="${OUTPUT_DIR}/minio_public.txt" +miniopriv="${OUTPUT_DIR}/minio_private.txt" +NGINZ_BASIC_CONFIG="${OUTPUT_DIR}/nginz_basic_auth_config.txt" +NGINZ_BASIC_PW="${OUTPUT_DIR}/nginz_basic_auth_password.txt" +NGINZ_BASIC_USER="${OUTPUT_DIR}/nginz_basic_auth_user.txt" + +command -v htpasswd >/dev/null 2>&1 || { + echo >&2 "htpasswd is not installed, aborting. Maybe try the httpd-tools or apache-utils packages?" + exit 1 +} +command -v openssl >/dev/null 2>&1 || { + echo >&2 "openssl is not installed, aborting." + exit 1 +} +command -v zauth >/dev/null 2>&1 || command -v docker >/dev/null 2>&1 || { + echo >&2 "zauth is not installed, and docker is also not installed, aborting. See wire-server and compile zauth, or install docker and try using \"docker run --rm quay.io/wire/zauth:latest\" instead." + exit 1 +} if [[ ! -f $miniopub || ! -f $miniopriv ]]; then echo "Generate a secret for minio (must match the cargohold AWS keys wire-server's secrets/values)..." - openssl rand -base64 64 | env LC_CTYPE=C tr -dc a-zA-Z0-9 | head -c 42 > "$miniopriv" - openssl rand -base64 64 | env LC_CTYPE=C tr -dc a-zA-Z0-9 | head -c 20 > "$miniopub" + openssl rand -base64 64 | env LC_CTYPE=C tr -dc a-zA-Z0-9 | head -c 42 >"$miniopriv" + openssl rand -base64 64 | env LC_CTYPE=C tr -dc a-zA-Z0-9 | head -c 20 >"$miniopub" else echo "re-using existing minio secrets" fi if [[ ! 
-f $zrest ]]; then echo "Generate a secret for the restund servers (must match the turn.secret key in brig's config)..." - openssl rand -base64 64 | env LC_CTYPE=C tr -dc a-zA-Z0-9 | head -c 42 > "$zrest" + openssl rand -base64 64 | env LC_CTYPE=C tr -dc a-zA-Z0-9 | head -c 42 >"$zrest" else echo "re-using existing restund secret" fi @@ -29,10 +55,89 @@ fi if [[ ! -f $zpriv || ! -f $zpub ]]; then echo "Generate private and public keys (used both by brig and nginz)..." TMP_KEYS=$(mktemp "/tmp/demo.keys.XXXXXXXXXXX") - zauth -m gen-keypair -i 1 > "$TMP_KEYS" - cat "$TMP_KEYS" | sed -n 's/public: \(.*\)/\1/p' > "$zpub" - cat "$TMP_KEYS" | sed -n 's/secret: \(.*\)/\1/p' > "$zpriv" + zauth -m gen-keypair -i 1 >"$TMP_KEYS" 2>/dev/null || + docker run --rm "$ZAUTH_CONTAINER" -m gen-keypair -i 1 >"$TMP_KEYS" + cat "$TMP_KEYS" | sed -n 's/public: \(.*\)/\1/p' >"$zpub" + cat "$TMP_KEYS" | sed -n 's/secret: \(.*\)/\1/p' >"$zpriv" else echo "re-using existing public/private keys" fi +if [[ ! -f $NGINZ_BASIC_PW || ! -f $NGINZ_BASIC_CONFIG || ! -f $NGINZ_BASIC_USER ]]; then + echo "creating basic auth password for nginz..." + echo basic-auth-user >"$NGINZ_BASIC_USER" + openssl rand -base64 64 | env LC_CTYPE=C tr -dc a-zA-Z0-9 | head -c 42 >"$NGINZ_BASIC_PW" + htpasswd -cb "$NGINZ_BASIC_CONFIG" "$(cat "$NGINZ_BASIC_USER")" "$(cat "$NGINZ_BASIC_PW")" +else + echo "re-using basic auth password for nginz" +fi + +echo "" +echo "1. You can use the generated $OUTPUT_DIR/secrets_ansible.yaml file as part of your ansible group_vars/. Copy this to your inventory." +echo "2. You could use the generated" +echo " $OUTPUT_DIR/secrets.yaml" +echo "as a basis for your helm overrides (copy to location of your choosing then adjust as needed)" + +echo " +# helm_vars/wire-server/secrets.yaml +nginz: + secrets: + # Note: basicAuth on some internal endpoints only active if + # nginz.env == staging, otherwise no effect + basicAuth: $(cat "$NGINZ_BASIC_CONFIG") + zAuth: + publicKeys: $(cat "$zpub") + +cannon: + secrets: + nginz: + zAuth: + publicKeys: $(cat "$zpub") +brig: + secrets: + zAuth: + publicKeys: $(cat "$zpub") + privateKeys: $(cat "$zpriv") + turn: + secret: $(cat "$zrest") + smtpPassword: dummyPassword + # these only need to be changed if using real AWS services + awsKeyId: dummykey + awsSecretKey: dummysecret + # These are only necessary if you wish to support sign up via SMS/calls + # And require accounts at twilio.com / nexmo.com + setTwilio: |- + sid: dummy + token: dummy + setNexmo: |- + key: dummy + secret: dummy +cargohold: + secrets: + awsKeyId: dummykey + awsSecretKey: dummysecret +galley: + secrets: + awsKeyId: dummykey + awsSecretKey: dummysecret +gundeck: + secrets: + awsKeyId: dummykey + awsSecretKey: dummysecret +proxy: + secrets: + proxy_config: |- + secrets { + youtube = ... + googlemaps = ... + soundcloud = ... + giphy = ... + spotify = Basic ... + } +" >"$OUTPUT_DIR/secrets.yaml" + +echo " +restund_zrest_secret: \"$(cat "$zrest")\" +minio_access_key: \"$(cat "$miniopub")\" +minio_secret_key: \"$(cat "$miniopriv")\" +" >"$OUTPUT_DIR/secrets_ansible.yaml" diff --git a/bin/set-version.sh b/bin/set-version.sh deleted file mode 100755 index 858d11fc6..000000000 --- a/bin/set-version.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -USAGE="Write version to chart and subcharts (if any). 
Usage: $0 " -chart=${1:?$USAGE} -version=${2:?$USAGE} - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -CHART_DIR="$( cd "$SCRIPT_DIR/../charts" && pwd )" -tempfile=$(mktemp) - -# (sed usage should be portable for both GNU sed and BSD (Mac OS) sed) - -function update_chart(){ - chart_file=$1 - sed -e "s/version: .*/version: $target_version/g" "$chart_file" > "$tempfile" && mv "$tempfile" "$chart_file" -} - -function write_versions() { - target_version=$1 - - # update chart version - update_chart Chart.yaml - - # update all dependencies, if any - if [ -a requirements.yaml ]; then - sed -e "s/ version: \".*\"/ version: \"$target_version\"/g" requirements.yaml > "$tempfile" && mv "$tempfile" requirements.yaml - deps=( $(helm dependency list | grep -v NAME | awk '{print $1}') ) - for dep in "${deps[@]}"; do - if [ -d "$CHART_DIR/$dep" ] && [ "$chart" != "$dep" ]; then - (cd "$CHART_DIR/$dep" && write_versions "$target_version") - fi - done - fi -} - -cd "$CHART_DIR/$chart" && write_versions "$version" diff --git a/bin/set-wire-server-image-version.sh b/bin/set-wire-server-image-version.sh deleted file mode 100755 index df5bacb38..000000000 --- a/bin/set-wire-server-image-version.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -USAGE="$0 " -target_version=${1?$USAGE} - -charts=(brig cannon galley gundeck spar cargohold proxy cassandra-migrations backoffice elasticsearch-index) - -for chart in "${charts[@]}"; do - sed -i "s/ tag: .*/ tag: $target_version/g" "charts/$chart/values.yaml" -done - -#special case nginz -sed -i "s/ tag: .*/ tag: $target_version/g" "charts/nginz/values.yaml" diff --git a/bin/shellcheck.sh b/bin/shellcheck.sh new file mode 100755 index 000000000..8c4e0d4f6 --- /dev/null +++ b/bin/shellcheck.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +set -eu + +# lint all shell scripts with ShellCheck +# FUTUREWORK: Fix issues of the explicitly (no globbing) excluded files. + +mapfile -t SHELL_FILES_TO_LINT < <( + git ls-files | + grep "\.sh$" | + grep -v "ansible/files/registry/images.sh" | + grep -v "ansible/files/registry/registry-run.sh" | + grep -v "ansible/files/registry/upload_image.sh" | + grep -v "ansible/files/registry/upload_image.sh" | + grep -v "bin/accept-invitation.sh" | + grep -v "bin/bootstrap/init.sh" | + grep -v "bin/demo-setup.sh" | + grep -v "bin/generate-image-list.sh" | + grep -v "bin/offline-cluster.sh" | + grep -v "bin/offline-deploy.sh" | + grep -v "bin/offline-env.sh" | + grep -v "bin/offline-secrets.sh" | + grep -v "bin/prod-init.sh" | + grep -v "bin/prod-setup.sh" | + grep -v "bin/secrets.sh" | + grep -v "bin/test-aws-s3-auth-v4.sh" | + grep -v "examples/team-provisioning-qr-codes/generate-user-pdf.sh" | + grep -v "nix/scripts/create-container-dump.sh" | + grep -v "nix/scripts/list-helm-containers.sh" | + grep -v "offline/cd.sh" +) + +shellcheck -x "${SHELL_FILES_TO_LINT[@]}" diff --git a/bin/sync.sh b/bin/sync.sh deleted file mode 100755 index 0aec03bbc..000000000 --- a/bin/sync.sh +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env bash - -# Synchronize helm charts in git with the hosted version on S3. -# The contents of /charts are thus made available under -# https://s3-eu-west-1.amazonaws.com/public.wire.com/charts -# To use the charts: -# helm repo add wire https://s3-eu-west-1.amazonaws.com/public.wire.com/charts -# helm search wire - -# This script uses the helm s3 plugin, -# for more info see https://github.com/hypnoglow/helm-s3 - -set -eo pipefail - -USAGE="Sync helm charts to S3. 
Usage: $0 to sync all charts or $0 to sync only a single one. --force-push can be used to override S3 artifacts. --reindex can be used to force a complete reindexing in case the index is malformed." - -branch=$(git rev-parse --abbrev-ref HEAD) -if [ $branch == "master" ]; then - PUBLIC_DIR="charts" - REPO_NAME="wire" -elif [ $branch == "develop" ]; then - PUBLIC_DIR="charts-develop" - REPO_NAME="wire-develop" -else - echo "You are not on master or develop. Synchronizing charts on a custom branch will push them to the charts-custom helm repository in order not to interfere with versioning on master/develop." - read -p "Are you sure you want to push to charts-custom? [yN] " -n 1 -r - echo - if [[ ! $REPLY =~ ^[Yy]$ ]] - then - exit 1 - fi - PUBLIC_DIR="charts-custom" - REPO_NAME="wire-custom" -fi - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -TOP_LEVEL_DIR=$SCRIPT_DIR/.. -CHART_DIR=$TOP_LEVEL_DIR/charts -cd "$TOP_LEVEL_DIR" - -chart_dir=$1 - -# If ./sync.sh is run with a parameter, only synchronize one chart -if [ -n "$chart_dir" ] && [ -d "$chart_dir" ]; then - chart_name=$(basename $chart_dir) - echo "only syncing $chart_name" - charts=( "$chart_name" ) -else - charts=( $(find $CHART_DIR/ -maxdepth 1 -type d | sed -n "s=$CHART_DIR/\(.\+\)=\1 =p") ) -fi - -# install s3 plugin -# See https://github.com/hypnoglow/helm-s3/pull/56 for reason to use fork -s3_plugin_version=$(helm plugin list | grep "^s3 " | awk '{print $2}' || true) -if [[ $s3_plugin_version != "0.9.0" ]]; then - echo "not version 0.9.0 from steven-sheehy fork, upgrading or installing plugin..." - helm plugin remove s3 || true - helm plugin install https://github.com/steven-sheehy/helm-s3.git --version v0.9.0 -else - # double check we have the right version of the s3 plugin - plugin_sha=$(cat $HOME/.helm/plugins/helm-s3.git/.git/HEAD) - if [[ $plugin_sha != "f7ab4a8818f11380807da45a6c738faf98106d62" ]]; then - echo "git hash doesn't match forked s3-plugin version (or maybe there is a path issue and your plugins are installed elsewhere? Attempting to re-install..." - helm plugin remove s3 - helm plugin install https://github.com/steven-sheehy/helm-s3.git --version v0.9.0 - fi -fi - -# index/sync charts to S3 -export AWS_REGION=eu-west-1 - -# PUBLIC_DIR is set to 'charts' for master or 'charts-develop' for develop above. -S3_URL="s3://public.wire.com/$PUBLIC_DIR" -PUBLIC_URL="https://s3-eu-west-1.amazonaws.com/public.wire.com/$PUBLIC_DIR" - -# initialize index file only if file doesn't yet exist -if ! aws s3api head-object --bucket public.wire.com --key "$PUBLIC_DIR/index.yaml" &> /dev/null ; then - echo "initializing fresh index.yaml" - helm s3 init "$S3_URL" --publish "$PUBLIC_URL" -fi - -helm repo add "$PUBLIC_DIR" "$S3_URL" -helm repo add "$REPO_NAME" "$PUBLIC_URL" - -rm ./*.tgz &> /dev/null || true # clean any packaged files, if any -for chart in "${charts[@]}"; do - echo "Syncing chart $chart..." - "$SCRIPT_DIR/update.sh" "$CHART_DIR/$chart" - helm package "$CHART_DIR/${chart}" && sync - tgz=$(ls "${chart}"-*.tgz) - echo "syncing ${tgz}..." - # Push the artifact only if it doesn't already exist - if ! aws s3api head-object --bucket public.wire.com --key "$PUBLIC_DIR/${tgz}" &> /dev/null ; then - helm s3 push "$tgz" "$PUBLIC_DIR" - printf "\n--> pushed %s to S3\n\n" "$tgz" - else - if [[ $1 == *--force-push* || $2 == *--force-push* || $3 == *--force-push* ]]; then - helm s3 push "$tgz" "$PUBLIC_DIR" --force - printf "\n--> (!) 
force pushed %s to S3\n\n" "$tgz" - else - printf "\n--> %s not changed or not version bumped; doing nothing.\n\n" "$chart" - fi - fi - rm "$tgz" - -done - -if [[ $1 == *--reindex* || $2 == *--reindex* || $3 == *--reindex* ]]; then - printf "\n--> (!) Reindexing, this can take a few minutes...\n\n" - helm s3 reindex "$PUBLIC_DIR" --publish "$PUBLIC_URL" - # update local cache with newly pushed charts - helm repo update - # see all results - helm search "$REPO_NAME/" -l -else - # update local cache with newly pushed charts - helm repo update - printf "\n--> Not reindexing by default. Pass the --reindex flag in case the index.yaml is incomplete. See all wire charts using \n helm search $REPO_NAME/ -l\n\n" -fi - - -# TODO: improve the above script by exiting with an error if helm charts have changed but a version was not bumped. -# TODO: hash comparison won't work directly: helm package ... results in new md5 hashes each time, even if files don't change. This is due to files being ordered differently in the tar file. See -# * https://github.com/helm/helm/issues/3264 -# * https://github.com/helm/helm/issues/3612 -# cur_hash=($(md5sum ${tgz})) -# echo $cur_hash -# remote_hash=$(aws s3api head-object --bucket public.wire.com --key charts/${tgz} | jq '.ETag' -r| tr -d '"') -# echo $remote_hash -# if [ "$cur_hash" != "$remote_hash" ]; then -# echo "ERROR: Current hash should be the same as the remote hash. Please bump the version of chart {$chart}." -# exit 1 -# fi diff --git a/bin/test-aws-s3-auth-v4.sh b/bin/test-aws-s3-auth-v4.sh new file mode 100755 index 000000000..d736a4594 --- /dev/null +++ b/bin/test-aws-s3-auth-v4.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +############################################# +# AWS S3 Authentication V4 Test Bash Script # +############################################# + +########### +# Purpose: +# to test s3 access from a node. + +######### +# Usage: +# file="" ./aws_s3.sh + +######### +# Options: +# file, bucket, gateway, body, region, and resource can be passed in as environment variables. 
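+# +# Example invocation (placeholder values, not real credentials): +# file="public/deeplink.json" bucket="my-assets-bucket" gateway="s3-eu-central-1.amazonaws.com" region="eu-central-1" ./aws_s3.sh AKIAIOSFODNN7EXAMPLE +# +# How the signing works (AWS Signature V4): the signing key is derived by chaining HMAC-SHA256 over the date, the region, the service name ("s3") and the literal string "aws4_request"; that key then signs the string-to-sign built from the canonical request, as implemented step by step below.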
+ +file="${file=public/deeplink.json}" +bucket="${bucket=exxon-dev-assets-cargohold-vc9ayndi}" +gateway="${gateway=s3-eu-central-1.amazonaws.com}" +body="${body=''}" +region="${region=eu-central-1}" + +resource="${resource=${file}}" + +s3Key="$1" + +echo "enter the S3 secret coresponding to key ${s3Key}:" +read s3Secret + +amzDateValue="`date -u +'%Y%m%dT%H%M%SZ' | tr -d $'\n'`" +amzDateSubValue="`echo $amzDateValue | sed 's/T.*//' | tr -d $'\n'`" + +# values to verify results against: https://czak.pl/2015/09/15/s3-rest-api-with-curl.html +#bucket=my-precious-bucket +#gateway=s3.amazonaws.com +#region="us-east-1" +#resource="" +#s3Secret="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" +#s3Key="AKIAIOSFODNN7EXAMPLE" +#amzDateValue="20150915T124500Z" +#amzDateSubValue="20150915" +#body="" + +bodySHA256="`echo -en $body | sha256sum | sed 's/ .*//' | tr -d $'\n'`" +canonicalRequest="GET +/$resource + +host:${bucket}.${gateway} +x-amz-content-sha256:${bodySHA256} +x-amz-date:${amzDateValue} + +host;x-amz-content-sha256;x-amz-date +${bodySHA256}" + +canonicalRequestSHA256=`echo -en "$canonicalRequest" | openssl dgst -sha256 | sed 's/^.* //'` + +stringToSign="AWS4-HMAC-SHA256 +${amzDateValue} +${amzDateSubValue}/${region}/s3/aws4_request +${canonicalRequestSHA256}" + +dateKey=`echo -en "${amzDateSubValue}" | openssl dgst -sha256 -mac HMAC -macopt "key:AWS4${s3Secret}" | sed 's/^.* //'` +dateRegionKey=`echo -en "${region}" | openssl dgst -sha256 -mac HMAC -macopt "hexkey:${dateKey}" | sed 's/^.* //'` +dateRegionServiceKey=`echo -en "s3" | openssl dgst -sha256 -mac HMAC -macopt "hexkey:${dateRegionKey}" | sed 's/^.* //'` +signingKey=`echo -en "aws4_request" | openssl dgst -sha256 -mac HMAC -macopt "hexkey:${dateRegionServiceKey}" | sed 's/^.* //'` + +signature=`/bin/echo -en "${stringToSign}" | openssl dgst -sha256 -mac HMAC -macopt "hexkey:${signingKey}" | sed 's/^.* //'` + +curl -v https://${bucket}.${gateway}/${resource} \ + -H "Authorization: AWS4-HMAC-SHA256 \ + Credential=${s3Key}/${amzDateSubValue}/${region}/s3/aws4_request, \ + SignedHeaders=host;x-amz-content-sha256;x-amz-date, \ + Signature=${signature}" \ + -H "x-amz-content-sha256: ${bodySHA256}" \ + -H "x-amz-date: ${amzDateValue}" diff --git a/bin/update.sh b/bin/update.sh deleted file mode 100755 index 7758258fd..000000000 --- a/bin/update.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -set -e - -USAGE="download and bundle dependent helm charts: $0 " -dir=${1:?$USAGE} - - -# nothing serves on localhost, remove that repo -helm repo remove local 2&> /dev/null || true - -# hacky workaround for helm's lack of recursive dependency update -# See https://github.com/helm/helm/issues/2247 -helmDepUp () { - local path - path=$1 - cd "$path" - # remove previous bundled versions of helm charts, if any - find . -name "*\.tgz" -delete - if [ -f requirements.yaml ]; then - echo "Updating dependencies in $path ..." - # very hacky bash, I'm sorry - for subpath in $(grep "file://" requirements.yaml | awk '{ print $2 }' | xargs -n 1 | cut -c 8-) - do - ( helmDepUp "$subpath" ) - done - helm dep up - echo "... updating in $path done." - fi -} - -helmDepUp "$dir" diff --git a/charts/account-pages/.helmignore b/charts/account-pages/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/account-pages/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/account-pages/Chart.yaml b/charts/account-pages/Chart.yaml deleted file mode 100644 index b648fcba6..000000000 --- a/charts/account-pages/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart for the Wire account pages in Kubernetes -name: account-pages -version: 0.94.0 diff --git a/charts/account-pages/README.md b/charts/account-pages/README.md deleted file mode 100644 index f2c38b5f6..000000000 --- a/charts/account-pages/README.md +++ /dev/null @@ -1 +0,0 @@ -Basic web application that provides a frontend with functionality for account activation and password reset diff --git a/charts/account-pages/templates/_helpers.tpl b/charts/account-pages/templates/_helpers.tpl deleted file mode 100644 index 2fa76e2f3..000000000 --- a/charts/account-pages/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "account-pages.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "account-pages.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/account-pages/templates/deployment.yaml b/charts/account-pages/templates/deployment.yaml deleted file mode 100644 index f9fd55868..000000000 --- a/charts/account-pages/templates/deployment.yaml +++ /dev/null @@ -1,58 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: account-pages - labels: - wireService: account-pages - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - replicas: {{ .Values.replicaCount }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 0 - maxSurge: {{ .Values.replicaCount | mul 2 }} - selector: - matchLabels: - wireService: account-pages - app: account-pages - template: - metadata: - labels: - wireService: account-pages - app: account-pages - release: {{ .Release.Name }} - spec: - containers: - - name: account-pages - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - env: - - name: BACKEND_REST - value: https://{{ .Values.config.externalUrls.backendRest }} - - name: APP_BASE - value: https://{{ .Values.config.externalUrls.appHost }} - {{- range $key, $val := .Values.envVars }} - - name: {{ $key }} - value: {{ $val | quote }} - {{- end }} - ports: - - name: http - containerPort: {{ .Values.service.http.internalPort }} - readinessProbe: - httpGet: - path: /_health/ - port: {{ .Values.service.http.internalPort }} - scheme: HTTP - livenessProbe: - initialDelaySeconds: 30 - timeoutSeconds: 3 - httpGet: - path: /_health/ - port: {{ .Values.service.http.internalPort }} - scheme: HTTP - resources: -{{ toYaml .Values.resources | indent 12 }} - dnsPolicy: ClusterFirst - restartPolicy: Always diff --git a/charts/account-pages/values.yaml b/charts/account-pages/values.yaml deleted file mode 100644 index b8c9e5abf..000000000 --- a/charts/account-pages/values.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# Default values for the account-pages. 
-replicaCount: 1 -resources: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "512Mi" - cpu: "1" -image: - repository: quay.io/wire/account - tag: 2124-2.0.2-df000b-v0.24.26-production -service: - https: - externalPort: 443 - http: - internalPort: 8080 - -## The following has to be provided to deploy this chart - -#config: -# externalUrls: -# backendRest: nginz-https.example.com -# backendWebsocket: nginz-ssl.example.com -# appHost: account.example.com -# -# Some relevant environment options, have a look at -# https://github.com/wireapp/wire-account/wiki/Self-hosting -# NOTE: Without an empty dictionary, you will see warnings -# when overriding envVars -envVars: {} -# E.g. -# envVars: -# FEATURE_ENABLE_DEBUG: "true" -# You are likely to need at least following CSP headers -# due to the fact that you are likely to do cross sub-domain requests -# i.e., from account.example.com to nginz-https.example.com -# CSP_EXTRA_CONNECT_SRC: "https://*.example.com, wss://*.example.com" -# CSP_EXTRA_IMG_SRC: "https://*.example.com" -# CSP_EXTRA_SCRIPT_SRC: "https://*.example.com" -# CSP_EXTRA_DEFAULT_SRC: "https://*.example.com" -# CSP_EXTRA_FONT_SRC: "https://*.example.com" -# CSP_EXTRA_FRAME_SRC: "https://*.example.com" -# CSP_EXTRA_MANIFEST_SRC: "https://*.example.com" -# CSP_EXTRA_OBJECT_SRC: "https://*.example.com" -# CSP_EXTRA_MEDIA_SRC: "https://*.example.com" -# CSP_EXTRA_PREFETCH_SRC: "https://*.example.com" -# CSP_EXTRA_STYLE_SRC: "https://*.example.com" -# CSP_EXTRA_WORKER_SRC: "https://*.example.com" diff --git a/charts/aws-ingress/.helmignore b/charts/aws-ingress/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/aws-ingress/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/aws-ingress/Chart.yaml b/charts/aws-ingress/Chart.yaml deleted file mode 100644 index 09f313b33..000000000 --- a/charts/aws-ingress/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart for ingresses (AWS specific) on Kubernetes -name: aws-ingress -version: 0.94.0 diff --git a/charts/aws-ingress/templates/ELB_account_pages_https.yaml b/charts/aws-ingress/templates/ELB_account_pages_https.yaml deleted file mode 100644 index 02ef36050..000000000 --- a/charts/aws-ingress/templates/ELB_account_pages_https.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.ingress.accountPages.enabled }} -kind: Service -apiVersion: v1 -metadata: - name: account-pages-elb-https - annotations: - # annotations are documented under https://kubernetes.io/docs/concepts/services-networking/service/ - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "{{ .Values.ingress.accountPages.https.externalPort }}" - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "{{ .Values.ingress.accountPages.https.sslCert }}" - service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "{{ .Values.ingress.accountPages.https.sslPolicy }}" - external-dns.alpha.kubernetes.io/hostname: "{{ .Values.ingress.accountPages.https.hostname }}" - external-dns.alpha.kubernetes.io/ttl: "{{ .Values.ingress.accountPages.https.ttl }}" -spec: - type: LoadBalancer - selector: - wireService: account-pages - ports: - - name: https - protocol: TCP - port: {{ .Values.ingress.accountPages.https.externalPort }} - # NOTE: This value should match team settings http listening port - targetPort: {{ .Values.ingress.accountPages.http.accountPagesPort }} -{{- end }} diff --git a/charts/aws-ingress/templates/ELB_nginz_https.yaml b/charts/aws-ingress/templates/ELB_nginz_https.yaml deleted file mode 100644 index 64a0798fd..000000000 --- a/charts/aws-ingress/templates/ELB_nginz_https.yaml +++ /dev/null @@ -1,22 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: nginz-elb-https - annotations: - # annotations are documented under https://kubernetes.io/docs/concepts/services-networking/service/ - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "{{ .Values.ingress.nginz.https.externalPort }}" - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "{{ .Values.ingress.nginz.https.sslCert }}" - service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "{{ .Values.ingress.nginz.https.sslPolicy }}" - external-dns.alpha.kubernetes.io/hostname: "{{ .Values.ingress.nginz.https.hostname }}" - external-dns.alpha.kubernetes.io/ttl: "{{ .Values.ingress.nginz.https.ttl }}" -spec: - type: LoadBalancer - selector: - wireService: nginz - ports: - - name: https - protocol: TCP - port: {{ .Values.ingress.nginz.https.externalPort }} - # NOTE: This value should match nginz http listening port on the nginz or wire-server server chart - targetPort: {{ .Values.ingress.nginz.http.httpPort }} diff --git a/charts/aws-ingress/templates/ELB_nginz_wss.yaml b/charts/aws-ingress/templates/ELB_nginz_wss.yaml deleted file mode 100644 index 10e6afb52..000000000 --- a/charts/aws-ingress/templates/ELB_nginz_wss.yaml +++ /dev/null @@ -1,23 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - 
name: nginz-elb-wss - annotations: - # annotations are documented under https://kubernetes.io/docs/concepts/services-networking/service/ - service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "{{ .Values.ingress.nginz.wss.externalPort }}" - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp" - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "{{ .Values.ingress.nginz.wss.sslCert }}" - service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "{{ .Values.ingress.nginz.wss.sslPolicy }}" - external-dns.alpha.kubernetes.io/hostname: "{{ .Values.ingress.nginz.wss.hostname }}" - external-dns.alpha.kubernetes.io/ttl: "{{ .Values.ingress.nginz.wss.ttl }}" -spec: - type: LoadBalancer - selector: - wireService: nginz - ports: - - name: wss - protocol: TCP - port: {{ .Values.ingress.nginz.wss.externalPort }} - # NOTE: This value should match nginz ws listening port on the nginz or wire-server server chart - targetPort: {{ .Values.ingress.nginz.ws.wsPort }} diff --git a/charts/aws-ingress/templates/ELB_s3minio_https.yaml b/charts/aws-ingress/templates/ELB_s3minio_https.yaml deleted file mode 100644 index 6b96001f0..000000000 --- a/charts/aws-ingress/templates/ELB_s3minio_https.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- with .Values.ingress.s3minio }} -{{- if .enabled }} -kind: Service -apiVersion: v1 -metadata: - name: s3-elb-https - annotations: - # annotations are documented under https://kubernetes.io/docs/concepts/services-networking/service/ - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "{{ .https.externalPort }}" - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "{{ .https.sslCert }}" - service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "{{ .https.sslPolicy }}" - external-dns.alpha.kubernetes.io/hostname: "{{ .https.hostname }}" - external-dns.alpha.kubernetes.io/ttl: "{{ .https.ttl }}" -spec: - type: LoadBalancer - selector: - {{ .selector.key }}: {{ .selector.value }} - ports: - - name: https - protocol: TCP - port: {{ .https.externalPort }} - # NOTE: This value should match s3 http listening port on the s3 service (minio) - targetPort: {{ .http.s3Port }} -{{- end }} -{{- end }} diff --git a/charts/aws-ingress/templates/ELB_team_settings_https.yaml b/charts/aws-ingress/templates/ELB_team_settings_https.yaml deleted file mode 100644 index 3476bad0f..000000000 --- a/charts/aws-ingress/templates/ELB_team_settings_https.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.ingress.teamSettings.enabled }} -kind: Service -apiVersion: v1 -metadata: - name: team-settings-elb-https - annotations: - # annotations are documented under https://kubernetes.io/docs/concepts/services-networking/service/ - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "{{ .Values.ingress.teamSettings.https.externalPort }}" - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "{{ .Values.ingress.teamSettings.https.sslCert }}" - service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "{{ .Values.ingress.teamSettings.https.sslPolicy }}" - external-dns.alpha.kubernetes.io/hostname: "{{ .Values.ingress.teamSettings.https.hostname }}" - external-dns.alpha.kubernetes.io/ttl: "{{ .Values.ingress.teamSettings.https.ttl }}" -spec: - type: LoadBalancer - selector: - wireService: team-settings - ports: - - name: https - protocol: TCP - 
port: {{ .Values.ingress.teamSettings.https.externalPort }} - # NOTE: This value should match team settings http listening port - targetPort: {{ .Values.ingress.teamSettings.http.teamSettingsPort }} -{{- end }} diff --git a/charts/aws-ingress/templates/ELB_webapp_https.yaml b/charts/aws-ingress/templates/ELB_webapp_https.yaml deleted file mode 100644 index 7924e2a8a..000000000 --- a/charts/aws-ingress/templates/ELB_webapp_https.yaml +++ /dev/null @@ -1,22 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: webapp-elb-https - annotations: - # annotations are documented under https://kubernetes.io/docs/concepts/services-networking/service/ - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "{{ .Values.ingress.webapp.https.externalPort }}" - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "{{ .Values.ingress.webapp.https.sslCert }}" - service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "{{ .Values.ingress.webapp.https.sslPolicy }}" - external-dns.alpha.kubernetes.io/hostname: "{{ .Values.ingress.webapp.https.hostname }}" - external-dns.alpha.kubernetes.io/ttl: "{{ .Values.ingress.webapp.https.ttl }}" -spec: - type: LoadBalancer - selector: - wireService: webapp - ports: - - name: https - protocol: TCP - port: {{ .Values.ingress.webapp.https.externalPort }} - # NOTE: This value should match webapp http listening port - targetPort: {{ .Values.ingress.webapp.http.webappPort }} diff --git a/charts/aws-ingress/templates/_helpers.tpl b/charts/aws-ingress/templates/_helpers.tpl deleted file mode 100644 index f7571afb1..000000000 --- a/charts/aws-ingress/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "aws-ingress.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} -{{- define "aws-ingress.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/aws-ingress/values.yaml b/charts/aws-ingress/values.yaml deleted file mode 100644 index 3373fc6cc..000000000 --- a/charts/aws-ingress/values.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# The following ports must match the ports used in wire-server -# -# The sslCert strings must be replaced by real values, and the -# corresponding certificates uploaded, see -# https://aws.amazon.com/premiumsupport/knowledge-center/import-ssl-certificate-to-iam/ -# - -ingress: - webapp: - https: - externalPort: 443 - sslCert: arn:aws:iam::00000-accountnumber-00000:server-certificate/example.com - sslPolicy: ELBSecurityPolicy-TLS-1-2-2017-01 - hostname: webapp.example.com - ttl: 300 - http: - webappPort: 8080 - nginz: - https: - externalPort: 443 - sslCert: arn:aws:iam::00000-accountnumber-00000:server-certificate/example.com - sslPolicy: ELBSecurityPolicy-TLS-1-2-2017-01 - hostname: nginz-https.example.com - ttl: 300 - http: - httpPort: 8080 - wss: - externalPort: 443 - sslCert: arn:aws:iam::00000-accountnumber-00000:server-certificate/example.com - sslPolicy: ELBSecurityPolicy-TLS-1-2-2017-01 - hostname: nginz-ssl.example.com - ttl: 300 - ws: - wsPort: 8081 - s3minio: - enabled: false # set to true if you wish to use minio on AWS instead of using real S3 - https: - externalPort: 443 - sslCert: arn:aws:iam::00000-accountnumber-00000:server-certificate/example.com - sslPolicy: ELBSecurityPolicy-TLS-1-2-2017-01 - hostname: assets.example.com - ttl: 300 - http: - s3Port: 9000 - selector: - key: app - value: minio # (currently) fake-aws-s3 chart uses 'minio', minio-external chart uses 'minio-external' - teamSettings: - https: - externalPort: 443 - sslCert: arn:aws:iam::00000-accountnumber-00000:server-certificate/example.com - sslPolicy: ELBSecurityPolicy-TLS-1-2-2017-01 - hostname: teams.example.com - ttl: 300 - http: - teamSettingsPort: 8080 - accountPages: - https: - externalPort: 443 - sslCert: arn:aws:iam::00000-accountnumber-00000:server-certificate/example.com - sslPolicy: ELBSecurityPolicy-TLS-1-2-2017-01 - hostname: account.example.com - ttl: 300 - http: - accountPagesPort: 8080 diff --git a/charts/aws-storage/Chart.yaml b/charts/aws-storage/Chart.yaml deleted file mode 100644 index ed6881d90..000000000 --- a/charts/aws-storage/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: AWS storage classes -name: aws-storage -version: 0.94.0 diff --git a/charts/aws-storage/templates/storage_classes.yaml b/charts/aws-storage/templates/storage_classes.yaml deleted file mode 100644 index 5eeca4c1d..000000000 --- a/charts/aws-storage/templates/storage_classes.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: aws-ebs -provisioner: kubernetes.io/aws-ebs -parameters: - type: gp2 - zones: eu-central-1a, eu-central-1b, eu-central-1c ---- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: aws-ebs-retained -provisioner: kubernetes.io/aws-ebs -parameters: - type: gp2 -reclaimPolicy: Retain diff --git a/charts/aws-storage/values.yaml b/charts/aws-storage/values.yaml deleted file mode 100644 index 3a0c58a8e..000000000 --- a/charts/aws-storage/values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Default values for aws-storage. 
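The aws-storage chart above defines two gp2 storage classes; `aws-ebs-retained` sets `reclaimPolicy: Retain`, so the underlying EBS volume survives deletion of its claim. As a minimal sketch of how a workload would select the retaining class (the claim name and size are illustrative, not taken from this repository):

    # create a PersistentVolumeClaim that uses the retaining class defined above
    kubectl apply -f - <<'EOF'
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: example-data                    # hypothetical claim name
    spec:
      storageClassName: aws-ebs-retained    # class from the aws-storage chart; volume survives claim deletion
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi                     # illustrative size
    EOF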
diff --git a/charts/backoffice/Chart.yaml b/charts/backoffice/Chart.yaml deleted file mode 100644 index 5bd7a1fa6..000000000 --- a/charts/backoffice/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Backoffice tool -name: backoffice -version: 0.94.0 diff --git a/charts/backoffice/README.md b/charts/backoffice/README.md deleted file mode 100644 index b91a36112..000000000 --- a/charts/backoffice/README.md +++ /dev/null @@ -1,19 +0,0 @@ -Backoffice frontend -=================== - -This chart provides a basic frontend app that is composed of nginx serving swagger and will soon be documented [here](https://github.com/wireapp/wire-server/blob/develop/tools/backoffice-frontend/README.md). It serves as a tool to perform operations on users and teams, such as visualising their user profiles, suspending or even deleting accounts. It is used internally at Wire to give customer support the means to respond to certain queries from our customers, and it can be used by anyone who decides to deploy it on their cluster(s). - -It is intended to be accessed, at the moment, only by means of port forwarding and is therefore only available to cluster admins (or, more generally, cluster users able to port-forward). - -:warning: **DO NOT expose this chart to the public internet** with an ingress - doing so would give anyone the ability to read part of the user database, delete users, etc. - -Once the chart is installed, and given default values, you can access the frontend in 2 steps: - - * kubectl port-forward svc/backoffice 8080:8080 - * Open your local browser at http://localhost:8080 - -If you don't have direct access to your cluster from your machine, you can do the following (note that the backoffice requires port 8080 to be used, but that port is already used by the API server of kubernetes, so use another port, like 9999, as an intermediate step): - -* in a terminal on a kubernetes-master node: `kubectl port-forward svc/backoffice 9999:8080` -* from another terminal on your machine: `ssh -L 8080:localhost:9999 -N <kubernetes-master-node>` -* Access your local browser on http://localhost:8080 diff --git a/charts/backoffice/templates/NOTES.txt b/charts/backoffice/templates/NOTES.txt deleted file mode 100644 index d8052096f..000000000 --- a/charts/backoffice/templates/NOTES.txt +++ /dev/null @@ -1,6 +0,0 @@ -You have successfully deployed the backoffice tool! Given the default config, you can now access the frontend in 2 steps: - - * kubectl --namespace {{ .Release.Namespace }} port-forward svc/backoffice 8080:8080 - * Open your local browser at http://localhost:8080 - -Warning: **DO NOT expose this chart to the public internet** with an ingress without a means of authentication - doing so would give anyone the ability to read part of the user database, delete users, etc.
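Taken together, the indirect access path from the README above amounts to two commands run in parallel; a minimal sketch, assuming the master node is reachable over SSH as `k8s-master` (the hostname is illustrative):

    # terminal 1, on a kubernetes-master node: forward the backoffice service to an intermediate port
    kubectl port-forward svc/backoffice 9999:8080
    # terminal 2, on your machine: tunnel local port 8080 to the node's port 9999
    ssh -L 8080:localhost:9999 -N k8s-master    # hypothetical hostname
    # then open http://localhost:8080 in your local browser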
diff --git a/charts/backoffice/templates/configmap.yaml b/charts/backoffice/templates/configmap.yaml deleted file mode 100644 index cfff8fbcc..000000000 --- a/charts/backoffice/templates/configmap.yaml +++ /dev/null @@ -1,173 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: backoffice -data: - stern.yaml: | - logNetStrings: True # log using netstrings encoding: - # http://cr.yp.to/proto/netstrings.txt - logLevel: {{ .Values.config.logLevel }} - stern: - host: 0.0.0.0 - port: 8081 - # Cannot listen on the same port as the frontend - brig: - host: brig - port: 8080 - galley: - host: galley - port: 8080 - gundeck: - host: gundeck - port: 8080 - # Both ibis and galeb should be made optional for - # installations where these services are not available - galeb: - host: galeb - port: 8080 - ibis: - host: ibis - port: 8080 - nginx.conf: | - worker_processes 1; - worker_rlimit_nofile 1024; - pid /tmp/nginx.pid; - - events { - worker_connections 1024; - multi_accept off; - } - - http { - # - # Sockets - # - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - - # - # Timeouts - # - - client_body_timeout 60s; - client_header_timeout 60s; - keepalive_timeout 30s; - send_timeout 60s; - - # - # Mapping for websocket connections - # - - map $http_upgrade $connection_upgrade { - websocket upgrade; - default ''; - } - - # - # Body - # - - client_max_body_size 16M; - - # - # Headers - # - - ignore_invalid_headers off; - - server_tokens off; - server_names_hash_bucket_size 64; - server_name_in_redirect off; - types_hash_max_size 2048; - - large_client_header_buffers 4 8k; - - # - # MIME - # - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # - # Logging - # - - access_log /dev/stdout; - error_log stderr; - - # - # Gzip - # - - gzip on; - gzip_disable msie6; - gzip_vary on; - gzip_proxied any; - gzip_comp_level 6; - gzip_buffers 16 8k; - gzip_http_version 1.1; - gzip_min_length 1024; - gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; - - # - # SSL - # - - add_header Strict-Transport-Security max-age=31536000; - - map $scheme $server_https { - default off; - https on; - } - - ssl_session_cache builtin:1000 shared:SSL:10m; - ssl_session_timeout 5m; - ssl_prefer_server_ciphers on; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_ciphers 'EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA384 EECDH+ECDSA+SHA256 EECDH+aRSA+SHA384 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EECDH EDH+aRSA RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS'; - - server { - listen {{ .Values.service.internalPort }}; - - # Backoffice code at /var/www - location / { - # NOTE: nginx's root is defined at compile time! This means that these roots - # depend on the values at the time of compilation for nginx, namely --conf-path - # and --prefix. If you don't use _full_ paths as root, they get resolved depending - # on those prefixes... they really need to fix this! So we just assume that these - # paths can be created on any filesystem... - root /var/www/swagger-ui; - index index.html; - } - - # resources.json is needed by the backoffice app - location /api-docs { - # This assumes the default location for the backoffice!
- root /var/www/swagger-ui; - index resources.json; - } - - # The liveness/readiness checks depend on stern - location /i/status { - proxy_pass http://localhost:8081; - proxy_http_version 1.1; - } - - rewrite ^/api-docs/stern /stern/api-docs?base_url={{ .Values.baseUrl }}/api break; - - # This path is used by swagger to fetch the docs from the service - location /stern { - proxy_pass http://localhost:8081; - proxy_http_version 1.1; - } - - # All other requests get proxied to stern, without the api prefix (which was added in the base_url above) - location ~ ^/api/(.*)$ { - proxy_pass http://localhost:8081/$1$is_args$query_string; - proxy_http_version 1.1; - } - } - } diff --git a/charts/backoffice/templates/deployment.yaml b/charts/backoffice/templates/deployment.yaml deleted file mode 100644 index ce5fe2880..000000000 --- a/charts/backoffice/templates/deployment.yaml +++ /dev/null @@ -1,62 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: backoffice - labels: - wireService: backoffice - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - replicas: {{ .Values.replicaCount }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 0 - maxSurge: {{ .Values.replicaCount }} - selector: - matchLabels: - wireService: backoffice - template: - metadata: - labels: - wireService: backoffice - release: {{ .Release.Name }} - annotations: - # An annotation of the configmap checksum ensures changes to the configmap cause a redeployment upon `helm upgrade` - checksum/configmap: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }} - spec: - volumes: - - name: "backoffice-config" - configMap: - name: "backoffice" - containers: - - name: stern - image: "{{ .Values.images.stern.repository }}:{{ .Values.images.stern.tag }}" - imagePullPolicy: {{ default "" .Values.images.stern.pullPolicy | quote }} - volumeMounts: - - name: "backoffice-config" - mountPath: /etc/wire/stern/conf/stern.yaml - subPath: stern.yaml - - name: backoffice-frontend - image: "{{ .Values.images.frontend.repository }}:{{ .Values.images.frontend.tag }}" - imagePullPolicy: {{ default "" .Values.images.frontend.pullPolicy | quote }} - volumeMounts: - - name: "backoffice-config" - # We don't want to override existing files under /etc/nginx except for nginx.conf - mountPath: "/etc/nginx/nginx.conf" - subPath: nginx.conf - ports: - - containerPort: {{ .Values.service.internalPort }} - livenessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - readinessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - resources: -{{ toYaml .Values.resources | indent 12 }} diff --git a/charts/backoffice/templates/helpers.tpl b/charts/backoffice/templates/helpers.tpl deleted file mode 100644 index 3737976f8..000000000 --- a/charts/backoffice/templates/helpers.tpl +++ /dev/null @@ -1,8 +0,0 @@ -{{/* -override default fullname template to remove the .Release.Name from the definition in -https://github.com/kubernetes/charts/blob/master/stable/redis-ha/templates/_helpers.tpl -*/}} -{{- define "backoffice.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/backoffice/templates/service.yaml b/charts/backoffice/templates/service.yaml deleted file mode 100644 index de343846e..000000000 --- a/charts/backoffice/templates/service.yaml +++ /dev/null
@@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: backoffice - labels: - wireService: backoffice - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - selector: - wireService: backoffice - release: {{ .Release.Name }} diff --git a/charts/backoffice/values.yaml b/charts/backoffice/values.yaml deleted file mode 100644 index 2869e5e4f..000000000 --- a/charts/backoffice/values.yaml +++ /dev/null @@ -1,23 +0,0 @@ -replicaCount: 1 -images: - frontend: - repository: quay.io/wire/backoffice-frontend - tag: 2.78.0 - pullPolicy: IfNotPresent - stern: - repository: quay.io/wire/stern - tag: 2.78.0 - pullPolicy: IfNotPresent -service: - internalPort: 8080 - externalPort: 8080 -resources: - requests: - memory: 128Mi - cpu: 125m - limits: - memory: 512Mi - cpu: 500m -config: - logLevel: Info -baseUrl: http://localhost:8080 \ No newline at end of file diff --git a/charts/brig/.helmignore b/charts/brig/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/brig/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/brig/Chart.yaml b/charts/brig/Chart.yaml deleted file mode 100644 index 455755e78..000000000 --- a/charts/brig/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Brig (part of Wire Server) - User management -name: brig -version: 0.94.0 diff --git a/charts/brig/README.md b/charts/brig/README.md deleted file mode 100644 index 506b0cfee..000000000 --- a/charts/brig/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Note that brig depends on some provisioned storage, namely: - -- cassandra -- elasticsearch-directory - -These are dealt with independently from this chart. diff --git a/charts/brig/templates/conf/_turn-servers-v2.txt.tpl b/charts/brig/templates/conf/_turn-servers-v2.txt.tpl deleted file mode 100644 index 1474ffec3..000000000 --- a/charts/brig/templates/conf/_turn-servers-v2.txt.tpl +++ /dev/null @@ -1,4 +0,0 @@ -{{ define "turn-servers-v2.txt" }} -{{ range .Values.turnStatic.v2 }}{{ . }} -{{ end -}} -{{ end }} diff --git a/charts/brig/templates/conf/_turn-servers.txt.tpl b/charts/brig/templates/conf/_turn-servers.txt.tpl deleted file mode 100644 index c9fcb9404..000000000 --- a/charts/brig/templates/conf/_turn-servers.txt.tpl +++ /dev/null @@ -1,4 +0,0 @@ -{{ define "turn-servers.txt" }} -{{ range .Values.turnStatic.v1 }}{{ . 
}} -{{ end -}} -{{ end }} diff --git a/charts/brig/templates/configmap.yaml b/charts/brig/templates/configmap.yaml deleted file mode 100644 index fc2db48fa..000000000 --- a/charts/brig/templates/configmap.yaml +++ /dev/null @@ -1,188 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "brig" - labels: - wireService: brig - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -data: - {{- with .Values.config }} - brig.yaml: | - logNetStrings: True # log using netstrings encoding: - # http://cr.yp.to/proto/netstrings.txt - logFormat: {{ .logFormat }} - logLevel: {{ .logLevel }} - - brig: - host: 0.0.0.0 - port: 8080 - - cassandra: - endpoint: - host: {{ .cassandra.host }} - port: 9042 - keyspace: brig - - elasticsearch: - url: http://{{ .elasticsearch.host }}:{{ .elasticsearch.port }} - index: {{ .elasticsearch.index }} - - cargohold: - host: cargohold - port: 8080 - - galley: - host: galley - port: 8080 - - gundeck: - host: gundeck - port: 8080 - - {{- with .aws }} - aws: - prekeyTable: {{ .prekeyTable }} - sqsEndpoint: {{ .sqsEndpoint | quote }} - dynamoDBEndpoint: {{ .dynamoDBEndpoint | quote }} - {{- end }} - - internalEvents: - queueType: sqs - queueName: {{ .aws.internalQueue }} - - emailSMS: - email: - {{- if .useSES }} - sesQueue: {{ .aws.sesQueue }} - sesEndpoint: {{ .aws.sesEndpoint | quote }} - {{- else }} - smtpEndpoint: - host: {{ .smtp.host }} - port: {{ .smtp.port }} - smtpConnType: {{ .smtp.connType }} - {{- if .smtp.username }} - smtpCredentials: - username: {{ .smtp.username }} - password: {{ .smtp.passwordFile }} - {{- end }} - {{- end }} - general: - templateDir: /usr/share/wire/templates - emailSender: {{ .emailSMS.general.emailSender }} - smsSender: {{ .emailSMS.general.smsSender }} - templateBranding: - {{- with .emailSMS.general.templateBranding }} - brand: {{ .brand }} - brandUrl: {{ .brandUrl }} - brandLabelUrl: {{ .brandLabelUrl }} - brandLogoUrl: {{ .brandLogoUrl }} - brandService: {{ .brandService }} - copyright: {{ .copyright }} - misuse: {{ .misuse }} - legal: {{ .legal }} - forgot: {{ .forgot }} - support: {{ .support }} - {{- end }} - - user: - {{- if .emailSMS.user }} - activationUrl: {{ .emailSMS.user.activationUrl }} - smsActivationUrl: {{ .emailSMS.user.smsActivationUrl }} - passwordResetUrl: {{ .emailSMS.user.passwordResetUrl }} - invitationUrl: {{ .emailSMS.user.invitationUrl }} - deletionUrl: {{ .emailSMS.user.deletionUrl }} - {{- else }} - activationUrl: {{ .externalUrls.nginz }}/activate?key=${key}&code=${code} - smsActivationUrl: {{ .externalUrls.nginz }}/v/${code} - passwordResetUrl: {{ .externalUrls.nginz }}/password-reset/${key}?code=${code} - invitationUrl: {{ .externalUrls.nginz }}/register?invitation_code=${code} - deletionUrl: {{ .externalUrls.nginz }}/users/delete?key=${key}&code=${code} - {{- end }} - - provider: - {{- if .emailSMS.provider }} - homeUrl: {{ .emailSMS.provider.homeUrl }} - providerActivationUrl: {{ .emailSMS.provider.providerActivationUrl }} - approvalUrl: {{ .emailSMS.provider.approvalUrl }} - approvalTo: {{ .emailSMS.provider.approvalTo }} - providerPwResetUrl: {{ .emailSMS.provider.providerPwResetUrl }} - {{- else }} - homeUrl: https://provider.localhost/ - providerActivationUrl: {{ .externalUrls.nginz }}/provider/activate?key=${key}&code=${code} - approvalUrl: {{ .externalUrls.nginz }}/provider/approve?key=${key}&code=${code} - approvalTo: success@simulator.amazonses.com - providerPwResetUrl: {{ .externalUrls.nginz 
}}/provider/password-reset?key=${key}&code=${code} - {{- end }} - - team: - {{- if .externalUrls.teamSettings }} - tInvitationUrl: {{ .externalUrls.teamSettings }}/join/?team-code=${code} - {{- else }} - tInvitationUrl: {{ .externalUrls.nginz }}/register?team=${team}&team_code=${code} - {{- end }} - tActivationUrl: {{ .externalUrls.nginz }}/register?team=${team}&team_code=${code} - tCreatorWelcomeUrl: {{ .externalUrls.teamCreatorWelcome }} - tMemberWelcomeUrl: {{ .externalUrls.teamMemberWelcome }} - - zauth: - privateKeys: /etc/wire/brig/secrets/secretkey.txt - publicKeys: /etc/wire/brig/secrets/publickey.txt - {{- with .authSettings }} - authSettings: - keyIndex: {{ .keyIndex }} - userTokenTimeout: {{ .userTokenTimeout }} - sessionTokenTimeout: {{ .sessionTokenTimeout }} - accessTokenTimeout: {{ .accessTokenTimeout }} - providerTokenTimeout: {{ .providerTokenTimeout }} - legalHoldUserTokenTimeout: {{ .legalholdUserTokenTimeout }} - legalHoldAccessTokenTimeout: {{ .legalholdAccessTokenTimeout }} - {{- end }} - - turn: - servers: /etc/wire/brig/turn/turn-servers.txt - serversV2: /etc/wire/brig/turn/turn-servers-v2.txt - secret: /etc/wire/brig/secrets/turn-secret.txt - configTTL: 3600 # 1 hour - tokenTTL: 43200 # 12 hours - - {{- with .optSettings }} - optSettings: - setActivationTimeout: {{ .setActivationTimeout }} - setTeamInvitationTimeout: {{ .setTeamInvitationTimeout }} - setTwilio: /etc/wire/brig/secrets/twilio-credentials.yaml - setNexmo: /etc/wire/brig/secrets/nexmo-credentials.yaml - setUserMaxConnections: {{ .setUserMaxConnections }} - setCookieDomain: {{ .setCookieDomain }} - setCookieInsecure: {{ .setCookieInsecure }} - setUserCookieRenewAge: {{ .setUserCookieRenewAge }} - setUserCookieLimit: {{ .setUserCookieLimit }} - setUserCookieThrottle: - stdDev: {{ .setUserCookieThrottle.stdDev }} - retryAfter: {{ .setUserCookieThrottle.retryAfter }} - {{- if .setLimitFailedLogins }} - setLimitFailedLogins: - timeout: {{ .setLimitFailedLogins.timeout }} # seconds. If you reach the limit, how long you have to wait before trying again. - retryLimit: {{ .setLimitFailedLogins.retryLimit }} # how many failed logins you can have in that timeframe.
- {{- end }} - {{- if .setSuspendInactiveUsers }} - setSuspendInactiveUsers: - suspendTimeout: {{ .setSuspendInactiveUsers.suspendTimeout }} - {{- end }} - setRichInfoLimit: {{ .setRichInfoLimit }} - setDefaultLocale: en - setMaxTeamSize: {{ .setMaxTeamSize }} - setMaxConvSize: {{ .setMaxConvSize }} - setEmailVisibility: {{ .setEmailVisibility }} - setPropertyMaxKeyLen: {{ .setPropertyMaxKeyLen }} - setPropertyMaxValueLen: {{ .setPropertyMaxValueLen }} - setDeleteThrottleMillis: {{ .setDeleteThrottleMillis }} - {{- if .setSearchSameTeamOnly }} - setSearchSameTeamOnly: {{ .setSearchSameTeamOnly }} - {{- end }} - {{- if .setUserMaxPermClients }} - setUserMaxPermClients: {{ .setUserMaxPermClients }} - {{- end }} - {{- end }} - {{- end }} diff --git a/charts/brig/templates/deployment.yaml b/charts/brig/templates/deployment.yaml deleted file mode 100644 index c01004b46..000000000 --- a/charts/brig/templates/deployment.yaml +++ /dev/null @@ -1,102 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: brig - labels: - wireService: brig - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - replicas: {{ .Values.replicaCount }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 0 - maxSurge: {{ .Values.replicaCount }} - selector: - matchLabels: - wireService: brig - template: - metadata: - labels: - wireService: brig - release: {{ .Release.Name }} - annotations: - # An annotation of the configmap checksum ensures changes to the configmap cause a redeployment upon `helm upgrade` - checksum/configmap: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/turnconfigmap: {{ include (print .Template.BasePath "/turnconfigmap.yaml") . | sha256sum }} - checksum/secret: {{ include (print .Template.BasePath "/secret.yaml") . | sha256sum }} - fluentbit.io/parser: json - spec: - volumes: - - name: "brig-config" - configMap: - name: "brig" - - name: "turn-servers" - configMap: - name: "turn" - - name: "brig-secrets" - secret: - secretName: "brig" - containers: - - name: brig - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} - volumeMounts: - - name: "brig-secrets" - mountPath: "/etc/wire/brig/secrets" - - name: "brig-config" - mountPath: "/etc/wire/brig/conf" - - name: "turn-servers" - mountPath: "/etc/wire/brig/turn" - env: - - name: LOG_LEVEL - value: {{ .Values.config.logLevel }} - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: brig - key: awsKeyId - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: brig - key: awsSecretKey - # TODO: Is this the best way to do this? 
- - name: AWS_REGION - value: "{{ .Values.config.aws.region }}" - {{- with .Values.config.proxy }} - {{- if .httpProxy }} - - name: http_proxy - value: {{ .httpProxy | quote }} - - name: HTTP_PROXY - value: {{ .httpProxy | quote }} - {{- end }} - {{- if .httpsProxy }} - - name: https_proxy - value: {{ .httpsProxy | quote }} - - name: HTTPS_PROXY - value: {{ .httpsProxy | quote }} - {{- end }} - {{- if .noProxyList }} - - name: no_proxy - value: {{ join "," .noProxyList | quote }} - - name: NO_PROXY - value: {{ join "," .noProxyList | quote }} - {{- end }} - {{- end }} - ports: - - containerPort: {{ .Values.service.internalPort }} - livenessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - readinessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - resources: -{{ toYaml .Values.resources | indent 12 }} diff --git a/charts/brig/templates/secret.yaml b/charts/brig/templates/secret.yaml deleted file mode 100644 index 46003f763..000000000 --- a/charts/brig/templates/secret.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: brig - labels: - wireService: brig - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: Opaque -data: - {{/* for_helm_linting is necessary only since the 'with' block below does not throw an error upon an empty .Values.secrets */}} - for_helm_linting: {{ required "No .secrets found in configuration. Did you forget to helm -f path/to/secrets.yaml ?" .Values.secrets | quote | b64enc | quote }} - - {{- with .Values.secrets }} - secretkey.txt: {{ .zAuth.privateKeys | b64enc | quote }} - publickey.txt: {{ .zAuth.publicKeys | b64enc | quote }} - turn-secret.txt: {{ .turn.secret | b64enc | quote }} - awsKeyId: {{ .awsKeyId | b64enc | quote }} - awsSecretKey: {{ .awsSecretKey | b64enc | quote }} - twilio-credentials.yaml: {{ .setTwilio | b64enc | quote }} - nexmo-credentials.yaml: {{ .setNexmo | b64enc | quote }} - {{- if (not $.Values.config.useSES) }} - smtp-password.txt: {{ .smtpPassword | b64enc | quote }} - {{- end }} - {{- end }} diff --git a/charts/brig/templates/service.yaml b/charts/brig/templates/service.yaml deleted file mode 100644 index 9a12b07ba..000000000 --- a/charts/brig/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: brig - labels: - wireService: brig - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - selector: - wireService: brig - release: {{ .Release.Name }} diff --git a/charts/brig/templates/tests/brig-integration.yaml b/charts/brig/templates/tests/brig-integration.yaml deleted file mode 100644 index 6b0cc63ef..000000000 --- a/charts/brig/templates/tests/brig-integration.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: "brig-integration" - labels: - wireService: brig-integration - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - ports: - - port: 9000 - targetPort: 9000 - selector: - wireService: brig-integration - release: {{ .Release.Name }} ---- -apiVersion: v1 -kind: Pod -metadata: - name: "{{ .Release.Name }}-brig-integration" - 
annotations: - "helm.sh/hook": test-success - labels: - wireService: brig-integration - release: {{ .Release.Name }} -spec: - volumes: - - name: "brig-integration" - configMap: - name: "brig-integration" - # Needed in order to read some values from the brig service - - name: "brig-config" - configMap: - name: "brig" - - name: "brig-secrets" - secret: - secretName: "brig" - - name: "turn-servers" - configMap: - name: "turn" - - name: "brig-integration-secrets" - configMap: - name: "brig-integration-secrets" - containers: - - name: integration - image: "{{ .Values.image.repository }}-integration:{{ .Values.image.tag }}" - # TODO: Add TURN tests once we have an actual way to test it - # The brig-integration tests mutate the turn settings files before tests - # to get certain behaviour. This doesn't work on kubernetes because brig - # is a different pod than brig-integration and they can't both mouht the - # same file-system. - # The other test, "user.auth.cookies.limit", is skipped as it is flaky. - # This is tracked in https://github.com/zinfra/backend-issues/issues/1150. - command: ["brig-integration", "--pattern", "!/turn/ && !/user.auth.cookies.limit/"] - volumeMounts: - - name: "brig-integration" - mountPath: "/etc/wire/integration" - - name: "brig-config" - mountPath: "/etc/wire/brig/conf" - - name: "brig-secrets" - mountPath: "/etc/wire/brig/secrets" - - name: "turn-servers" - mountPath: "/etc/wire/brig/turn" - - name: "brig-integration-secrets" - # TODO: Maybe we should put integration yaml also under - # `/integration/conf` by default? Note that currently - # brig-integration cannot read config files from - # non-default locations - # (see corresp. TODO in galley.) - mountPath: "/etc/wire/integration-secrets" - - env: - # these dummy values are necessary for Amazonka's "Discover" - - name: AWS_ACCESS_KEY_ID - value: "dummy" - - name: AWS_SECRET_ACCESS_KEY - value: "dummy" - - name: AWS_REGION - value: "eu-west-1" - restartPolicy: Never diff --git a/charts/brig/templates/tests/configmap.yaml b/charts/brig/templates/tests/configmap.yaml deleted file mode 100644 index 4c72812f4..000000000 --- a/charts/brig/templates/tests/configmap.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "brig-integration" -data: - integration.yaml: | - brig: - # Full URL is set so that there can be a common cookiedomain between nginz and brig - # needed by some integration tests - host: brig.{{ .Release.Namespace }}.svc.cluster.local - port: {{ .Values.service.internalPort }} - - cannon: - host: cannon - port: 8080 - - galley: - host: galley - port: 8080 - - cargohold: - host: cargohold - port: 8080 - - nginz: - # Full URL is set so that there can be a common cookiedomain between nginz and brig - # needed by some integration tests - host: nginz-integration-http.{{ .Release.Namespace }}.svc.cluster.local - port: 8080 - - provider: - privateKey: /etc/wire/integration-secrets/provider-privatekey.pem - publicKey: /etc/wire/integration-secrets/provider-publickey.pem - cert: /etc/wire/integration-secrets/provider-publiccert.pem - botHost: https://brig-integration - botPort: 9000 diff --git a/charts/brig/templates/tests/nginz-service.yaml b/charts/brig/templates/tests/nginz-service.yaml deleted file mode 100644 index 598ff296d..000000000 --- a/charts/brig/templates/tests/nginz-service.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# this service is needed for brig integration tests and allows brig to talk directly to nginz over http -# (this is not how you should normally configure 
nginz - use an ingress instead) -apiVersion: v1 -kind: Service -metadata: - name: nginz-integration-http -spec: - type: ClusterIP - ports: - - port: 8080 - targetPort: 8080 - selector: - wireService: nginz diff --git a/charts/brig/templates/tests/secret.yaml b/charts/brig/templates/tests/secret.yaml deleted file mode 100644 index bfe877caf..000000000 --- a/charts/brig/templates/tests/secret.yaml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: brig-integration-secrets -data: - # These "secrets" are only used in tests and are therefore safe to be stored unencrypted - provider-privatekey.pem: | - -----BEGIN RSA PRIVATE KEY----- - MIIEpAIBAAKCAQEAu+Kg/PHHU3atXrUbKnw0G06FliXcNt3lMwl2os5twEDcPPFw - /feGiAKymxp+7JqZDrseS5D9THGrW+OQRIPHWvUBdiLfGrZqJO223DB6D8K2Su/o - dmnjZJ2z23rhXoEArTplu+Dg9K+c2LVeXTKVVPOaOzgtAB21XKRiQ4ermqgi3/nj - r03rXyq/qNkuNd6tNcg+HAfGxfGvvCSYBfiSbUKr/BeArYRcjzr/h5m1In6fG/if - 9GEI6m8dxHT9JbY53wiksowy6ajCuqskIFg87X883H+LA/d6X5CTiPv1VMxXdBUi - GPuC9IT/6CNQ1/LFt0P37ax58+LGYlaFo7lanQIDAQABAoIBAQC0doVy7zgpLsBv - Sz0AnbPe1pjxEwRlntRbJSfSULySALqJvs5s4adSVGUBHX3z/LousAP1SRpCppuU - 8wrLBFgjQVlaAzyQB84EEl+lNtrG8Jrvd2es9R/4sJDkqy50+yuPN5wnzWPFIjhg - 3jP5CHDu29y0LMzsY5yjkzDe9B0bueXEZVU+guRjhpwHHKOFeAr9J9bugFUwgeAr - jF0TztzFAb0fsUNPiQAho1J5PyjSVgItaPfAPv/p30ROG+rz+Rd5NSSvBC5F+yOo - azb84zzwCg/knAfIz7SOMRrmBh2qhGZFZ8gXdq65UaYv+cpT/qo28mpAT2vOkyeD - aPZp0ysBAoGBAOQROoDipe/5BTHBcXYuUE1qa4RIj3wgql5I8igXr4K6ppYBmaOg - DL2rrnqD86chv0P4l/XOomKFwYhVGXtqRkeYnk6mQXwNVkgqcGbY5PSNyMg5+ekq - jSOOPHGzzTWKzYuUDUpB/Lf6jbTv8fq2GYW3ZYiqQ/xiugOvglZrTE7NAoGBANLl - irjByfxAWGhzCrDx0x5MBpsetadI9wUA8u1BDdymsRg73FDn3z7NipVUAMDXMGVj - lqbCRlHESO2yP4GaPEA4FM+MbTZSuhAYV+SY07mEPLHF64/nJas83Zp91r5rhaqJ - L9rWCl3KJ5OUnr3YizCnHIW72FxjwtpjxHJLupsRAoGAGIbhy8qUHeKh9F/hW9xP - NoQjW+6Rv7+jktA1eqpRbbW1BJzXcQldVWiJMxPNuEOg1iZ98SlvvTi1P3wnaWZc - eIapP7wRfs3QYaJuxCC/Pq2g0ieqALFazGAXkALOJtvujvw1Ea9XBlIjuzmyxEuh - Iwg+Gxx0g0f6yTquwax4YGECgYEAnpAK3qKFNO1ECzQDo8oNy0ep59MNDPtlDhQK - katJus5xdCD9oq7TQKrVOTTxZAvmzTQ1PqfuqueDVYOhD9Zg2n/P1cRlEGTek99Z - pfvppB/yak6+r3FA9yBKFS/r1zuMQg3nNweav62QV/tz5pT7AdeDMGFtaPlwtTYx - qyWY5aECgYBPySbPccNj+xxQzxcti2y/UXjC04RgOA/Hm1D0exa0vBqS9uxlOdG8 - F47rKenpBrslvdfTVsCDB1xyP2ebWVzp6EqMycw6OLPxgo3fBfZ4pi6P+rByh0Cc - Lhfh+ET0CPnKCxtop3lUrn4ZvqchS0j3J+M0pDuqoWF5hfKxFhkEIw== - -----END RSA PRIVATE KEY----- - provider-publickey.pem: | - -----BEGIN PUBLIC KEY----- - MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu+Kg/PHHU3atXrUbKnw0 - G06FliXcNt3lMwl2os5twEDcPPFw/feGiAKymxp+7JqZDrseS5D9THGrW+OQRIPH - WvUBdiLfGrZqJO223DB6D8K2Su/odmnjZJ2z23rhXoEArTplu+Dg9K+c2LVeXTKV - VPOaOzgtAB21XKRiQ4ermqgi3/njr03rXyq/qNkuNd6tNcg+HAfGxfGvvCSYBfiS - bUKr/BeArYRcjzr/h5m1In6fG/if9GEI6m8dxHT9JbY53wiksowy6ajCuqskIFg8 - 7X883H+LA/d6X5CTiPv1VMxXdBUiGPuC9IT/6CNQ1/LFt0P37ax58+LGYlaFo7la - nQIDAQAB - -----END PUBLIC KEY----- - provider-publiccert.pem: | - -----BEGIN CERTIFICATE----- - MIIDdjCCAl4CCQCm0AiwERR/qjANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJE - RTEPMA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xGDAWBgNVBAoMD1dp - cmUgU3dpc3MgR21iSDERMA8GA1UEAwwId2lyZS5jb20xHzAdBgkqhkiG9w0BCQEW - EGJhY2tlbmRAd2lyZS5jb20wHhcNMTYwODA0MTMxNDQyWhcNMzYwNzMwMTMxNDQy - WjB9MQswCQYDVQQGEwJERTEPMA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJs - aW4xGDAWBgNVBAoMD1dpcmUgU3dpc3MgR21iSDERMA8GA1UEAwwId2lyZS5jb20x - HzAdBgkqhkiG9w0BCQEWEGJhY2tlbmRAd2lyZS5jb20wggEiMA0GCSqGSIb3DQEB - AQUAA4IBDwAwggEKAoIBAQC74qD88cdTdq1etRsqfDQbToWWJdw23eUzCXaizm3A - QNw88XD994aIArKbGn7smpkOux5LkP1Mcatb45BEg8da9QF2It8atmok7bbcMHoP - 
wrZK7+h2aeNknbPbeuFegQCtOmW74OD0r5zYtV5dMpVU85o7OC0AHbVcpGJDh6ua - qCLf+eOvTetfKr+o2S413q01yD4cB8bF8a+8JJgF+JJtQqv8F4CthFyPOv+HmbUi - fp8b+J/0YQjqbx3EdP0ltjnfCKSyjDLpqMK6qyQgWDztfzzcf4sD93pfkJOI+/VU - zFd0FSIY+4L0hP/oI1DX8sW3Q/ftrHnz4sZiVoWjuVqdAgMBAAEwDQYJKoZIhvcN - AQELBQADggEBAEuwlHElIGR56KVC1dJiw238mDGjMfQzSP76Wi4zWS6/zZwJUuog - BkC+vacfju8UAMvL+vdqkjOVUHor84/2wuq0qn91AjOITD7tRAZB+XLXxsikKv/v - OXE3A/lCiNi882NegPyXAfFPp/71CIiTQZps1eQkAvhD5t5WiFYPESxDlvEJrHFY - XP4+pp8fL8YPS7iZNIq+z+P8yVIw+B/Hs0ht7wFIYN0xACbU8m9+Rs08JMoT16c+ - hZMuK3BWD3fzkQVfW0yMwz6fWRXB483ZmekGkgndOTDoJQMdJXZxHpI3t2FcxQYj - T45GXxRd18neXtuYa/OoAw9UQFDN5XfXN0g= - -----END CERTIFICATE----- diff --git a/charts/brig/templates/turnconfigmap.yaml b/charts/brig/templates/turnconfigmap.yaml deleted file mode 100644 index c9972fcd8..000000000 --- a/charts/brig/templates/turnconfigmap.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "turn" - labels: - wireService: brig - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -data: - turn-servers.txt: |2 -{{- include "turn-servers.txt" . | indent 4 }} - turn-servers-v2.txt: |2 -{{- include "turn-servers-v2.txt" . | indent 4 }} diff --git a/charts/brig/values.yaml b/charts/brig/values.yaml deleted file mode 100644 index 4fdc16b62..000000000 --- a/charts/brig/values.yaml +++ /dev/null @@ -1,82 +0,0 @@ -replicaCount: 3 -image: - repository: quay.io/wire/brig - tag: 2.78.0 -service: - externalPort: 8080 - internalPort: 8080 -resources: - requests: - memory: "256Mi" - cpu: "100m" - limits: - memory: "512Mi" - cpu: "500m" -config: - logLevel: Info - logFormat: JSON - cassandra: - host: aws-cassandra - elasticsearch: - host: elasticsearch-client - port: 9200 - index: directory - aws: - region: "eu-west-1" - sesEndpoint: https://email.eu-west-1.amazonaws.com - sqsEndpoint: https://sqs.eu-west-1.amazonaws.com - dynamoDBEndpoint: https://dynamodb.eu-west-1.amazonaws.com - useSES: true - emailSMS: - general: - templateBranding: - brand: Wire - brandUrl: https://wire.com - brandLabel: wire.com - brandLabelUrl: https://wire.com - brandLogoUrl: https://wire.com/p/img/email/logo-email-black.png - brandService: Wire Service Provider - copyright: © WIRE SWISS GmbH - misuse: misuse@wire.com - legal: https://wire.com/legal/ - forgot: https://wire.com/forgot/ - support: https://support.wire.com/ - authSettings: - keyIndex: 1 - userTokenTimeout: 4838400 - sessionTokenTimeout: 86400 - accessTokenTimeout: 900 - providerTokenTimeout: 900 - legalholdUserTokenTimeout: 4838400 - legalholdAccessTokenTimeout: 900 - optSettings: - setActivationTimeout: 1209600 - setTeamInvitationTimeout: 1814400 - setUserMaxConnections: 1000 - setCookieInsecure: false - setUserCookieRenewAge: 1209600 - setUserCookieLimit: 32 - setUserCookieThrottle: - stdDev: 3000 - retryAfter: 86400 - setRichInfoLimit: 5000 - setDefaultLocale: en - setMaxTeamSize: 500 - setMaxConvSize: 500 - setEmailVisibility: visible_to_self - setPropertyMaxKeyLen: 1024 - setPropertyMaxValueLen: 524288 - setDeleteThrottleMillis: 100 - # Allow search within same team only. Default: false - # setSearchSameTeamOnly: false|true - # Set max number of user clients. 
Default: 7 - # setUserMaxPermClients: - smtp: - passwordFile: /etc/wire/brig/secrets/smtp-password.txt - proxy: {} -turnStatic: - v1: - - turn:localhost:3478 - v2: - - turn:localhost:3478 - - turn:localhost:3478?transport=tcp diff --git a/charts/calling-test/.helmignore b/charts/calling-test/.helmignore deleted file mode 100644 index 0e8a0eb36..000000000 --- a/charts/calling-test/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/charts/calling-test/Chart.yaml b/charts/calling-test/Chart.yaml deleted file mode 100644 index 241f86f4f..000000000 --- a/charts/calling-test/Chart.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v2 -name: calling-test -description: Network testing tool for audio/video/signalling. See https://github.com/wireapp/avs-nwtesttool for more details. - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -version: 0.94.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. -appVersion: 1.0.14 diff --git a/charts/calling-test/templates/NOTES.txt b/charts/calling-test/templates/NOTES.txt deleted file mode 100644 index 06a208ae4..000000000 --- a/charts/calling-test/templates/NOTES.txt +++ /dev/null @@ -1,5 +0,0 @@ -1. Get the application URL by running these commands: - - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "calling-test.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 diff --git a/charts/calling-test/templates/_helpers.tpl b/charts/calling-test/templates/_helpers.tpl deleted file mode 100644 index d6f2ba6b4..000000000 --- a/charts/calling-test/templates/_helpers.tpl +++ /dev/null @@ -1,52 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "calling-test.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "calling-test.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "calling-test.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "calling-test.labels" -}} -helm.sh/chart: {{ include "calling-test.chart" . }} -{{ include "calling-test.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Selector labels -*/}} -{{- define "calling-test.selectorLabels" -}} -app.kubernetes.io/name: {{ include "calling-test.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} diff --git a/charts/calling-test/templates/deployment.yaml b/charts/calling-test/templates/deployment.yaml deleted file mode 100644 index a8acfa18b..000000000 --- a/charts/calling-test/templates/deployment.yaml +++ /dev/null @@ -1,39 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "calling-test.fullname" . }} - labels: - {{- include "calling-test.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "calling-test.selectorLabels" . | nindent 6 }} - template: - metadata: - labels: - {{- include "calling-test.selectorLabels" . | nindent 8 }} - spec: - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - {{- range $key, $val := .Values.envVars }} - - name: {{ $key }} - value: {{ $val | quote }} - {{- end }} - ports: - - name: http - containerPort: 8080 - protocol: TCP - livenessProbe: - httpGet: - path: / - port: http - readinessProbe: - httpGet: - path: / - port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} diff --git a/charts/calling-test/templates/service.yaml b/charts/calling-test/templates/service.yaml deleted file mode 100644 index f414c9694..000000000 --- a/charts/calling-test/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "calling-test.fullname" . }} - labels: - {{- include "calling-test.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "calling-test.selectorLabels" . | nindent 4 }} diff --git a/charts/calling-test/values.yaml b/charts/calling-test/values.yaml deleted file mode 100644 index 14b693ef1..000000000 --- a/charts/calling-test/values.yaml +++ /dev/null @@ -1,27 +0,0 @@ -replicaCount: 1 -image: - # note: the docker image tag is configured as 'appVersion' in Chart.yaml - repository: quay.io/wire/avs-nwtesttool - pullPolicy: IfNotPresent - -envVars: - # note: this should be overridden in every deployment - BACKEND_HTTPS_URL: https://nginz-https.example.com - -# These name overrides are used also for routing. 
-# Wire-server's nginz subchart will route /calling-test to this chart -# If you change this name, that functionality will break. -nameOverride: "calling-test" -fullnameOverride: "calling-test" - -service: - type: ClusterIP - port: 8080 - -resources: - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi diff --git a/charts/cannon/.helmignore b/charts/cannon/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/cannon/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/cannon/Chart.yaml b/charts/cannon/Chart.yaml deleted file mode 100644 index c91c6fbab..000000000 --- a/charts/cannon/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart for cannon in Kubernetes -name: cannon -version: 0.94.0 diff --git a/charts/cannon/templates/_helpers.tpl b/charts/cannon/templates/_helpers.tpl deleted file mode 100644 index df276d9cf..000000000 --- a/charts/cannon/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "cannon.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "cannon.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/cannon/templates/configmap.yaml b/charts/cannon/templates/configmap.yaml deleted file mode 100644 index a7057e26b..000000000 --- a/charts/cannon/templates/configmap.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -data: - cannon.yaml: | - logNetStrings: True # log using netstrings encoding: - # http://cr.yp.to/proto/netstrings.txt - logLevel: {{ .Values.config.logLevel }} - - cannon: - host: 0.0.0.0 - port: {{ .Values.service.externalPort }} - externalHostFile: /etc/wire/cannon/externalHost/host.txt - - gundeck: - host: gundeck - port: 8080 -kind: ConfigMap -metadata: - name: cannon diff --git a/charts/cannon/templates/headless-service.yaml b/charts/cannon/templates/headless-service.yaml deleted file mode 100644 index e8b0e2b36..000000000 --- a/charts/cannon/templates/headless-service.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Note, this is a Headless service https://kubernetes.io/docs/concepts/services-networking/service/#headless-services -# We use it this way so we can handle routing requests to specific cannons directly rather than distributing requests -# between pods. 
-# -# Read more about this technique in the StatefulSet guide: -# https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/ -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.service.name }} - labels: - wireService: cannon - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - # This is what makes it a Headless Service - clusterIP: None - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - protocol: TCP - selector: - wireService: cannon - release: {{ .Release.Name }} diff --git a/charts/cannon/templates/statefulset.yaml b/charts/cannon/templates/statefulset.yaml deleted file mode 100644 index b30a6078a..000000000 --- a/charts/cannon/templates/statefulset.yaml +++ /dev/null @@ -1,85 +0,0 @@ -# Spins up pods with stable names; e.g. cannon-0 ... cannon-<N> -# Specific pods can be accessed within the cluster at cannon-<N>.cannon.<namespace> -# (the second 'cannon' is the name of the headless service) -# Note: In fact, cannon-<N>.cannon can also be used to access the service but assuming -# that we can have multiple namespaces accessing the same redis cluster, appending `.<namespace>` -# makes the service unambiguous -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: cannon - labels: - wireService: cannon - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - serviceName: {{ .Values.service.name }} - selector: - matchLabels: - wireService: cannon - replicas: {{ .Values.replicaCount }} - updateStrategy: - type: RollingUpdate - podManagementPolicy: Parallel - template: - metadata: - labels: - wireService: cannon - release: {{ .Release.Name }} - annotations: - checksum/configmap: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }} - spec: - terminationGracePeriodSeconds: {{ .Values.drainTimeout }} # should be higher than the sleep duration of preStop - containers: - - name: cannon - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - lifecycle: - preStop: - # kubernetes by default immediately sends a SIGTERM to the container, - # which would cause cannon to exit, breaking existing websocket connections. - # Instead we sleep for a day. (SIGTERM is still sent, but after the preStop completes) - exec: - command: ["sleep", {{ .Values.drainTimeout | quote }} ] - volumeMounts: - - name: empty - mountPath: /etc/wire/cannon/externalHost - - name: cannon-config - mountPath: /etc/wire/cannon/conf - ports: - - name: http - containerPort: {{ .Values.service.internalPort }} - readinessProbe: - httpGet: - path: /i/status - port: {{ .Values.service.internalPort }} - scheme: HTTP - livenessProbe: - initialDelaySeconds: 30 - timeoutSeconds: 1 - httpGet: - path: /i/status - port: {{ .Values.service.internalPort }} - scheme: HTTP - resources: -{{ toYaml .Values.resources | indent 12 }} - initContainers: - - name: cannon-configurator - image: alpine - command: - - /bin/sh - args: - - -c - # e.g.
cannon-0.cannon.production - - echo "${HOSTNAME}.{{ .Values.service.name }}.{{ .Release.Namespace }}" > /etc/wire/cannon/externalHost/host.txt - volumeMounts: - - name: empty - mountPath: /etc/wire/cannon/externalHost - dnsPolicy: ClusterFirst - restartPolicy: Always - volumes: - - name: cannon-config - configMap: - name: cannon - - name: empty - emptyDir: {} diff --git a/charts/cannon/values.yaml b/charts/cannon/values.yaml deleted file mode 100644 index 5dcdc3fec..000000000 --- a/charts/cannon/values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -replicaCount: 3 -image: - repository: quay.io/wire/cannon - tag: 2.78.0 - pullPolicy: IfNotPresent -config: - logLevel: Info -resources: - requests: - memory: "256Mi" - cpu: "100m" - limits: - memory: "512Mi" - cpu: "500m" -service: - name: cannon - internalPort: 8080 - externalPort: 8080 -drainTimeout: 0 diff --git a/charts/cargohold/.helmignore b/charts/cargohold/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/cargohold/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/cargohold/Chart.yaml b/charts/cargohold/Chart.yaml deleted file mode 100644 index ac59b2d65..000000000 --- a/charts/cargohold/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Cargohold (part of Wire Server) - Asset storage -name: cargohold -version: 0.94.0 diff --git a/charts/cargohold/templates/configmap.yaml b/charts/cargohold/templates/configmap.yaml deleted file mode 100644 index acda5ecfa..000000000 --- a/charts/cargohold/templates/configmap.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "cargohold" -data: - cargohold.yaml: | - logNetStrings: True # log using netstrings encoding: http://cr.yp.to/proto/netstrings.txt - logLevel: {{ .Values.config.logLevel }} - - cargohold: - host: 0.0.0.0 - port: {{ .Values.service.internalPort }} - - aws: - {{- with .Values.config.aws }} - s3Bucket: {{ .s3Bucket }} - s3Endpoint: {{ .s3Endpoint }} - {{- if .s3DownloadEndpoint }} - s3DownloadEndpoint: {{ .s3DownloadEndpoint }} - {{- end }} - {{ if .cloudFront }} - cloudFront: - domain: {{ .cloudFront.domain }} - keyPairId: {{ .cloudFront.keyPairId }} - privateKey: {{ .cloudFront.privateKeyPath }} - {{ end }} - {{- end }} - - settings: - maxTotalBytes: 5368709120 - downloadLinkTTL: 300 # Seconds diff --git a/charts/cargohold/templates/deployment.yaml b/charts/cargohold/templates/deployment.yaml deleted file mode 100644 index 020d40a32..000000000 --- a/charts/cargohold/templates/deployment.yaml +++ /dev/null @@ -1,90 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cargohold - labels: - wireService: cargohold - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - replicas: {{ .Values.replicaCount }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 0 - maxSurge: {{ .Values.replicaCount }} - selector: - matchLabels: - wireService: cargohold - template: - metadata: - labels: - wireService: cargohold - release: {{ .Release.Name }} - annotations: - # An annotation of the configmap checksum ensures changes to the configmap cause a 
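One operational note on the StatefulSet above: `terminationGracePeriodSeconds` and the preStop sleep are both driven by `.Values.drainTimeout`, and the chart default of 0 means websockets are not drained at all. A sketch of raising it at deploy time (the release name is an assumption):

    # Give open websocket connections an hour to drain; preStop sleeps while
    # Kubernetes waits out the matching termination grace period.
    helm upgrade --install cannon charts/cannon --set drainTimeout=3600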
redeployment upon `helm upgrade` - checksum/configmap: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/secret: {{ include (print .Template.BasePath "/secret.yaml") . | sha256sum }} - spec: - volumes: - - name: "cargohold-config" - configMap: - name: "cargohold" - - name: "cargohold-secrets" - secret: - secretName: "cargohold" - containers: - - name: cargohold - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} - volumeMounts: - - name: "cargohold-secrets" - mountPath: "/etc/wire/cargohold/secrets" - - name: "cargohold-config" - mountPath: "/etc/wire/cargohold/conf" - env: - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: cargohold - key: awsKeyId - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: cargohold - key: awsSecretKey - {{- with .Values.config.proxy }} - {{- if .httpProxy }} - - name: http_proxy - value: {{ .httpProxy | quote }} - - name: HTTP_PROXY - value: {{ .httpProxy | quote }} - {{- end }} - {{- if .httpsProxy }} - - name: https_proxy - value: {{ .httpsProxy | quote }} - - name: HTTPS_PROXY - value: {{ .httpsProxy | quote }} - {{- end }} - {{- if .noProxyList }} - - name: no_proxy - value: {{ join "," .noProxyList | quote }} - - name: NO_PROXY - value: {{ join "," .noProxyList | quote }} - {{- end }} - {{- end }} - ports: - - containerPort: {{ .Values.service.internalPort }} - livenessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - readinessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - resources: -{{ toYaml .Values.resources | indent 12 }} diff --git a/charts/cargohold/templates/secret.yaml b/charts/cargohold/templates/secret.yaml deleted file mode 100644 index b5d3c1335..000000000 --- a/charts/cargohold/templates/secret.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: cargohold - labels: - app: cargohold - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: Opaque -data: - {{/* for_helm_linting is necessary only since the 'with' block below does not throw an error upon an empty .Values.secrets */}} - for_helm_linting: {{ required "No .secrets found in configuration. Did you forget to helm -f path/to/secrets.yaml ?" 
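The cargohold secret template refuses to render unless a `secrets` block is supplied, which is intended to come from an operator-held values file. A minimal sketch, with placeholder credentials that only make sense against the fake-aws charts:

    # secrets.yaml carries material that must never be committed to the chart.
    cat > secrets.yaml <<'EOF'
    secrets:
      awsKeyId: "dummy-key-id"
      awsSecretKey: "dummy-secret-key"
    EOF
    helm upgrade --install cargohold charts/cargohold -f secrets.yaml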
.Values.secrets | quote | b64enc | quote }} - - {{- with .Values.secrets }} - {{ if .cloudFront }} - cf-pk.pem: {{ .cloudFront.cfPrivateKey | b64enc | quote }} - {{ end }} - - awsKeyId: {{ .awsKeyId | b64enc | quote }} - awsSecretKey: {{ .awsSecretKey | b64enc | quote }} - {{- end }} diff --git a/charts/cargohold/templates/service.yaml b/charts/cargohold/templates/service.yaml deleted file mode 100644 index 3621ea652..000000000 --- a/charts/cargohold/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: cargohold - labels: - wireService: cargohold - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - selector: - wireService: cargohold - release: {{ .Release.Name }} diff --git a/charts/cargohold/templates/tests/cargohold-integration.yaml b/charts/cargohold/templates/tests/cargohold-integration.yaml deleted file mode 100644 index 00b95ed6c..000000000 --- a/charts/cargohold/templates/tests/cargohold-integration.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ .Release.Name }}-cargohold-integration" - annotations: - "helm.sh/hook": test-success -spec: - volumes: - - name: "cargohold-integration" - configMap: - name: "cargohold-integration" - containers: - # NOTE: the bucket for these tests must be created. - # If using the wire-server/fake-aws-s3 chart, `dummy-bucket` will already be created. - - name: integration - image: "{{ .Values.image.repository }}-integration:{{ .Values.image.tag }}" - volumeMounts: - - name: "cargohold-integration" - mountPath: "/etc/wire/integration" - restartPolicy: Never diff --git a/charts/cargohold/templates/tests/configmap.yaml b/charts/cargohold/templates/tests/configmap.yaml deleted file mode 100644 index bb0ff67c8..000000000 --- a/charts/cargohold/templates/tests/configmap.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "cargohold-integration" -data: - integration.yaml: | - cargohold: - host: cargohold - port: {{ .Values.service.internalPort }} diff --git a/charts/cargohold/values.yaml b/charts/cargohold/values.yaml deleted file mode 100644 index 18c91f403..000000000 --- a/charts/cargohold/values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -replicaCount: 3 -image: - repository: quay.io/wire/cargohold - tag: 2.78.0 -service: - externalPort: 8080 - internalPort: 8080 -resources: - requests: - memory: "256Mi" - cpu: "100m" - limits: - memory: "512Mi" - cpu: "500m" -config: - logLevel: Info - aws: - s3Bucket: assets - proxy: {} \ No newline at end of file diff --git a/charts/cassandra-ephemeral/Chart.yaml b/charts/cassandra-ephemeral/Chart.yaml deleted file mode 100644 index 93ee74242..000000000 --- a/charts/cassandra-ephemeral/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Wrapper chart for incubator/cassandra with custom settings to be used as non-persistent cassandra during tests -name: cassandra-ephemeral -version: 0.94.0 diff --git a/charts/cassandra-ephemeral/requirements.yaml b/charts/cassandra-ephemeral/requirements.yaml deleted file mode 100644 index 053c4eab1..000000000 --- a/charts/cassandra-ephemeral/requirements.yaml +++ /dev/null @@ -1,5 +0,0 @@ -dependencies: -- name: cassandra - version: 0.13.3 - repository: https://kubernetes-charts-incubator.storage.googleapis.com - alias: 
cassandra-ephemeral diff --git a/charts/cassandra-ephemeral/templates/helpers.tpl b/charts/cassandra-ephemeral/templates/helpers.tpl deleted file mode 100644 index f2fa13b75..000000000 --- a/charts/cassandra-ephemeral/templates/helpers.tpl +++ /dev/null @@ -1,8 +0,0 @@ -{{/* -override default fullname template to remove the .Release.Name from the definition in -https://github.com/kubernetes/charts/blob/master/stable/redis-ha/templates/_helpers.tpl -*/}} -{{- define "cassandra.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} \ No newline at end of file diff --git a/charts/cassandra-ephemeral/values.yaml b/charts/cassandra-ephemeral/values.yaml deleted file mode 100644 index c6cec4d21..000000000 --- a/charts/cassandra-ephemeral/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# See defaults in https://github.com/kubernetes/charts/blob/master/incubator/cassandra/values.yaml -cassandra-ephemeral: - persistence: - enabled: false - resources: - requests: - memory: "2.0Gi" - cpu: "1" - limits: - memory: "4.0Gi" - cpu: "4" - ## Change cassandra configuration parameters below: - ## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/configCassandra_yaml.html - ## Recommended max heap size is 1/2 of system memory - ## Recommended heap new size is 1/4 of max heap size - ## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/operations/opsTuneJVM.html - config: - cluster_size: 1 - seed_size: 1 - max_heap_size: 2048M - heap_new_size: 1024M diff --git a/charts/cassandra-external/Chart.yaml b/charts/cassandra-external/Chart.yaml deleted file mode 100644 index 9361d2a2a..000000000 --- a/charts/cassandra-external/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Refer to cassandra IPs located outside kubernetes by specifying IPs manually -name: cassandra-external -version: 0.94.0 diff --git a/charts/cassandra-external/templates/endpoint.yaml b/charts/cassandra-external/templates/endpoint.yaml deleted file mode 100644 index 90d2c3648..000000000 --- a/charts/cassandra-external/templates/endpoint.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# create a headless service (thus creating dns name "cassandra-external") -# and a custom endpoint (thus forwarding traffic when resolving DNS to custom IPs) -kind: Service -apiVersion: v1 -metadata: - name: {{ .Chart.Name }} - labels: - app: {{ .Chart.Name }} - chart: {{ template "cassandra-external.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - clusterIP: None # create a headless service, we want no extra load balancing for cassandra - ports: - - name: cql - port: {{ .Values.portCql }} - targetPort: {{ .Values.portCql }} --- -kind: Endpoints -apiVersion: v1 -metadata: - name: {{ .Chart.Name }} - labels: - app: {{ .Chart.Name }} - chart: {{ template "cassandra-external.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -subsets: - - addresses: - {{- range .Values.IPs }} - - ip: {{ .
}} - {{- end }} - ports: - # port and name in the endpoint must match port and name in the service - # see also https://docs.openshift.com/enterprise/3.0/dev_guide/integrating_external_services.html - - name: cql - port: {{ .Values.portCql }} diff --git a/charts/cassandra-external/templates/helpers.tpl b/charts/cassandra-external/templates/helpers.tpl deleted file mode 100644 index 50a29c979..000000000 --- a/charts/cassandra-external/templates/helpers.tpl +++ /dev/null @@ -1,11 +0,0 @@ -{{- define "cassandra-external.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "cassandra-external.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/cassandra-external/values.yaml b/charts/cassandra-external/values.yaml deleted file mode 100644 index b25ea918b..000000000 --- a/charts/cassandra-external/values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -portCql: 9042 - -## Configure this helm chart with: -# IPs: -# - 1.2.3.4 -# - 5.6.7.8 diff --git a/charts/cassandra-migrations/.helmignore b/charts/cassandra-migrations/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/cassandra-migrations/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/cassandra-migrations/Chart.yaml b/charts/cassandra-migrations/Chart.yaml deleted file mode 100644 index 76fd4cf9f..000000000 --- a/charts/cassandra-migrations/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: cassandra database schema migration for gundeck,brig,galley,spar -name: cassandra-migrations -version: 0.94.0 diff --git a/charts/cassandra-migrations/templates/NOTES.txt b/charts/cassandra-migrations/templates/NOTES.txt deleted file mode 100644 index 1c1f3d18a..000000000 --- a/charts/cassandra-migrations/templates/NOTES.txt +++ /dev/null @@ -1,7 +0,0 @@ -Upon error, check the init container logs with e.g. 
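As the values file above says, cassandra-external does nothing useful until it is handed addresses. A sketch of pointing the in-cluster DNS name at nodes outside Kubernetes (the IPs are placeholders):

    cat > cassandra-external.yaml <<'EOF'
    IPs:
      - 10.0.0.4
      - 10.0.0.5
      - 10.0.0.6
    EOF
    helm upgrade --install cassandra-external charts/cassandra-external \
      -f cassandra-external.yaml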
- -MIGRATIONS=$(kubectl --namespace {{ .Release.Namespace }} get pods | grep cassandra-migrations- | awk '{print $1}') -kubectl --namespace {{ .Release.Namespace }} logs "$MIGRATIONS" -c gundeck-schema -f -kubectl --namespace {{ .Release.Namespace }} logs "$MIGRATIONS" -c brig-schema -f -kubectl --namespace {{ .Release.Namespace }} logs "$MIGRATIONS" -c galley-schema -f -kubectl --namespace {{ .Release.Namespace }} logs "$MIGRATIONS" -c spar-schema -f diff --git a/charts/cassandra-migrations/templates/job.yaml b/charts/cassandra-migrations/templates/job.yaml deleted file mode 100644 index 96d7b3d63..000000000 --- a/charts/cassandra-migrations/templates/job.yaml +++ /dev/null @@ -1,85 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: cassandra-migrations - labels: - wireService: cassandra-migrations - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- if .Values.hook.enabled }} - annotations: - # when hook.enabled=true (default), this chart does not work standalone, but is intended as - # either a 'post'-subchart to a cassandra chart or a 'pre'-subchart to wire-server - # For the meaning of hooks, see https://docs.helm.sh/developing_charts/#hooks - "helm.sh/hook": {{ .Values.hook.type }}-install,{{ .Values.hook.type }}-upgrade - "helm.sh/hook-weight": "0" - "helm.sh/hook-delete-policy": "before-hook-creation" -{{- end }} -spec: - template: - metadata: - labels: - wireService: cassandra-migrations - release: {{ .Release.Name }} - spec: - restartPolicy: OnFailure - # specifying cassandra-migrations as initContainers executes them sequentially, rather than in parallel - # to avoid 'Column family ID mismatch' / schema disagreements - # see https://stackoverflow.com/questions/29030661/creating-new-table-with-cqlsh-on-existing-keyspace-column-family-id-mismatch#40325651 for details. - initContainers: - - name: gundeck-schema - image: "{{ .Values.images.gundeck }}:{{ .Values.images.tag }}" - command: - - gundeck-schema - - --host - - "{{ .Values.cassandra.host }}" - - --port - - "9042" - - --keyspace - - gundeck - - --replication-factor - - "{{ .Values.cassandra.replicaCount }}" - - - name: brig-schema - image: "{{ .Values.images.brig }}:{{ .Values.images.tag }}" - command: - - brig-schema - - --host - - "{{ .Values.cassandra.host }}" - - --port - - "9042" - - --keyspace - - brig - - --replication-factor - - "{{ .Values.cassandra.replicaCount }}" - - - name: galley-schema - image: "{{ .Values.images.galley }}:{{ .Values.images.tag }}" - command: - - galley-schema - - --host - - "{{ .Values.cassandra.host }}" - - --port - - "9042" - - --keyspace - - galley - - --replication-factor - - "{{ .Values.cassandra.replicaCount }}" - - - name: spar-schema - image: "{{ .Values.images.spar }}:{{ .Values.images.tag }}" - command: - - spar-schema - - --host - - "{{ .Values.cassandra.host }}" - - --port - - "9042" - - --keyspace - - spar - - --replication-factor - - "{{ .Values.cassandra.replicaCount }}" - containers: - - name: job-done - image: busybox - command: ['sh', '-c', 'echo "gundeck, brig, galley, spar schema cassandra-migrations completed. See initContainers for details with e.g. kubectl logs ... 
-c gundeck-schema"'] diff --git a/charts/cassandra-migrations/values.yaml b/charts/cassandra-migrations/values.yaml deleted file mode 100644 index 515f43748..000000000 --- a/charts/cassandra-migrations/values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -hook: - enabled: true - type: pre -images: - tag: 2.78.0 - gundeck: quay.io/wire/gundeck-schema - brig: quay.io/wire/brig-schema - galley: quay.io/wire/galley-schema - spar: quay.io/wire/spar-schema \ No newline at end of file diff --git a/charts/databases-ephemeral/.helmignore b/charts/databases-ephemeral/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/databases-ephemeral/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/databases-ephemeral/Chart.yaml b/charts/databases-ephemeral/Chart.yaml deleted file mode 100644 index e87b8db12..000000000 --- a/charts/databases-ephemeral/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart providing in-memory, ephemeral databases for use with wire-server https://github.com/wireapp/wire-server -name: databases-ephemeral -version: 0.94.0 diff --git a/charts/databases-ephemeral/requirements.yaml b/charts/databases-ephemeral/requirements.yaml deleted file mode 100644 index e779edb56..000000000 --- a/charts/databases-ephemeral/requirements.yaml +++ /dev/null @@ -1,36 +0,0 @@ -dependencies: -##################################################### -## dependent (demo, non-persistent, non-HA) databases -# -# Note: why are these charts not part of the wire-server chart? -# These charts, in particular cassandra/elasticsearch -# cannot be part of the wire-server chart, because of the required ordering of -# 1. install databases, wait for them to be ready -# 2. run database cassandra-migrations (done as a pre-install/pre-upgrade hook) -# 3. install wire-server -# I.e. cassandra-migrations cannot run before databases are ready, so one cannot make them a pre-install/upgrade hook. Making them a post-install hook also doesn't work: -# Installing all charts in parallel means brig/galley/gundeck won't start up -# since cassandra-migrations has not yet run; but the cassandra-migrations hook -# requires all pods to be in a 'Ready' state before starting (condition for post-install); this is impossible. -##################################################### -- name: redis-ephemeral - version: "0.94.0" - repository: "file://../redis-ephemeral" - tags: - - redis-ephemeral - - databases-ephemeral - - demo -- name: elasticsearch-ephemeral - version: "0.94.0" - repository: "file://../elasticsearch-ephemeral" - tags: - - elasticsearch-ephemeral - - databases-ephemeral - - demo -- name: cassandra-ephemeral - version: "0.94.0" - repository: "file://../cassandra-ephemeral" - tags: - - cassandra-ephemeral - - databases-ephemeral - - demo diff --git a/charts/databases-ephemeral/templates/NOTES.txt b/charts/databases-ephemeral/templates/NOTES.txt deleted file mode 100644 index 2e2ad5b05..000000000 --- a/charts/databases-ephemeral/templates/NOTES.txt +++ /dev/null @@ -1,11 +0,0 @@ -You now have an in-memory, non-persistent, non-highly-available set of databases: - -* cassandra-ephemeral -* elasticsearch-ephemeral -* redis-ephemeral - -!!
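The requirements above put every database behind Helm tags, so single components can be toggled without editing the chart. A sketch, relying only on standard Helm dependency-tag behaviour:

    # Bring up the ephemeral databases but leave redis out.
    helm upgrade --install databases-ephemeral charts/databases-ephemeral \
      --set tags.redis-ephemeral=false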
WARNING WARNING !! -This is fine for testing and demo purposes, but NOT for a production use case. -!! WARNING WARNING !! - -Note that before use of these databases for wire-server components, an index (in the case of elasticsearch) and a set of cassandra-migrations (in the case of cassandra) have to be applied. This comes bundled with the wire-server chart (see cassandra-migrations and elasticsearch-index charts for details) diff --git a/charts/databases-ephemeral/values.yaml b/charts/databases-ephemeral/values.yaml deleted file mode 100644 index 5bf90332f..000000000 --- a/charts/databases-ephemeral/values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Default values for ephemeral-databases diff --git a/charts/demo-smtp/.helmignore b/charts/demo-smtp/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/demo-smtp/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/demo-smtp/Chart.yaml b/charts/demo-smtp/Chart.yaml deleted file mode 100644 index 6d71113ba..000000000 --- a/charts/demo-smtp/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -appVersion: "1.0" -description: A demo helm chart to send emails. Not production ready. -name: demo-smtp -version: 0.94.0 diff --git a/charts/demo-smtp/templates/_helpers.tpl b/charts/demo-smtp/templates/_helpers.tpl deleted file mode 100644 index 5f2c965ce..000000000 --- a/charts/demo-smtp/templates/_helpers.tpl +++ /dev/null @@ -1,32 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "demo-smtp.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "demo-smtp.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "demo-smtp.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/demo-smtp/templates/deployment.yaml b/charts/demo-smtp/templates/deployment.yaml deleted file mode 100644 index 2107016df..000000000 --- a/charts/demo-smtp/templates/deployment.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "demo-smtp.fullname" . }} - labels: - app: {{ template "demo-smtp.name" . }} - chart: {{ template "demo-smtp.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: {{ template "demo-smtp.name" . 
}} - release: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ template "demo-smtp.name" . }} - release: {{ .Release.Name }} - spec: - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - env: - {{- range $key, $val := .Values.envVars }} - - name: {{ $key }} - value: {{ $val | quote }} - {{- end }} - ports: - - name: smtp - containerPort: 25 - protocol: TCP - resources: -{{ toYaml .Values.resources | indent 12 }} diff --git a/charts/demo-smtp/templates/service.yaml b/charts/demo-smtp/templates/service.yaml deleted file mode 100644 index 3bd0a5973..000000000 --- a/charts/demo-smtp/templates/service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "demo-smtp.fullname" . }} - labels: - app: {{ template "demo-smtp.name" . }} - chart: {{ template "demo-smtp.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: smtp - protocol: TCP - name: smtp - selector: - app: {{ template "demo-smtp.name" . }} - release: {{ .Release.Name }} diff --git a/charts/demo-smtp/values.yaml b/charts/demo-smtp/values.yaml deleted file mode 100644 index 5724ce48f..000000000 --- a/charts/demo-smtp/values.yaml +++ /dev/null @@ -1,28 +0,0 @@ -fullnameOverride: demo-smtp -replicaCount: 1 -image: - repository: namshi/smtp - tag: latest - -service: - port: 25 - -resources: - limits: - cpu: 500m - memory: 500Mi - requests: - cpu: 100m - memory: 128Mi - -# Some relevant environment options can be -# passed to the SMTP docker image, check -# https://hub.docker.com/r/namshi/smtp/ -# for more details -# NOTE: Without an empty dictionary, you will -# see warnings when overriding envVars -envVars: {} -# E.g. 
-# envVars: -# RELAY_NETWORKS: ":x.y.z.w/16" -# diff --git a/charts/elasticsearch-curator/Chart.yaml b/charts/elasticsearch-curator/Chart.yaml deleted file mode 100644 index f784663cf..000000000 --- a/charts/elasticsearch-curator/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Wrapper chart for stable/elasticsearch-curator -name: elasticsearch-curator -version: 0.94.0 diff --git a/charts/elasticsearch-curator/requirements.yaml b/charts/elasticsearch-curator/requirements.yaml deleted file mode 100644 index d52a2eec3..000000000 --- a/charts/elasticsearch-curator/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: -- name: elasticsearch-curator - version: 1.5.0 - repository: https://kubernetes-charts.storage.googleapis.com diff --git a/charts/elasticsearch-curator/values.yaml b/charts/elasticsearch-curator/values.yaml deleted file mode 100644 index 45ca08210..000000000 --- a/charts/elasticsearch-curator/values.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# See defaults in https://github.com/helm/charts/tree/master/stable/elasticsearch-curator -elasticsearch-curator: - configMaps: - action_file_yml: |- - --- - actions: - 1: - action: delete_indices - description: "Clean up ES by deleting old indices" - options: - timeout_override: - continue_if_exception: False - disable_action: False - ignore_empty_list: True - filters: - - filtertype: age - source: name - direction: older - timestring: '%Y.%m.%d' - unit: days - unit_count: 3 - field: - stats_result: - epoch: - exclude: False - config_yml: |- - --- - client: - hosts: - - elasticsearch-ephemeral - port: 9200 diff --git a/charts/elasticsearch-ephemeral/Chart.yaml b/charts/elasticsearch-ephemeral/Chart.yaml deleted file mode 100644 index eeebdfbd3..000000000 --- a/charts/elasticsearch-ephemeral/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Dummy ephemeral elasticsearch -name: elasticsearch-ephemeral -version: 0.94.0 diff --git a/charts/elasticsearch-ephemeral/templates/_helpers.tpl b/charts/elasticsearch-ephemeral/templates/_helpers.tpl deleted file mode 100644 index e49dfab7a..000000000 --- a/charts/elasticsearch-ephemeral/templates/_helpers.tpl +++ /dev/null @@ -1,27 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 53 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 53 chars (63 - len("-discovery")) because some Kubernetes name fields are limited to 63 (by the DNS naming spec). -*/}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 53 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for Curator cron job. -*/}} -{{- define "curator.cronJob.apiVersion" -}} -{{- if ge .Capabilities.KubeVersion.Minor "8" -}} -"batch/v1beta1" -{{- else -}} -"batch/v2alpha1" -{{- end -}} -{{- end -}} diff --git a/charts/elasticsearch-ephemeral/templates/es-svc.yaml b/charts/elasticsearch-ephemeral/templates/es-svc.yaml deleted file mode 100644 index b8189bcf8..000000000 --- a/charts/elasticsearch-ephemeral/templates/es-svc.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "fullname" . }} - labels: - wireService: {{ template "fullname" . }} - app: {{ template "fullname" .
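The curator action file above drops time-stamped log indices after three days. Before changing `unit_count`, the effect can be previewed with curator's dry-run mode; this is a sketch and assumes the curator CLI plus the two files rendered from the configmap:

    # Show which indices the delete_indices action would remove, without
    # actually deleting anything.
    curator --config config.yml --dry-run action_file.yml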
}} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" - component: {{ template "fullname" . }} -spec: - type: ClusterIP - selector: - component: {{ template "fullname" . }} - ports: - - name: http - port: {{ .Values.service.httpPort }} - targetPort: 9200 - protocol: TCP - - name: transport - port: {{ .Values.service.transportPort }} - targetPort: 9300 - protocol: TCP diff --git a/charts/elasticsearch-ephemeral/templates/es.yaml b/charts/elasticsearch-ephemeral/templates/es.yaml deleted file mode 100644 index 9c9f00fca..000000000 --- a/charts/elasticsearch-ephemeral/templates/es.yaml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "fullname" . }} - labels: - wireService: {{ template "fullname" . }} - app: {{ template "fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" - component: {{ template "fullname" . }} -spec: - replicas: 1 - selector: - matchLabels: - component: {{ template "fullname" . }} - template: - metadata: - labels: - component: {{ template "fullname" . }} - spec: - containers: - - name: es - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - env: - - name: MAX_HEAP_SIZE - value: "2048" - - name: HEAP_NEWSIZE - value: "800M" - - name: "bootstrap.system_call_filter" - value: "false" - - name: "discovery.type" - value: "single-node" - ports: - - containerPort: 9200 - name: http - protocol: TCP - - containerPort: 9300 - name: transport - protocol: TCP - volumeMounts: - - name: storage - mountPath: /data - resources: -{{ toYaml .Values.resources | indent 12 }} - volumes: - - emptyDir: - medium: "" - name: "storage" diff --git a/charts/elasticsearch-ephemeral/values.yaml b/charts/elasticsearch-ephemeral/values.yaml deleted file mode 100644 index 55dd63fd8..000000000 --- a/charts/elasticsearch-ephemeral/values.yaml +++ /dev/null @@ -1,15 +0,0 @@ -image: - repository: elasticsearch - tag: 6.7.1 - -service: - httpPort: 9200 - transportPort: 9300 - -resources: - limits: - cpu: "2000m" - memory: "4Gi" - requests: - cpu: "250m" - memory: "500Mi" diff --git a/charts/elasticsearch-external/Chart.yaml b/charts/elasticsearch-external/Chart.yaml deleted file mode 100644 index dd2ab85dc..000000000 --- a/charts/elasticsearch-external/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Refer to elasticsearch IPs located outside kubernetes by specifying IPs manually -name: elasticsearch-external -version: 0.94.0 diff --git a/charts/elasticsearch-external/templates/endpoint.yaml b/charts/elasticsearch-external/templates/endpoint.yaml deleted file mode 100644 index 04a4b1a04..000000000 --- a/charts/elasticsearch-external/templates/endpoint.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# create a headless clusterIP service to create dns name "elasticsearch-external" -# and a custom endpoint, thus forwarding traffic when resolving DNS to custom IPs -kind: Service -apiVersion: v1 -metadata: - name: {{ .Chart.Name }} - labels: - app: {{ .Chart.Name }} - chart: {{ template "elasticsearch-external.chart" . 
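elasticsearch-ephemeral above is a single-node, no-persistence deployment, so anything beyond a basic health check is out of scope. A sketch for poking it from a workstation (the service name follows the chart's fullname template):

    kubectl port-forward svc/elasticsearch-ephemeral 9200:9200 &
    curl -s 'http://localhost:9200/_cluster/health?pretty'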
}} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - clusterIP: None # headless service - ports: - - name: http - port: {{ .Values.portHttp }} - targetPort: {{ .Values.portHttp }} ---- -kind: Endpoints -apiVersion: v1 -metadata: - name: {{ .Chart.Name }} - labels: - app: {{ .Chart.Name }} - chart: {{ template "elasticsearch-external.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -subsets: - - addresses: - {{- range .Values.IPs }} - - ip: {{ . }} - {{- end }} - ports: - # port and name in the endpoint must match port and name in the service - # see also https://docs.openshift.com/enterprise/3.0/dev_guide/integrating_external_services.html - - name: http - port: {{ .Values.portHttp }} diff --git a/charts/elasticsearch-external/templates/helpers.tpl b/charts/elasticsearch-external/templates/helpers.tpl deleted file mode 100644 index 8c545ceb5..000000000 --- a/charts/elasticsearch-external/templates/helpers.tpl +++ /dev/null @@ -1,11 +0,0 @@ -{{- define "elasticsearch-external.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "elasticsearch-external.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/elasticsearch-external/values.yaml b/charts/elasticsearch-external/values.yaml deleted file mode 100644 index b6f296f6d..000000000 --- a/charts/elasticsearch-external/values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -portHttp: 9200 - -## Configure this helm chart with: -# IPs: -# - 1.2.3.4 -# - 5.6.7.8 diff --git a/charts/elasticsearch-index/.helmignore b/charts/elasticsearch-index/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/elasticsearch-index/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/elasticsearch-index/Chart.yaml b/charts/elasticsearch-index/Chart.yaml deleted file mode 100644 index 1bdd8a272..000000000 --- a/charts/elasticsearch-index/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Elasticsearch index for brig -name: elasticsearch-index -version: 0.94.0 diff --git a/charts/elasticsearch-index/templates/create-index.yaml b/charts/elasticsearch-index/templates/create-index.yaml deleted file mode 100644 index 7c669d83e..000000000 --- a/charts/elasticsearch-index/templates/create-index.yaml +++ /dev/null @@ -1,48 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: elasticsearch-index-create - labels: - wireService: elasticsearch-index-create - app: elasticsearch-index-create - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-delete-policy": "before-hook-creation" -spec: - template: - metadata: - name: "{{.Release.Name}}" - labels: - wireService: elasticsearch-index-create - app: elasticsearch-index-create - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - spec: - restartPolicy: OnFailure - initContainers: - # Creates index in elasticsearch only when it doesn't exist. - # Does nothing if the index exists. - - name: brig-index-create - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - args: - - create - - --elasticsearch-server - - "http://{{ required "missing elasticsearch-index.elasticsearch.host!" .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }}" - - --elasticsearch-index - - "{{ .Values.elasticsearch.index }}" - - --elasticsearch-shards=5 - - --elasticsearch-replicas=2 - - --elasticsearch-refresh-interval=5 - containers: - - name: brig-index-update-mapping - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - args: - - update-mapping - - --elasticsearch-server - - "http://{{ required "missing elasticsearch-index.elasticsearch.host!" 
.Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }}" - --elasticsearch-index - "{{ .Values.elasticsearch.index }}" diff --git a/charts/elasticsearch-index/templates/helpers.tpl b/charts/elasticsearch-index/templates/helpers.tpl deleted file mode 100644 index fb1ddbd47..000000000 --- a/charts/elasticsearch-index/templates/helpers.tpl +++ /dev/null @@ -1,7 +0,0 @@ -{{/* -override default fullname template to remove the .Release.Name from the definition -*/}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} \ No newline at end of file diff --git a/charts/elasticsearch-index/templates/migrate-data.yaml b/charts/elasticsearch-index/templates/migrate-data.yaml deleted file mode 100644 index f739997a2..000000000 --- a/charts/elasticsearch-index/templates/migrate-data.yaml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: brig-index-migrate-data - labels: - wireService: elasticsearch-index-migrate-data - app: elasticsearch-index-migrate-data - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - annotations: - "helm.sh/hook": post-install,post-upgrade - "helm.sh/hook-delete-policy": "before-hook-creation" -spec: - template: - metadata: - name: "{{.Release.Name}}" - labels: - wireService: elasticsearch-index-migrate-data - app: elasticsearch-index-migrate-data - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - spec: - restartPolicy: OnFailure - containers: - # Migrates data from cassandra into the elasticsearch index. - # (index creation and mapping updates happen in the separate pre-install job above.) - - name: brig-index - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - args: - - migrate-data - - --elasticsearch-server - - "http://{{ required "missing elasticsearch-index.elasticsearch.host!" .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }}" - - --elasticsearch-index - - "{{ .Values.elasticsearch.index }}" - - --cassandra-host - - "{{ required "missing elasticsearch-index.cassandra.host!" .Values.cassandra.host }}" - - --cassandra-port - - "{{ required "missing elasticsearch-index.cassandra.port!" .Values.cassandra.port }}" - - --cassandra-keyspace - - "{{ required "missing elasticsearch-index.cassandra.keyspace!"
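The two jobs in this chart split the work: index creation and mapping updates run as a pre-install/pre-upgrade hook, data migration as a post-install/post-upgrade hook. The same steps can be replayed by hand with the brig-index binary from the quay.io/wire/brig-index image, assuming it accepts exactly the flags the jobs pass:

    # Create the index if missing, then bring its mapping up to date.
    brig-index create \
      --elasticsearch-server http://elasticsearch-ephemeral:9200 \
      --elasticsearch-index directory \
      --elasticsearch-shards=5 --elasticsearch-replicas=2
    brig-index update-mapping \
      --elasticsearch-server http://elasticsearch-ephemeral:9200 \
      --elasticsearch-index directory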
.Values.cassandra.keyspace }}" diff --git a/charts/elasticsearch-index/values.yaml b/charts/elasticsearch-index/values.yaml deleted file mode 100644 index cb813d279..000000000 --- a/charts/elasticsearch-index/values.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# Default values for elasticsearch-index -elasticsearch: - #host: # elasticsearch-client|elasticsearch-ephemeral - port: 9200 - index: directory -cassandra: - # host: - port: 9042 - keyspace: brig -image: - repository: quay.io/wire/brig-index - tag: 2.78.0 diff --git a/charts/fake-aws-dynamodb/Chart.yaml b/charts/fake-aws-dynamodb/Chart.yaml deleted file mode 100644 index b4c3fb612..000000000 --- a/charts/fake-aws-dynamodb/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Dummy ephemeral DynamoDB service -name: fake-aws-dynamodb -version: 0.94.0 diff --git a/charts/fake-aws-dynamodb/templates/_helpers.tpl b/charts/fake-aws-dynamodb/templates/_helpers.tpl deleted file mode 100644 index 6ecbd30d5..000000000 --- a/charts/fake-aws-dynamodb/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 53 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 53 chars (63 - len("-discovery")) because some Kubernetes name fields are limited to 63 (by the DNS naming spec). -*/}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 53 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/fake-aws-dynamodb/templates/deployment.yaml b/charts/fake-aws-dynamodb/templates/deployment.yaml deleted file mode 100644 index 549349cb5..000000000 --- a/charts/fake-aws-dynamodb/templates/deployment.yaml +++ /dev/null @@ -1,63 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "fullname" . }} - labels: - app: {{ template "fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - replicas: 1 - selector: - matchLabels: - app: {{ template "fullname" . }} - template: - metadata: - labels: - app: {{ template "fullname" . 
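The elasticsearch-index values above deliberately leave the hosts commented out; the `required` calls in the templates turn a missing host into a hard render error rather than a silently broken job. A sketch of supplying them (the host names assume the ephemeral charts from this repo):

    helm upgrade --install elasticsearch-index charts/elasticsearch-index \
      --set elasticsearch.host=elasticsearch-ephemeral \
      --set cassandra.host=cassandra-ephemeral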
}} - spec: - containers: - - name: dynamodb - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - ports: - - containerPort: {{ .Values.service.internalPort }} - name: http - protocol: TCP - volumeMounts: - - name: storage - mountPath: /data - resources: -{{ toYaml .Values.resources | indent 12 }} - - name: create-tables - image: mesosphere/aws-cli:1.14.5 - command: [/bin/sh] - args: - - -c - - | - exec_until_ready() { - until $1; do echo 'service not ready yet'; sleep 1; done - } - table_exists() { - OUTPUT=$(aws --endpoint-url=http://localhost:{{ $.Values.service.internalPort }} dynamodb list-tables | grep $1 | wc -l) - echo $OUTPUT - } - echo 'Creating AWS resources' - aws configure set aws_access_key_id dummy - aws configure set aws_secret_access_key dummy - aws configure set region eu-west-1 - - while true - do - # Recreate resources if needed - TABLE=$(table_exists "{{ $.Values.tables.brigPrekeys }}") - if [ "$TABLE" == "1" ] - then echo "Table {{ $.Values.tables.brigPrekeys }} exists, no need to re-create" - else exec_until_ready "aws --endpoint-url=http://localhost:{{ $.Values.service.internalPort }} dynamodb create-table --table-name {{ $.Values.tables.brigPrekeys }} --attribute-definitions AttributeName=client,AttributeType=S --key-schema AttributeName=client,KeyType=HASH --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5" - fi - echo 'Sleeping 10' - sleep 10 - done - volumes: - - emptyDir: {} - name: "storage" diff --git a/charts/fake-aws-dynamodb/templates/service.yaml b/charts/fake-aws-dynamodb/templates/service.yaml deleted file mode 100644 index 2e6c106de..000000000 --- a/charts/fake-aws-dynamodb/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "fullname" . }} - labels: - app: {{ template "fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - type: ClusterIP - selector: - app: {{ template "fullname" . }} - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - protocol: TCP diff --git a/charts/fake-aws-dynamodb/values.yaml b/charts/fake-aws-dynamodb/values.yaml deleted file mode 100644 index e59468b20..000000000 --- a/charts/fake-aws-dynamodb/values.yaml +++ /dev/null @@ -1,18 +0,0 @@ -image: - repository: cnadiminti/dynamodb-local - tag: 2018-04-11 - -service: - internalPort: 8000 - externalPort: 4567 - -tables: - brigPrekeys: integration-brig-prekeys - -resources: - limits: - cpu: "300m" - memory: "3000Mi" - requests: - cpu: "100m" - memory: "100Mi" diff --git a/charts/fake-aws-s3/.helmignore b/charts/fake-aws-s3/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/fake-aws-s3/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
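The sidecar loop above re-asserts the prekeys table every ten seconds, so the fake DynamoDB stays usable even after a restart wipes it. A sketch for checking it from another pod (endpoint and port follow this chart's service values; the credentials merely need to be non-empty):

    AWS_ACCESS_KEY_ID=dummy AWS_SECRET_ACCESS_KEY=dummy AWS_DEFAULT_REGION=eu-west-1 \
      aws --endpoint-url=http://fake-aws-dynamodb:4567 dynamodb list-tables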
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/fake-aws-s3/Chart.yaml b/charts/fake-aws-s3/Chart.yaml deleted file mode 100644 index 652307f48..000000000 --- a/charts/fake-aws-s3/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Wrapper chart for stable/minio -name: fake-aws-s3 -version: 0.94.0 diff --git a/charts/fake-aws-s3/requirements.yaml b/charts/fake-aws-s3/requirements.yaml deleted file mode 100644 index a39853614..000000000 --- a/charts/fake-aws-s3/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: -- name: minio - version: 2.5.15 - repository: https://kubernetes-charts.storage.googleapis.com diff --git a/charts/fake-aws-s3/templates/_helpers.tpl b/charts/fake-aws-s3/templates/_helpers.tpl deleted file mode 100644 index 6ecbd30d5..000000000 --- a/charts/fake-aws-s3/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 53 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 53 chars (63 - len("-discovery")) because some Kubernetes name fields are limited to 63 (by the DNS naming spec). -*/}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 53 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/fake-aws-s3/templates/reaper.yaml b/charts/fake-aws-s3/templates/reaper.yaml deleted file mode 100644 index 0d668c0d5..000000000 --- a/charts/fake-aws-s3/templates/reaper.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "fullname" . }}-reaper - labels: - app: {{ template "fullname" . }}-reaper - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - replicas: 1 - selector: - matchLabels: - app: {{ template "fullname" . }}-reaper - template: - metadata: - labels: - app: {{ template "fullname" . 
}}-reaper - spec: - containers: - - name: initiate-fake-aws-s3 - image: mesosphere/aws-cli:1.14.5 - command: [/bin/sh] - args: - - -c - - | - echo 'Creating AWS resources' - while true - do - AWS_SECRET_ACCESS_KEY={{ .Values.minio.secretKey }} AWS_ACCESS_KEY_ID={{ .Values.minio.accessKey }} aws s3 --endpoint http://{{ .Values.minio.fullnameOverride }}:9000 mb s3://{{ .Values.minio.defaultBucket.name }} | grep -v "BucketAlreadyOwnedByYou" - sleep 10 - done - diff --git a/charts/fake-aws-s3/values.yaml b/charts/fake-aws-s3/values.yaml deleted file mode 100644 index 2347e3fd5..000000000 --- a/charts/fake-aws-s3/values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# See defaults in https://github.com/helm/charts/tree/master/stable/minio -minio: - fullnameOverride: fake-aws-s3 - accessKey: dummykey - secretKey: dummysecret - persistence: - enabled: false - buckets: - - name: dummy-bucket - purge: true - policy: none - - name: assets - purge: false - policy: none - - name: public - purge: false - policy: public - environment: - MINIO_BROWSER: "off" diff --git a/charts/fake-aws-ses/Chart.yaml b/charts/fake-aws-ses/Chart.yaml deleted file mode 100644 index 18e2dc822..000000000 --- a/charts/fake-aws-ses/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Dummy ephemeral SES service (based on localstack) -name: fake-aws-ses -version: 0.94.0 diff --git a/charts/fake-aws-ses/templates/_helpers.tpl b/charts/fake-aws-ses/templates/_helpers.tpl deleted file mode 100644 index 6ecbd30d5..000000000 --- a/charts/fake-aws-ses/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 53 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 53 chars (63 - len("-discovery")) because some Kubernetes name fields are limited to 63 (by the DNS naming spec). -*/}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 53 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/fake-aws-ses/templates/deployment.yaml b/charts/fake-aws-ses/templates/deployment.yaml deleted file mode 100644 index 11ec6b550..000000000 --- a/charts/fake-aws-ses/templates/deployment.yaml +++ /dev/null @@ -1,69 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "fullname" . }} - labels: - app: {{ template "fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - replicas: 1 - selector: - matchLabels: - app: {{ template "fullname" . }} - template: - metadata: - labels: - app: {{ template "fullname" .
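A matching smoke test for the bucket loop above, runnable from any pod with the aws CLI (the credentials mirror the chart's dummy defaults and must never be used outside a test cluster):

    AWS_ACCESS_KEY_ID=dummykey AWS_SECRET_ACCESS_KEY=dummysecret \
      aws --endpoint-url=http://fake-aws-s3:9000 s3 ls s3://dummy-bucket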
}} - spec: - containers: - - name: fake-aws-ses - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - env: - - name: DEBUG - value: "1" - - name: DEFAULT_REGION - value: "eu-west-1" - - name: SERVICES - value: "ses" - ports: - - containerPort: {{ .Values.service.internalPort }} - name: http - protocol: TCP - volumeMounts: - - name: storage - mountPath: /data - resources: -{{ toYaml .Values.resources | indent 12 }} - - name: initiate-fake-aws-ses - image: mesosphere/aws-cli:1.14.5 - command: [/bin/sh] - args: - - -c - - | - exec_until_ready() { - until $1; do echo 'service not ready yet'; sleep 1; done - } - ses_identity_exists() { - OUTPUT=$(aws --endpoint-url=http://localhost:{{ $.Values.service.internalPort }} ses list-identities | grep $1 | wc -l) - echo $OUTPUT - } - echo 'Creating AWS resources' - aws configure set aws_access_key_id dummy - aws configure set aws_secret_access_key dummy - aws configure set region eu-west-1 - - while true - do - SES_SENDER=$(ses_identity_exists "{{ $.Values.sesSender }}") - if [ "$SES_SENDER" == "1" ] - # Set our sender address as verified - then echo "Resources already created, sleeping for 10, to keep this container (and thus the pod) alive" - else exec_until_ready "aws --endpoint-url=http://localhost:{{ $.Values.service.internalPort }} ses verify-email-identity --email-address {{ $.Values.sesSender }}" && echo 'Resource successfully created' - fi - sleep 10 - done - volumes: - - emptyDir: {} - name: "storage" diff --git a/charts/fake-aws-ses/templates/service.yaml b/charts/fake-aws-ses/templates/service.yaml deleted file mode 100644 index 2e6c106de..000000000 --- a/charts/fake-aws-ses/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "fullname" . }} - labels: - app: {{ template "fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - type: ClusterIP - selector: - app: {{ template "fullname" . }} - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - protocol: TCP diff --git a/charts/fake-aws-ses/values.yaml b/charts/fake-aws-ses/values.yaml deleted file mode 100644 index 3dcc068d8..000000000 --- a/charts/fake-aws-ses/values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -image: - repository: localstack/localstack - tag: 0.8.7 - -service: - internalPort: 4579 - externalPort: 4569 - -resources: - limits: - cpu: "200m" - memory: 500Mi - requests: - cpu: "100m" - memory: 100Mi - -## The following needs to be provided (and consistent with the config in brig) -#TODO: It would actually be useful if the deployment _fails_ if this is undefined -#sesSender: "sender@example.com" diff --git a/charts/fake-aws-sns/Chart.yaml b/charts/fake-aws-sns/Chart.yaml deleted file mode 100644 index c86ffac0f..000000000 --- a/charts/fake-aws-sns/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Dummy ephemeral SNS service (based on localstack) -name: fake-aws-sns -version: 0.94.0 diff --git a/charts/fake-aws-sns/templates/_helpers.tpl b/charts/fake-aws-sns/templates/_helpers.tpl deleted file mode 100644 index 6ecbd30d5..000000000 --- a/charts/fake-aws-sns/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. 
-*/}} -{{- define "name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 53 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 53 chars (63 - len("-discovery")) because some Kubernetes name fields are limited to 63 (by the DNS naming spec). -*/}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 53 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/fake-aws-sns/templates/deployment.yaml b/charts/fake-aws-sns/templates/deployment.yaml deleted file mode 100644 index f93bfc621..000000000 --- a/charts/fake-aws-sns/templates/deployment.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "fullname" . }} - labels: - app: {{ template "fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - replicas: 1 - selector: - matchLabels: - app: {{ template "fullname" . }} - template: - metadata: - labels: - app: {{ template "fullname" . }} - spec: - containers: - - name: fake-aws-sns - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - env: - - name: DEBUG - value: "1" - - name: DEFAULT_REGION - value: "eu-west-1" - - name: SERVICES - value: "sns" - ports: - - containerPort: {{ .Values.service.internalPort }} - name: http - protocol: TCP - volumeMounts: - - name: storage - mountPath: /data - resources: -{{ toYaml .Values.resources | indent 12 }} - - name: initiate-fake-aws-sns - image: mesosphere/aws-cli:1.14.5 - command: [/bin/sh] - args: - - -c - - | - exec_until_ready() { - until $1; do echo 'service not ready yet'; sleep 1; done - } - application_exists() { - OUTPUT=$(aws --endpoint-url=http://localhost:{{ $.Values.service.internalPort }} sns list-platform-applications | grep $1 | wc -l) - echo $OUTPUT - } - echo 'Creating AWS resources' - aws configure set aws_access_key_id dummy - aws configure set aws_secret_access_key dummy - aws configure set region eu-west-1 - - while true - do - {{ range $i, $app := .Values.applications }} - APPLICATION=$(application_exists "{{ $app.platform }}/{{ $app.name }}") - if [ "$APPLICATION" == "1" ] - then echo "Application {{ $app.name }} exists, no need to re-create" - else exec_until_ready "aws --endpoint-url=http://localhost:{{ $.Values.service.internalPort }} sns create-platform-application --name {{ $app.name }} --platform {{ $app.platform }} --attributes PlatformCredential={{ $app.credential }}" - fi - {{ end }} - echo "Resources created, sleeping for 10, to keep this container (and thus the pod) alive" - sleep 10 - done - volumes: - - emptyDir: {} - name: "storage" diff --git a/charts/fake-aws-sns/templates/service.yaml b/charts/fake-aws-sns/templates/service.yaml deleted file mode 100644 index 2e6c106de..000000000 --- a/charts/fake-aws-sns/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "fullname" . }} - labels: - app: {{ template "fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - type: ClusterIP - selector: - app: {{ template "fullname" . 
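The init loop above registers one SNS platform application per entry in `applications`. A sketch for confirming they exist (endpoint and port follow this chart's service values):

    AWS_ACCESS_KEY_ID=dummy AWS_SECRET_ACCESS_KEY=dummy AWS_DEFAULT_REGION=eu-west-1 \
      aws --endpoint-url=http://fake-aws-sns:4575 sns list-platform-applications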
}} - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - protocol: TCP diff --git a/charts/fake-aws-sns/values.yaml b/charts/fake-aws-sns/values.yaml deleted file mode 100644 index 1af701b8e..000000000 --- a/charts/fake-aws-sns/values.yaml +++ /dev/null @@ -1,26 +0,0 @@ -image: - repository: localstack/localstack - tag: 0.8.7 - -service: - internalPort: 4575 - externalPort: 4575 - -resources: - limits: - cpu: "200m" - memory: 500Mi - requests: - cpu: "100m" - memory: 100Mi - -applications: - - name: integration-test - platform: GCM - credential: testkey - - name: integration-test - platform: APNS_SANDBOX - credential: testprivatekey - - name: integration-com.wire.ent - platform: APNS_SANDBOX - credential: testprivatekey diff --git a/charts/fake-aws-sqs/Chart.yaml b/charts/fake-aws-sqs/Chart.yaml deleted file mode 100644 index d9ef7151e..000000000 --- a/charts/fake-aws-sqs/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Dummy ephemeral SQS service -name: fake-aws-sqs -version: 0.94.0 diff --git a/charts/fake-aws-sqs/templates/_helpers.tpl b/charts/fake-aws-sqs/templates/_helpers.tpl deleted file mode 100644 index 6ecbd30d5..000000000 --- a/charts/fake-aws-sqs/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 53 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 53 chars (63 - len("-discovery")) because some Kubernetes name fields are limited to 63 (by the DNS naming spec). -*/}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 53 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/fake-aws-sqs/templates/deployment.yaml b/charts/fake-aws-sqs/templates/deployment.yaml deleted file mode 100644 index 24055e126..000000000 --- a/charts/fake-aws-sqs/templates/deployment.yaml +++ /dev/null @@ -1,68 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "fullname" . }} - labels: - app: {{ template "fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - replicas: 1 - selector: - matchLabels: - app: {{ template "fullname" . }} - template: - metadata: - labels: - app: {{ template "fullname" . 
}} - spec: - containers: - - name: fake-aws-sqs - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - ports: - - containerPort: {{ .Values.service.httpPort }} - name: http - protocol: TCP - volumeMounts: - - name: storage - mountPath: /data - resources: -{{ toYaml .Values.resources | indent 12 }} - - name: initiate-fake-aws-sqs - image: mesosphere/aws-cli:1.14.5 - command: [/bin/sh] - args: - - -c - - | - exec_until_ready() { - until $1; do echo 'service not ready yet'; sleep 1; done - } - queue_exists() { - # NOTE: we use the '"' to match the queue name more exactly (otherwise there is some overlap) - OUTPUT=$(aws --endpoint-url=http://localhost:{{ $.Values.service.httpPort }} sqs list-queues | grep $1'"' | wc -l) - echo $OUTPUT - } - - echo 'Creating AWS resources' - aws configure set aws_access_key_id dummy - aws configure set aws_secret_access_key dummy - aws configure set region eu-west-1 - - while true - do - # Recreate resources if needed - {{ range $i, $queueName := .Values.queueNames }} - QUEUE=$(queue_exists "{{ $queueName }}") - if [ "$QUEUE" == "1" ] - then echo "Queue {{ $queueName }} exists, no need to re-create" - else exec_until_ready "aws --endpoint-url=http://localhost:{{ $.Values.service.httpPort }} sqs create-queue --queue-name {{ $queueName }}" - fi - {{ end }} - - echo 'Sleeping 10' - sleep 10 - done - volumes: - - emptyDir: {} - name: "storage" diff --git a/charts/fake-aws-sqs/templates/service.yaml b/charts/fake-aws-sqs/templates/service.yaml deleted file mode 100644 index ede3603a6..000000000 --- a/charts/fake-aws-sqs/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "fullname" . }} - labels: - app: {{ template "fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - type: ClusterIP - selector: - app: {{ template "fullname" . }} - ports: - - name: http - port: {{ .Values.service.httpPort }} - targetPort: {{ .Values.service.httpPort }} - protocol: TCP diff --git a/charts/fake-aws-sqs/values.yaml b/charts/fake-aws-sqs/values.yaml deleted file mode 100644 index 4f46cd50d..000000000 --- a/charts/fake-aws-sqs/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -image: - repository: airdock/fake-sqs - tag: 0.3.1 - -# TODO: in a wire-server chart, these queue names should match the ones defined in galley/brig/gundeck (i.e. only be defined once) -queueNames: - - "integration-team-events.fifo" - - "integration-brig-events" - - "integration-brig-events-internal" - - "integration-gundeck-events" - -service: - httpPort: 4568 - -resources: - limits: - cpu: "1000m" - memory: "1000Mi" - requests: - memory: "256Mi" - cpu: "100m" diff --git a/charts/fake-aws/.helmignore b/charts/fake-aws/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/fake-aws/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/fake-aws/Chart.yaml b/charts/fake-aws/Chart.yaml deleted file mode 100644 index dce68138e..000000000 --- a/charts/fake-aws/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart for fake-aws services (replacing real AWS services for demo and test) -name: fake-aws -version: 0.94.0 diff --git a/charts/fake-aws/requirements.yaml b/charts/fake-aws/requirements.yaml deleted file mode 100644 index 07b9879dd..000000000 --- a/charts/fake-aws/requirements.yaml +++ /dev/null @@ -1,44 +0,0 @@ -dependencies: -####################################### -## dependent (demo, non-HA) AWS mocks -####################################### -- name: fake-aws-sns - version: "0.94.0" - repository: "file://../fake-aws-sns" - condition: fake-aws-sns.enabled,global.fake-aws-sns.enabled - tags: - - fake-aws-sns - - aws-mocks - - demo -- name: fake-aws-sqs - version: "0.94.0" - repository: "file://../fake-aws-sqs" - condition: fake-aws-sqs.enabled,global.fake-aws-sqs.enabled - tags: - - fake-aws-sqs - - aws-mocks - - demo -- name: fake-aws-s3 - version: "0.94.0" - repository: "file://../fake-aws-s3" - condition: fake-aws-s3.enabled,global.fake-aws-s3.enabled - tags: - - fake-aws-s3 - - aws-mocks - - demo -- name: fake-aws-dynamodb - version: "0.94.0" - repository: "file://../fake-aws-dynamodb" - condition: fake-aws-dynamodb.enabled,global.fake-aws-dynamodb.enabled - tags: - - fake-aws-dynamodb - - aws-mocks - - demo -- name: fake-aws-ses - version: "0.94.0" - repository: "file://../fake-aws-ses" - condition: fake-aws-ses.enabled,global.fake-aws-ses.enabled - tags: - - fake-aws-ses - - aws-mocks - - demo diff --git a/charts/fake-aws/templates/NOTES.txt b/charts/fake-aws/templates/NOTES.txt deleted file mode 100644 index 38170b545..000000000 --- a/charts/fake-aws/templates/NOTES.txt +++ /dev/null @@ -1,30 +0,0 @@ -You can reach the fake AWS services at: - -{{- /* - -'index' is used to avoid - -[ERROR] templates/: parse error in "fake-aws/templates/NOTES.txt": template: fake-aws/templates/NOTES.txt:4: bad character U+002D '-' - -One needs to love helm chart inconsistencies like this one: https://github.com/helm/helm/issues/2192 - -*/ -}} - -{{- if index .Values "fake-aws-sns" "enabled" }} -SNS : http://fake-aws-sns:{{ index .Values "fake-aws-sns" "service" "externalPort" }} -{{- end }} -{{- if index .Values "fake-aws-sqs" "enabled" }} -SQS : http://fake-aws-sqs:{{ index .Values "fake-aws-sqs" "service" "httpPort" }} - queues: -{{ toYaml (index .Values "fake-aws-sqs" "queueNames") | indent 4 }} -{{- end }} -{{- if index .Values "fake-aws-s3" "enabled" }} -S3 : http://fake-aws-s3:9000 - bucket: {{ index .Values "fake-aws-s3" "minio" "defaultBucket" "name" }} -{{- end }} -{{- if index .Values "fake-aws-dynamodb" "enabled" }} -DYNAMODB : http://fake-aws-dynamodb:{{ index .Values "fake-aws-dynamodb" "service" "externalPort" }} -{{- end }} -{{- if index .Values "fake-aws-ses" "enabled" }} -SES : http://fake-aws-ses:{{ index .Values "fake-aws-ses" "service" "externalPort" }} -{{- end }} diff --git a/charts/fake-aws/values.yaml b/charts/fake-aws/values.yaml deleted file mode 100644 index f5626af27..000000000 --- a/charts/fake-aws/values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Default values for fake-aws - -# Set enabled: false to not install that subchart. 
- -fake-aws-sqs: - enabled: true -fake-aws-sns: - enabled: true -fake-aws-s3: - enabled: true -fake-aws-dynamodb: - enabled: true -fake-aws-ses: - enabled: false diff --git a/charts/fluent-bit/Chart.yaml b/charts/fluent-bit/Chart.yaml deleted file mode 100644 index 44fd27250..000000000 --- a/charts/fluent-bit/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Wrapper chart for stable/fluent-bit -name: fluent-bit -version: 0.94.0 diff --git a/charts/fluent-bit/requirements.yaml b/charts/fluent-bit/requirements.yaml deleted file mode 100644 index 52368c82f..000000000 --- a/charts/fluent-bit/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: -- name: fluent-bit - version: 2.7.0 - repository: https://kubernetes-charts.storage.googleapis.com diff --git a/charts/fluent-bit/values.yaml b/charts/fluent-bit/values.yaml deleted file mode 100644 index 2a31dcf27..000000000 --- a/charts/fluent-bit/values.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# See defaults in https://github.com/helm/charts/tree/master/stable/fluent-bit -fluent-bit: - backend: - type: es - es: - host: elasticsearch-ephemeral - parsers: - enabled: true - regex: - - name: nginz - regex: '^(?[^ ]*) (?[^ ]*) "(?[0-9\/a-zA-Z:]* [+][0-9]*)" "(?.*)" (?[0-9]*) (?[0-9]*) "(?[^ ])" "(?.*)" (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[a-z0-9]*)' diff --git a/charts/galley/Chart.yaml b/charts/galley/Chart.yaml deleted file mode 100644 index 9852af111..000000000 --- a/charts/galley/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Galley (part of Wire Server) - Conversations -name: galley -version: 0.94.0 diff --git a/charts/galley/templates/configmap.yaml b/charts/galley/templates/configmap.yaml deleted file mode 100644 index 7e0eca8df..000000000 --- a/charts/galley/templates/configmap.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: galley -data: - {{- with .Values.config }} - galley.yaml: | - logNetStrings: True # log using netstrings encoding: - # http://cr.yp.to/proto/netstrings.txt - logLevel: {{ .logLevel }} - logFormat: {{ .logFormat }} - - galley: - host: 0.0.0.0 - port: 8080 - - cassandra: - endpoint: - host: {{ .cassandra.host }} - port: 9042 - keyspace: galley - - brig: - host: brig - port: 8080 - - gundeck: - host: gundeck - port: 8080 - - spar: - host: spar - port: 8080 - - {{- if (.journal) }} - journal: - queueName: {{ .journal.queue }} - endpoint: {{ .journal.endpoint }} - {{- end }} - - settings: - httpPoolSize: 128 - intraListing: false - maxTeamSize: {{ .settings.maxTeamSize }} - maxConvSize: {{ .settings.maxConvSize }} - {{- if .settings.maxFanoutSize }} - maxFanoutSize: {{ .settings.maxFanoutSize }} - {{- end }} - conversationCodeURI: {{ .settings.conversationCodeURI | quote }} - {{- if .settings.featureFlags }} - featureFlags: - sso: {{ .settings.featureFlags.sso }} - legalhold: {{ .settings.featureFlags.legalhold }} - {{- end }} - {{- end }} diff --git a/charts/galley/templates/deployment.yaml b/charts/galley/templates/deployment.yaml deleted file mode 100644 index 9f38c9a4d..000000000 --- a/charts/galley/templates/deployment.yaml +++ /dev/null @@ -1,92 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: galley - labels: - wireService: galley - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - replicas: {{ .Values.replicaCount }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 0 - maxSurge: {{ 
.Values.replicaCount }} - selector: - matchLabels: - wireService: galley - template: - metadata: - labels: - wireService: galley - release: {{ .Release.Name }} - annotations: - # An annotation of the configmap checksum ensures changes to the configmap cause a redeployment upon `helm upgrade` - checksum/configmap: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/secret: {{ include (print .Template.BasePath "/secret.yaml") . | sha256sum }} - spec: - volumes: - - name: "galley-config" - configMap: - name: "galley" - - name: "galley-secrets" - secret: - secretName: "galley" - containers: - - name: galley - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} - volumeMounts: - - name: "galley-secrets" - mountPath: "/etc/wire/galley/secrets" - - name: "galley-config" - mountPath: "/etc/wire/galley/conf" - env: - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: galley - key: awsKeyId - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: galley - key: awsSecretKey - - name: AWS_REGION - value: "{{ .Values.config.aws.region }}" - {{- with .Values.config.proxy }} - {{- if .httpProxy }} - - name: http_proxy - value: {{ .httpProxy | quote }} - - name: HTTP_PROXY - value: {{ .httpProxy | quote }} - {{- end }} - {{- if .httpsProxy }} - - name: https_proxy - value: {{ .httpsProxy | quote }} - - name: HTTPS_PROXY - value: {{ .httpsProxy | quote }} - {{- end }} - {{- if .noProxyList }} - - name: no_proxy - value: {{ join "," .noProxyList | quote }} - - name: NO_PROXY - value: {{ join "," .noProxyList | quote }} - {{- end }} - {{- end }} - ports: - - containerPort: {{ .Values.service.internalPort }} - livenessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - readinessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - resources: -{{ toYaml .Values.resources | indent 12 }} diff --git a/charts/galley/templates/secret.yaml b/charts/galley/templates/secret.yaml deleted file mode 100644 index 0579bd5e4..000000000 --- a/charts/galley/templates/secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: galley - labels: - app: galley - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: Opaque -data: - {{- with .Values.secrets }} - awsKeyId: {{ .awsKeyId | b64enc | quote }} - awsSecretKey: {{ .awsSecretKey | b64enc | quote }} - {{- end }} diff --git a/charts/galley/templates/service.yaml b/charts/galley/templates/service.yaml deleted file mode 100644 index 805ea9a89..000000000 --- a/charts/galley/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: galley - labels: - wireService: galley - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - selector: - wireService: galley - release: {{ .Release.Name }} diff --git a/charts/galley/templates/tests/configmap.yaml b/charts/galley/templates/tests/configmap.yaml deleted file mode 100644 index b55bcd448..000000000 --- a/charts/galley/templates/tests/configmap.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "galley-integration" -data: - 
integration.yaml: | - galley: - host: galley - port: {{ .Values.service.internalPort }} - - brig: - host: brig - port: 8080 - - cannon: - host: cannon - port: 8080 - - provider: - privateKey: /etc/wire/integration-secrets/provider-privatekey.pem - publicKey: /etc/wire/integration-secrets/provider-publickey.pem - cert: /etc/wire/integration-secrets/provider-publiccert.pem - botHost: https://galley-integration - botPort: 9000 diff --git a/charts/galley/templates/tests/galley-integration.yaml b/charts/galley/templates/tests/galley-integration.yaml deleted file mode 100644 index 33034050b..000000000 --- a/charts/galley/templates/tests/galley-integration.yaml +++ /dev/null @@ -1,58 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: "galley-integration" - labels: - wireService: galley-integration - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - ports: - - port: 9000 - targetPort: 9000 - selector: - wireService: galley-integration - release: {{ .Release.Name }} ---- -apiVersion: v1 -kind: Pod -metadata: - name: "{{ .Release.Name }}-galley-integration" - annotations: - "helm.sh/hook": test-success - labels: - wireService: galley-integration - release: {{ .Release.Name }} -spec: - volumes: - - name: "galley-integration" - configMap: - name: "galley-integration" - - name: "galley" - configMap: - name: "galley" - - name: "galley-integration-secrets" - configMap: - name: "galley-integration-secrets" - containers: - - name: integration - image: "{{ .Values.image.repository }}-integration:{{ .Values.image.tag }}" - volumeMounts: - - name: "galley-integration" - mountPath: "/etc/wire/integration" - - name: "galley" - mountPath: "/etc/wire/galley/conf" - - name: "galley-integration-secrets" - # TODO: see corresp. TODO in brig. 
- mountPath: "/etc/wire/integration-secrets" - env: - # these dummy values are necessary for Amazonka's "Discover" - - name: AWS_ACCESS_KEY_ID - value: "dummy" - - name: AWS_SECRET_ACCESS_KEY - value: "dummy" - - name: AWS_REGION - value: "eu-west-1" - restartPolicy: Never diff --git a/charts/galley/templates/tests/secret.yaml b/charts/galley/templates/tests/secret.yaml deleted file mode 100644 index 74f118d1c..000000000 --- a/charts/galley/templates/tests/secret.yaml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: galley-integration-secrets -data: - # These "secrets" are only used in tests and are therefore safe to be stored unencrypted - provider-privatekey.pem: | - -----BEGIN RSA PRIVATE KEY----- - MIIEpAIBAAKCAQEAu+Kg/PHHU3atXrUbKnw0G06FliXcNt3lMwl2os5twEDcPPFw - /feGiAKymxp+7JqZDrseS5D9THGrW+OQRIPHWvUBdiLfGrZqJO223DB6D8K2Su/o - dmnjZJ2z23rhXoEArTplu+Dg9K+c2LVeXTKVVPOaOzgtAB21XKRiQ4ermqgi3/nj - r03rXyq/qNkuNd6tNcg+HAfGxfGvvCSYBfiSbUKr/BeArYRcjzr/h5m1In6fG/if - 9GEI6m8dxHT9JbY53wiksowy6ajCuqskIFg87X883H+LA/d6X5CTiPv1VMxXdBUi - GPuC9IT/6CNQ1/LFt0P37ax58+LGYlaFo7lanQIDAQABAoIBAQC0doVy7zgpLsBv - Sz0AnbPe1pjxEwRlntRbJSfSULySALqJvs5s4adSVGUBHX3z/LousAP1SRpCppuU - 8wrLBFgjQVlaAzyQB84EEl+lNtrG8Jrvd2es9R/4sJDkqy50+yuPN5wnzWPFIjhg - 3jP5CHDu29y0LMzsY5yjkzDe9B0bueXEZVU+guRjhpwHHKOFeAr9J9bugFUwgeAr - jF0TztzFAb0fsUNPiQAho1J5PyjSVgItaPfAPv/p30ROG+rz+Rd5NSSvBC5F+yOo - azb84zzwCg/knAfIz7SOMRrmBh2qhGZFZ8gXdq65UaYv+cpT/qo28mpAT2vOkyeD - aPZp0ysBAoGBAOQROoDipe/5BTHBcXYuUE1qa4RIj3wgql5I8igXr4K6ppYBmaOg - DL2rrnqD86chv0P4l/XOomKFwYhVGXtqRkeYnk6mQXwNVkgqcGbY5PSNyMg5+ekq - jSOOPHGzzTWKzYuUDUpB/Lf6jbTv8fq2GYW3ZYiqQ/xiugOvglZrTE7NAoGBANLl - irjByfxAWGhzCrDx0x5MBpsetadI9wUA8u1BDdymsRg73FDn3z7NipVUAMDXMGVj - lqbCRlHESO2yP4GaPEA4FM+MbTZSuhAYV+SY07mEPLHF64/nJas83Zp91r5rhaqJ - L9rWCl3KJ5OUnr3YizCnHIW72FxjwtpjxHJLupsRAoGAGIbhy8qUHeKh9F/hW9xP - NoQjW+6Rv7+jktA1eqpRbbW1BJzXcQldVWiJMxPNuEOg1iZ98SlvvTi1P3wnaWZc - eIapP7wRfs3QYaJuxCC/Pq2g0ieqALFazGAXkALOJtvujvw1Ea9XBlIjuzmyxEuh - Iwg+Gxx0g0f6yTquwax4YGECgYEAnpAK3qKFNO1ECzQDo8oNy0ep59MNDPtlDhQK - katJus5xdCD9oq7TQKrVOTTxZAvmzTQ1PqfuqueDVYOhD9Zg2n/P1cRlEGTek99Z - pfvppB/yak6+r3FA9yBKFS/r1zuMQg3nNweav62QV/tz5pT7AdeDMGFtaPlwtTYx - qyWY5aECgYBPySbPccNj+xxQzxcti2y/UXjC04RgOA/Hm1D0exa0vBqS9uxlOdG8 - F47rKenpBrslvdfTVsCDB1xyP2ebWVzp6EqMycw6OLPxgo3fBfZ4pi6P+rByh0Cc - Lhfh+ET0CPnKCxtop3lUrn4ZvqchS0j3J+M0pDuqoWF5hfKxFhkEIw== - -----END RSA PRIVATE KEY----- - provider-publickey.pem: | - -----BEGIN PUBLIC KEY----- - MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu+Kg/PHHU3atXrUbKnw0 - G06FliXcNt3lMwl2os5twEDcPPFw/feGiAKymxp+7JqZDrseS5D9THGrW+OQRIPH - WvUBdiLfGrZqJO223DB6D8K2Su/odmnjZJ2z23rhXoEArTplu+Dg9K+c2LVeXTKV - VPOaOzgtAB21XKRiQ4ermqgi3/njr03rXyq/qNkuNd6tNcg+HAfGxfGvvCSYBfiS - bUKr/BeArYRcjzr/h5m1In6fG/if9GEI6m8dxHT9JbY53wiksowy6ajCuqskIFg8 - 7X883H+LA/d6X5CTiPv1VMxXdBUiGPuC9IT/6CNQ1/LFt0P37ax58+LGYlaFo7la - nQIDAQAB - -----END PUBLIC KEY----- - provider-publiccert.pem: | - -----BEGIN CERTIFICATE----- - MIIDdjCCAl4CCQCm0AiwERR/qjANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJE - RTEPMA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xGDAWBgNVBAoMD1dp - cmUgU3dpc3MgR21iSDERMA8GA1UEAwwId2lyZS5jb20xHzAdBgkqhkiG9w0BCQEW - EGJhY2tlbmRAd2lyZS5jb20wHhcNMTYwODA0MTMxNDQyWhcNMzYwNzMwMTMxNDQy - WjB9MQswCQYDVQQGEwJERTEPMA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJs - aW4xGDAWBgNVBAoMD1dpcmUgU3dpc3MgR21iSDERMA8GA1UEAwwId2lyZS5jb20x - HzAdBgkqhkiG9w0BCQEWEGJhY2tlbmRAd2lyZS5jb20wggEiMA0GCSqGSIb3DQEB - AQUAA4IBDwAwggEKAoIBAQC74qD88cdTdq1etRsqfDQbToWWJdw23eUzCXaizm3A - 
QNw88XD994aIArKbGn7smpkOux5LkP1Mcatb45BEg8da9QF2It8atmok7bbcMHoP - wrZK7+h2aeNknbPbeuFegQCtOmW74OD0r5zYtV5dMpVU85o7OC0AHbVcpGJDh6ua - qCLf+eOvTetfKr+o2S413q01yD4cB8bF8a+8JJgF+JJtQqv8F4CthFyPOv+HmbUi - fp8b+J/0YQjqbx3EdP0ltjnfCKSyjDLpqMK6qyQgWDztfzzcf4sD93pfkJOI+/VU - zFd0FSIY+4L0hP/oI1DX8sW3Q/ftrHnz4sZiVoWjuVqdAgMBAAEwDQYJKoZIhvcN - AQELBQADggEBAEuwlHElIGR56KVC1dJiw238mDGjMfQzSP76Wi4zWS6/zZwJUuog - BkC+vacfju8UAMvL+vdqkjOVUHor84/2wuq0qn91AjOITD7tRAZB+XLXxsikKv/v - OXE3A/lCiNi882NegPyXAfFPp/71CIiTQZps1eQkAvhD5t5WiFYPESxDlvEJrHFY - XP4+pp8fL8YPS7iZNIq+z+P8yVIw+B/Hs0ht7wFIYN0xACbU8m9+Rs08JMoT16c+ - hZMuK3BWD3fzkQVfW0yMwz6fWRXB483ZmekGkgndOTDoJQMdJXZxHpI3t2FcxQYj - T45GXxRd18neXtuYa/OoAw9UQFDN5XfXN0g= - -----END CERTIFICATE----- diff --git a/charts/galley/values.yaml b/charts/galley/values.yaml deleted file mode 100644 index a96ca39c6..000000000 --- a/charts/galley/values.yaml +++ /dev/null @@ -1,30 +0,0 @@ -replicaCount: 3 -image: - repository: quay.io/wire/galley - tag: 2.78.0 - schemaRepository: quay.io/wire/galley-schema -service: - externalPort: 8080 - internalPort: 8080 -resources: - requests: - memory: "256Mi" - cpu: "100m" - limits: - memory: "512Mi" - cpu: "500m" -config: - logLevel: Info - logFormat: JSON - cassandra: - host: aws-cassandra - replicaCount: 3 - settings: - maxTeamSize: 500 - maxConvSize: 500 - featureFlags: # see #RefConfigOptions in `/docs/reference` (https://github.com/wireapp/wire-server/) - sso: disabled-by-default - legalhold: disabled-by-default - aws: - region: "eu-west-1" - proxy: {} \ No newline at end of file diff --git a/charts/gundeck/.helmignore b/charts/gundeck/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/gundeck/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/gundeck/Chart.yaml b/charts/gundeck/Chart.yaml deleted file mode 100644 index 7d1a07bea..000000000 --- a/charts/gundeck/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Gundeck (part of Wire Server) - Push Notification Hub Service -name: gundeck -version: 0.94.0 diff --git a/charts/gundeck/README.md b/charts/gundeck/README.md deleted file mode 100644 index eafa3c5bc..000000000 --- a/charts/gundeck/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Note that gundeck depends on some provisioned storage, namely: - -- cassandra-all -- redis-gundeck - -These are dealt with independently from this chart. Ensure the `config.redis.host` and `config.cassandra.host` point to valid dns names. 
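For illustration, a minimal values override wiring gundeck to that storage could look like the sketch below (the hostnames are placeholders; the `config.cassandra.host` and `config.redis.host` keys are the ones from this chart's values.yaml further down):

```
# example gundeck override, e.g. `helm upgrade ... -f gundeck-values.yaml`
config:
  cassandra:
    host: cassandra-external   # a DNS name resolvable from inside the cluster
  redis:
    host: redis-gundeck        # the provisioned redis, see README above
    port: 6379
```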
diff --git a/charts/gundeck/templates/configmap.yaml b/charts/gundeck/templates/configmap.yaml deleted file mode 100644 index 804257472..000000000 --- a/charts/gundeck/templates/configmap.yaml +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "gundeck" -data: - {{- with .Values.config }} - gundeck.yaml: | - logNetStrings: True # log using netstrings encoding: http://cr.yp.to/proto/netstrings.txt - logLevel: {{ .logLevel }} - - gundeck: - host: 0.0.0.0 - port: {{ $.Values.service.internalPort }} - - cassandra: - endpoint: - host: {{ .cassandra.host }} - port: 9042 - keyspace: gundeck - - redis: - host: {{ .redis.host }} - port: {{ .redis.port }} - - # Gundeck uses discovery for AWS access key / secrets - # For more details, check amazonka's documentation at: - # https://hackage.haskell.org/package/amazonka-1.4.5/docs/Network-AWS.html#t:Credentials - # later we should have a look at https://github.com/jtblin/kube2iam - {{- with .aws }} - aws: - queueName: {{ .queueName }} - region: {{ .region }} - account: {{ .account | quote }} - arnEnv: {{ .arnEnv }} - sqsEndpoint: {{ .sqsEndpoint | quote }} - snsEndpoint: {{ .snsEndpoint | quote }} - connectionLimit: 256 - {{- end }} - - settings: - httpPoolSize: 1024 - notificationTTL: 2419200 - bulkPush: {{ .bulkPush }} - maxConcurrentNativePushes: - soft: 1000 - # hard: 30 # more than this number of threads will not be allowed - {{- end }} diff --git a/charts/gundeck/templates/deployment.yaml b/charts/gundeck/templates/deployment.yaml deleted file mode 100644 index 6bf4104a4..000000000 --- a/charts/gundeck/templates/deployment.yaml +++ /dev/null @@ -1,92 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: gundeck - labels: - wireService: gundeck - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - replicas: {{ .Values.replicaCount }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 0 - maxSurge: {{ .Values.replicaCount }} - selector: - matchLabels: - wireService: gundeck - template: - metadata: - labels: - wireService: gundeck - release: {{ .Release.Name }} - annotations: - # An annotation of the configmap checksum ensures changes to the configmap cause a redeployment upon `helm upgrade` - checksum/configmap: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/secret: {{ include (print .Template.BasePath "/secret.yaml") . 
| sha256sum }} - spec: - volumes: - - name: "gundeck-config" - configMap: - name: "gundeck" - - name: "gundeck-secrets" - secret: - secretName: "gundeck" - containers: - - name: gundeck - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} - volumeMounts: - - name: "gundeck-secrets" - mountPath: "/etc/wire/gundeck/secrets" - - name: "gundeck-config" - mountPath: "/etc/wire/gundeck/conf" - env: - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: gundeck - key: awsKeyId - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: gundeck - key: awsSecretKey - - name: AWS_REGION - value: "{{ .Values.config.aws.region }}" - {{- with .Values.config.proxy }} - {{- if .httpProxy }} - - name: http_proxy - value: {{ .httpProxy | quote }} - - name: HTTP_PROXY - value: {{ .httpProxy | quote }} - {{- end }} - {{- if .httpsProxy }} - - name: https_proxy - value: {{ .httpsProxy | quote }} - - name: HTTPS_PROXY - value: {{ .httpsProxy | quote }} - {{- end }} - {{- if .noProxyList }} - - name: no_proxy - value: {{ join "," .noProxyList | quote }} - - name: NO_PROXY - value: {{ join "," .noProxyList | quote }} - {{- end }} - {{- end }} - ports: - - containerPort: {{ .Values.service.internalPort }} - livenessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - readinessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - resources: -{{ toYaml .Values.resources | indent 12 }} diff --git a/charts/gundeck/templates/secret.yaml b/charts/gundeck/templates/secret.yaml deleted file mode 100644 index 2a90daca9..000000000 --- a/charts/gundeck/templates/secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: gundeck - labels: - wireService: gundeck - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: Opaque -data: - {{- with .Values.secrets }} - awsKeyId: {{ .awsKeyId | b64enc | quote }} - awsSecretKey: {{ .awsSecretKey | b64enc | quote }} - {{- end }} diff --git a/charts/gundeck/templates/service.yaml b/charts/gundeck/templates/service.yaml deleted file mode 100644 index 14921fc65..000000000 --- a/charts/gundeck/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: gundeck - labels: - wireService: gundeck - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - selector: - wireService: gundeck - release: {{ .Release.Name }} diff --git a/charts/gundeck/templates/tests/configmap.yaml b/charts/gundeck/templates/tests/configmap.yaml deleted file mode 100644 index ccb551e5f..000000000 --- a/charts/gundeck/templates/tests/configmap.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "gundeck-integration" -data: - integration.yaml: | - gundeck: - host: gundeck - port: {{ .Values.service.internalPort }} - - cannon: - host: cannon - port: 8080 - - cannon2: - # some gundeck integration tests make use of two different - # cannon instances to test the distributed case. when running - # the integration tests locally, the two instances will be spun - # up separately (see `wire-server/services/integration.sh`). 
- # - # here, we spin up two replicas, provide the integration tests - # with the same service coordinates, and rely on the k8s load - # balancer to give us both replicas in at least some cases. - # this would be enough to make the test meaningful. - # - # alternatively (eg. if we have more involved integration tests - # that expect ca and ca2 to deterministically refer to specific - # replicas), we could enter the IP addresses of the replicas - # here. those are available from `kubectl get endpoints`, but - # how do we inject that info into the config file on time for - # the gundeck integration tests to find them there? - host: cannon - port: 8080 - - brig: - host: brig - port: 8080 diff --git a/charts/gundeck/templates/tests/gundeck-integration.yaml b/charts/gundeck/templates/tests/gundeck-integration.yaml deleted file mode 100644 index 8424fd377..000000000 --- a/charts/gundeck/templates/tests/gundeck-integration.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ .Release.Name }}-gundeck-integration" - annotations: - "helm.sh/hook": test-success -spec: - volumes: - - name: "gundeck-integration" - configMap: - name: "gundeck-integration" - # Used to read some values from the gundeck service - - name: "gundeck-config" - configMap: - name: "gundeck" - containers: - - name: integration - # TODO: When deployed to staging (or real AWS env), _all_ tests should be run - command: ["gundeck-integration", "--pattern", "!/RealAWS/"] - image: "{{ .Values.image.repository }}-integration:{{ .Values.image.tag }}" - volumeMounts: - - name: "gundeck-integration" - mountPath: "/etc/wire/integration" - - name: "gundeck-config" - mountPath: "/etc/wire/gundeck/conf" - env: - # these dummy values are necessary for Amazonka's "Discover" - - name: AWS_ACCESS_KEY_ID - value: "dummy" - - name: AWS_SECRET_ACCESS_KEY - value: "dummy" - - name: AWS_REGION - value: "eu-west-1" - restartPolicy: Never diff --git a/charts/gundeck/values.yaml b/charts/gundeck/values.yaml deleted file mode 100644 index b638f6a66..000000000 --- a/charts/gundeck/values.yaml +++ /dev/null @@ -1,25 +0,0 @@ -replicaCount: 3 -image: - repository: quay.io/wire/gundeck - tag: 2.78.0 -service: - externalPort: 8080 - internalPort: 8080 -resources: - requests: - memory: "256Mi" - cpu: "100m" - limits: - memory: "512Mi" - cpu: "500m" -config: - logLevel: Info - cassandra: - host: aws-cassandra - redis: - host: redis-ephemeral - port: 6379 - bulkPush: false - aws: - region: "eu-west-1" - proxy: {} \ No newline at end of file diff --git a/charts/kibana/Chart.yaml b/charts/kibana/Chart.yaml deleted file mode 100644 index d7470a575..000000000 --- a/charts/kibana/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Wrapper chart for stable/kibana -name: kibana -version: 0.94.0 diff --git a/charts/kibana/requirements.yaml b/charts/kibana/requirements.yaml deleted file mode 100644 index 6ad2ebe5d..000000000 --- a/charts/kibana/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: -- name: kibana - version: 2.2.0 - repository: https://kubernetes-charts.storage.googleapis.com diff --git a/charts/kibana/values.yaml b/charts/kibana/values.yaml deleted file mode 100644 index 86bd5feae..000000000 --- a/charts/kibana/values.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# See defaults in https://github.com/helm/charts/tree/master/stable/kibana -kibana: - env: - # All Kibana configuration options are adjustable via env vars. 
- # To map a config option to an env var, uppercase it and replace `.` with `_` - # Ref: https://www.elastic.co/guide/en/kibana/current/settings.html - # - ELASTICSEARCH_URL: http://elasticsearch-ephemeral:9200 - files: - kibana.yml: - elasticsearch.url: http://elasticsearch-ephemeral:9200
diff --git a/charts/metallb/.helmignore b/charts/metallb/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/metallb/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj
diff --git a/charts/metallb/Chart.yaml b/charts/metallb/Chart.yaml deleted file mode 100644 index 16df782c7..000000000 --- a/charts/metallb/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart for metallb on Kubernetes -name: metallb -version: 0.94.0
diff --git a/charts/metallb/README.md b/charts/metallb/README.md deleted file mode 100644 index cdc42403c..000000000 --- a/charts/metallb/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# IMPORTANT
# -# Make sure you have a SINGLE metallb instance across the whole cluster; do not create multiple of these. -# Have a read of the warning here: https://metallb.universe.tf/installation/ -# -# You only need to adjust values to the available IP address range and run -# -# helm upgrade --install --namespace metallb-system metallb charts/metallb \ -# -f values/metallb/values.yaml --wait --timeout 1800 -#
diff --git a/charts/metallb/requirements.yaml b/charts/metallb/requirements.yaml deleted file mode 100644 index db29ea58c..000000000 --- a/charts/metallb/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: -- name: metallb - version: 0.8.0 - repository: https://kubernetes-charts.storage.googleapis.com
diff --git a/charts/metallb/templates/configmap.yaml b/charts/metallb/templates/configmap.yaml deleted file mode 100644 index 60a306d83..000000000 --- a/charts/metallb/templates/configmap.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: metallb-system - name: metallb-config -data: - config: | - address-pools: - - name: my-ip-space - protocol: layer2 - addresses: - {{- range .Values.cidrAddresses }} - - {{ . 
}} - {{- end }} diff --git a/charts/metallb/values.yaml b/charts/metallb/values.yaml deleted file mode 100644 index b130d6223..000000000 --- a/charts/metallb/values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Adjust here the IP addresses that the LB can manage -# Note that there is no "validation" of these addresses -# so if you can bind to such addresses your services -# will be allocated bogus external IP addresses - -# cidrAddresses: -# - 10.0.0.0/32 diff --git a/charts/minio-external/Chart.yaml b/charts/minio-external/Chart.yaml deleted file mode 100644 index f10a35a8c..000000000 --- a/charts/minio-external/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Refer to minio IPs located outside kubernetes by specifying IPs manually -name: minio-external -version: 0.94.0 diff --git a/charts/minio-external/templates/endpoint.yaml b/charts/minio-external/templates/endpoint.yaml deleted file mode 100644 index 5cd7a1307..000000000 --- a/charts/minio-external/templates/endpoint.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# create a headless service (thus creating dns name "minio-external") -# and a custom endpoint (thus forwarding traffic when resolving DNS to custom IPs) -kind: Service -apiVersion: v1 -metadata: - name: {{ .Chart.Name }} - labels: - app: {{ .Chart.Name }} - chart: {{ template "minio-external.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - clusterIP: None # create a headless service, we want no extra load balancing for minio - ports: - - name: minio - port: {{ .Values.portHttp }} - targetPort: {{ .Values.portHttp }} ---- -kind: Endpoints -apiVersion: v1 -metadata: - name: {{ .Chart.Name }} - labels: - app: {{ .Chart.Name }} - chart: {{ template "minio-external.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -subsets: - - addresses: - {{- range .Values.IPs }} - - ip: {{ . }} - {{- end }} - ports: - # port and name in the endpoint must match port and name in the service - # see also https://docs.openshift.com/enterprise/3.0/dev_guide/integrating_external_services.html - - name: minio - port: {{ .Values.portHttp }} diff --git a/charts/minio-external/templates/helpers.tpl b/charts/minio-external/templates/helpers.tpl deleted file mode 100644 index f2870cc05..000000000 --- a/charts/minio-external/templates/helpers.tpl +++ /dev/null @@ -1,11 +0,0 @@ -{{- define "minio-external.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "minio-external.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/minio-external/values.yaml b/charts/minio-external/values.yaml deleted file mode 100644 index ab3719884..000000000 --- a/charts/minio-external/values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -portHttp: 9000 - -## Configure this helm chart with: -# IPs: -# - 1.2.3.4 -# - 5.6.7.8 diff --git a/charts/nginx-ingress-controller/.helmignore b/charts/nginx-ingress-controller/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/nginx-ingress-controller/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj
diff --git a/charts/nginx-ingress-controller/Chart.yaml b/charts/nginx-ingress-controller/Chart.yaml deleted file mode 100644 index bb67da0c1..000000000 --- a/charts/nginx-ingress-controller/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart for an ingress controller (using nginx) on Kubernetes -name: nginx-ingress-controller -version: 0.94.0
diff --git a/charts/nginx-ingress-controller/README.md b/charts/nginx-ingress-controller/README.md deleted file mode 100644 index 11a850804..000000000 --- a/charts/nginx-ingress-controller/README.md +++ /dev/null @@ -1,5 +0,0 @@ -This deploys a single ingress controller; ideally, you want this in a separate, shared namespace, since controllers listen on all namespaces by default (you can also modify that, but it's generally discouraged). - -It is mostly a wrapper around the [nginx-ingress](https://github.com/helm/charts/blob/master/stable/nginx-ingress/README.md) chart, with some defaults that make sense for our use case(s). - -For more options, have a look at [nginx-ingress](https://github.com/helm/charts/blob/master/stable/nginx-ingress/README.md) and other overrides that may be useful for your use case.
diff --git a/charts/nginx-ingress-controller/requirements.yaml b/charts/nginx-ingress-controller/requirements.yaml deleted file mode 100644 index 280bf1ca7..000000000 --- a/charts/nginx-ingress-controller/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: -- name: nginx-ingress - version: 1.33.3 - repository: https://kubernetes-charts.storage.googleapis.com
diff --git a/charts/nginx-ingress-controller/templates/_helpers.tpl b/charts/nginx-ingress-controller/templates/_helpers.tpl deleted file mode 100644 index f51004df8..000000000 --- a/charts/nginx-ingress-controller/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "nginx-ingress-controller.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "nginx-ingress-controller.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}}
diff --git a/charts/nginx-ingress-controller/values.yaml b/charts/nginx-ingress-controller/values.yaml deleted file mode 100644 index ce3178cbf..000000000 --- a/charts/nginx-ingress-controller/values.yaml +++ /dev/null @@ -1,89 +0,0 @@ -# Default values for nginx-ingress-controller - -nginx-ingress: - controller: - config: - # NOTE: These are some sane defaults, you may want to override them on your own installation - ssl-ciphers: "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256" - http2-max-field-size: 16k - http2-max-header-size: 32k - proxy-buffer-size: 16k - proxy-body-size: 1024m
# Normally, NodePort listens for traffic on all nodes and uses kube-proxy -# to redirect to the node that actually runs nginx-ingress-controller. However, -# one problem with this is that this traffic is NAT'ed. This means that nginx -# will not have access to the source IP address from which the request -# originated. We want to have this source IP address for potentially logging -# and rate-limiting based on it. By setting externalTrafficPolicy: local, -# nodes will no longer forward requests to other nodes if they receive a -# request that they themselves cannot handle. The upside is that the traffic is -# no longer NAT'ed, and we get access to the source IP address. The downside -# is that you need to know beforehand which nodes run a certain pod. However, -# with Kubernetes a pod can be rescheduled to any node at any time, so we can -# not trust this. We could do something with node affinities to decide a priori -# on what set of nodes will be publicly reachable and make sure the nginx -# controller pods are only run there, but for now that sounds a bit overkill. -# Instead, we simply run the ingress controller on each node using a -# daemonset. This means that any node in the cluster can receive requests and -# redirect them to the correct service, whilst maintaining the source IP -# address. The ingress controller effectively takes over the role that -# kube-proxy played before. -# More information: -# https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport -# https://kubernetes.github.io/ingress-nginx/deploy/baremetal/ -# -# There are also downsides to setting externalTrafficPolicy: Local. Please -# look at the following blog post, which very clearly explains the upsides and -# downsides of this setting: -# https://www.asykim.com/blog/deep-dive-into-kubernetes-external-traffic-policies - kind: DaemonSet - # By default, each node will now be configured to accept ingress traffic. You should add - # all the nodes to your external load balancer, or add them to DNS records. - # - # You can also decide to only run the nginx controller on a subset of worker - # nodes in your cluster. This is especially useful in large clusters, where you - # probably don't want all worker nodes reachable from the internet or behind - # your company's load balancer. Instead you probably have a subset of nodes - # that are able to receive traffic from the outside world. - # - # You can set node labels in an ad-hoc way with kubectl: - # $ kubectl label nodes mynode wire.com/role=ingress - # - # Or in Ansible you can set the `node_labels` variable in a declarative way: - # https://github.com/kubernetes-sigs/kubespray/blob/master/docs/vars.md#other-service-variables - # - # In your inventory file you could for example set: - # - # [ingress] - # mynode1 - # mynode2 - # - # [ingress:vars] - # node_labels = "wire.com/role=ingress" - # - # If you have labelled the nodes that are fit for receiving ingress, you - # can uncomment the following nodeSelector to make sure that only those - # nodes actually have the ingress controller running: - # - # nodeSelector: - # wire.com/role: ingress - service: - # If your Kubernetes installation has support for LoadBalancers, set - # type: LoadBalancer - # - # This will then automatically add and remove healthy / unhealthy nodes - # from the set of servers that should receive traffic, and if you - # add a new node to the cluster, it will automatically be added to the - # LoadBalancer. If you set it to NodePort, then you need to manually add - # the node IP addresses to the external load balancer when you add nodes, - # and remove them when you remove nodes. - type: NodePort # or LoadBalancer - externalTrafficPolicy: Local - nodePorts: - # The nginx instance is exposed on ports 31773 (https) and 31772 (http) - # on the node on which it runs. You should add a port-forwarding rule - # on the node or on the loadbalancer that forwards ports 443 and 80 to - # these respective ports. See ansible/iptables.yml for how to do this with - # Ansible and iptables. - https: 31773 - http: 31772
diff --git a/charts/nginx-ingress-services/.helmignore b/charts/nginx-ingress-services/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/nginx-ingress-services/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj
diff --git a/charts/nginx-ingress-services/Chart.yaml b/charts/nginx-ingress-services/Chart.yaml deleted file mode 100644 index c67667835..000000000 --- a/charts/nginx-ingress-services/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart for ingresses and services on Kubernetes -name: nginx-ingress-services -version: 0.94.0
diff --git a/charts/nginx-ingress-services/README.md b/charts/nginx-ingress-services/README.md deleted file mode 100644 index 565aab0ff..000000000 --- a/charts/nginx-ingress-services/README.md +++ /dev/null @@ -1,29 +0,0 @@ -This helm chart is a helper to set up the needed services, ingresses, and (likely) secrets to access your cluster. -It will _NOT_ deploy an ingress controller! Ensure you already have one on your cluster - or have a look at our [nginx-ingress-controller](../nginx-ingress-controller/README.md). - -If tls.enabled == true, then you need to supply two variables, `tlsWildcardCert` and `tlsWildcardKey`. They can either be supplied as plain text via `-f path/to/secrets.yaml`, like this: - -``` -secrets: - tlsWildcardCert: | - -----BEGIN CERTIFICATE----- - ... (Your Primary SSL certificate) ... - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - ... (Your Intermediate certificate) ... - -----END CERTIFICATE----- - tlsWildcardKey: | - -----BEGIN PRIVATE KEY----- - ... - -----END PRIVATE KEY----- -``` - -or encrypted with `sops`, in which case you use `helm-wrapper`. - -Have a look at the [values file](values.yaml) for different configuration options. - -# Common issues - -Q: My ingress keeps serving "Kubernetes Ingress Controller Fake Certificate"!! - -A: Ensure that your certificate is _valid_ and has _not expired_; trying to serve expired certificates will silently fail and the nginx ingress will simply fall back to the default certificate.
diff --git a/charts/nginx-ingress-services/templates/_helpers.tpl b/charts/nginx-ingress-services/templates/_helpers.tpl deleted file mode 100644 index b2d2a0925..000000000 --- a/charts/nginx-ingress-services/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. 
-*/}} -{{- define "nginx-ingress-services.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "nginx-ingress-services.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/nginx-ingress-services/templates/ingress.yaml b/charts/nginx-ingress-services/templates/ingress.yaml deleted file mode 100644 index 240fd9f41..000000000 --- a/charts/nginx-ingress-services/templates/ingress.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: nginx-ingress - annotations: - kubernetes.io/ingress.class: "nginx" -spec: - # This assumes you have created the given cert - # https://github.com/kubernetes/ingress-nginx/blob/master/docs/examples/PREREQUISITES.md#tls-certificates -{{- if .Values.tls.enabled }} - tls: - - hosts: - - {{ .Values.config.dns.https }} - - {{ .Values.config.dns.ssl }} - - {{ .Values.config.dns.webapp }} - - {{ .Values.config.dns.fakeS3 }} -{{- if .Values.teamSettings.enabled }} - - {{ .Values.config.dns.teamSettings }} -{{- end }} -{{- if .Values.accountPages.enabled }} - - {{ .Values.config.dns.accountPages }} -{{- end }} - secretName: nginx-ingress-services-wildcard-tls-certificate -{{- end }} - rules: - - host: {{ .Values.config.dns.https }} - http: - paths: - - path: / - backend: - serviceName: nginz-http - servicePort: {{ .Values.service.nginz.externalHttpPort }} - - host: {{ .Values.config.dns.ssl }} - http: - paths: - - path: / - backend: - serviceName: nginz-tcp - servicePort: {{ .Values.service.nginz.externalTcpPort }} - - host: {{ .Values.config.dns.webapp }} - http: - paths: - - path: / - backend: - serviceName: webapp-http - servicePort: {{ .Values.service.webapp.externalPort }} - - host: {{ .Values.config.dns.fakeS3 }} - http: - paths: - - path: / - backend: - serviceName: {{ .Values.service.s3.serviceName }} - servicePort: {{ .Values.service.s3.externalPort }} -{{- if .Values.teamSettings.enabled }} - - host: {{ .Values.config.dns.teamSettings }} - http: - paths: - - path: / - backend: - serviceName: team-settings-http - servicePort: {{ .Values.service.teamSettings.externalPort }} -{{- end }} -{{- if .Values.accountPages.enabled }} - - host: {{ .Values.config.dns.accountPages }} - http: - paths: - - path: / - backend: - serviceName: account-pages-http - servicePort: {{ .Values.service.accountPages.externalPort }} -{{- end }} diff --git a/charts/nginx-ingress-services/templates/secret.yaml b/charts/nginx-ingress-services/templates/secret.yaml deleted file mode 100644 index 0950c2157..000000000 --- a/charts/nginx-ingress-services/templates/secret.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: nginx-ingress-services-wildcard-tls-certificate - labels: - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: kubernetes.io/tls -data: - {{/* for_helm_linting is necessary only since the 'with' block below does not throw an error upon an empty .Values.secrets */}} - for_helm_linting: {{ required "No .secrets found in configuration. Did you forget to helm -f path/to/secrets.yaml ?" 
.Values.secrets | quote | b64enc | quote }} - - {{- with .Values.secrets }} - tls.crt: {{ .tlsWildcardCert | b64enc | quote }} - tls.key: {{ .tlsWildcardKey | b64enc | quote }} - {{- end }} diff --git a/charts/nginx-ingress-services/templates/service.yaml b/charts/nginx-ingress-services/templates/service.yaml deleted file mode 100644 index 41373505f..000000000 --- a/charts/nginx-ingress-services/templates/service.yaml +++ /dev/null @@ -1,77 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: nginz-http -spec: - type: ClusterIP - ports: - - port: {{ .Values.service.nginz.externalHttpPort }} - targetPort: 8080 - selector: - wireService: nginz ---- -apiVersion: v1 -kind: Service -metadata: - name: nginz-tcp -spec: - type: ClusterIP - ports: - - port: {{ .Values.service.nginz.externalTcpPort }} - targetPort: 8081 - selector: - wireService: nginz ---- -apiVersion: v1 -kind: Service -metadata: - name: webapp-http -spec: - type: ClusterIP - ports: - - port: {{ .Values.service.webapp.externalPort }} - targetPort: 8080 - selector: - wireService: webapp -{{- if not .Values.service.s3.externallyCreated }} ---- -apiVersion: v1 -kind: Service -metadata: - name: s3-http -spec: - type: ClusterIP - ports: - - port: {{ .Values.service.s3.externalPort }} - targetPort: 9000 - selector: - wireService: {{ .Values.service.s3.serviceName }} -{{- end }} -{{- if .Values.teamSettings.enabled }} ---- -apiVersion: v1 -kind: Service -metadata: - name: team-settings-http -spec: - type: ClusterIP - ports: - - port: {{ .Values.service.teamSettings.externalPort }} - targetPort: 8080 - selector: - wireService: team-settings -{{- end }} -{{- if .Values.accountPages.enabled }} ---- -apiVersion: v1 -kind: Service -metadata: - name: account-pages-http -spec: - type: ClusterIP - ports: - - port: {{ .Values.service.accountPages.externalPort }} - targetPort: 8080 - selector: - wireService: account-pages -{{- end }} diff --git a/charts/nginx-ingress-services/values.yaml b/charts/nginx-ingress-services/values.yaml deleted file mode 100644 index ce135a5af..000000000 --- a/charts/nginx-ingress-services/values.yaml +++ /dev/null @@ -1,67 +0,0 @@ -# Default values for nginx-ingress-services - -# Team settings is disabled by default since it requires access to a private repo. -teamSettings: - enabled: false -# Account pages may be useful to enable password reset or email validation done after the initial registration -accountPages: - enabled: false - -# If you want to use TLS termination on the ingress, -# then set this variable to true and ensure that there -# is a valid wildcard TLS certificate -# When TLS is enable, ensure that you have the certificate -# and secret in a file under helm_vars/bare/secret.yaml (or -# another location) since enabling TLS requires you to supply -# a `tlsWildcardCert` and `tlsWildcardKey` (these will then be -# 64 encoded as part of the secret creation) so those values -# should simply contain the certificate and key (you can also -# encrypt them with sops and use helm-wrapper, check README.md -# for an example) -tls: - enabled: true - -service: - nginz: - externalHttpPort: 8080 - externalTcpPort: 8081 - webapp: - externalPort: 8080 - s3: - externalPort: 9000 - serviceName: fake-aws-s3 - externallyCreated: false # See note below - teamSettings: - externalPort: 8080 - accountPages: - externalPort: 8080 - -# You will need to supply some DNS names, namely -# config: -# dns: -# https: nginz-https. -# ssl: nginz-ssl. -# webapp: webapp. -# fakeS3: assets. -# teamSettings: teams. 
-#     ^ teamSettings is ignored unless teamSettings.enabled == true
-#     accountPages: account.<domain>
-#     ^ accountPages is ignored unless accountPages.enabled == true
-# For TLS
-# secrets:
-#   tlsWildcardCert: |
-#     -----BEGIN CERTIFICATE-----
-#     -----END CERTIFICATE-----
-#   tlsWildcardKey: |
-#     -----BEGIN PRIVATE KEY-----
-#     -----END PRIVATE KEY-----
-#
-# For Services:
-# service:
-#   s3:
-#     externallyCreated: true
-#     ^ externallyCreated might be useful if S3 access is provided by
-#       an external service such as `minio-external`: in such cases
-#       we do not want to create yet another service here but rather
-#       use that service instead in the ingress
-#     serviceName: minio-external
diff --git a/charts/nginz/.helmignore b/charts/nginz/.helmignore
deleted file mode 100644
index f0c131944..000000000
--- a/charts/nginz/.helmignore
+++ /dev/null
@@ -1,21 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/charts/nginz/Chart.yaml b/charts/nginz/Chart.yaml
deleted file mode 100644
index dfc64bab5..000000000
--- a/charts/nginz/Chart.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: v1
-description: A Helm chart for nginz in Kubernetes
-name: nginz
-version: 0.94.0
diff --git a/charts/nginz/README.md b/charts/nginz/README.md
deleted file mode 100644
index 20a399f40..000000000
--- a/charts/nginz/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# nginz chart
-
-Deploys [nginz - nginx with authentication module](https://github.com/wireapp/wire-server/services/nginz), along with a little sidecar container
-
-## Configuring zauth
-
-* Public keys must match the public/private keys as used in the `brig` chart.
-
-TODO more documentation/links
-
-## Configuring basic_auth
-
-See also https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html
-
-This only needs to be done when you wish to bypass normal authentication for some specific, otherwise not-exposed endpoints (e.g. `/i/users/activation-code` - see values.yaml in this chart) in test environments, for performing automated end-to-end tests.
-
-* set `nginx_conf.env` to `staging` (and ensure this environment does not have any production traffic)
-* `htpasswd -cb myfile.txt myuser mypassword && cat myfile.txt` (`htpasswd` is from `httpd-tools` or `apache-utils`) generates a hashed user:password line which you can pass to the nginz chart under `nginz.secrets.basicAuth` (see also wire-server-deploy/values/wire-server/secrets.yaml)
-* generate the base64 value of the original user:password (*not* of the myfile contents): `echo -n 'myuser:mypassword' | base64`
-* deploy and try a request by passing a header `Authorization: Basic <base64-value>` (see the sketch below)
-
-## Sidecar container nginz-disco
-
-Due to nginx not supporting DNS names for its list of upstream servers (unless you pay extra), the [nginz-disco](https://github.com/wireapp/wire-server/tree/develop/tools/nginz_disco) container is a simple bash script that does DNS lookups and writes the resulting IPs to a file. Nginz reloads on changes to this file.
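Tying the `basic_auth` steps above together, here is a minimal sketch of a values override. It is illustrative only: the `$apr1$...` hash is a placeholder for the line produced by the `htpasswd` invocation above, and the `nginz:` nesting assumes the chart is configured through an umbrella values file as in wire-server-deploy's values/wire-server/secrets.yaml:

```yaml
# hypothetical secrets override, passed with `helm ... -f secrets.yaml`
nginz:
  secrets:
    # the user:hash line printed by: htpasswd -cb myfile.txt myuser mypassword
    basicAuth: "myuser:$apr1$placeholderhash"
  nginx_conf:
    # basic_auth-protected endpoints in values.yaml are only exposed for 'staging'
    env: staging
```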
diff --git a/charts/nginz/conf/static/zauth.acl b/charts/nginz/conf/static/zauth.acl deleted file mode 100644 index 9498b8cc4..000000000 --- a/charts/nginz/conf/static/zauth.acl +++ /dev/null @@ -1,17 +0,0 @@ -a (blacklist (path "/provider") - (path "/provider/**") - (path "/bot") - (path "/bot/**") - (path "/i/**")) - -b (whitelist (path "/bot") - (path "/bot/**")) - -p (whitelist (path "/provider") - (path "/provider/**")) - -# LegalHold Access Tokens -la (whitelist (path "/notifications") - (path "/assets/v3/**") - (path "/users") - (path "/users/**")) diff --git a/charts/nginz/templates/_helpers.tpl b/charts/nginz/templates/_helpers.tpl deleted file mode 100644 index 94ac8698e..000000000 --- a/charts/nginz/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "nginz.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "nginz.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/nginz/templates/conf/_nginx.conf.tpl b/charts/nginz/templates/conf/_nginx.conf.tpl deleted file mode 100644 index 0a1675615..000000000 --- a/charts/nginz/templates/conf/_nginx.conf.tpl +++ /dev/null @@ -1,349 +0,0 @@ -{{- define "nginz_nginx.conf" }} -user {{ .Values.nginx_conf.user }} {{ .Values.nginx_conf.group }}; -worker_processes {{ .Values.nginx_conf.worker_processes }}; -worker_rlimit_nofile {{ .Values.nginx_conf.worker_rlimit_nofile | default 1024 }}; -pid /var/run/nginz.pid; - -# nb. start up errors (eg. misconfiguration) may still end up in -# /var/log/nginz/error.log -error_log stderr warn; - -events { - worker_connections {{ .Values.nginx_conf.worker_connections | default 1024 }}; - multi_accept off; - use epoll; -} - -http { - # - # Sockets - # - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - - # - # Timeouts - # - - client_body_timeout 60; - client_header_timeout 60; - keepalive_timeout 75; - send_timeout 60; - - ignore_invalid_headers off; - - types_hash_max_size 2048; - - server_names_hash_bucket_size 64; - server_name_in_redirect off; - - large_client_header_buffers 4 8k; - - - # - # Security - # - - server_tokens off; - - # - # Logging - # - # Note sanitized_request: - # We allow passing access_token as query parameter for e.g. websockets - # However we do not want to log access tokens. 
-  #
-
-  log_format custom_zeta '$remote_addr $remote_user "$time_local" "$sanitized_request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $http_x_forwarded_for $connection $request_time $upstream_response_time $upstream_cache_status $zauth_user $zauth_connection $request_id $proxy_protocol_addr';
-  access_log /dev/stdout custom_zeta;
-
-  #
-  # Monitoring
-  #
-  vhost_traffic_status_zone;
-
-  #
-  # Gzip
-  #
-
-  gzip on;
-  gzip_disable msie6;
-  gzip_vary on;
-  gzip_proxied any;
-  gzip_comp_level 6;
-  gzip_buffers 16 8k;
-  gzip_http_version 1.1;
-  gzip_min_length 1024;
-  gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
-
-  #
-  # This directive ensures that X-Forwarded-For is used
-  # as the client's real IP address (since nginz is always
-  # behind an ELB, remote_addr now becomes the client's real
-  # IP address)
-  #
-
-  real_ip_header X-Forwarded-For;
-  set_real_ip_from 0.0.0.0/0;
-
-  #
-  # Rate Limiting Exemptions
-  #
-
-  geo $rate_limit {
-    default 1;
-
-    # IPs to exempt can be added via the .Values.nginx_conf.rate_limit_exemptions and .Values.nginx_conf.simulators helm values
-    {{ if (hasKey .Values.nginx_conf "rate_limit_exemptions") }}
-    {{ range $ip := .Values.nginx_conf.rate_limit_exemptions }}
-    {{ $ip }} 0;
-    {{ end }}
-    {{ end }}
-
-    {{ if (hasKey .Values.nginx_conf "simulators") }}
-    {{ range $ip := .Values.nginx_conf.simulators }}
-    {{ $ip }} 0;
-    {{ end }}
-    {{ end }}
-  }
-
-  #
-  # Rate Limiting Mapping
-  #
-
-  map $rate_limit $rate_limited_by_addr {
-    1 "$binary_remote_addr$uri";
-    0 "";
-  }
-
-  map $rate_limit $rate_limited_by_zuser {
-    1 $zauth_user;
-    0 "";
-  }
-
-  map $http_origin $cors_header {
-    default "";
-    "~^https://([^/]+\.)?{{ .Values.nginx_conf.external_env_domain | replace "." "\\."
}}(:[0-9]{2,5})?$" "$http_origin"; - } - - - # - # Rate Limiting - # - - limit_req_zone $rate_limited_by_zuser zone=reqs_per_user:12m rate=10r/s; - limit_req_zone $rate_limited_by_addr zone=reqs_per_addr:12m rate=5r/m; - - limit_conn_zone $rate_limited_by_zuser zone=conns_per_user:10m; - limit_conn_zone $rate_limited_by_addr zone=conns_per_addr:10m; - - # Too Many Requests (420) is returned on throttling - # TODO: Change to 429 once all clients support this - limit_req_status 420; - limit_conn_status 420; - - limit_req_log_level warn; - limit_conn_log_level warn; - - # Limit by $zauth_user if present and not part of rate limit exemptions - limit_req zone=reqs_per_user burst=20; - limit_conn conns_per_user 25; - - # - # Proxied Upstream Services - # - - include {{ .Values.nginx_conf.upstream_config }}; - - # - # Mapping for websocket connections - # - - map $http_upgrade $connection_upgrade { - websocket upgrade; - default ''; - } - - - - # - # Locations - # - - server { - listen {{ .Values.config.http.httpPort }}; - listen {{ .Values.config.ws.wsPort }}{{ if (.Values.config.ws.useProxyProtocol) }} proxy_protocol{{ end }}; - - zauth_keystore {{ .Values.nginx_conf.zauth_keystore }}; - zauth_acl {{ .Values.nginx_conf.zauth_acl }}; - - location /status { - zauth off; - access_log off; - allow 10.0.0.0/8; - deny all; - - return 200; - } - - location /vts { - zauth off; - access_log off; - allow 10.0.0.0/8; - allow 127.0.0.1; - deny all; - - vhost_traffic_status_display; - vhost_traffic_status_display_format html; - } - - # Block "Franz" -- http://meetfranz.com - if ($http_user_agent ~* Franz) { - return 403; - } - - {{ range $path := .Values.nginx_conf.disabled_paths }} - location {{ $path }} { - - return 404; - } - {{ end }} - - # - # Service Routing - # - - {{ range $name, $locations := .Values.nginx_conf.upstreams -}} - {{- range $location := $locations -}} - {{- if hasKey $location "envs" -}} - {{- range $env := $location.envs -}} - {{- if or (eq $env $.Values.nginx_conf.env) (eq $env "all") -}} - - {{- if and (not (eq $.Values.nginx_conf.env "prod")) ($location.doc) -}} - rewrite ^/api-docs{{ $location.path }} {{ $location.path }}/api-docs?base_url=https://{{ $.Values.nginx_conf.env }}-nginz-https.{{ $.Values.nginx_conf.external_env_domain }}/ break; - {{- end }} - - location {{ $location.path }} { - - # remove access_token from logs, see 'Note sanitized_request' above. 
- set $sanitized_request $request; - if ($sanitized_request ~ (.*)access_token=[^&]*(.*)) { - set $sanitized_request $1access_token=****$2; - } - - {{- if ($location.basic_auth) }} - auth_basic "Restricted"; - auth_basic_user_file {{ $.Values.nginx_conf.basic_auth_file }}; - {{- end -}} - - {{- if ($location.disable_zauth) }} - zauth off; - - # If zauth is off, limit by remote address if not part of limit exemptions - {{- if ($location.unlimited_requests_endpoint) }} - # Note that this endpoint has no rate limit - {{- else -}} - limit_req zone=reqs_per_addr burst=5 nodelay; - limit_conn conns_per_addr 20; - {{- end -}} - {{- end }} - - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Methods' "GET, POST, PUT, DELETE, OPTIONS"; - add_header 'Access-Control-Allow-Headers' "$http_access_control_request_headers, DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type"; - add_header 'Content-Type' 'text/plain; charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - - proxy_pass http://{{ $name }}; - proxy_http_version 1.1; - - {{- if ($location.disable_request_buffering) }} - proxy_request_buffering off; - {{ end -}} - {{- if (hasKey $location "body_buffer_size") }} - client_body_buffer_size {{ $location.body_buffer_size -}}; - {{- end }} - client_max_body_size {{ $location.max_body_size | default "64k" }}; - - {{ if ($location.use_websockets) }} - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_read_timeout 1h; - {{- else }} - proxy_set_header Connection ""; - {{ end -}} - - {{- if not ($location.disable_zauth) }} - proxy_set_header Authorization ""; - {{- end }} - - proxy_set_header Z-Type $zauth_type; - proxy_set_header Z-User $zauth_user; - proxy_set_header Z-Connection $zauth_connection; - proxy_set_header Z-Provider $zauth_provider; - proxy_set_header Z-Bot $zauth_bot; - proxy_set_header Z-Conversation $zauth_conversation; - proxy_set_header Request-Id $request_id; - - {{- if ($location.allow_credentials) }} - more_set_headers 'Access-Control-Allow-Credentials: true'; - {{ end -}} - - {{ if ($location.restrict_whitelisted_origin) -}} - more_set_headers 'Access-Control-Allow-Origin: $cors_header'; - {{- else }} - more_set_headers 'Access-Control-Allow-Origin: $http_origin'; - {{- end }} - - more_set_headers 'Access-Control-Expose-Headers: Request-Id, Location'; - more_set_headers 'Request-Id: $request_id'; - more_set_headers 'Strict-Transport-Security: max-age=31536000; preload'; - } - - {{- end -}} - {{- end -}} - - {{- end -}} - {{- end -}} - {{- end }} - - {{ if not (eq $.Values.nginx_conf.env "prod") }} - # - # Swagger Resource Listing - # - - location /api-docs { - default_type application/json; - root {{ $.Values.nginx_conf.swagger_root }}; - index resources.json; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Methods' "GET, POST, PUT, DELETE, OPTIONS"; - add_header 'Access-Control-Allow-Headers' "$http_access_control_request_headers, DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type"; - add_header 'Content-Type' 'text/plain; charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - more_set_headers 'Access-Control-Allow-Origin: $http_origin'; - } - {{ end }} - - # Swagger UI - - location /swagger-ui { - zauth off; - gzip off; - alias /opt/zwagger-ui; - types { - application/javascript js; - text/css css; - text/html html; - image/png png; - } - } - } -} -{{- end }} 
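As a concrete (hypothetical) illustration of the exemption lists consumed by the `geo $rate_limit` block above: both keys are optional (the template guards them with `hasKey`), and the addresses below are placeholders, not recommendations.

```yaml
nginx_conf:
  # Requests from these addresses resolve $rate_limit to 0; the maps above then
  # produce empty rate-limit keys, so limit_req/limit_conn do not apply to them.
  rate_limit_exemptions:
  - 10.0.0.0/8        # e.g. cluster-internal traffic
  - 203.0.113.25      # e.g. a trusted office egress IP
  simulators:
  - 198.51.100.0/24   # e.g. a load-test simulator fleet
```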
diff --git a/charts/nginz/templates/conf/_upstreams.txt.tpl b/charts/nginz/templates/conf/_upstreams.txt.tpl
deleted file mode 100644
index 62994068d..000000000
--- a/charts/nginz/templates/conf/_upstreams.txt.tpl
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ define "nginz_upstreams.txt" }}
-{{ range $key, $value := .Values.nginx_conf.upstreams }}{{ $key }} {{ end -}}
-{{ end }}
diff --git a/charts/nginz/templates/conf/_zwagger-config.js.tpl b/charts/nginz/templates/conf/_zwagger-config.js.tpl
deleted file mode 100644
index ded11c8c5..000000000
--- a/charts/nginz/templates/conf/_zwagger-config.js.tpl
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ define "nginz_zwagger-config.js" }}
-var environment = '{{ .Values.nginx_conf.env }}';
-{{- end }}
diff --git a/charts/nginz/templates/configmap.yaml b/charts/nginz/templates/configmap.yaml
deleted file mode 100644
index b14e4042e..000000000
--- a/charts/nginz/templates/configmap.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-data:
-  nginx.conf: |2
-{{- include "nginz_nginx.conf" . | indent 4 }}
-  upstreams.txt: |2
-{{- include "nginz_upstreams.txt" . | indent 4 }}
-  zwagger-config.js: |2
-{{- include "nginz_zwagger-config.js" . | indent 4 }}
-{{ (.Files.Glob "conf/static/*").AsConfig | indent 2 }}
-kind: ConfigMap
-metadata:
-  creationTimestamp: null
-  name: nginz
diff --git a/charts/nginz/templates/deployment.yaml b/charts/nginz/templates/deployment.yaml
deleted file mode 100644
index 0472c82c0..000000000
--- a/charts/nginz/templates/deployment.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginz
-  labels:
-    wireService: nginz
-    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  replicas: {{ .Values.replicaCount }}
-  strategy:
-    type: RollingUpdate
-    rollingUpdate:
-      maxUnavailable: 0
-      maxSurge: {{ .Values.replicaCount | mul 2 }}
-  selector:
-    matchLabels:
-      wireService: nginz
-      app: nginz
-  template:
-    metadata:
-      labels:
-        wireService: nginz
-        app: nginz
-        release: {{ .Release.Name }}
-      annotations:
-        # An annotation of the configmap checksum ensures changes to the configmap cause a redeployment upon `helm upgrade`
-        checksum/configmap: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }}
-        checksum/secret: {{ include (print .Template.BasePath "/secret.yaml") . | sha256sum }}
-        fluentbit.io/parser-nginz: nginz
-    spec:
-      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} # should be higher than the drainTimeout (sleep duration of preStop)
-      containers:
-      - name: nginz-disco
-        image: "{{ .Values.images.nginzDisco.repository }}:{{ .Values.images.nginzDisco.tag }}"
-        volumeMounts:
-        - name: config
-          mountPath: /etc/wire/nginz/conf
-          readOnly: true
-        - name: upstreams
-          mountPath: /etc/wire/nginz/upstreams
-          readOnly: false
-      - name: nginz
-        image: "{{ .Values.images.nginz.repository }}:{{ .Values.images.nginz.tag }}"
-        lifecycle:
-          preStop:
-            exec:
-              # kubernetes by default sends a SIGTERM to the container,
-              # which would cause nginz to exit, breaking existing websocket connections.
-              # Instead we sleep for `drainTimeout` seconds, then terminate gracefully.
- # (SIGTERM is still sent, but afterwards) - command: ["sh", "-c", "sleep {{ .Values.drainTimeout }} && nginx -c /etc/wire/nginz/conf/nginx.conf -s quit"] - volumeMounts: - - name: secrets - mountPath: /etc/wire/nginz/secrets - readOnly: true - - name: config - mountPath: /etc/wire/nginz/conf - readOnly: true - - name: upstreams - mountPath: /etc/wire/nginz/upstreams - readOnly: true - ports: - - name: http - containerPort: {{ .Values.config.http.httpPort }} - - name: tcp - containerPort: {{ .Values.config.ws.wsPort }} - readinessProbe: - httpGet: - path: /status - port: {{ .Values.config.http.httpPort }} - scheme: HTTP - livenessProbe: - initialDelaySeconds: 30 - timeoutSeconds: 1 - httpGet: - path: /status - port: {{ .Values.config.http.httpPort }} - scheme: HTTP - resources: -{{ toYaml .Values.resources | indent 12 }} - dnsPolicy: ClusterFirst - restartPolicy: Always - volumes: - - name: config - configMap: - name: nginz - - name: secrets - secret: - secretName: nginz - - name: upstreams - emptyDir: {} diff --git a/charts/nginz/templates/secret.yaml b/charts/nginz/templates/secret.yaml deleted file mode 100644 index 2dc5ab850..000000000 --- a/charts/nginz/templates/secret.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: nginz - labels: - wireService: nginz - app: nginz - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: Opaque -data: - {{/* for_helm_linting is necessary only since the 'with' block below does not throw an error upon an empty .Values.secrets */}} - for_helm_linting: {{ required "No .secrets found in configuration. Did you forget to helm -f path/to/secrets.yaml ?" .Values.secrets | quote | b64enc | quote }} - - {{- with .Values.secrets }} - zauth.conf: {{ .zAuth.publicKeys | b64enc | quote }} - basic_auth.txt: {{ .basicAuth | b64enc | quote }} - {{- end }} diff --git a/charts/nginz/values.yaml b/charts/nginz/values.yaml deleted file mode 100644 index 41f531be7..000000000 --- a/charts/nginz/values.yaml +++ /dev/null @@ -1,371 +0,0 @@ -replicaCount: 3 -resources: - requests: - memory: "256Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2" -images: - nginzDisco: - repository: quay.io/wire/nginz_disco - tag: 2.78.0 - nginz: - repository: quay.io/wire/nginz - tag: 2.78.0 -config: - http: - httpPort: 8080 - ws: - wsPort: 8081 - useProxyProtocol: true -drainTimeout: 0 -terminationGracePeriodSeconds: 30 -nginx_conf: - user: nginx - group: nginx - upstream_config: /etc/wire/nginz/upstreams/upstreams.conf - zauth_keystore: /etc/wire/nginz/secrets/zauth.conf - zauth_acl: /etc/wire/nginz/conf/zauth.acl - basic_auth_file: /etc/wire/nginz/secrets/basic_auth.txt - worker_processes: auto - worker_rlimit_nofile: 131072 - worker_connections: 65536 - swagger_root: /var/www/swagger - disabled_paths: - - /conversations/last-events - - ~* ^/conversations/([^/]*)/knock - - ~* ^/conversations/([^/]*)/hot-knock - - ~* ^/conversations/([^/]*)/messages - - ~* ^/conversations/([^/]*)/client-messages - - ~* ^/conversations/([^/]*)/events - - ~* ^/conversations/([^/]*)/call - - ~* ^/conversations/([^/]*)/call/state - - /search/top - - /search/common - upstreams: - cargohold: - - path: ~* ^/conversations/([^/]*)/assets - envs: - - all - max_body_size: "0" - disable_request_buffering: true - - path: ~* ^/conversations/([^/]*)/otr/assets - envs: - - all - max_body_size: "0" - disable_request_buffering: true - - path: /assets - envs: - - all - max_body_size: "0" - 
disable_request_buffering: true
-      doc: true
-    - path: /bot/assets
-      envs:
-      - all
-      max_body_size: "0"
-      disable_request_buffering: true
-    - path: /provider/assets
-      envs:
-      - all
-      allow_credentials: true
-      restrict_whitelisted_origin: true
-      max_body_size: "0"
-      disable_request_buffering: true
-  brig:
-    - path: /users
-      envs:
-      - all
-      doc: true
-    - path: /self
-      envs:
-      - all
-    - path: /connections
-      envs:
-      - all
-    - path: /invitations
-      envs:
-      - all
-    - path: /clients
-      envs:
-      - all
-    - path: /properties
-      envs:
-      - all
-    - path: /provider/register
-      envs:
-      - all
-      disable_zauth: true
-    - path: /provider/activate
-      envs:
-      - all
-      disable_zauth: true
-    - path: /provider/approve
-      envs:
-      - all
-      disable_zauth: true
-    - path: /provider/login
-      envs:
-      - all
-      disable_zauth: true
-      allow_credentials: true
-    - path: /provider/password-reset
-      envs:
-      - all
-      disable_zauth: true
-    - path: /providers
-      envs:
-      - all
-    - path: /services
-      envs:
-      - all
-    - path: /provider
-      envs:
-      - all
-      allow_credentials: true
-      restrict_whitelisted_origin: true
-    - path: /bot/self
-      envs:
-      - all
-    - path: /bot/client
-      envs:
-      - all
-    - path: /bot/users
-      envs:
-      - all
-    - path: ~* ^/conversations/([^/]*)/bots
-      envs:
-      - all
-    - path: /invitations/info
-      envs:
-      - all
-      disable_zauth: true
-    - path: /register
-      envs:
-      - all
-      allow_credentials: true
-      restrict_whitelisted_origin: true
-      disable_zauth: true
-    - path: /activate
-      envs:
-      - all
-      allow_credentials: true
-      restrict_whitelisted_origin: true
-      disable_zauth: true
-    - path: /delete
-      envs:
-      - all
-      disable_zauth: true
-    - path: /password-reset
-      envs:
-      - all
-      disable_zauth: true
-    - path: /login/send
-      envs:
-      - all
-      disable_zauth: true
-    - path: /i/users/activation-code
-      envs:
-      - staging
-      disable_zauth: true
-      basic_auth: true
-    - path: /i/users/login-code
-      envs:
-      - staging
-      disable_zauth: true
-      basic_auth: true
-    - path: /i/users/invitation-code
-      envs:
-      - staging
-      disable_zauth: true
-      basic_auth: true
-    - path: ~* ^/i/teams/([^/]*)/suspend
-      envs:
-      - staging
-      disable_zauth: true
-      basic_auth: true
-    - path: ~* ^/i/teams/([^/]*)/unsuspend
-      envs:
-      - staging
-      disable_zauth: true
-      basic_auth: true
-    - path: /cookies
-      envs:
-      - all
-    - path: /access
-      envs:
-      - all
-      disable_zauth: true
-      allow_credentials: true
-      restrict_whitelisted_origin: true
-      unlimited_requests_endpoint: true
-    - path: /login
-      envs:
-      - all
-      disable_zauth: true
-      allow_credentials: true
-    - path: /onboarding
-      envs:
-      - all
-      max_body_size: 5m
-      body_buffer_size: 1m
-    - path: /search
-      envs:
-      - all
-    - path: ~* ^/teams/([^/]*)/invitations(.*)
-      envs:
-      - all
-    - path: ~* ^/teams/([^/]*)/services(.*)
-      envs:
-      - all
-    - path: ~* ^/teams/invitations/info$
-      envs:
-      - all
-      disable_zauth: true
-    - path: /i/teams/invitation-code
-      envs:
-      - staging
-      disable_zauth: true
-      basic_auth: true
-    - path: /calls
-      envs:
-      - all
-  galley:
-    - path: /conversations/code-check
-      disable_zauth: true
-      envs:
-      - all
-    - path: ~* ^/conversations/([^/]*)/otr/messages
-      envs:
-      - all
-      max_body_size: 1m
-      body_buffer_size: 256k
-    - path: /broadcast/otr/messages
-      envs:
-      - all
-      max_body_size: 1m
-      body_buffer_size: 256k
-    - path: /bot/conversation
-      envs:
-      - all
-    - path: /bot/messages
-      envs:
-      - all
-      max_body_size: 1m
-      body_buffer_size: 256k
-    - path: /conversations
-      envs:
-      - all
-      doc: true
-    - path: ~* ^/teams$
-      envs:
-      - all
-    - path: ~* ^/teams/([^/]*)$
-      envs:
-      - all
-    - path: ~* ^/teams/([^/]*)/members(.*)
-      envs:
-      - all
-    - 
path: ~* ^/teams/([^/]*)/get-members-by-ids-using-post(.*) - envs: - - all - - path: ~* ^/teams/([^/]*)/conversations(.*) - envs: - - all - - path: ~* ^/teams/([^/]*)/legalhold(.*) - envs: - - all - - path: ~* ^/i/teams/([^/]*)/legalhold(.*) - envs: - - staging - disable_zauth: true - basic_auth: true - - path: ~* ^/custom-backend/by-domain/([^/]*)$ - disable_zauth: true - envs: - - all - - path: ~* ^/i/custom-backend/by-domain/([^/]*)$ - disable_zauth: true - basic_auth: true - envs: - - staging - - path: ~* ^/teams/api-docs - envs: - - all - disable_zauth: true - - path: ~* ^/teams/([^/]*)/features/([^/])* - envs: - - all - gundeck: - - path: /push - envs: - - all - doc: true - - path: /presences - envs: - - all - - path: /notifications - envs: - - all - spar: - - path: /identity-providers - max_body_size: 256k - envs: - - all - - path: /i/sso - disable_zauth: true - basic_auth: true - envs: - - staging - - path: /sso-initiate-bind - envs: - - all - - path: /sso/initiate-login - envs: - - all - disable_zauth: true - allow_credentials: true - - path: /sso/finalize-login - envs: - - all - disable_zauth: true - allow_credentials: true - - path: /sso - envs: - - all - disable_zauth: true - - path: /scim/v2 - envs: - - all - disable_zauth: true - allow_credentials: true - - path: /scim - envs: - - all - proxy: - - path: /proxy - envs: - - all - doc: true - cannon: - - path: /await - envs: - - all - use_websockets: true - doc: true - ibis: - - path: /billing - envs: - - all - disable_zauth: true - - path: ~* ^/teams/([^/]*)/billing(.*) - envs: - - all - calling-test: - - path: /calling-test - envs: - - all - disable_zauth: true diff --git a/charts/proxy/.helmignore b/charts/proxy/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/proxy/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/proxy/Chart.yaml b/charts/proxy/Chart.yaml deleted file mode 100644 index 22645ce98..000000000 --- a/charts/proxy/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Proxy (part of Wire Server) - 3rd party proxy service -name: proxy -version: 0.94.0 diff --git a/charts/proxy/templates/configmap.yaml b/charts/proxy/templates/configmap.yaml deleted file mode 100644 index bbce9d594..000000000 --- a/charts/proxy/templates/configmap.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "proxy" -data: - proxy.yaml: | - logNetStrings: True # log using netstrings encoding: http://cr.yp.to/proto/netstrings.txt - logLevel: {{ .Values.config.logLevel }} - - host: 0.0.0.0 - port: {{ .Values.service.internalPort }} - httpPoolSize: 1000 - maxConns: 5000 - secretsConfig: /etc/wire/proxy/secrets/proxy.config diff --git a/charts/proxy/templates/deployment.yaml b/charts/proxy/templates/deployment.yaml deleted file mode 100644 index bedb30e16..000000000 --- a/charts/proxy/templates/deployment.yaml +++ /dev/null @@ -1,80 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: proxy - labels: - wireService: proxy - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - replicas: {{ .Values.replicaCount }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 0 - maxSurge: {{ .Values.replicaCount }} - selector: - matchLabels: - wireService: proxy - template: - metadata: - labels: - wireService: proxy - release: {{ .Release.Name }} - annotations: - # An annotation of the configmap checksum ensures changes to the configmap cause a redeployment upon `helm upgrade` - checksum/configmap: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/secret: {{ include (print .Template.BasePath "/secret.yaml") . 
| sha256sum }} - spec: - volumes: - - name: "proxy-config" - configMap: - name: "proxy" - - name: "proxy-secrets" - secret: - secretName: "proxy" - containers: - - name: proxy - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} - volumeMounts: - - name: "proxy-secrets" - mountPath: "/etc/wire/proxy/secrets" - - name: "proxy-config" - mountPath: "/etc/wire/proxy/conf" - env: - {{- with .Values.config.proxy }} - {{- if .httpProxy }} - - name: http_proxy - value: {{ .httpProxy | quote }} - - name: HTTP_PROXY - value: {{ .httpProxy | quote }} - {{- end }} - {{- if .httpsProxy }} - - name: https_proxy - value: {{ .httpsProxy | quote }} - - name: HTTPS_PROXY - value: {{ .httpsProxy | quote }} - {{- end }} - {{- if .noProxyList }} - - name: no_proxy - value: {{ join "," .noProxyList | quote }} - - name: NO_PROXY - value: {{ join "," .noProxyList | quote }} - {{- end }} - {{- end }} - ports: - - containerPort: {{ .Values.service.internalPort }} - livenessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - readinessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - resources: -{{ toYaml .Values.resources | indent 12 }} diff --git a/charts/proxy/templates/secret.yaml b/charts/proxy/templates/secret.yaml deleted file mode 100644 index de452b7fc..000000000 --- a/charts/proxy/templates/secret.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: proxy - labels: - app: proxy - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: Opaque -data: - proxy.config: {{ .Values.secrets.proxy_config | b64enc | quote }} diff --git a/charts/proxy/templates/service.yaml b/charts/proxy/templates/service.yaml deleted file mode 100644 index 0b60d66f6..000000000 --- a/charts/proxy/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: proxy - labels: - wireService: proxy - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - selector: - wireService: proxy - release: {{ .Release.Name }} diff --git a/charts/proxy/values.yaml b/charts/proxy/values.yaml deleted file mode 100644 index 65abcd5e1..000000000 --- a/charts/proxy/values.yaml +++ /dev/null @@ -1,17 +0,0 @@ -replicaCount: 3 -image: - repository: quay.io/wire/proxy - tag: 2.78.0 -service: - externalPort: 8080 - internalPort: 8080 -resources: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "512Mi" - cpu: "500m" -config: - logLevel: Debug - proxy: {} \ No newline at end of file diff --git a/charts/reaper/.helmignore b/charts/reaper/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/reaper/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/charts/reaper/Chart.yaml b/charts/reaper/Chart.yaml
deleted file mode 100644
index b482b9432..000000000
--- a/charts/reaper/Chart.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: v1
-version: 0.94.0
-name: reaper
-appVersion: 0.1.0
-description: A Helm chart to restart cannons if redis-ephemeral has died
diff --git a/charts/reaper/README.md b/charts/reaper/README.md
deleted file mode 100644
index e8b0695ca..000000000
--- a/charts/reaper/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-reaper
-------
-
-Due to the nature of pods and their ephemerality, there might be situations where a redis pod is restarted.
-In such cases, clients will have stale connections and will not receive any messages; this reaper checks that the `redis-ephemeral` pod is older than any `cannon` pod, and if that is not the case, it kills the `cannon`s, forcing clients to reconnect.
diff --git a/charts/reaper/templates/deployment.yaml b/charts/reaper/templates/deployment.yaml
deleted file mode 100644
index 1c2d9b250..000000000
--- a/charts/reaper/templates/deployment.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: reaper
-  labels:
-    wireService: reaper
-    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      wireService: reaper
-      release: {{ .Release.Name }}
-  template:
-    metadata:
-      labels:
-        wireService: reaper
-        release: {{ .Release.Name }}
-    spec:
-      serviceAccountName: reaper-role
-      containers:
-      - name: reaper
-        imagePullPolicy: Always
-        image: roffe/kubectl:v1.13.2
-        command: ["bash"]
-        args:
-        - -c
-        - |
-          NAMESPACE={{ .Release.Namespace }}
-          kill_all_cannons() {
-            echo "Killing all cannons"
-            while IFS= read -r cannon
-            do
-              echo "Killing $cannon"
-              kubectl -n "$NAMESPACE" delete pod "$cannon"
-            done < <(kubectl -n "$NAMESPACE" get pods | grep -e "cannon" | awk '{ print $1 }')
-          }
-
-          while true
-          do
-            FIRST_POD=$(kubectl -n "$NAMESPACE" get pods --sort-by=.metadata.creationTimestamp | grep -e "cannon" -e "redis-ephemeral" | head -n 1 | awk '{ print $1 }')
-            if [[ "$FIRST_POD" =~ "redis-ephemeral" ]];
-            then echo "redis-ephemeral is the oldest pod, all good"
-            else kill_all_cannons
-            fi
-            echo 'Sleeping 1 second'
-            sleep 1
-          done
diff --git a/charts/reaper/templates/rbac.yaml b/charts/reaper/templates/rbac.yaml
deleted file mode 100644
index 83862bb01..000000000
--- a/charts/reaper/templates/rbac.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: reaper-role
-subjects:
-- kind: ServiceAccount
-  name: reaper-role
-  namespace: {{ .Release.Namespace }}
-roleRef:
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: reaper-role
diff --git a/charts/redis-ephemeral/Chart.yaml b/charts/redis-ephemeral/Chart.yaml
deleted file mode 100644
index 7abf87346..000000000
--- a/charts/redis-ephemeral/Chart.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: v1
-description: Wrapper chart for stable/redis
-name: redis-ephemeral
-version: 0.94.0
diff --git a/charts/redis-ephemeral/requirements.yaml b/charts/redis-ephemeral/requirements.yaml
deleted file mode 100644
index 756dd8e22..000000000
---
a/charts/redis-ephemeral/requirements.yaml +++ /dev/null @@ -1,5 +0,0 @@ -dependencies: -- name: redis - version: 1.1.7 - repository: https://kubernetes-charts.storage.googleapis.com - alias: redis-ephemeral diff --git a/charts/redis-ephemeral/templates/helpers.tpl b/charts/redis-ephemeral/templates/helpers.tpl deleted file mode 100644 index 73ddf2682..000000000 --- a/charts/redis-ephemeral/templates/helpers.tpl +++ /dev/null @@ -1,8 +0,0 @@ -{{/* -override default fullname template to remove the .Release.Name from the definition in -https://github.com/kubernetes/charts/blob/master/stable/redis-ha/templates/_helpers.tpl -*/}} -{{- define "redis.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} \ No newline at end of file diff --git a/charts/redis-ephemeral/values.yaml b/charts/redis-ephemeral/values.yaml deleted file mode 100644 index 3226e9524..000000000 --- a/charts/redis-ephemeral/values.yaml +++ /dev/null @@ -1,12 +0,0 @@ -redis-ephemeral: - usePassword: false - persistence: - enabled: false - - resources: - limits: - cpu: "1000m" - memory: "1024Mi" - requests: - cpu: "500m" - memory: "512Mi" diff --git a/charts/spar/.helmignore b/charts/spar/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/spar/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/spar/Chart.yaml b/charts/spar/Chart.yaml deleted file mode 100644 index 39147e90d..000000000 --- a/charts/spar/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: Spar (part of Wire Server) - SSO Service -name: spar -version: 0.94.0 diff --git a/charts/spar/templates/configmap.yaml b/charts/spar/templates/configmap.yaml deleted file mode 100644 index 33d36a4ca..000000000 --- a/charts/spar/templates/configmap.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "spar" -data: - spar.yaml: | - logNetStrings: True # log using netstrings encoding (see http://cr.yp.to/proto/netstrings.txt) - logLevel: {{ .Values.config.logLevel }} - - brig: - host: brig - port: 8080 - - galley: - host: galley - port: 8080 - - cassandra: - endpoint: - host: {{ .Values.config.cassandra.host }} - port: 9042 - keyspace: spar - - maxttlAuthreq: {{ .Values.config.maxttlAuthreq }} - maxttlAuthresp: {{ .Values.config.maxttlAuthresp }} - - richInfoLimit: {{ .Values.config.richInfoLimit }} - - maxScimTokens: {{ .Values.config.maxScimTokens }} - - saml: - version: SAML2.0 - logLevel: {{ .Values.config.logLevel }} - - spHost: 0.0.0.0 - spPort: {{ .Values.service.externalPort }} - spAppUri: {{ .Values.config.appUri }} - spSsoUri: {{ .Values.config.ssoUri }} - - contacts: -{{ toYaml .Values.config.contacts | indent 12 }} diff --git a/charts/spar/templates/deployment.yaml b/charts/spar/templates/deployment.yaml deleted file mode 100644 index a6b86f8bb..000000000 --- a/charts/spar/templates/deployment.yaml +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: spar - labels: - wireService: spar - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service 
}} -spec: - replicas: {{ .Values.replicaCount }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 0 - maxSurge: {{ .Values.replicaCount }} - selector: - matchLabels: - wireService: spar - template: - metadata: - labels: - wireService: spar - release: {{ .Release.Name }} - annotations: - # An annotation of the configmap checksum ensures changes to the configmap cause a redeployment upon `helm upgrade` - checksum/configmap: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }} - spec: - volumes: - - name: "spar-config" - configMap: - name: "spar" - containers: - - name: spar - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} - volumeMounts: - - name: "spar-config" - mountPath: "/etc/wire/spar/conf" - env: - {{- with .Values.config.proxy }} - {{- if .httpProxy }} - - name: http_proxy - value: {{ .httpProxy | quote }} - - name: HTTP_PROXY - value: {{ .httpProxy | quote }} - {{- end }} - {{- if .httpsProxy }} - - name: https_proxy - value: {{ .httpsProxy | quote }} - - name: HTTPS_PROXY - value: {{ .httpsProxy | quote }} - {{- end }} - {{- if .noProxyList }} - - name: no_proxy - value: {{ join "," .noProxyList | quote }} - - name: NO_PROXY - value: {{ join "," .noProxyList | quote }} - {{- end }} - {{- end }} - ports: - - containerPort: {{ .Values.service.internalPort }} - livenessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - readinessProbe: - httpGet: - scheme: HTTP - path: /i/status - port: {{ .Values.service.internalPort }} - resources: -{{ toYaml .Values.resources | indent 12 }} diff --git a/charts/spar/templates/service.yaml b/charts/spar/templates/service.yaml deleted file mode 100644 index e360ac0f3..000000000 --- a/charts/spar/templates/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: spar - labels: - wireService: spar - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - type: ClusterIP - ports: - - name: http - port: {{ .Values.service.externalPort }} - targetPort: {{ .Values.service.internalPort }} - selector: - wireService: spar - release: {{ .Release.Name }} diff --git a/charts/spar/templates/tests/configmap.yaml b/charts/spar/templates/tests/configmap.yaml deleted file mode 100644 index 071d1c3b4..000000000 --- a/charts/spar/templates/tests/configmap.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: "spar-integration" -data: - integration.yaml: | - brig: - host: brig - port: 8080 - - galley: - host: galley - port: 8080 - - spar: - host: spar - port: 8080 diff --git a/charts/spar/templates/tests/spar-integration.yaml b/charts/spar/templates/tests/spar-integration.yaml deleted file mode 100644 index 19646326a..000000000 --- a/charts/spar/templates/tests/spar-integration.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ .Release.Name }}-spar-integration" - annotations: - "helm.sh/hook": test-success - labels: - wireService: spar-integration - release: {{ .Release.Name }} -spec: - volumes: - - name: "spar-integration" - configMap: - name: "spar-integration" - # Used to read some values from the spar service - - name: "spar-config" - configMap: - name: "spar" - containers: - - name: integration - image: "{{ .Values.image.repository }}-integration:{{ .Values.image.tag }}" - volumeMounts: - - name: "spar-integration" 
-          mountPath: "/etc/wire/integration"
-        - name: "spar-config"
-          mountPath: "/etc/wire/spar/conf"
-
-  restartPolicy: Never
diff --git a/charts/spar/values.yaml b/charts/spar/values.yaml
deleted file mode 100644
index 2572b3d90..000000000
--- a/charts/spar/values.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-replicaCount: 3
-image:
-  repository: quay.io/wire/spar
-  tag: 2.78.0
-resources:
-  requests:
-    memory: "128Mi"
-    cpu: "100m"
-  limits:
-    memory: "512Mi"
-    cpu: "500m"
-service:
-  externalPort: 8080
-  internalPort: 8080
-config:
-  cassandra:
-    host: aws-cassandra
-  richInfoLimit: 5000
-  maxScimTokens: 0
-  logLevel: Info
-  maxttlAuthreq: 7200
-  maxttlAuthresp: 7200
-  proxy: {}
\ No newline at end of file
diff --git a/charts/team-settings/.helmignore b/charts/team-settings/.helmignore
deleted file mode 100644
index f0c131944..000000000
--- a/charts/team-settings/.helmignore
+++ /dev/null
@@ -1,21 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/charts/team-settings/Chart.yaml b/charts/team-settings/Chart.yaml
deleted file mode 100644
index 48f650972..000000000
--- a/charts/team-settings/Chart.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: v1
-description: A Helm chart for the Wire team-settings in Kubernetes
-name: team-settings
-version: 0.94.0
diff --git a/charts/team-settings/README.md b/charts/team-settings/README.md
deleted file mode 100644
index 5a8e758e9..000000000
--- a/charts/team-settings/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-The team-settings app is part of a private repo. As such, this chart expects an image pull secret named `wire-teamsettings-readonly-pull-secret` to be made available. Check the [values file](values.yaml) for more info.
-
-kubectl create -f wire-teamsettings-readonly-pull-secret.yml --namespace=<namespace>
-
-If you want to get access to it, get in [touch with us](https://wire.com/pricing/).
diff --git a/charts/team-settings/templates/_helpers.tpl b/charts/team-settings/templates/_helpers.tpl
deleted file mode 100644
index 4e8e5d74c..000000000
--- a/charts/team-settings/templates/_helpers.tpl
+++ /dev/null
@@ -1,16 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "team-settings.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}} -{{- define "team-settings.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/team-settings/templates/deployment.yaml b/charts/team-settings/templates/deployment.yaml deleted file mode 100644 index b9bce38d4..000000000 --- a/charts/team-settings/templates/deployment.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: team-settings - labels: - wireService: team-settings - chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - replicas: {{ .Values.replicaCount }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 0 - maxSurge: {{ .Values.replicaCount | mul 2 }} - selector: - matchLabels: - wireService: team-settings - app: team-settings - template: - metadata: - labels: - wireService: team-settings - app: team-settings - release: {{ .Release.Name }} - spec: - # Check the README to find out more about this secret - imagePullSecrets: - - name: wire-teamsettings-readonly-pull-secret - containers: - - name: team-settings - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - env: - - name: NODE_PORT - value: "{{ .Values.service.http.internalPort }}" - - name: APP_BASE - value: https://{{ .Values.config.externalUrls.appHost }}/ - - name: BACKEND_REST - value: https://{{ .Values.config.externalUrls.backendRest }} - - name: BACKEND_WS - value: wss://{{ .Values.config.externalUrls.backendWebsocket }} - {{- range $key, $val := .Values.envVars }} - - name: {{ $key }} - value: {{ $val | quote }} - {{- end }} - ports: - - name: http - containerPort: {{ .Values.service.http.internalPort }} - readinessProbe: - httpGet: - path: /_health/ - port: {{ .Values.service.http.internalPort }} - scheme: HTTP - livenessProbe: - initialDelaySeconds: 30 - timeoutSeconds: 3 - httpGet: - path: /_health/ - port: {{ .Values.service.http.internalPort }} - scheme: HTTP - resources: -{{ toYaml .Values.resources | indent 12 }} - dnsPolicy: ClusterFirst - restartPolicy: Always diff --git a/charts/team-settings/templates/secret.yaml b/charts/team-settings/templates/secret.yaml deleted file mode 100644 index 64710da6f..000000000 --- a/charts/team-settings/templates/secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: wire-teamsettings-readonly-pull-secret - labels: - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -type: kubernetes.io/dockerconfigjson -data: - {{/* for_helm_linting is necessary only since the 'with' block below does not throw an error upon an empty .Values.secrets */}} - for_helm_linting: {{ required "No .secrets found in configuration. Did you forget to helm -f path/to/secrets.yaml ?" .Values.secrets | quote | b64enc | quote }} - - {{- with .Values.secrets }} - .dockerconfigjson: {{ .configJson }} - {{- end }} diff --git a/charts/team-settings/values.yaml b/charts/team-settings/values.yaml deleted file mode 100644 index d9b34f847..000000000 --- a/charts/team-settings/values.yaml +++ /dev/null @@ -1,55 +0,0 @@ -# Default values for the team-settings. 
-replicaCount: 1
-resources:
-  requests:
-    memory: "128Mi"
-    cpu: "100m"
-  limits:
-    memory: "512Mi"
-    cpu: "1"
-image:
-  repository: quay.io/wire/team-settings
-  tag: 13000-2.10.0-3b12f2-v0.24.33-production
-service:
-  https:
-    externalPort: 443
-  http:
-    internalPort: 8080
-
-## The following has to be provided to deploy this chart
-
-#config:
-#  externalUrls:
-#    backendRest: nginz-https.example.com
-#    backendWebsocket: nginz-ssl.example.com
-#    backendDomain: example.com
-#    appHost: teams.example.com
-
-#secrets:
-#  configJson:
-#
-
-# For some relevant environment options, have a look at
-# https://github.com/wireapp/wire-team-settings/wiki/Self-hosting
-# Note that it is a private repo, check README for more info
-# NOTE: Without an empty dictionary, you will see warnings
-# when overriding envVars
-envVars: {}
-# E.g.
-# envVars:
-#   FEATURE_ENABLE_DEBUG: "true"
-# You are likely to need at least the following CSP headers,
-# since you are likely to make cross-subdomain requests,
-# i.e., from teams.example.com to nginz-https.example.com
-# CSP_EXTRA_CONNECT_SRC: "https://*.example.com, wss://*.example.com"
-# CSP_EXTRA_IMG_SRC: "https://*.example.com"
-# CSP_EXTRA_SCRIPT_SRC: "https://*.example.com"
-# CSP_EXTRA_DEFAULT_SRC: "https://*.example.com"
-# CSP_EXTRA_FONT_SRC: "https://*.example.com"
-# CSP_EXTRA_FRAME_SRC: "https://*.example.com"
-# CSP_EXTRA_MANIFEST_SRC: "https://*.example.com"
-# CSP_EXTRA_OBJECT_SRC: "https://*.example.com"
-# CSP_EXTRA_MEDIA_SRC: "https://*.example.com"
-# CSP_EXTRA_PREFETCH_SRC: "https://*.example.com"
-# CSP_EXTRA_STYLE_SRC: "https://*.example.com"
-# CSP_EXTRA_WORKER_SRC: "https://*.example.com"
diff --git a/charts/webapp/.helmignore b/charts/webapp/.helmignore
deleted file mode 100644
index f0c131944..000000000
--- a/charts/webapp/.helmignore
+++ /dev/null
@@ -1,21 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/charts/webapp/Chart.yaml b/charts/webapp/Chart.yaml
deleted file mode 100644
index ba2e89cfe..000000000
--- a/charts/webapp/Chart.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: v1
-description: A Helm chart for the Wire webapp in Kubernetes
-name: webapp
-version: 0.94.0
diff --git a/charts/webapp/templates/_helpers.tpl b/charts/webapp/templates/_helpers.tpl
deleted file mode 100644
index 97a82aca3..000000000
--- a/charts/webapp/templates/_helpers.tpl
+++ /dev/null
@@ -1,16 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "webapp.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}
-{{- define "webapp.fullname" -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
diff --git a/charts/webapp/templates/deployment.yaml b/charts/webapp/templates/deployment.yaml
deleted file mode 100644
index a619b18f9..000000000
--- a/charts/webapp/templates/deployment.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: webapp
-  labels:
-    wireService: webapp
-    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  replicas: {{ .Values.replicaCount }}
-  strategy:
-    type: RollingUpdate
-    rollingUpdate:
-      maxUnavailable: 0
-      maxSurge: {{ .Values.replicaCount | mul 2 }}
-  selector:
-    matchLabels:
-      wireService: webapp
-      app: webapp
-  template:
-    metadata:
-      labels:
-        wireService: webapp
-        app: webapp
-        release: {{ .Release.Name }}
-    spec:
-      containers:
-      - name: webapp
-        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
-        # Check variables here: https://github.com/wireapp/wire-webapp/wiki/Self-hosting
-        env:
-        # it is vital that you don't add trailing '/' in this section!
-        - name: NODE_PORT
-          value: "{{ .Values.service.http.internalPort }}"
-        - name: APP_BASE
-          value: "https://{{ .Values.config.externalUrls.appHost }}"
-        - name: BACKEND_REST
-          value: "https://{{ .Values.config.externalUrls.backendRest }}"
-        - name: BACKEND_WS
-          value: "wss://{{ .Values.config.externalUrls.backendWebsocket }}"
-        {{- range $key, $val := .Values.envVars }}
-        - name: {{ $key }}
-          value: {{ $val | quote }}
-        {{- end }}
-        ports:
-        - name: http
-          containerPort: {{ .Values.service.http.internalPort }}
-        # NOTE: /test/ returns an HTML document with a 200 response code
-        readinessProbe:
-          httpGet:
-            path: /_health/
-            port: {{ .Values.service.http.internalPort }}
-            scheme: HTTP
-        livenessProbe:
-          initialDelaySeconds: 30
-          timeoutSeconds: 3
-          httpGet:
-            path: /_health/
-            port: {{ .Values.service.http.internalPort }}
-            scheme: HTTP
-        resources:
-{{ toYaml .Values.resources | indent 12 }}
-      dnsPolicy: ClusterFirst
-      restartPolicy: Always
diff --git a/charts/webapp/values.yaml b/charts/webapp/values.yaml
deleted file mode 100644
index fb9e57037..000000000
--- a/charts/webapp/values.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Default values for the webapp.
-replicaCount: 1
-resources:
-  requests:
-    memory: "128Mi"
-    cpu: "100m"
-  limits:
-    memory: "512Mi"
-    cpu: "1"
-image:
-  repository: quay.io/wire/webapp
-  tag: 48056-0.1.0-f5e9e8-v0.24.34-production
-service:
-  https:
-    externalPort: 443
-  http:
-    internalPort: 8080
-
-## The following has to be provided to deploy this chart
-
-#config:
-#  externalUrls:
-#    backendRest: nginz-https.example.com
-#    backendWebsocket: nginz-ssl.example.com
-#    backendDomain: example.com
-#    appHost: webapp.example.com
-
-# For some relevant environment options, have a look at
-# https://github.com/wireapp/wire-webapp/wiki/Self-hosting
-# NOTE: Without an empty dictionary, you will see warnings
-# when overriding envVars
-envVars: {}
-# E.g.
-# envVars:
-#   FEATURE_ENABLE_DEBUG: "true"
-# You are likely to need at least the following CSP headers,
-# since you are likely to make cross-subdomain requests,
-# i.e., from webapp.example.com to nginz-https.example.com
-# CSP_EXTRA_CONNECT_SRC: "https://*.example.com, wss://*.example.com"
-# CSP_EXTRA_IMG_SRC: "https://*.example.com"
-# CSP_EXTRA_SCRIPT_SRC: "https://*.example.com"
-# CSP_EXTRA_DEFAULT_SRC: "https://*.example.com"
-# CSP_EXTRA_FONT_SRC: "https://*.example.com"
-# CSP_EXTRA_FRAME_SRC: "https://*.example.com"
-# CSP_EXTRA_MANIFEST_SRC: "https://*.example.com"
-# CSP_EXTRA_OBJECT_SRC: "https://*.example.com"
-# CSP_EXTRA_MEDIA_SRC: "https://*.example.com"
-# CSP_EXTRA_PREFETCH_SRC: "https://*.example.com"
-# CSP_EXTRA_STYLE_SRC: "https://*.example.com"
-# CSP_EXTRA_WORKER_SRC: "https://*.example.com"
diff --git a/charts/wire-server-metrics/.helmignore b/charts/wire-server-metrics/.helmignore
deleted file mode 100644
index f0c131944..000000000
--- a/charts/wire-server-metrics/.helmignore
+++ /dev/null
@@ -1,21 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/charts/wire-server-metrics/Chart.yaml b/charts/wire-server-metrics/Chart.yaml
deleted file mode 100644
index 4dbffae8a..000000000
--- a/charts/wire-server-metrics/Chart.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: v1
-appVersion: "1.0"
-description: Adds monitoring for the kubernetes cluster and wire-server services
-name: wire-server-metrics
-version: 0.94.0
diff --git a/charts/wire-server-metrics/README.md b/charts/wire-server-metrics/README.md
deleted file mode 100644
index 20d782942..000000000
--- a/charts/wire-server-metrics/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-wire-server-metrics
--------------------
-
-This is mostly a wrapper over https://github.com/helm/charts/tree/master/stable/prometheus-operator
-For a full list of overrides, please check the appropriate chart version and its options.
-
-How to use this chart?
----------------------
-
-In its simplest form, install the chart with:
-```
-helm upgrade --install <release-name> --namespace <namespace> wire/wire-server-metrics [-f <values-overrides.yaml>]
-```
-
-For more detailed information on how to set up monitoring on your cluster, go to the [monitoring page](../docs/monitoring.md)
diff --git a/charts/wire-server-metrics/dashboards/message-stats.json b/charts/wire-server-metrics/dashboards/message-stats.json
deleted file mode 100644
index 2b0a57032..000000000
--- a/charts/wire-server-metrics/dashboards/message-stats.json
+++ /dev/null
@@ -1,1064 +0,0 @@
-{
-  "annotations": {
-    "list": [
-      {
-        "builtIn": 1,
-        "datasource": "-- Grafana --",
-        "enable": true,
-        "hide": true,
-        "iconColor": "rgba(0, 211, 255, 1)",
-        "name": "Annotations & Alerts",
-        "type": "dashboard"
-      }
-    ]
-  },
-  "editable": true,
-  "gnetId": null,
-  "graphTooltip": 0,
-  "id": 23,
-  "iteration": 1556873944843,
-  "links": [],
-  "panels": [
-    {
-      "collapsed": false,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 0
-      },
-      "id": 25,
-      "panels": [],
-      "title": "Brig Stats",
-      "type": "row"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "default",
-      "fill": 3,
-      "gridPos": {
-        "h": 7,
-        "w": 12,
-        "x": 0,
-        "y": 1
-      },
-      "id": 17,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 2,
-      "links": [],
-      "nullPointMode": "null",
-      "paceLength": 10,
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "sum(rate(http_request_duration_seconds_count{handler=\"/teams/:tid/invitations\", status_code=\"201\", namespace=\"$namespace\"}[5m]))",
-          "format": "time_series",
-          "hide": false,
-          "intervalFactor": 3,
-          "legendFormat": "invitations/sec(201)",
-          "refId": "C"
-        },
-        {
-          "expr": "sum(rate(http_request_duration_seconds_count{handler=\"/register\", status_code=\"201\", namespace=\"$namespace\"}[5m]))",
-          "format": "time_series",
-          "hide": false,
-          "intervalFactor": 3,
-          "legendFormat": "registration/sec(201)",
-          "refId": "D"
-        },
-        {
-          "expr": "sum(rate(http_request_duration_seconds_count{handler=\"/activate/send\", status_code=~\"200\", namespace=\"$namespace\"}[5m]))",
-          "format": "time_series",
-          "hide": false,
-          "intervalFactor": 3,
-          "legendFormat": "code_requests/sec(200)",
-          "refId": "E"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Registrations/invitations/code requests (brig)",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "collapsed": false,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 8
-      },
-      "id": 23,
-      "panels": [],
-      "title": "Cannon Stats",
-      "type": "row"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "default",
-      "fill": 2,
-      "gridPos": {
-        "h": 7,
-        "w": 12,
-        "x": 0,
-        "y": 9
-      },
-      "id": 15,
-      "legend": {
-        "avg": false,
"current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "paceLength": 10, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(net_websocket_clients{namespace=\"$namespace\"})", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Connected Clients", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Connected Clients (cannon)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 16 - }, - "id": 21, - "panels": [], - "title": "Gundeck Stats", - "type": "row" - }, - { - "cards": { - "cardPadding": null, - "cardRound": null - }, - "color": { - "cardColor": "#8F3BB8", - "colorScale": "sqrt", - "colorScheme": "interpolateOranges", - "exponent": 0.5, - "mode": "spectrum" - }, - "dataFormat": "tsbuckets", - "datasource": "default", - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 17 - }, - "heatmap": {}, - "hideZeroBuckets": true, - "highlightCards": true, - "id": 7, - "legend": { - "show": false - }, - "links": [], - "reverseYBuckets": false, - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_bucket{handler=\"/i/push/v2\", namespace=\"$namespace\"}[5m])) by (le)", - "format": "heatmap", - "instant": false, - "interval": "", - "intervalFactor": 10, - "legendFormat": "{{ le }}", - "refId": "B" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Message Push Latencies (gundeck)", - "tooltip": { - "show": true, - "showHistogram": false - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": null, - "yAxis": { - "decimals": null, - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "default", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 17 - }, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "paceLength": 10, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(push_native_success[5m]))", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "Success", - "refId": "A" - }, - { - "expr": "-sum(increase(push_native_disabled[5m]))", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "-Disabled", - 
"refId": "B" - }, - { - "expr": "-sum(increase(push_native_invalid{namespace=\"$namespace\"}[5m]))", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "-Invalid", - "refId": "C" - }, - { - "expr": "-sum(increase(push_native_errors{namespace=\"$namespace\"}[5m]))", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "-Errors", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Native Push (Outgoing) (gundeck)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "default", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 24 - }, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "paceLength": 10, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_count{role=\"gundeck\", handler=\"/i/push/v2\", namespace=\"$namespace\"}[5m])) by (status_code)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{ status_code }}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Push Rate (incoming) (gundeck)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "default", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 24 - }, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "paceLength": 10, - "percentage": true, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(http_request_duration_seconds_count{handler=\"/i/push/v2\", namespace=\"$namespace\"}) by (status_code)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{ status_code }}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Push Status Codes as % (gundeck)", - "tooltip": { 
- "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "", - "logBase": 1, - "max": "100", - "min": "0", - "show": true - }, - { - "format": "short", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 31 - }, - "id": 19, - "panels": [], - "title": "Galley Stats", - "type": "row" - }, - { - "cards": { - "cardPadding": null, - "cardRound": null - }, - "color": { - "cardColor": "#8F3BB8", - "colorScale": "sqrt", - "colorScheme": "interpolateOranges", - "exponent": 0.5, - "mode": "spectrum" - }, - "dataFormat": "tsbuckets", - "datasource": "default", - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 32 - }, - "heatmap": {}, - "hideZeroBuckets": true, - "highlightCards": true, - "id": 6, - "legend": { - "show": false - }, - "links": [], - "reverseYBuckets": false, - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_bucket{handler=\"/conversations/:cnv/otr/messages\", namespace=\"$namespace\"}[5m])) by (le)", - "format": "heatmap", - "instant": false, - "interval": "", - "intervalFactor": 10, - "legendFormat": "{{ le }}", - "refId": "B" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Message POSTing Latencies (galley)", - "tooltip": { - "show": true, - "showHistogram": false - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": null, - "yAxis": { - "decimals": null, - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "default", - "fill": 8, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 32 - }, - "id": 13, - "legend": { - "avg": false, - "current": false, - "hideEmpty": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "paceLength": 10, - "percentage": true, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_count{role=\"galley\", handler=\"/conversations/:cnv/otr/messages\", status_code=~\"2..\", namespace=\"$namespace\"}[10m]))", - "format": "time_series", - "intervalFactor": 5, - "legendFormat": "2XX", - "refId": "A" - }, - { - "expr": "sum(increase(http_request_duration_seconds_count{role=\"galley\", handler=\"/conversations/:cnv/otr/messages\", status_code=~\"4..\", namespace=\"$namespace\"}[10m]))", - "format": "time_series", - "hide": false, - "intervalFactor": 5, - "legendFormat": "4XX", - "refId": "B" - }, - { - "expr": "sum(increase(http_request_duration_seconds_count{role=\"galley\", handler=\"/conversations/:cnv/otr/messages\", status_code=~\"5..\", namespace=\"$namespace\"}[10m]))", - "format": "time_series", - "intervalFactor": 5, - "legendFormat": "5XX", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "OTR Message Receives as % by Status Code (galley)", - 
"tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": "100", - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "default", - "fill": 1, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 39 - }, - "id": 11, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "paceLength": 10, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_count{handler=\"/conversations/:cnv/otr/messages\", status_code=~\"2..\", namespace=\"$namespace\"}[5m]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Messages Sent every 5m (galley)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "schemaVersion": 18, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "allValue": null, - "current": { - "tags": [], - "text": "staging", - "value": "staging" - }, - "datasource": "Prometheus", - "definition": "kube_namespace_labels", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "namespace", - "options": [ - { - "selected": false, - "text": "All", - "value": "$__all" - }, - { - "selected": false, - "text": "chris", - "value": "chris" - }, - { - "selected": false, - "text": "chris-test", - "value": "chris-test" - }, - { - "selected": false, - "text": "demo", - "value": "demo" - }, - { - "selected": false, - "text": "demo2", - "value": "demo2" - }, - { - "selected": false, - "text": "fisx", - "value": "fisx" - }, - { - "selected": false, - "text": "global", - "value": "global" - }, - { - "selected": false, - "text": "joe", - "value": "joe" - }, - { - "selected": false, - "text": "matthias", - "value": "matthias" - }, - { - "selected": false, - "text": "metallb-system", - "value": "metallb-system" - }, - { - "selected": true, - "text": "staging", - "value": "staging" - }, - { - "selected": false, - "text": "staging-metrics", - "value": "staging-metrics" - }, - { - "selected": false, - "text": "test-2en9vf2arqph", - "value": "test-2en9vf2arqph" - }, - { - "selected": false, - "text": "test-484gdrhp8px4", - "value": "test-484gdrhp8px4" - }, - { - "selected": false, - "text": "test-5t9t91jnf4ds", - "value": "test-5t9t91jnf4ds" - }, - { - "selected": false, - "text": 
"test-8v0m4wjlebdz", - "value": "test-8v0m4wjlebdz" - }, - { - "selected": false, - "text": "test-9wyh13nzsklw", - "value": "test-9wyh13nzsklw" - }, - { - "selected": false, - "text": "test-a8naztqkyneg", - "value": "test-a8naztqkyneg" - }, - { - "selected": false, - "text": "test-ab9cgmivxfxi", - "value": "test-ab9cgmivxfxi" - }, - { - "selected": false, - "text": "test-mu2d9h1eohhh", - "value": "test-mu2d9h1eohhh" - }, - { - "selected": false, - "text": "test-vlofz373vi5a", - "value": "test-vlofz373vi5a" - }, - { - "selected": false, - "text": "test-wiuxbjzfgf0m", - "value": "test-wiuxbjzfgf0m" - }, - { - "selected": false, - "text": "test-xm4f7254de0n", - "value": "test-xm4f7254de0n" - }, - { - "selected": false, - "text": "default", - "value": "default" - }, - { - "selected": false, - "text": "kube-public", - "value": "kube-public" - }, - { - "selected": false, - "text": "kube-system", - "value": "kube-system" - } - ], - "query": "kube_namespace_labels", - "refresh": 0, - "regex": "/namespace=\"([^\"]+)\"/", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Message Stats", - "uid": "sqSUztzZz", - "version": 4 -} \ No newline at end of file diff --git a/charts/wire-server-metrics/dashboards/services.json b/charts/wire-server-metrics/dashboards/services.json deleted file mode 100644 index 30bc06c34..000000000 --- a/charts/wire-server-metrics/dashboards/services.json +++ /dev/null @@ -1,1007 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": 11, - "iteration": 1556873852486, - "links": [], - "panels": [ - { - "content": "# Haskell Service Statistics\n## Change the $(service) and $(namespace) variables to filter stats", - "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 97, - "links": [], - "mode": "markdown", - "timeFrom": null, - "timeShift": null, - "title": "README", - "transparent": true, - "type": "text" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 3 - }, - "id": 89, - "panels": [], - "title": "Global Metrics", - "type": "row" - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 4 - }, - "id": 23, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "paceLength": 10, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_count{status_code=~\"5..\", namespace=\"$namespace\"}[5m])) by (role)", - "format": "time_series", - "intervalFactor": 5, - "legendFormat": "{{role}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, 
- "timeRegions": [], - "timeShift": null, - "title": "5XXs By Service", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "cacheTimeout": null, - "columns": [], - "fontSize": "100%", - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 4 - }, - "id": 98, - "interval": "", - "links": [], - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 3, - "desc": true - }, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "", - "thresholds": [], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - "type": "number", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_count{status_code=~\"5..\", namespace=\"$namespace\"}[$__range])) by (role, handler)", - "format": "table", - "instant": true, - "intervalFactor": 1, - "legendFormat": "# 5XXs", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "5XX/s per handler (current time range) - all services", - "transform": "table", - "type": "table" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 11 - }, - "id": 91, - "panels": [], - "title": "$service request stats", - "type": "row" - }, - { - "aliasColors": { - "/await": "red", - "4XX": "yellow", - "5XX": "semi-dark-red" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "fill": 4, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 12 - }, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "paceLength": 10, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_count{status_code=~\"2..\", role=\"$service\", namespace=\"$namespace\"}[5m])) # Disabled because it's way too slow", - "format": "time_series", - "hide": true, - "instant": false, - "intervalFactor": 1, - "legendFormat": "2XX", - "refId": "A" - }, - { - "expr": "sum(increase(http_request_duration_seconds_count{status_code=~\"4..\", role=\"$service\", namespace=\"$namespace\"}[5m]))", - "format": "time_series", - "hide": false, - "instant": false, - "intervalFactor": 1, - "legendFormat": "4XX", - "refId": "B" - }, - { - "expr": "sum(increase(http_request_duration_seconds_count{status_code=~\"5..\", role=\"$service\", namespace=\"$namespace\"}[5m]))", - "format": "time_series", - "hide": false, - "instant": 
false, - "intervalFactor": 1, - "legendFormat": "5XX", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Request Error Breakdown - $service", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "cacheTimeout": null, - "columns": [], - "fontSize": "100%", - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 12 - }, - "id": 20, - "interval": "", - "links": [], - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 2, - "desc": true - }, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "", - "thresholds": [], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - "type": "number", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_count{status_code=~\"4..\", role=\"$service\", namespace=\"$namespace\"}[$__range])) by (handler)", - "format": "table", - "instant": true, - "intervalFactor": 1, - "legendFormat": "# 5XXs", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "4XX/s per handler (current time range) - $service", - "transform": "table", - "type": "table" - }, - { - "cacheTimeout": null, - "columns": [], - "fontSize": "100%", - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 12 - }, - "id": 21, - "interval": "", - "links": [], - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 2, - "desc": true - }, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "", - "thresholds": [], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - "type": "number", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(increase(http_request_duration_seconds_count{status_code=~\"5..\", role=\"$service\", namespace=\"$namespace\"}[$__range])) by (handler)", - "format": "table", - "instant": true, - "intervalFactor": 1, - "legendFormat": "# 5XXs", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "5XX/s per handler (current time range) - $service", - "transform": "table", - "type": "table" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - 
"dashes": false, - "datasource": "default", - "editable": true, - "error": false, - "fill": 2, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 20 - }, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "paceLength": 10, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(container_network_receive_bytes_total{pod_name=~\"$service.*\", namespace=\"$namespace\"}[5m])) by (pod_name)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{pod_name}}", - "refId": "A", - "step": 3600, - "target": "alias(scale(scaleToSeconds(nonNegativeDerivative(sumSeries(gundeck.*.eu-west-1.compute.internal.interface.eth0.if_octets.rx)), 1), 0.001), 'kb/s - inbound')" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Network Traffic - $service", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "Num / KB", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 27 - }, - "id": 27, - "panels": [], - "repeat": null, - "title": "$service stats", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "default", - "editable": true, - "error": false, - "fill": 2, - "grid": {}, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 28 - }, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "paceLength": 10, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(container_cpu_load_average_10s{pod_name=~\"$service.*\", namespace=\"$namespace\"}) by (pod_name) * 100", - "format": "time_series", - "hide": false, - "instant": false, - "intervalFactor": 2, - "legendFormat": "{{ pod_name }}", - "refId": "B", - "step": 2400, - "target": "alias(averageSeries(gundeck.*.eu-west-1.compute.internal.load.load.shortterm), 'load - shorterm')" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "CPU % (Mean) - $service", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "max": "100", - "min": "0", - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "Memory Limit (MB)": 
"red" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "default", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 28 - }, - "id": 5, - "legend": { - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "paceLength": 10, - "percentage": true, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "Free mem (MB)", - "yaxis": 2 - }, - { - "alias": "gc.used.current (MB)", - "yaxis": 1 - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(container_memory_usage_bytes{namespace=\"$namespace\", pod_name=~\"$service.*\", namespace=\"$namespace\"}) by (pod_name) / (1024 * 1024)", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{ pod_name }}", - "refId": "A", - "step": 2400, - "target": "alias(scale(averageSeries(gundeck.*.eu-west-1.compute.internal.memory.memory.used), 0.000001), 'used')" - }, - { - "expr": "sum(container_spec_memory_limit_bytes{namespace=\"staging\", pod_name=~\"brig.*\", namespace=\"$namespace\"}) / (1024 * 1024)", - "format": "time_series", - "hide": true, - "instant": false, - "intervalFactor": 2, - "legendFormat": "Memory Limit (MB)", - "metric": "", - "refId": "C", - "step": 2400, - "target": "alias(scale(averageSeries(gundeck.*.eu-west-1.compute.internal.memory.memory.used), 0.000001), 'used')" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage - $service", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "MB", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "default", - "fill": 2, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 34 - }, - "id": 25, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "paceLength": 10, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(net_connections{role=\"$service\", namespace=\"$namespace\"})", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Inbound Connections", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Inbound Connections - $service", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": 
true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": false, - "schemaVersion": 18, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "allValue": null, - "current": { - "tags": [], - "text": "brig", - "value": "brig" - }, - "hide": 0, - "includeAll": false, - "label": "service", - "multi": false, - "name": "service", - "options": [ - { - "selected": false, - "text": "brig", - "value": "brig" - }, - { - "selected": false, - "text": "galley", - "value": "galley" - }, - { - "selected": false, - "text": "cannon", - "value": "cannon" - }, - { - "selected": false, - "text": "gundeck", - "value": "gundeck" - }, - { - "selected": false, - "text": "cargohold", - "value": "cargohold" - }, - { - "selected": false, - "text": "spar", - "value": "spar" - }, - { - "selected": true, - "text": "proxy", - "value": "proxy" - } - ], - "query": "brig,galley,cannon,gundeck,cargohold,spar,proxy", - "skipUrlSync": false, - "type": "custom" - }, - { - "allValue": null, - "current": { - "text": "staging", - "value": "staging" - }, - "datasource": "Prometheus", - "definition": "kube_namespace_labels", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "namespace", - "options": [], - "query": "kube_namespace_labels", - "refresh": 1, - "regex": "/namespace=\"([^\"]+)\"/", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-24h", - "to": "now" - }, - "timepicker": { - "collapse": false, - "enable": true, - "notice": false, - "now": true, - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "status": "Stable", - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ], - "type": "timepicker" - }, - "timezone": "browser", - "title": "Wire Services", - "uid": "000000043", - "version": 17 -} \ No newline at end of file diff --git a/charts/wire-server-metrics/requirements.yaml b/charts/wire-server-metrics/requirements.yaml deleted file mode 100644 index c3659fe4e..000000000 --- a/charts/wire-server-metrics/requirements.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -dependencies: - - name: prometheus-operator - version: 6.7.2 - repository: "@stable" diff --git a/charts/wire-server-metrics/values.yaml b/charts/wire-server-metrics/values.yaml deleted file mode 100644 index 37cbc3785..000000000 --- a/charts/wire-server-metrics/values.yaml +++ /dev/null @@ -1,84 +0,0 @@ -prometheus-operator: - prometheus: - additionalServiceMonitors: - - name: wire-services - # We copy these labels from the pod onto the collected metrics from that pod - targetLabels: - - wireService - endpoints: - - path: '/i/metrics' - port: http - interval: 10s - metricRelabelings: - # Rename 'service' to 'role' to allow sharing of grafana dashboards - # between k8s and AWS services. 
- - sourceLabels: [service] - targetLabel: role - # This monitors _all_ namespaces and selects all - # pods that with a wireServices selector - namespaceSelector: - any: true - selector: - matchExpressions: - # select any pod with a 'wireService' label - - key: wireService - operator: Exists - - prometheusOperator: - # Don't try to create custom resource types; we prefer to do it manually - # Otherwise we run into race conditions when installing helm charts - createCustomResource: false - - grafana: - adminPassword: "admin" - ingress: - enabled: false - persistence: - storageClassName: "aws-ebs-retained" - enabled: true - accessModes: ["ReadWriteOnce"] - size: 10Gi - - dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/default - - dashboards: - default: - # grafana can only access files from within its own chart directory - # which we don't have access to here; we can either dump the json - # directly into the values file, or load from a url - messages: - url: https://raw.githubusercontent.com/wireapp/wire-server-deploy/master/charts/wire-server-metrics/dashboards/message-stats.json - services: - url: https://raw.githubusercontent.com/wireapp/wire-server-deploy/master/charts/wire-server-metrics/dashboards/services.json - - prometheusSpec: - storageSpec: - volumeClaimTemplate: - spec: - storageClassName: "aws-ebs-retained" - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 10Gi - - alertmanager: - alertmanagerSpec: - storage: - volumeClaimTemplate: - spec: - storageClassName: "aws-ebs-retained" - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 10Gi diff --git a/charts/wire-server/.helmignore b/charts/wire-server/.helmignore deleted file mode 100644 index f0c131944..000000000 --- a/charts/wire-server/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/wire-server/Chart.yaml b/charts/wire-server/Chart.yaml deleted file mode 100644 index a62e21af3..000000000 --- a/charts/wire-server/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart for wire-server https://github.com/wireapp/wire-server -name: wire-server -version: 0.94.0 diff --git a/charts/wire-server/requirements.yaml b/charts/wire-server/requirements.yaml deleted file mode 100644 index 2cb1c4571..000000000 --- a/charts/wire-server/requirements.yaml +++ /dev/null @@ -1,92 +0,0 @@ -dependencies: -######################## -## wire-servers/database cassandra-migrations -######################## -- name: cassandra-migrations - version: "0.94.0" - repository: "file://../cassandra-migrations" - tags: - - cassandra-migrations -- name: elasticsearch-index - version: "0.94.0" - repository: "file://../elasticsearch-index" - tags: - - elasticsearch-index -######################## -## wire-servers/services -######################## -- name: cannon - version: "0.94.0" - repository: "file://../cannon" - tags: - - cannon - - haskellServices - - services -- name: proxy - version: "0.94.0" - repository: "file://../proxy" - tags: - - proxy - - haskellServices - - services -- name: cargohold - version: "0.94.0" - repository: "file://../cargohold" - tags: - - cargohold - - haskellServices - - services -- name: gundeck - version: "0.94.0" - repository: "file://../gundeck" - tags: - - gundeck - - haskellServices - - services -- name: spar - version: "0.94.0" - repository: "file://../spar" - tags: - - spar - - haskellServices - - services -- name: galley - version: "0.94.0" - repository: "file://../galley" - tags: - - galley - - haskellServices - - services -- name: brig - version: "0.94.0" - repository: "file://../brig" - tags: - - brig - - haskellServices - - services -- name: nginz - version: "0.94.0" - repository: "file://../nginz" - tags: - - nginz - - services -- name: webapp - version: "0.94.0" - repository: "file://../webapp" - tags: - - web - - webapp -- name: team-settings - version: "0.94.0" - repository: "file://../team-settings" - tags: - - web - - team-settings - - private -- name: account-pages - version: "0.94.0" - repository: "file://../account-pages" - tags: - - web - - account-pages - - private diff --git a/charts/wire-server/templates/NOTES.txt b/charts/wire-server/templates/NOTES.txt deleted file mode 100644 index 2f9338732..000000000 --- a/charts/wire-server/templates/NOTES.txt +++ /dev/null @@ -1,2 +0,0 @@ - -TODO: write nice NOTES.txt diff --git a/charts/wire-server/values.yaml b/charts/wire-server/values.yaml deleted file mode 100644 index 827ebbe2d..000000000 --- a/charts/wire-server/values.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Default values for wire-server. - -# Tags allow to enable/disable certain components of wire-server as defined in requirements.yaml -# https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml -# tags: -# services: true - -tags: - team-settings: false - account-pages: false diff --git a/default.nix b/default.nix new file mode 100644 index 000000000..bf27afb64 --- /dev/null +++ b/default.nix @@ -0,0 +1,105 @@ +{ system ? 
builtins.currentSystem }: + +let + sources = import ./nix/sources.nix; + pkgs = import sources.nixpkgs { + inherit system; + config = { }; + overlays = [ + (import ./nix/overlay.nix) + ]; + }; + profileEnv = pkgs.writeTextFile { + name = "profile-env"; + destination = "/.profile"; + # This gets sourced by direnv. Set NIX_PATH, so `nix-shell` uses the same nixpkgs as here. + text = '' + export NIX_PATH=nixpkgs=${toString pkgs.path} + ''; + }; + + +in +rec { + inherit pkgs profileEnv; + + env = pkgs.buildEnv { + name = "wire-server-deploy"; + paths = with pkgs; [ + ansible_2_15 + pythonForAnsible + jmespath + apacheHttpd + awscli2 + gnumake + gnupg + + kubernetes-tools + + # Note: This is overridden in nix/overlay.nix to have plugins. This is + # required so that helmfile gets the correct version of helm in its PATH. + kubernetes-helm + helmfile + openssl + moreutils + skopeo + sops + opentofu + yq + create-container-dump + list-helm-containers + mirror-apt-jammy + generate-gpg1-key + # Linting + shellcheck + + niv + nix-prefetch-docker + ] ++ [ + profileEnv + ] ++ lib.optionals pkgs.stdenv.isLinux [ + pkgs.containerd + patch-ingress-controller-images # depends on containerd, TODO: migrate to skopeo? + + + # for RTP session debugging + wireshark + gnuplot + ]; + }; + + # The container we use for offline deploys. Where people probably do not have + # nix + direnv :) + container = pkgs.dockerTools.buildImage { + name = "quay.io/wire/wire-server-deploy"; + fromImage = pkgs.dockerTools.pullImage (import ./nix/docker-alpine.nix); + # we don't want git or ssh or anything in here, the ansible folder is + # mounted into here. + contents = [ + pkgs.cacert + pkgs.coreutils + pkgs.bashInteractive + pkgs.openssh # ansible needs this too, even with paramiko + pkgs.sshpass # needed for password login + + # The environment + env + # provide /usr/bin/env and /tmp in the container too :-) + #(pkgs.runCommandNoCC "foo" {} " + # mkdir -p $out/usr/bin $out/tmp + # ln -sfn ${pkgs.coreutils}/bin/env $out/usr/bin/env + #") + ]; + config = { + Volumes = { + "/wire-server-deploy" = { }; + }; + WorkingDir = "/wire-server-deploy"; + Env = [ + "KUBECONFIG=/wire-server-deploy/ansible/inventory/offline/artifacts/admin.conf" + "ANSIBLE_CONFIG=/wire-server-deploy/ansible/ansible.cfg" + "LOCALHOST_PYTHON=${env}/bin/python" + ]; + }; + }; +} diff --git a/examples/control-planes-only-k8s/README.md b/examples/control-planes-only-k8s/README.md new file mode 100644 index 000000000..c2b58c489 --- /dev/null +++ b/examples/control-planes-only-k8s/README.md @@ -0,0 +1,11 @@ +EXAMPLE: Control plane only K8s nodes +===================================== + +This example deploys a group of machines, consisting solely of control plane machines, +onto which a Kubernetes cluster can then be deployed.
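As a rough sketch of how this example might be applied, assuming the `terraform/environment` entry point used elsewhere in this repository (the paths and the backend handling below are illustrative assumptions, not part of the example):

``` sh
# Illustrative only: adjust paths and backend configuration to your checkout.
cd terraform/environment

# Copy the example variables and fill in every CHANGE_ME value.
cp ../../examples/control-planes-only-k8s/terraform.tfvars .
"$EDITOR" terraform.tfvars

terraform init -backend=false          # or supply -backend-config for shared state
terraform plan -var-file=terraform.tfvars
terraform apply -var-file=terraform.tfvars
```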
+ + +### Characteristics: + +* deployed on Hetzner +* no load-balancing diff --git a/examples/control-planes-only-k8s/terraform.tfvars b/examples/control-planes-only-k8s/terraform.tfvars new file mode 100644 index 000000000..3b56a6c16 --- /dev/null +++ b/examples/control-planes-only-k8s/terraform.tfvars @@ -0,0 +1,24 @@ +environment = "CHANGE_ME:generic-name" + +root_domain = "CHANGE_ME:FQDN" + +operator_ssh_public_keys = { + terraform_managed = { + "CHANGE_ME:unique-name" = "CHANGE_ME:key-file-content" + } + preuploaded_key_names = [] +} + +k8s_cluster = { + cloud = "hetzner" + + machine_groups = [ + { + group_name = "cpns" + # NOTE: set to 1 in order to get a single-machine Kubernetes cluster + machine_count = 3 + machine_type = "cx21" + component_classes = [ "controlplane", "node" ] + } + ] +} diff --git a/examples/multi-instance-sft/README.md b/examples/multi-instance-sft/README.md new file mode 100644 index 000000000..e3b715f7e --- /dev/null +++ b/examples/multi-instance-sft/README.md @@ -0,0 +1,9 @@ +EXAMPLE: SFT server in a blue-green deployment +============================================== + +This example deploys two groups of SFT servers. + + +### Characteristics: + +* deployed on Hetzner diff --git a/examples/multi-instance-sft/terraform.tfvars b/examples/multi-instance-sft/terraform.tfvars new file mode 100644 index 000000000..a64229c57 --- /dev/null +++ b/examples/multi-instance-sft/terraform.tfvars @@ -0,0 +1,15 @@ +environment = "CHANGE_ME:generic-name" + +root_domain = "CHANGE_ME:FQDN" + +operator_ssh_public_keys = { + terraform_managed = { + "CHANGE_ME:unique-name" = "CHANGE_ME:key-file-content" + } + preuploaded_key_names = [] +} + +sft_server_names_blue = ["1", "2", "3"] +sft_server_type_blue = "cx31" +sft_server_names_green = ["4", "5", "6"] +sft_server_type_green = "cx31" diff --git a/examples/multi-node-k8s-with-lb-and-dns/README.md b/examples/multi-node-k8s-with-lb-and-dns/README.md new file mode 100644 index 000000000..a2c6b6fb9 --- /dev/null +++ b/examples/multi-node-k8s-with-lb-and-dns/README.md @@ -0,0 +1,16 @@ +EXAMPLE: Multiple K8s nodes +=========================== + +This example deploys Wire on a multi-node Kubernetes cluster with one control plane machine. 
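The example combines Terraform (machines, load balancer, DNS), a Kubespray inventory, and a helmfile. A rough sketch of the intended order follows; the Kubespray playbook path and the working directories are assumptions to adapt to your checkout:

``` sh
# Illustrative sequence only; all paths below are assumptions.
# 1. Provision machines, the load balancer, and DNS records on Hetzner.
terraform init -backend-config=backend.tfvars
terraform apply -var-file=terraform.tfvars

# 2. Install Kubernetes with Kubespray, driven by this example's inventory.
ansible-playbook -i inventory/inventory.yml /path/to/kubespray/cluster.yml

# 3. From this example's directory, deploy Wire and its dependency charts
#    as pinned in the helmfile.
helmfile --file helmfile.yaml sync
```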
+ + +### Characteristics: + +* Helm values serve a demonstration purpose only +* DNS for all Wire services +* Kubernetes deployed on Hetzner +* Cert-Manager +* Network Load Balancer +* Ephemeral Databases running on Kubernetes +* on-board Mail server diff --git a/examples/multi-node-k8s-with-lb-and-dns/backend.tfvars b/examples/multi-node-k8s-with-lb-and-dns/backend.tfvars new file mode 100644 index 000000000..e70ef8985 --- /dev/null +++ b/examples/multi-node-k8s-with-lb-and-dns/backend.tfvars @@ -0,0 +1,4 @@ +bucket = "CHANGE_ME:bucket-name" +key = "CHANGE_ME:path/in/bucket/to/terraform.tfstate" +region = "CHANGE_ME:AWS-region" +dynamodb_table = "CHANGE_ME:shared-state-lock" diff --git a/examples/multi-node-k8s-with-lb-and-dns/helm_vars/demo-smtp/values.yaml b/examples/multi-node-k8s-with-lb-and-dns/helm_vars/demo-smtp/values.yaml new file mode 100644 index 000000000..87cc4a162 --- /dev/null +++ b/examples/multi-node-k8s-with-lb-and-dns/helm_vars/demo-smtp/values.yaml @@ -0,0 +1,23 @@ +# CHANGEME-PROD: This is often a good default when using calico's default CIDR +# https://github.com/kubernetes-sigs/kubespray/blob/master/docs/calico.md#optional--define-the-default-pool-cidr +# or flannel's https://github.com/kubernetes-sigs/kubespray/blob/master/docs/flannel.md#flannel +# If you override those values, etc., then verify that this CIDR still makes sense +# For all variables the "ixdotai/smtp" image supports see: https://github.com/ix-ai/smtp#readme +envVars: + RELAY_NETWORKS: ":10.233.0.0/16" +# +# PORT: "25" +# NET_DEV: eth0 +# OTHER_HOSTNAMES: other.example.com +# DISABLE_IPV6: 1 +# BIND_IP: 0.0.0.0 +# BIND_IP6: ::0 +# MAILNAME: mail.example.com +# DKIM_KEY_PATH: /etc/exim4/dkim.key +# KEY_PATH: /path/to/key.crt +# CERTIFICATE_PATH: /path/to/certificate.crt +# SMARTHOST_ADDRESS: mail.example.com +# SMARTHOST_PORT: "587" +# SMARTHOST_USER: exampleuser +# SMARTHOST_PASSWORD: secret +# SMARTHOST_ALIASES: "*.example.com" diff --git a/examples/multi-node-k8s-with-lb-and-dns/helm_vars/nginx-ingress-services/values.yaml b/examples/multi-node-k8s-with-lb-and-dns/helm_vars/nginx-ingress-services/values.yaml new file mode 100644 index 000000000..ab9e10051 --- /dev/null +++ b/examples/multi-node-k8s-with-lb-and-dns/helm_vars/nginx-ingress-services/values.yaml @@ -0,0 +1,22 @@ +teamSettings: + enabled: false +accountPages: + enabled: true +tls: + enabled: true + useCertManager: true + +certManager: + # CHANGE_ME:to-get-valid-cert + # inTestMode: true + certmasterEmail: "CHANGE_ME:valid-email-address" + +# NOTE: corresponds to ./../../terraform.tfvars +config: + dns: + https: nginz-https.CHANGE_ME:generic-name.CHANGE_ME:FQDN + ssl: nginz-ssl.CHANGE_ME:generic-name.CHANGE_ME:FQDN + webapp: webapp.CHANGE_ME:generic-name.CHANGE_ME:FQDN + fakeS3: assets.CHANGE_ME:generic-name.CHANGE_ME:FQDN + teamSettings: teams.CHANGE_ME:generic-name.CHANGE_ME:FQDN + accountPages: account.CHANGE_ME:generic-name.CHANGE_ME:FQDN diff --git a/examples/multi-node-k8s-with-lb-and-dns/helm_vars/wire-server/values.yaml b/examples/multi-node-k8s-with-lb-and-dns/helm_vars/wire-server/values.yaml new file mode 100644 index 000000000..b60bdcfa4 --- /dev/null +++ b/examples/multi-node-k8s-with-lb-and-dns/helm_vars/wire-server/values.yaml @@ -0,0 +1,155 @@ +tags: + proxy: false + spar: false + team-settings: false + account-pages: true + +cassandra-migrations: + cassandra: + host: cassandra-ephemeral + replicationFactor: 1 + +elasticsearch-index: + elasticsearch: + host: elasticsearch-ephemeral + cassandra: + host: 
cassandra-ephemeral + +brig: + replicaCount: 1 + config: + cassandra: + host: cassandra-ephemeral + replicaCount: 1 + elasticsearch: + host: elasticsearch-ephemeral + useSES: false + aws: + sqsEndpoint: http://fake-aws-sqs:4568 + dynamoDBEndpoint: http://fake-aws-dynamodb:4567 + internalQueue: integration-brig-events-internal + prekeyTable: integration-brig-prekeys + externalUrls: + nginz: https://nginz-https.CHANGE_ME:generic-name.CHANGE_ME:FQDN + teamSettings: https://teams.CHANGE_ME:generic-name.CHANGE_ME:FQDN + teamCreatorWelcome: https://teams.CHANGE_ME:generic-name.CHANGE_ME:FQDN/login + teamMemberWelcome: https://teams.CHANGE_ME:generic-name.CHANGE_ME:FQDN/download + optSettings: + setFederationDomain: CHANGE_ME:generic-name.CHANGE_ME:FQDN + emailSMS: + general: + emailSender: email@CHANGE_ME:generic-name.CHANGE_ME:FQDN + smsSender: "insert-sms-sender-for-twilio" # change this if SMS support is desired + smtp: + host: demo-smtp + port: 25 + connType: plain + +proxy: + replicaCount: 1 + +cannon: + replicaCount: 1 + drainTimeout: 10 + +cargohold: + replicaCount: 1 + config: + aws: + s3Bucket: dummy-bucket + s3Endpoint: http://fake-aws-s3:9000 + s3DownloadEndpoint: https://assets.CHANGE_ME:generic-name.CHANGE_ME:FQDN + +galley: + replicaCount: 1 + config: + cassandra: + host: cassandra-ephemeral + replicaCount: 1 + settings: + conversationCodeURI: https://CHANGE_ME:generic-name.CHANGE_ME:FQDN/conversation-join/ # must point to account pages + featureFlags: + sso: enabled-by-default + teamSearchVisibility: disabled-by-default + enableIndexedBillingTeamMembers: true + federationDomain: CHANGE_ME:generic-name.CHANGE_ME:FQDN + +gundeck: + replicaCount: 1 + config: + cassandra: + host: cassandra-ephemeral + replicaCount: 1 + aws: + account: "123456789012" + region: eu-west-1 + arnEnv: integration + queueName: integration-gundeck-events + sqsEndpoint: http://fake-aws-sqs:4568 + snsEndpoint: http://fake-aws-sns:4575 + +nginz: + replicaCount: 1 + config: + ws: + useProxyProtocol: false + nginx_conf: + env: prod + external_env_domain: CHANGE_ME:generic-name.CHANGE_ME:FQDN + drainTimeout: 10 + terminationGracePeriodSeconds: 30 + +webapp: + replicaCount: 1 + config: + externalUrls: + backendRest: nginz-https.CHANGE_ME:generic-name.CHANGE_ME:FQDN + backendWebsocket: nginz-ssl.CHANGE_ME:generic-name.CHANGE_ME:FQDN + backendDomain: CHANGE_ME:generic-name.CHANGE_ME:FQDN + appHost: webapp.CHANGE_ME:generic-name.CHANGE_ME:FQDN + envVars: + BRAND_NAME: "CHANGE_ME" + BACKEND_NAME: "QA - Fixed SSO" + FEATURE_CHECK_CONSENT: "false" + FEATURE_ENABLE_DEBUG: "false" + FEATURE_ENABLE_DOMAIN_DISCOVERY: "true" + FEATURE_ENABLE_PHONE_LOGIN: "false" + URL_ACCOUNT_BASE: "https://account.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + URL_TEAMS_BASE: "https://teams.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + URL_WEBSITE_BASE: "https://wire.link" + CSP_EXTRA_CONNECT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN, wss://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_IMG_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_SCRIPT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_DEFAULT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_FONT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_FRAME_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_MANIFEST_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_OBJECT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_MEDIA_SRC: 
"https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_PREFETCH_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_STYLE_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_WORKER_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + +account-pages: + replicaCount: 1 + config: + externalUrls: + backendRest: nginz-https.CHANGE_ME:generic-name.CHANGE_ME:FQDN + backendDomain: CHANGE_ME:generic-name.CHANGE_ME:FQDN + appHost: account.CHANGE_ME:generic-name.CHANGE_ME:FQDN + envVars: + URL_ACCOUNT_BASE: "https://account.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + URL_TEAMS_BASE: "https://teams.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + URL_WEBSITE_BASE: "https://wire.link" + CSP_EXTRA_CONNECT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN, wss://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_IMG_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_SCRIPT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_DEFAULT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_FONT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_FRAME_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_MANIFEST_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_OBJECT_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_MEDIA_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_PREFETCH_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_STYLE_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" + CSP_EXTRA_WORKER_SRC: "https://*.CHANGE_ME:generic-name.CHANGE_ME:FQDN" diff --git a/examples/multi-node-k8s-with-lb-and-dns/helmfile.yaml b/examples/multi-node-k8s-with-lb-and-dns/helmfile.yaml new file mode 100644 index 000000000..8903257b9 --- /dev/null +++ b/examples/multi-node-k8s-with-lb-and-dns/helmfile.yaml @@ -0,0 +1,59 @@ +helmDefaults: + wait: true + timeout: 600 + devel: true + +repositories: + - name: wire + url: 'https://s3-eu-west-1.amazonaws.com/public.wire.com/charts' + - name: wire-develop + url: 'https://s3-eu-west-1.amazonaws.com/public.wire.com/charts-develop' + - name: jetstack + url: 'https://charts.jetstack.io' + +releases: + - name: 'fake-aws' + namespace: 'wire' + chart: 'wire/fake-aws' + version: 'CHANGE_ME' + + - name: 'databases-ephemeral' + namespace: 'wire' + chart: 'wire/databases-ephemeral' + version: 'CHANGE_ME' + + - name: 'demo-smtp' + namespace: 'wire' + chart: 'wire/demo-smtp' + version: 'CHANGE_ME' + values: + - './helm_vars/demo-smtp/values.yaml' + + - name: 'cert-manager' + namespace: 'cert-manager' + chart: 'jetstack/cert-manager' + version: '1.5.2' + set: + - name: installCRDs + value: true + + - name: 'wire-server' + namespace: 'wire' + chart: 'wire/wire-server' + version: 'CHANGE_ME' + values: + - './helm_vars/wire-server/values.yaml' + secrets: + - './helm_vars/wire-server/secrets.yaml' + + - name: 'ingress-nginx-controller' + namespace: 'wire' + chart: 'wire/ingress-nginx-controller' + version: 'CHANGE_ME' + + - name: 'nginx-ingress-services' + namespace: 'wire' + chart: 'wire/nginx-ingress-services' + version: 'CHANGE_ME' + values: + - './helm_vars/nginx-ingress-services/values.yaml' diff --git a/examples/multi-node-k8s-with-lb-and-dns/inventory/inventory.yml b/examples/multi-node-k8s-with-lb-and-dns/inventory/inventory.yml new file mode 100644 index 000000000..4105f139d --- /dev/null +++ b/examples/multi-node-k8s-with-lb-and-dns/inventory/inventory.yml @@ -0,0 +1,27 @@ +all: + vars: + root_domain: 
'CHANGE_ME' + environment_name: 'CHANGE_ME' + + ansible_ssh_user: 'CHANGE_ME' + + +k8s-cluster: + vars: + kube_version: 'CHANGE_ME' + + container_manager: 'CHANGE_ME' + # NOTE: relax the handling of lists with more than 3 items + # CHANGE_ME:if-using-docker-and-hetzner + # docker_dns_servers_strict: false + + # NOTE: Make sure that internal kube-apiserver requests are always traveling between cluster machines + # directly, regardless of whether an external load balancer exists + # DOCS: https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ha-mode.md + loadbalancer_apiserver_localhost: true + + # NOTE: Necessary for the Hetzner Cloud until Calico v3.17 arrives in Kubespray + calico_mtu: 1450 + calico_veth_mtu: 1430 + + dashboard_enabled: false diff --git a/examples/multi-node-k8s-with-lb-and-dns/terraform.tfvars b/examples/multi-node-k8s-with-lb-and-dns/terraform.tfvars new file mode 100644 index 000000000..6a575459c --- /dev/null +++ b/examples/multi-node-k8s-with-lb-and-dns/terraform.tfvars @@ -0,0 +1,56 @@ +environment = "CHANGE_ME:generic-name" + +root_domain = "CHANGE_ME:FQDN" +# NOTE: corresponds to helm_vars/[wire-server,nginx-ingress-services]/values.yaml +sub_domains = [ + "nginz-https", + "nginz-ssl", + "webapp", + "assets", + "account", + "teams" +] +create_spf_record = true + +operator_ssh_public_keys = { + terraform_managed = { + "CHANGE_ME:unique-name" = "CHANGE_ME:key-file-content" + } + preuploaded_key_names = [] +} + +k8s_cluster = { + cloud = "hetzner" + + # NOTE: corresponds to wire-server/charts/ingress-nginx-controller/values.yaml#nodePorts + load_balancer_ports = [ + { + name = "http" + protocol = "tcp" + listen = 80 + destination = 31772 + }, + { + name = "https" + protocol = "tcp" + listen = 443 + destination = 31773 + } + ] + + machine_groups = [ + { + group_name = "cps" + machine_count = 1 + machine_type = "cx21" + component_classes = [ "controlplane" ] + }, + + { + group_name = "nodes" + machine_count = 2 + machine_type = "cpx41" + component_classes = [ "node" ] + }, + ] +} diff --git a/examples/team-provisioning-qr-codes/README.md b/examples/team-provisioning-qr-codes/README.md new file mode 100644 index 000000000..4829c046a --- /dev/null +++ b/examples/team-provisioning-qr-codes/README.md @@ -0,0 +1,105 @@ +EXAMPLE: Onboarding users with QR codes instead of emails +========================================================= + +This example includes a bash script to automate the provisioning of users in a +Wire team on a private server instance without users needing to be able to +receive email. + +New users are invited to a team by a team administrator, who sends an invite +link to each user's email address from the team settings page. When the user +opens the invite link, they are prompted to create a new account on the +Wire server; that account then becomes a member of the team. + +The script in this directory takes a user's email address and extracts the +invite code that was generated for their email address from the +`teams/{tid}/invitations` API. Then, it generates a PDF containing +administrator-provided setup instructions, the email address they should use +when creating their account, and QR codes for both their invite link and the Wire +server's deeplink (used to configure mobile clients to use the private instance +instead of the public cloud instance). + +## Usage + +This script assumes that the Wire server instance is deployed and functioning +correctly.
The operator needs to add the *team id* (which can be found
+on the `team-settings` webapp under the menu item `Customization`) to the galley
+server configuration setting `exposeInvitationURLsTeamAllowlist`.
+
+The `qrencode` command line tool is used for generating the URL QR codes,
+and a LaTeX toolchain and the `latexmk` script are used for generating the
+final PDF with the user's QR codes and instructions. On Debian and Ubuntu Linux
+systems, these tools may be obtained by installing the `qrencode`, `texlive`,
+and `latexmk` packages. Other distributions may have different names for these
+packages. Alternatively, if you are using the offline installation instructions
+described [here](../../offline/docs.md), the installation artifact tarball
+includes these packages, which may be installed inside the deployment and
+administration docker container.
+
+The administrator must also provide a file containing setup instructions for
+the user to follow when they receive a copy of the PDF. These instructions
+should direct the user to first scan the invite link QR code and create an
+account on the Wire server using the email address listed in the PDF. Then,
+if they are using a mobile client, they should scan the deeplink QR code and
+open the link to trigger configuration of the mobile client application with
+the private server instance.
+
+The script reads configuration from a series of environment variables:
+
+- `TEAM_ADMIN_EMAIL`: the email address of the team admin account.
+
+- `TEAM_ADMIN_PASSWORD`: the password of the team admin account.
+
+- `NGINZ_HOST`: the fully qualified domain name of the nginz host. For the Wire
+  cloud it's `prod-nginz-https.wire.com`.
+
+- `TEAM_ID`: the team id. This can be found in the `team-settings` webapp under
+  `Customization`.
+
+- `DEEPLINK_URL`: the URL for the private Wire server's deeplink. See [this
+  page](https://docs.wire.com/how-to/associate/deeplink.html) for further
+  information on using deeplinks with private Wire instances. Example:
+  `https://assets.wire.example.com/public/deeplink.html`.
+
+- `INSTRUCTIONS`: path to a file containing administrator-provided setup
+  instructions to be included in the generated PDF. The contents of this file
+  are included in the LaTeX sources verbatim. If the instructions file includes
+  LaTeX control characters, the script will print a warning, as invalid LaTeX
+  may cause the build step for generating the PDF to fail.
+
+In order to use the script to generate a PDF file for a user, the team
+administrator must first send an invitation link to that user's email
+address. The Wire server uses the email address to uniquely identify the user
+internally; however, the email address, in this case, does not need to be able
+to receive email, and might be a placeholder value. The administrator must also
+write the setup instruction file, as described above.
+
+The PDF generation script may then be run with the user's email address as a
+single argument, and the script will extract their invite code, generate QR
+codes, and then build the PDF, which will be copied into the script's current
+working directory.
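+
+For reference, the `exposeInvitationURLsTeamAllowlist` setting mentioned at the
+top of this document must contain your team id for the invitation URLs to be
+returned by the API. A minimal sketch of what this could look like in the
+galley section of the Helm values (the exact nesting may differ between chart
+versions, and the team id below is a placeholder):
+
+```yaml
+# Sketch only; adjust to your galley configuration layout.
+settings:
+  exposeInvitationURLsTeamAllowlist:
+    - "00000000-0000-0000-0000-000000000000"  # your team id
+```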
+ +An example invocation of the script could look like this: + +``` sh + $ cat > instructions.txt <&2 + exit 1 +} + +if [ -z "$TEAM_ADMIN_EMAIL" ]; then + error 'TEAM_ADMIN_EMAIL is not set' +elif [ -z "$TEAM_ADMIN_PASSWORD" ]; then + error 'TEAM_ADMIN_PASSWORD is not set' +elif [ -z "$NGINZ_HOST" ]; then + error 'NGINZ_HOST is not set' +elif [ -z "$TEAM_ID" ]; then + error 'TEAM_ID is not set' +elif [ -z "$DEEPLINK_URL" ]; then + error 'DEEPLINK_URL is not set' +elif [ -z "$INSTRUCTIONS" ]; then + error 'INSTRUCTIONS is not set' +elif [ ! -f "$INSTRUCTIONS" ] || [ ! -r "$INSTRUCTIONS" ]; then + error 'INSTRUCTIONS file does not exist or is not readable' +fi + +if grep -Eq '[{}\\]' "$INSTRUCTIONS"; then + echo 'warning: instructions file contains LaTeX control characters' >&2 + echo 'warning: the output PDF file may not render as expected' >&2 + echo 'will continue in 3 seconds' >&2 + sleep 3 +fi + +if [ -z "$1" ]; then + error 'no email address provided' +fi + +user_email="$1" + +if echo "$user_email" | grep -Fq "'"; then + error 'email address contains invalid character' +fi + +echo "info: get access token by logging in" +access_token=$(curl --location --request POST "https://$NGINZ_HOST/login" \ +--header 'Content-Type: application/json' \ +--data-raw "{ + \"email\": \"$TEAM_ADMIN_EMAIL\", + \"password\": \"$TEAM_ADMIN_PASSWORD\" +}" | jq -r .access_token) + +if [ "$access_token" = "null" ] ; then + error "Cannot login. Are the credentials correct?" +fi + +echo "info: enable feature" +feature_status=$(curl --location --request PUT "https://$NGINZ_HOST/teams/$TEAM_ID/features/exposeInvitationURLsToTeamAdmin" \ +--header "Authorization: Bearer $access_token" \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "status": "enabled" +}' | jq -r .status) + +if [ "$feature_status" != "enabled" ] ; then + error "Cannot set feature status. Please check server configuration." +fi + +echo "info: create account and get invitation url" +invite_url=$(curl --location --request POST "https://$NGINZ_HOST/teams/$TEAM_ID/invitations" \ +--header "Authorization: Bearer $access_token" \ +--header 'Content-Type: application/json' \ +--data-raw "{ + \"email\": \"$user_email\" +}" | jq -r .url) + +if [ "$invite_url" = "null" ] ; then + error "Cannot create invitation url. Is this email address already registered?" +fi + +echo "info invitation url to be encoded: $invite_url" + +set -e + +# prepare QR codes and LaTeX sources for PDF generation +tmpdir=$(mktemp -d "/tmp/tmp.qr-code-provisioning.XXXXXXXX") +cleanup() { + echo 'info: cleaning up...' + rm -rf "$tmpdir" +} +trap cleanup EXIT + +echo "info: user invite URL is $invite_url" + +qrencode -s 30 -t PNG -o "$tmpdir/invite.png" "$invite_url" +echo 'info: created invite QR code' + +qrencode -s 30 -t PNG -o "$tmpdir/deeplink.png" "$DEEPLINK_URL" +echo 'info: created deeplink QR code' + +# generate LaTeX source file. 
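+# The document written here embeds the administrator-provided instructions
+# file verbatim, the user's email address, and the two QR codes rendered
+# above (invite.png and deeplink.png); latexmk then builds onboarding.pdf
+# from these sources.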
+cat > "$tmpdir/onboarding.tex" <> "$tmpdir/onboarding.tex" + +cat >> "$tmpdir/onboarding.tex" < /dev/null +latexmk -pdf onboarding +popd > /dev/null +echo 'info: ...complete' + +cp "$tmpdir/onboarding.pdf" "$user_email"'.pdf' +echo "info: user onboarding PDF in ${user_email}.pdf" + diff --git a/helm/Makefile b/helm/Makefile new file mode 100644 index 000000000..20c705c83 --- /dev/null +++ b/helm/Makefile @@ -0,0 +1,61 @@ +SHELL := /usr/bin/env bash -eo pipefail + + +# Overwriteable variables +#ENV_DIR + +# Internal variables +ENVIRONMENTS_DIR := $(abspath $(CURDIR)/../../cailleach/environments) + + + +ifndef ENV_DIR +ifndef ENV +$(error please define either ENV or ENV_DIR) +else +ENV_DIR = $(ENVIRONMENTS_DIR)/$(ENV) +endif +endif + + + +################################### HELM ################################### + +.PHONY: deploy +deploy: check-helm-inputs + KUBECONFIG=$(ENV_DIR)/kubeconfig.dec \ + helmfile \ + --file $(ENV_DIR)/helmfile.yaml \ + sync \ + --concurrency 1 + + + +############################### CREDENTIALS ################################ + +.PHONY: decrypt +decrypt: kubeconfig.dec + +.DELETE_ON_ERROR: $(ENV_DIR)/kubeconfig.dec +.PHONY: kubeconfig.dec +kubeconfig.dec: check-env-dir + @if [ ! -e $(ENV_DIR)/$(basename $(@)) ]; then exit 0; fi + sops -d $(ENV_DIR)/$(basename $(@)) > $(ENV_DIR)/$(@) + chmod 0600 $(ENV_DIR)/$(@) + @test -s $(ENV_DIR)/$(@) || (echo "[ERR] Failed decrypting kubeconfig" && exit 1) + + + +################################ FAIL-SAFES ################################ + +.PHONY: check-env-dir +check-env-dir: $(ENV_DIR) +$(ENV_DIR): + $(error directory: $(ENV_DIR) must exist) + + +.PHONY: check-helm-inputs +check-helm-inputs: $(ENV_DIR)/kubeconfig.dec + +$(ENV_DIR)/kubeconfig.dec: + $(error please make sure Kubernetes is installed and $(ENV_DIR)/kubeconfig.dec exists) diff --git a/helm/README.md b/helm/README.md new file mode 100644 index 000000000..9aa41e584 --- /dev/null +++ b/helm/README.md @@ -0,0 +1,45 @@ +# Helm-based deployment + +The Wire platform is deployed on top of Kubernetes. This certainly includes all *stateless* services (e.g. Brig, Galley), +but may or may not include *stateful* backing services (e.g. Cassandra or Elasticsearch). + +The respective Kubernetes objects are defined in Helm charts. This allows to template and transfer them. +The charts themselves are defined in [wire-server](https://github.com/wireapp/wire-server/tree/master/charts) +and uploaded to the [release](https://s3-eu-west-1.amazonaws.com/public.wire.com/charts) or +[develop](https://s3-eu-west-1.amazonaws.com/public.wire.com/charts-develop) (or develop older than 2023-04 [old-develop](https://s3-eu-west-1.amazonaws.com/public.wire.com/charts-develop-2022)) Helm repositories. + +To describe a deployment in a declarative fashion a tool called [*Helmfile*](https://github.com/roboll/helmfile) is +being used, which wraps the `helm` CLI. + + +## Deploy environment created by `terraform/environment` + +An 'environment' is supposed to represent all the setup required for the Wire +platform to function. + +'Deploying' an environment means creating the respective Objects on top of Kubernetes +to instantiate all the services that together represent the Wire backend. This action +can be re-run as often as you want (e.g. in case you change some variables or upgrade +to new versions). + +To start with, the environment must contain a `helmfile.yaml` listing each *release* +(based on a chart) and repositories they depend on. + +1. 
Please ensure `ENV_DIR` or `ENV` is exported as specified in the [docs in
+   the terraform folder](../terraform/README.md)
+1. Ensure that `make bootstrap` has been run to create a Kubernetes cluster
+1. Ensure `$ENV_DIR/kubeconfig.dec` exists to authenticate against the kube-apiserver
+   of the cluster in question.
+1. Craft the Helm values according to your needs (see
+   [charts](https://github.com/wireapp/wire-server/blob/develop/charts) for available
+   configuration)
+1. Running `make deploy` from this directory will bootstrap the
+   environment.
+
+
+## Using Helmfiles
+
+* similar to `helm` it is possible to also set `chart` to be a path in the local filesystem
+  (e.g. `./wire-server/charts/nginx-ingress-services`)
+* setting `installed: [true, false]` can help to get rid of a release, or you can just
+  comment things out; `force: true` might also resolve some odd release states
diff --git a/nix/docker-alpine.nix b/nix/docker-alpine.nix
new file mode 100644
index 000000000..9e1ed728f
--- /dev/null
+++ b/nix/docker-alpine.nix
@@ -0,0 +1,7 @@
+{
+  imageName = "alpine";
+  imageDigest = "sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300";
+  sha256 = "1wmrq8x0l5sjrwlklvfkabmxpn0qphik1gb37i04x8jm8bjiisip";
+  finalImageName = "alpine";
+  finalImageTag = "3";
+}
diff --git a/nix/overlay.nix b/nix/overlay.nix
new file mode 100644
index 000000000..fdf47b91c
--- /dev/null
+++ b/nix/overlay.nix
@@ -0,0 +1,79 @@
+self:
+let helm-mapkubeapis = self.callPackage ./pkgs/helm-mapkubeapis.nix { };
+in
+super: {
+  pythonForAnsible = (self.python3.withPackages (_: self.ansible.requiredPythonModules ++ [
+    super.python3Packages.boto
+    super.python3Packages.boto3
+    super.python3Packages.cryptography
+    super.python3Packages.six
+    # for packet debugging and reporting.
+    super.python3Packages.pyshark
+    super.python3Packages.matplotlib
+  ]));
+
+  # kubeadm and kubectl
+  kubernetes-tools = self.callPackage ./pkgs/kubernetes-tools.nix { };
+
+  kubernetes-helm = super.wrapHelm super.kubernetes-helm {
+    plugins = with super.kubernetes-helmPlugins; [ helm-s3 helm-secrets helm-diff helm-mapkubeapis ];
+  };
+
+  wire-binaries = self.callPackage ./pkgs/wire-binaries.nix { };
+
+  generate-gpg1-key = super.runCommandNoCC "generate-gpg1-key"
+    {
+      nativeBuildInputs = [ super.makeWrapper ];
+    }
+    ''
+      # This key isn't a secret (it's built and uploaded to the binary cache after all ;-) )
+      # It's created out of the necessity that apt wants to verify against a key.
+      # It's set to expire 2y after its creation,
+      # or whenever this derivation is built again without having the result in the binary cache.
+      # The public part of the key is shipped with the offline bundle
+      # ($aptly_root/public/gpg).
+      # The private key (Github secret) was last replaced on 2024-07-12 and is valid for two years.
+ + install -Dm755 ${./scripts/generate-gpg1-key.sh} $out/bin/generate-gpg1-key + # we *--set* PATH here, to ensure we don't pick wrong gpgs + wrapProgram $out/bin/generate-gpg1-key --set PATH '${super.lib.makeBinPath (with self; [ bash coreutils gnupg1orig ])}' + ''; + mirror-apt-jammy = super.runCommandNoCC "mirror-apt-jammy" + { + nativeBuildInputs = [ super.makeWrapper ]; + } + '' + install -Dm755 ${./scripts/mirror-apt-jammy.sh} $out/bin/mirror-apt-jammy + # we need to *--set* PATH here, otherwise aptly will pick the wrong gpg + wrapProgram $out/bin/mirror-apt-jammy --set PATH '${super.lib.makeBinPath (with self; [ aptly bash coreutils curl gnupg1orig gnused gnutar ])}' + ''; + + create-container-dump = super.runCommandNoCC "create-container-dump" + { + nativeBuildInputs = [ super.makeWrapper ]; + } + '' + install -Dm755 ${./scripts/create-container-dump.sh} $out/bin/create-container-dump + wrapProgram $out/bin/create-container-dump --prefix PATH : '${super.lib.makeBinPath [ self.skopeo ]}' + ''; + + + list-helm-containers = super.runCommandNoCC "list-helm-containers" + { + nativeBuildInputs = [ super.makeWrapper ]; + } + '' + install -Dm755 ${./scripts/list-helm-containers.sh} $out/bin/list-helm-containers + wrapProgram $out/bin/list-helm-containers --prefix PATH : '${super.lib.makeBinPath [ self.kubernetes-helm ]}' + ''; + + patch-ingress-controller-images = super.runCommandNoCC "patch-ingress-controller-images" + { + nativeBuildInputs = [ super.makeWrapper ]; + } + '' + install -Dm755 ${./scripts/patch-ingress-controller-images.sh} $out/bin/patch-ingress-controller-images + wrapProgram $out/bin/patch-ingress-controller-images --prefix PATH : '${super.lib.makeBinPath [ self.containerd ]}' + ''; +} diff --git a/nix/pkgs/helm-mapkubeapis.nix b/nix/pkgs/helm-mapkubeapis.nix new file mode 100644 index 000000000..d9813f780 --- /dev/null +++ b/nix/pkgs/helm-mapkubeapis.nix @@ -0,0 +1,40 @@ +{ buildGoModule, fetchFromGitHub, lib }: + +buildGoModule rec { + pname = "helm-mapkubeapis"; + # in case you change this version, ensure to set sha256 to empty string, as it will + # otherwise recompile but not actually update the version. Nix is not intuitive + # at all, this sucks! But you've been warned. :) + version = "0.1.0"; + + src = fetchFromGitHub { + owner = "helm"; + repo = pname; + rev = "v${version}"; + sha256 = "sha256-OIom+fMjLkbYXbxCsISuihdr3CWjUnkucTnDfoix9B0="; + }; + + vendorHash = "sha256-jqVzBRlGFhDHaiSF9AArJdt4KRCiUqUuo0CnJUTbSfE="; + + # NOTE: Remove the install and upgrade hooks. 
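+  # The hooks would otherwise run the plugin's install scripts at
+  # `helm plugin install` time; those typically try to download a release
+  # binary, which is unnecessary here (the plugin is built from source) and
+  # not possible inside the Nix build sandbox.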
+ postPatch = '' + sed -i '/^hooks:/,+2 d' plugin.yaml + ''; + + checkPhase = '' + ''; + + postInstall = '' + install -dm755 $out/${pname} + mv $out/bin $out/${pname}/ + install -m644 -Dt $out/${pname}/config/ config/Map.yaml + install -m644 -Dt $out/${pname} plugin.yaml + ''; + + meta = with lib; { + description = "A Helm plugin to map helm release deprecated Kubernetes APIs in-place"; + homepage = "https://github.com/helm/helm-mapkubeapis"; + license = licenses.asl20; + maintainers = with maintainers; [ ]; + }; +} diff --git a/nix/pkgs/kubernetes-tools.nix b/nix/pkgs/kubernetes-tools.nix new file mode 100644 index 000000000..3b2d497e1 --- /dev/null +++ b/nix/pkgs/kubernetes-tools.nix @@ -0,0 +1,40 @@ +{ buildGoModule, runtimeShell, fetchFromGitHub, makeWrapper, which, rsync, stdenv, fetchurl }: + + +buildGoModule rec { + pname = "kubernetes"; + version = "1.28.2"; + + src = fetchFromGitHub { + owner = "kubernetes"; + repo = "kubernetes"; + rev = "v${version}"; + hash = "sha256-7juoX4nFvQbIIbhTlnIYVUEYUJGwu+aKrpw4ltujjXI="; + }; + + vendorHash = null; + + doCheck = false; + + nativeBuildInputs = [ makeWrapper which rsync ]; + + outputs = [ "out" ]; + + buildPhase = '' + runHook preBuild + substituteInPlace "hack/update-generated-docs.sh" --replace "make" "make SHELL=${runtimeShell}" + patchShebangs ./hack ./cluster/addons/addon-manager + make "SHELL=${runtimeShell}" "WHAT=cmd/kubeadm cmd/kubectl" + ./hack/update-generated-docs.sh + runHook postBuild + ''; + + installPhase = '' + runHook preInstall + for p in cmd/kubeadm cmd/kubectl; do + install -D _output/local/go/bin/''${p##*/} -t $out/bin + done + + runHook postInstall + ''; +} diff --git a/nix/pkgs/wire-binaries.nix b/nix/pkgs/wire-binaries.nix new file mode 100644 index 000000000..1bb75bc6f --- /dev/null +++ b/nix/pkgs/wire-binaries.nix @@ -0,0 +1,114 @@ +{ fetchurl +, lib +, runCommandNoCC +}: +let + image_arch = "amd64"; + + # These values are manually kept in sync with: + # https://github.com/kubernetes-sigs/kubespray/blob/release-2.24/roles/kubespray-defaults/defaults/main/download.yml + # TODO: Find a better process. Automate this! + kube_version = "v1.28.2"; + etcd_version = "v3.5.10"; + cni_version = "v1.3.0"; + calico_version = "v3.26.4"; + crictl_version = "v1.28.0"; + runc_version = "v1.1.10"; + nerdctl_version = "1.7.1"; + containerd_version = "1.7.11"; + + + # Note: If you change a version, replace the checksum with zeros, run « nix-build --no-out-link -A pkgs.wire-binaries », it will complain and give you the right checksum, use that checksum in this file, run it again and it should build without complaining. 
+ cassandra_version = "3.11.16"; + jmx_prometheus_javaagent_version = "0.10"; + elasticsearch_version = "6.8.23"; + srcs = { + kubelet = fetchurl rec { + passthru.url = url; + url = "https://storage.googleapis.com/kubernetes-release/release/${ kube_version }/bin/linux/${ image_arch }/kubelet"; + sha256 = "17edb866636f14eceaad58c56eab12af7ab3be3c78400aff9680635d927f1185"; + }; + kubeadm = fetchurl rec { + passthru.url = url; + url = "https://storage.googleapis.com/kubernetes-release/release/${ kube_version }/bin/linux/${ image_arch }/kubeadm"; + sha256 = "6a4808230661c69431143db2e200ea2d021c7f1b1085e6353583075471310d00"; + }; + kubectl = fetchurl rec { + passthru.url = url; + url = "https://storage.googleapis.com/kubernetes-release/release/${ kube_version }/bin/linux/${ image_arch }/kubectl"; + sha256 = "c922440b043e5de1afa3c1382f8c663a25f055978cbc6e8423493ec157579ec5"; + }; + crictl = fetchurl rec { + passthru.url = url; + url = "https://github.com/kubernetes-sigs/cri-tools/releases/download/${ crictl_version }/crictl-${ crictl_version }-linux-${ image_arch }.tar.gz"; + sha256 = "8dc78774f7cbeaf787994d386eec663f0a3cf24de1ea4893598096cb39ef2508"; + }; + containerd = fetchurl rec { + passthru.url = url; + url = "https://github.com/containerd/containerd/releases/download/v${ containerd_version }/containerd-${ containerd_version }-linux-${ image_arch }.tar.gz"; + sha256 = "d66161d54546fad502fd50a13fcb79efff033fcd895adc9c44762680dcde4e69"; + }; + runc = fetchurl rec { + passthru.url = url; + url = "https://github.com/opencontainers/runc/releases/download/${ runc_version }/runc.${ image_arch }"; + sha256 = "81f73a59be3d122ab484d7dfe9ddc81030f595cc59968f61c113a9a38a2c113a"; + }; + calico_crds = fetchurl rec { + passthru.url = url; + url = "https://github.com/projectcalico/calico/archive/${ calico_version }.tar.gz"; + sha256 = "481e52de684c049f3f7f7bac78f0f6f4ae424d643451adc9e3d3fa9d03fb6d57"; + }; + nerdctl = fetchurl rec { + passthru.url = url; + url = "https://github.com/containerd/nerdctl/releases/download/v${ nerdctl_version }/nerdctl-${ nerdctl_version }-linux-${ image_arch }.tar.gz"; + sha256 = "5fc0a6e8c3a71cbba95fbdb6833fb8a7cd8e78f53de10988362d4029c14b905a"; + }; + calicoctl = fetchurl rec { + passthru.url = url; + url = "https://github.com/projectcalico/calico/releases/download/${ calico_version }/calicoctl-linux-${ image_arch }"; + sha256 = "9960357ef6d61eda7abf80bd397544c1952f89d61e5eaf9f6540dae379a3ef61"; + }; + etcd = fetchurl rec { + passthru.url = url; + url = "https://github.com/coreos/etcd/releases/download/${ etcd_version }/etcd-${ etcd_version }-linux-${ image_arch }.tar.gz"; + sha256 = "26e90d024fa2310bc52bb40e7f2132e81640b55f8fc446c00ae07e30af2a44fd"; + }; + cni = fetchurl rec { + passthru.url = url; + url = "https://github.com/containernetworking/plugins/releases/download/${ cni_version }/cni-plugins-linux-${ image_arch }-${ cni_version }.tgz"; + sha256 = "754a71ed60a4bd08726c3af705a7d55ee3df03122b12e389fdba4bea35d7dd7e"; + }; + cassandra = fetchurl rec { + passthru.url = url; + url = "http://archive.apache.org/dist/cassandra/${ cassandra_version }/apache-cassandra-${ cassandra_version }-bin.tar.gz"; + sha256 = "sha256-zQHG0SNFMWoflAEzJj7qnShMeiC370XCbxoitbR1/Ag="; + }; + jmx_prometheus_javaagent = fetchurl rec { + passthru.url = url; + url = "https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/${ jmx_prometheus_javaagent_version }/jmx_prometheus_javaagent-${ jmx_prometheus_javaagent_version }.jar"; + sha256 = 
"0abyydm2dg5g57alpvigymycflgq4b3drw4qs7c65vn95yiaai5i"; + }; + minio = fetchurl rec { + passthru.url = url; + url = "https://dl.min.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2023-07-07T07-13-57Z"; + sha256 = "sha256-9tGq30uuwVVogOZZdI1/vGvI0trDVU+BbpVJLTiBZgo="; + }; + mc = fetchurl rec { + passthru.url = url; + url = "https://dl.min.io/client/mc/release/linux-amd64/archive/mc.RELEASE.2023-10-24T05-18-28Z"; + sha256 = "sha256-XxKSa2RrUzzeoaVIxURgpNrXjye4sX05m6Av9O42jk0="; + }; + elasticsearch = fetchurl rec { + passthru.url = url; + url = "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-${elasticsearch_version}.deb"; + sha256 = "sha256:0s7m49rvg5n6mrjzg5snbg3092mq0n661qs9209phjzka1lqajvb"; + }; + }; +in +runCommandNoCC "wire-binaries" +{ + nativeBuildInputs = [ ]; +} '' + mkdir -p $out + ${toString (lib.mapAttrsToList (k: v: "cp ${v} $out/${baseNameOf v.url}\n") srcs)} +'' diff --git a/nix/scripts/create-container-dump.sh b/nix/scripts/create-container-dump.sh new file mode 100644 index 000000000..a098d0d79 --- /dev/null +++ b/nix/scripts/create-container-dump.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# This consumes a list of containers from stdin and produces a `skopeo sync` +# dir at $1. +set -eou pipefail + +if [[ ! $# -eq 1 ]]; then + echo "usage: $0 OUTPUT-DIR" >&2 + exit 1 +fi + +mkdir -p $1 +# Download all the docker images into $1, and append its name to an index.txt +# If this errors out for you, copy default-policy.json from the skopeo repo to +# /etc/containers/policy.json +while IFS= read -r image; do + # sanitize the image file name, replace slashes with underscores, suffix with .tar + image_filename=$(sed -r "s/[:\/]/_/g" <<< $image) + image_path=$(realpath $1)/${image_filename}.tar + if [[ -e $image_path ]];then + echo "Skipping $image_filename…" + else + echo "Fetching $image_filename…" + + # All of these images should be publicly fetchable, especially given we + # ship public tarballs containing these images. + # ci.sh already honors DOCKER_LOGIN, so do the same here, otherwise + # fallback to unauthorized fetching. + + # If an image has both a tag and digest, remove the tag. Return the original if there is no match. + image_trimmed=$(echo "$image" | sed -E 's/(.+)(:.+(@.+))/\1\3/') + if [[ -n "${DOCKER_LOGIN:-}" && "$image" =~ quay.io/wire ]];then + skopeo copy --insecure-policy --src-creds "$DOCKER_LOGIN" \ + docker://$image_trimmed docker-archive:${image_path} --additional-tag $image + else + skopeo copy --insecure-policy \ + docker://$image_trimmed docker-archive:${image_path} --additional-tag $image + fi + echo "${image_filename}.tar" >> $(realpath "$1")/index.txt + fi +done diff --git a/nix/scripts/create-offline-artifact.sh b/nix/scripts/create-offline-artifact.sh new file mode 100644 index 000000000..3d98c0776 --- /dev/null +++ b/nix/scripts/create-offline-artifact.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +mkdir -p assets assets/containers-{helm,other,system} assets/debs assets/binaries + +mirror-apt-jammy assets/debs + + + diff --git a/nix/scripts/generate-gpg1-key.sh b/nix/scripts/generate-gpg1-key.sh new file mode 100755 index 000000000..c5b05e337 --- /dev/null +++ b/nix/scripts/generate-gpg1-key.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +set -eou pipefail + +# This will create a gpg1 private key with uid gpg@wire.com, and output it as +# ascii-armoured to stdout. 
+ +GNUPGHOME=$(mktemp -d) +export GNUPGHOME +trap 'rm -Rf -- "$GNUPGHOME"' EXIT + +# configure gpg to use a custom keyring, because aptly reads from it +gpg="gpg --keyring=$GNUPGHOME/trustedkeys.gpg --no-default-keyring" + +# create a gpg signing key. This is temporary for now, in the future, there +# will be a stable signing key and official releases for this. +cat > "$GNUPGHOME"/keycfg <&2 + elif [[ $image =~ ":" ]]; then + echo "$image" + elif [[ $image =~ "@" ]]; then + echo "$image" + else + echo "Container $image without a tag found or pin found. Aborting! Fix this chart. not compatible with offline. Components need explicit tags for that" >&2 + exit 1 + fi + done +} + +# For each helm chart passed in from stdin, use the example values to +# render the charts, and assemble the list of images this would fetch. +while IFS= read -r chart; do + echo "Running helm template on chart ${chart}…" >&2 + # The image values are left as-is + helm template --debug "$chart" \ + --set federate.dtls.tls.key=emptyString \ + --set federate.dtls.tls.crt=emptyString \ + $( [[ -f ./values/$(basename $chart)/prod-values.example.yaml ]] && echo "-f ./values/$(basename $chart)/prod-values.example.yaml" ) \ + $( [[ -f ./values/$(basename $chart)/prod-secrets.example.yaml ]] && echo "-f ./values/$(basename $chart)/prod-secrets.example.yaml" ) \ + | yq -r '..|.image? | select(.)' | optionally_complain | sort -u +done | sort -u diff --git a/nix/scripts/mirror-apt-jammy.sh b/nix/scripts/mirror-apt-jammy.sh new file mode 100755 index 000000000..136822165 --- /dev/null +++ b/nix/scripts/mirror-apt-jammy.sh @@ -0,0 +1,161 @@ +#!/usr/bin/env bash +set -euo pipefail + +# This will consume a list of ubuntu jammy packages (or queries), and produces +# a packages.tgz tarball, which can be statically served. + +# It assumes a GPG_PRIVATE_KEY environment variable is set +# containing a key with uid gpg@wire.com +# This should contain an ascii-armoured gpg private key + +usage() { + echo "usage: GPG_PRIVATE_KEY= $0 OUTPUT-DIR" >&2 + echo "You can generate a private key as follows:" >&2 + echo "GPG_PRIVATE_KEY=\$(generate-gpg1-key)" >&2 + echo "export GPG_PRIVATE_KEY" >&2 + exit 1 +} + +[ $# -lt 1 ] && usage +[[ -z "${GPG_PRIVATE_KEY:-}" ]] && usage +aptly_root=$1 +mkdir -p "$aptly_root" +shift + + +# NOTE: These are all the packages needed for all our playbooks to succeed. This list was created by trial and error +packages=( + python3-apt + python3-netaddr + python3-pip + aufs-tools + apt-transport-https + software-properties-common + conntrack + ipvsadm + ipset + curl + rsync + socat + unzip + e2fsprogs + xfsprogs + ebtables + python3-minimal + openjdk-8-jdk-headless + iproute2 + procps + libjemalloc2 + qrencode + texlive + latexmk + libopts25 + ntp + libc6 + libseccomp2 + iptables + bash-completion + logrotate + cron + crontab + ufw + netcat + telnet + less + traceroute + strace + iputils-ping + nano + vi + tcpdump + gnupg + bzip2 + # Dependencies for the rabbitmq-server package + erlang-base + erlang-asn1 + erlang-crypto + erlang-eldap + erlang-ftp + erlang-inets + erlang-mnesia + erlang-os-mon + erlang-parsetools + erlang-public-key + erlang-runtime-tools + erlang-snmp + erlang-ssl + erlang-syntax-tools + erlang-tftp + erlang-tools + erlang-xmerl + rabbitmq-server +) + +# shellcheck disable=SC2001 +packages_=$(echo "${packages[@]}" | sed 's/\s/ \| /g') + +echo "$packages_" + +# NOTE: kubespray pins the exact docker and containerd versions that it +# installs. This is kept in sync with kubespray manually. 
+# See roles/container-engine/docker/vars/ubuntu.yml +# See roles/container-engine/containerd-common/vars/ubuntu.yml +docker_packages="docker-ce (= 5:20.10.20~3-0~ubuntu-jammy) | docker-ce-cli (= 5:20.10.20~3-0~ubuntu-jammy) | containerd.io (= 1.6.8-1)" +GNUPGHOME=$(mktemp -d) +export GNUPGHOME +aptly_config=$(mktemp) +trap 'rm -Rf -- "$aptly_config $GNUPGHOME"' EXIT + +cat > "$aptly_config" < "$aptly_root/public/gpg" diff --git a/nix/scripts/patch-ingress-controller-images.sh b/nix/scripts/patch-ingress-controller-images.sh new file mode 100644 index 000000000..9a520311a --- /dev/null +++ b/nix/scripts/patch-ingress-controller-images.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env sh + +dir_path="$1"/containers-helm/ +index_txt_path="$dir_path"/index.txt + +sudo ctr images pull registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343 +sudo ctr images export "$dir_path"/kube_webhook_certgen.tar registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343 + +sudo ctr images pull registry.k8s.io/ingress-nginx/controller:v1.6.4 +sudo ctr images export "$dir_path"/controller_1_6_4.tar registry.k8s.io/ingress-nginx/controller:v1.6.4 + +sed -i "/registry\.k8s\.io_ingress-nginx_controller_v1.6.4/d" "$index_txt_path" +echo "controller_1_6_4.tar" >> "$index_txt_path" + +sed -i "/registry\.k8s\.io_ingress-nginx_kube-webhook-certgen/d" "$index_txt_path" +echo "kube_webhook_certgen.tar" >> "$index_txt_path" diff --git a/nix/sources.json b/nix/sources.json new file mode 100644 index 000000000..3068042f5 --- /dev/null +++ b/nix/sources.json @@ -0,0 +1,26 @@ +{ + "niv": { + "branch": "master", + "description": "Easy dependency management for Nix projects", + "homepage": "https://github.com/nmattia/niv", + "owner": "nmattia", + "repo": "niv", + "rev": "9cb7ef336bb71fd1ca84fc7f2dff15ef4b033f2a", + "sha256": "1ajyqr8zka1zlb25jx1v4xys3zqmdy3prbm1vxlid6ah27a8qnzh", + "type": "tarball", + "url": "https://github.com/nmattia/niv/archive/9cb7ef336bb71fd1ca84fc7f2dff15ef4b033f2a.tar.gz", + "url_template": "https://github.com///archive/.tar.gz" + }, + "nixpkgs": { + "branch": "master", + "description": "A read-only mirror of NixOS/nixpkgs tracking the released channels. Send issues and PRs to", + "homepage": "https://github.com/NixOS/nixpkgs", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "057f9aecfb71c4437d2b27d3323df7f93c010b7e", + "sha256": "1ndiv385w1qyb3b18vw13991fzb9wg4cl21wglk89grsfsnra41k", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/057f9aecfb71c4437d2b27d3323df7f93c010b7e.tar.gz", + "url_template": "https://github.com///archive/.tar.gz" + } +} diff --git a/nix/sources.nix b/nix/sources.nix new file mode 100644 index 000000000..1938409dd --- /dev/null +++ b/nix/sources.nix @@ -0,0 +1,174 @@ +# This file has been generated by Niv. + +let + + # + # The fetchers. fetch_ fetches specs of type . + # + + fetch_file = pkgs: name: spec: + let + name' = sanitizeName name + "-src"; + in + if spec.builtin or true then + builtins_fetchurl { inherit (spec) url sha256; name = name'; } + else + pkgs.fetchurl { inherit (spec) url sha256; name = name'; }; + + fetch_tarball = pkgs: name: spec: + let + name' = sanitizeName name + "-src"; + in + if spec.builtin or true then + builtins_fetchTarball { name = name'; inherit (spec) url sha256; } + else + pkgs.fetchzip { name = name'; inherit (spec) url sha256; }; + + fetch_git = name: spec: + let + ref = + if spec ? ref then spec.ref else + if spec ? branch then "refs/heads/${spec.branch}" else + if spec ? 
tag then "refs/tags/${spec.tag}" else + abort "In git source '${name}': Please specify `ref`, `tag` or `branch`!"; + in + builtins.fetchGit { url = spec.repo; inherit (spec) rev; inherit ref; }; + + fetch_local = spec: spec.path; + + fetch_builtin-tarball = name: throw + ''[${name}] The niv type "builtin-tarball" is deprecated. You should instead use `builtin = true`. + $ niv modify ${name} -a type=tarball -a builtin=true''; + + fetch_builtin-url = name: throw + ''[${name}] The niv type "builtin-url" will soon be deprecated. You should instead use `builtin = true`. + $ niv modify ${name} -a type=file -a builtin=true''; + + # + # Various helpers + # + + # https://github.com/NixOS/nixpkgs/pull/83241/files#diff-c6f540a4f3bfa4b0e8b6bafd4cd54e8bR695 + sanitizeName = name: + ( + concatMapStrings (s: if builtins.isList s then "-" else s) + ( + builtins.split "[^[:alnum:]+._?=-]+" + ((x: builtins.elemAt (builtins.match "\\.*(.*)" x) 0) name) + ) + ); + + # The set of packages used when specs are fetched using non-builtins. + mkPkgs = sources: system: + let + sourcesNixpkgs = + import (builtins_fetchTarball { inherit (sources.nixpkgs) url sha256; }) { inherit system; }; + hasNixpkgsPath = builtins.any (x: x.prefix == "nixpkgs") builtins.nixPath; + hasThisAsNixpkgsPath = == ./.; + in + if builtins.hasAttr "nixpkgs" sources + then sourcesNixpkgs + else if hasNixpkgsPath && ! hasThisAsNixpkgsPath then + import {} + else + abort + '' + Please specify either (through -I or NIX_PATH=nixpkgs=...) or + add a package called "nixpkgs" to your sources.json. + ''; + + # The actual fetching function. + fetch = pkgs: name: spec: + + if ! builtins.hasAttr "type" spec then + abort "ERROR: niv spec ${name} does not have a 'type' attribute" + else if spec.type == "file" then fetch_file pkgs name spec + else if spec.type == "tarball" then fetch_tarball pkgs name spec + else if spec.type == "git" then fetch_git name spec + else if spec.type == "local" then fetch_local spec + else if spec.type == "builtin-tarball" then fetch_builtin-tarball name + else if spec.type == "builtin-url" then fetch_builtin-url name + else + abort "ERROR: niv spec ${name} has unknown type ${builtins.toJSON spec.type}"; + + # If the environment variable NIV_OVERRIDE_${name} is set, then use + # the path directly as opposed to the fetched source. + replace = name: drv: + let + saneName = stringAsChars (c: if isNull (builtins.match "[a-zA-Z0-9]" c) then "_" else c) name; + ersatz = builtins.getEnv "NIV_OVERRIDE_${saneName}"; + in + if ersatz == "" then drv else + # this turns the string into an actual Nix path (for both absolute and + # relative paths) + if builtins.substring 0 1 ersatz == "/" then /. + ersatz else /. 
+ builtins.getEnv "PWD" + "/${ersatz}"; + + # Ports of functions for older nix versions + + # a Nix version of mapAttrs if the built-in doesn't exist + mapAttrs = builtins.mapAttrs or ( + f: set: with builtins; + listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set)) + ); + + # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/lists.nix#L295 + range = first: last: if first > last then [] else builtins.genList (n: first + n) (last - first + 1); + + # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L257 + stringToCharacters = s: map (p: builtins.substring p 1 s) (range 0 (builtins.stringLength s - 1)); + + # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L269 + stringAsChars = f: s: concatStrings (map f (stringToCharacters s)); + concatMapStrings = f: list: concatStrings (map f list); + concatStrings = builtins.concatStringsSep ""; + + # https://github.com/NixOS/nixpkgs/blob/8a9f58a375c401b96da862d969f66429def1d118/lib/attrsets.nix#L331 + optionalAttrs = cond: as: if cond then as else {}; + + # fetchTarball version that is compatible between all the versions of Nix + builtins_fetchTarball = { url, name ? null, sha256 }@attrs: + let + inherit (builtins) lessThan nixVersion fetchTarball; + in + if lessThan nixVersion "1.12" then + fetchTarball ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; })) + else + fetchTarball attrs; + + # fetchurl version that is compatible between all the versions of Nix + builtins_fetchurl = { url, name ? null, sha256 }@attrs: + let + inherit (builtins) lessThan nixVersion fetchurl; + in + if lessThan nixVersion "1.12" then + fetchurl ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; })) + else + fetchurl attrs; + + # Create the final "sources" from the config + mkSources = config: + mapAttrs ( + name: spec: + if builtins.hasAttr "outPath" spec + then abort + "The values in sources.json should not have an 'outPath' attribute" + else + spec // { outPath = replace name (fetch config.pkgs name spec); } + ) config.sources; + + # The "config" used by the fetchers + mkConfig = + { sourcesFile ? if builtins.pathExists ./sources.json then ./sources.json else null + , sources ? if isNull sourcesFile then {} else builtins.fromJSON (builtins.readFile sourcesFile) + , system ? builtins.currentSystem + , pkgs ? mkPkgs sources system + }: rec { + # The sources, i.e. the attribute set of spec name to spec + inherit sources; + + # The "pkgs" (evaluated nixpkgs) to use for e.g. 
non-builtin fetchers + inherit pkgs; + }; + +in +mkSources (mkConfig {}) // { __functor = _: settings: mkSources (mkConfig settings); } diff --git a/offline/cd.sh b/offline/cd.sh new file mode 100755 index 000000000..6b8bee6f1 --- /dev/null +++ b/offline/cd.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -euo pipefail + +function cleanup { + (cd terraform/examples/wire-server-deploy-offline-hetzner ; terraform destroy -auto-approve) + echo done +} +trap cleanup EXIT +(cd terraform/examples/wire-server-deploy-offline-hetzner ; terraform init ; terraform apply -auto-approve ) +adminhost=$(cd terraform/examples/wire-server-deploy-offline-hetzner ; terraform output adminhost) +adminhost="${adminhost//\"/}" # remove extra quotes around the returned string +ssh_private_key=$(cd terraform/examples/wire-server-deploy-offline-hetzner ; terraform output ssh_private_key) + +eval `ssh-agent` +ssh-add - <<< "$ssh_private_key" + +ssh -oStrictHostKeyChecking=accept-new -oConnectionAttempts=10 "root@$adminhost" tar xzv < ./assets.tgz + +(cd terraform/examples/wire-server-deploy-offline-hetzner; terraform output -json static-inventory)| ssh "root@$adminhost" tee ./ansible/inventory/offline/inventory.yml + +# NOTE: Agent is forwarded; so that the adminhost can provision the other boxes +ssh -A "root@$adminhost" ./bin/offline-deploy.sh diff --git a/offline/ci.sh b/offline/ci.sh new file mode 100755 index 000000000..d45ec4399 --- /dev/null +++ b/offline/ci.sh @@ -0,0 +1,221 @@ +#!/usr/bin/env bash +set -euo pipefail + +INCREMENTAL="${INCREMENTAL:-0}" + +# Default exclude list +HELM_CHART_EXCLUDE_LIST="inbucket,wire-server-enterprise" + +# Parse the HELM_CHART_EXCLUDE_LIST argument +for arg in "$@" +do + case $arg in + HELM_CHART_EXCLUDE_LIST=*) + HELM_CHART_EXCLUDE_LIST="${arg#*=}" + ;; + esac +done +HELM_CHART_EXCLUDE_LIST=$(echo "$HELM_CHART_EXCLUDE_LIST" | jq -R 'split(",")') +echo "Excluding following charts from the release: $HELM_CHART_EXCLUDE_LIST" + +# Build the container image +container_image=$(nix-build --no-out-link -A container) +# if [[ -n "${DOCKER_LOGIN:-}" ]];then +# skopeo copy --dest-creds "$DOCKER_LOGIN" \ +# docker-archive:"$container_image" \ +# "docker://quay.io/wire/wire-server-deploy" \ +# --aditional-tag "$(git rev-parse HEAD)" +# else +# echo "Skipping container upload, no DOCKER_LOGIN provided" +# fi + +mkdir -p containers-{helm,other,system,adminhost} +install -m755 "$container_image" "containers-adminhost/container-wire-server-deploy.tgz" + +mirror-apt-jammy debs-jammy +tar cf debs-jammy.tar debs-jammy +rm -r debs-jammy + +fingerprint=$(echo "$GPG_PRIVATE_KEY" | gpg --with-colons --import-options show-only --import --fingerprint | awk -F: '$1 == "fpr" {print $10; exit}') + +echo "$fingerprint" + +mkdir -p binaries +install -m755 "$(nix-build --no-out-link -A pkgs.wire-binaries)/"* binaries/ +tar cf binaries.tar binaries +rm -r binaries + +function list-system-containers() { +# These are manually updated with values from +# https://github.com/kubernetes-sigs/kubespray/blob/release-2.24/roles/kubespray-defaults/defaults/main/download.yml +# TODO: Automate this. This is very wieldy :) + cat < +# on stdin +pull_charts() { + echo "Pulling charts into ./charts ..." 
+ mkdir -p ./charts + + home=$(mktemp -d) + export HELM_CACHE_HOME="$home" + export HELM_DATA_HOME="$home" + export HELM_CONFIG_HOME="$home" + + declare -A repos + # needed to handle associative array lookup + set +u + + while IFS=$'\n' read -r line + do + echo "$line" + IFS=$' ' read -r -a parts <<< "$line" + name=${parts[0]} + repo=${parts[1]} + version=${parts[2]} + + # we add and update the repo only the first time we see it to speed up the process + repo_short_name=${repos[$repo]} + if [ "$repo_short_name" == "" ]; then + n=${#repos[@]} + repo_short_name="repo_$((n+1))" + repos[$repo]=$repo_short_name + helm repo add "$repo_short_name" "$repo" + helm repo update "$repo_short_name" + fi + (cd ./charts; helm pull --version "$version" --untar "$repo_short_name/$name") + done + echo "Pulling charts done." +} + +wire_build="https://raw.githubusercontent.com/wireapp/wire-builds/991e280a114701209d0ba3c1847e4e1ac7d05a43/build.json" +wire_build_chart_release "$wire_build" | pull_charts + +# Uncomment if you want to create non-wire-build release +# and uncomment the other pull_charts call from aboe +# legacy_chart_release | pull_charts + +# TODO: Awaiting some fixes in wire-server regarding tagless images + +# Download zauth; as it's needed to generate certificates +wire_version=$(helm show chart ./charts/wire-server | yq -r .version) +echo "quay.io/wire/zauth:$wire_version" | create-container-dump containers-adminhost + +################################### +####### DIRTY HACKS GO HERE ####### +################################### + +# Patch wire-server values.yaml to include federator +# This is needed to bundle it's image. +sed -i -Ee 's/federation: false/federation: true/' "$(pwd)"/values/wire-server/prod-values.example.yaml +sed -i -Ee 's/useSharedFederatorSecret: false/useSharedFederatorSecret: true/' "$(pwd)"/charts/wire-server/charts/federator/values.yaml + +# drop step-certificates/.../test-connection.yaml because it lacks an image tag +# cf. https://github.com/smallstep/helm-charts/pull/196/files +rm -v charts/step-certificates/charts/step-certificates/templates/tests/* + +# Get and dump required containers from Helm charts. Omit integration test +# containers (e.g. `quay.io_wire_galley-integration_4.22.0`.) +for chartPath in "$(pwd)"/charts/*; do + echo "$chartPath" +done | list-helm-containers | grep -v "\-integration:" | create-container-dump containers-helm + +# Undo changes on wire-server values.yaml +sed -i -Ee 's/useSharedFederatorSecret: true/useSharedFederatorSecret: false/' "$(pwd)"/charts/wire-server/charts/federator/values.yaml +sed -i -Ee 's/federation: true/federation: false/' "$(pwd)"/values/wire-server/prod-values.example.yaml + +patch-ingress-controller-images "$(pwd)" + +tar cf containers-helm.tar containers-helm +[[ "$INCREMENTAL" -eq 0 ]] && rm -r containers-helm + +echo "docker_ubuntu_repo_repokey: '${fingerprint}'" > ansible/inventory/offline/group_vars/all/key.yml + +tar czf assets.tgz debs-jammy.tar binaries.tar containers-adminhost containers-helm.tar containers-system.tar ansible charts values bin + +echo "Done" diff --git a/offline/coturn.md b/offline/coturn.md new file mode 100644 index 000000000..8e2a74afd --- /dev/null +++ b/offline/coturn.md @@ -0,0 +1,513 @@ +# Installing Coturn. + +Coturn is a free and open-source implementation of TURN and STUN server. + +It is used to relay media between two clients that are unable to establish a direct connection. + +This is useful in cases where the clients are behind a NAT or a firewall. 
+
+This document explains how to install Coturn on a newly deployed Wire-Server installation.
+
+This presumes you already have:
+
+* Followed the [single Hetzner machine installation](single_hetzner_machine_installation.md) guide or otherwise have a machine ready to accept a Wire-Server deployment.
+* Followed the [Wire-Server installation](docs_ubuntu_22.04.md) guide and have Wire-Server deployed and working.
+
+## Plan.
+
+To set up Coturn, we will:
+
+* Create a `values.yaml` file and fill it with configuration.
+* Create a `secret.yaml` file for the Coturn secrets.
+* Configure the Coturn labels to select on which machine(s) it will run.
+* Configure the SFT deployment for node selection and public IP discovery.
+* Configure the port redirection in Nftables.
+* Change the Wire-Server configuration to use Coturn.
+* Install Coturn using Helm.
+* Verify that Coturn is working.
+
+This entire document presumes you are working from inside your Wire-Server deployment directory (typically `~/wire-server-deploy/`).
+
+Step by step:
+
+## Create a `values.yaml` file and fill it with configuration.
+
+Create a folder for the Coturn configuration:
+
+```bash
+
+mkdir -p values/coturn
+
+```
+
+Create/edit a `values.yaml` file inside the `values/coturn` folder:
+
+```bash
+
+nano values/coturn/values.yaml
+
+```
+
+Add the following configuration to the `values.yaml` file:
+
+```yaml
+
+# Value file for coturn chart.
+#
+# See: https://github.com/wireapp/wire-server/blob/develop/charts/coturn/values.yaml
+# And: https://github.com/wireapp/wire-server/blob/develop/charts/coturn/README.md
+
+# Use nodeSelector only if you are planning on running coturn on fewer nodes
+# than the number of workers in your cluster. This is used to pin coturn to specific nodes.
+nodeSelector:
+  wire.com/role: coturn
+
+replicaCount: 3
+coturnTurnListenIP: "__COTURN_POD_IP__"
+coturnTurnExternalIP: "__COTURN_EXT_IP__"
+coturnTurnRelayIP: "__COTURN_POD_IP__"
+```
+
+Annotate nodes with the wire.com/external-ip annotation if the nodes are behind a 1:1 NAT. This is to make coturn aware of its external IP address.
+
+e.g.:
+```
+d kubectl annotate node kubenode1 wire.com/external-ip=IP.ADDRESS
+```
+
+## Create a `secret.yaml` file for the Coturn secrets.
+
+For the Coturn secrets, we are going to re-use the wire-server secrets.
+
+First, locate your wire-server secrets file:
+
+```bash
+
+cat values/wire-server/secrets.yaml
+
+```
+
+You will see a section like this:
+
+```yaml
+
+brig:
+  secrets:
+    smtpPassword: dummyPassword
+    zAuth:
+      publicKeys: "2DaWAtcJ6ZCP[...]0O2Z2_zf-M="
+      privateKeys: "t0R49fDju3GVU0LIA[...]KZ99rQ7Znb"
+    turn:
+      secret: "Ob4C52U8WPwv[...]QUy724p1n"
+    awsKeyId: dummykey
+    awsSecretKey: dummysecret
+
+```
+
+This section, with the secrets, is what we want to copy into our `secret.yaml` file for Coturn.
+
+Create/edit a `secret.yaml` file inside the `values/coturn` folder:
+
+```bash
+
+nano values/coturn/secret.yaml
+
+```
+
+Add the following configuration to the `secret.yaml` file:
+
+```yaml
+
+# Path is .secrets.
+secrets:
+  zrestSecrets:
+    - "Ob4C52U8WPwv[...]QUy724p1n"
+
+```
+
+Here, the value for `secrets.zrestSecrets` is the same as `brig.secrets.turn.secret` from the wire-server secrets.
+
+## Configure the Coturn labels to select on which machine(s) it will run.
+
+Next, we must select on which machine Coturn will run.
+
+In this example, we've decided it will run on the third kubernetes node, `kubenode3`, which has an IP address of `192.168.122.23`.
+
+We've set the `nodeSelector` in the `values.yaml` file to select the `coturn` role, so any
+machine we label with the `wire.com/role: coturn` label will be selected to run Coturn.
+
+So we need to label the `kubenode3` machine with the `wire.com/role: coturn` label.
+
+We do this by running:
+
+```bash
+
+d kubectl label node kubenode3 wire.com/role=coturn
+
+```
+
+By default, only one machine will be selected to run Coturn.
+
+If you want to run Coturn on multiple machines, you must:
+
+1. Add the `wire.com/role: coturn` label to multiple machines.
+
+2. Change the `replicaCount` in the `charts/coturn/values.yaml` file to the number of machines you want to run Coturn on.
+
+## Configure the SFT deployment for node selection and public IP discovery
+
+First, we must locate what the "external" IP address of the machine is.
+
+We get it by running the following command:
+
+```bash
+
+sudo ip addr
+
+```
+
+The first interface will be the loopback interface, `lo`, and the second interface will be the "external" interface, `enp41s0` in our example. The output will look something like this:
+
+```bash
+
+demo@install-docs:~/wire-server-deploy$ ip addr
+1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+    inet 127.0.0.1/8 scope host lo
+       valid_lft forever preferred_lft forever
+    inet6 ::1/128 scope host
+       valid_lft forever preferred_lft forever
+2: enp41s0: mtu 1500 qdisc mq state UP group default qlen 1000
+    link/ether a8:a1:59:a2:9b:5b brd ff:ff:ff:ff:ff:ff
+    inet 5.9.84.121/32 scope global enp41s0
+       valid_lft forever preferred_lft forever
+    inet6 2a01:4f8:162:3b6::2/64 scope global
+       valid_lft forever preferred_lft forever
+    inet6 fe80::aaa1:59ff:fea2:9b5b/64 scope link
+       valid_lft forever preferred_lft forever
+3: etc...
+
+```
+
+In this case, the external IP address is `5.9.84.121`.
+
+Please note: this step is also documented in the [Wire install docs](docs_ubuntu_22.04.md).
+
+We must make sure that Coturn pods and SFT pods do not run on the same kubernetes nodes.
+
+This means we must label the kubernetes nodes so that SFT runs on nodes that we did not select to run Coturn in the previous step.
+
+In this example, we've decided to run Coturn on the third kubernetes node, `kubenode3`, which has an IP address of `192.168.122.23`.
+
+First we make sure the SFT chart is configured to only run on kubernetes nodes with the right label (`sftd`).
+
+Edit the `values/sftd/values.yaml` file:
+
+```yaml
+
+nodeSelector:
+  wire.com/role: sftd
+
+```
+
+Then we label the `kubenode1` machine with the `wire.com/role: sftd` label:
+
+```bash
+
+d kubectl label node kubenode1 wire.com/role=sftd
+
+```
+
+We must also annotate that node with the external IP address we will be listening on (which we found with `sudo ip addr` above):
+
+```bash
+
+d kubectl annotate node kubenode1 wire.com/external-ip='your.public.ip.address'
+
+```
+
+If we want to run SFT on multiple nodes, the procedure is the same as the one documented above for running Coturn on multiple nodes.
+
+We should now have Coturn configured to run on one or more kubernetes nodes, and SFT configured to run on one or more kubernetes nodes, with no overlap between the two.
+
+Before moving on, we must also re-deploy SFT's chart to apply the new configuration:
+
+```bash
+
+d helm upgrade --install sftd ./charts/sftd --set 'nodeSelector.wire\.com/role=sftd' --values values/sftd/values.yaml
+
+```
+
+## Configure the port redirection in Nftables.
+ +```{note} + +Note: This section is only relevant if you are running Wire-Server/Coturn/SFT behind a `nftables`-managed firewall. + +``` + +We must configure the port redirection in Nftables to allow traffic to reach Coturn. + +Calling and TURN services (Coturn, SFT) require being reachable on a range of ports used to transmit the calling data. SFT service listens on port 443 which is managed through k8s ingress controller, we must ensure that external traffic for port 443 is able to reach ingress controller. + +Here we have decided the following distribution of ports: + +* Coturn will operate between ports 32768 and 61000. + +We will configure the port redirection in Nftables to allow traffic to reach Coturn. + +In the file `/etc/nftables.conf`, which we edit with: + +```bash + +sudo nano /etc/nftables.conf + +``` + +We will do the following modifications: + +First, we create some definitions in the beginning of the file for readability: + +``` +define COTURNIP = 192.168.122.23 +define KUBENODEIP = 192.168.122.21 +define INF_WAN = enp41s0 +``` + +Where: + +* `COTURNIP` is the IP address of the machine where Coturn will run (in our example, the third kubernetes node, `kubenode3`). +* `KUBENODEIP` is the IP address of the machine running nginx HTTP / HTTPS ingress. +* `INF_WAN` is the name of the WAN interface exposed to the outside world (the Internet). + +Then, we edit the `table ip nat` / `chain PREROUTING` section of the file: + +```nft + +table ip nat { + chain PREROUTING { + + type nat hook prerouting priority -100; + + iifname { $INF_WAN, virbr0 } tcp dport 80 fib daddr type local dnat to $KUBENODEIP:31772 comment "HTTP ingress" + iifname { $INF_WAN, virbr0 } tcp dport 443 fib daddr type local dnat to $KUBENODEIP:31773 comment "HTTPS ingress" + + iifname { $INF_WAN, virbr0 } tcp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control TCP" + iifname { $INF_WAN, virbr0 } udp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control UDP" + + iifname { $INF_WAN, virbr0 } udp dport 32768-61000 fib daddr type local dnat to $COTURNIP comment "COTURN UDP range" + + fib daddr type local counter jump DOCKER + } + +``` + +Some explanations: + +This is used for the HTTP(S) ingress: + +```nft + iifname { $INF_WAN, virbr0 } tcp dport 80 fib daddr type local dnat to $KUBENODEIP:31772 comment "HTTP ingress" + iifname { $INF_WAN, virbr0 } tcp dport 443 fib daddr type local dnat to $KUBENODEIP:31773 comment "HTTPS ingress" +``` + +This is the part that routes the UDP packets (media/calling traffic) to the calling services: + +```nft + iifname { $INF_WAN, virbr0 } udp dport 32768-61000 fib daddr type local dnat to $COTURNIP comment "COTURN UDP range" +``` + +This is the part that redirects the control traffic to the Coturn port: + +```nft + iifname { $INF_WAN, virbr0 } tcp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control TCP" + iifname { $INF_WAN, virbr0 } udp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control UDP" +``` + + +Then we restart Nftables to apply the changes: + +```bash + +sudo systemctl restart nftables + +``` + +## Change the Wire-Server configuration to use Coturn. + +We must change the Wire-Server configuration to use Coturn. + +First, we must locate what the "external" IP address of the machine is. + +This is the IP we must provide in our Wire-Server configuration to allow the clients to connect to Coturn. 
+
+We get it by running the following command:
+
+```bash
+
+sudo ip addr
+
+```
+
+For more details on getting the external IP address, see the `Configure the SFT deployment for node selection and public IP discovery` section above.
+
+Edit the `values/wire-server/values.yaml` file:
+
+```bash
+
+nano values/wire-server/values.yaml
+
+```
+
+You will find a section that looks like this (default):
+
+```yaml
+
+  turnStatic:
+    v1: []
+    v2:
+      # - "turn::3478"
+      # - "turn::3478"
+      # - "turn::3478?transport=tcp"
+      # - "turn::3478?transport=tcp"
+
+```
+
+Instead, we configure it to use the external IP address we found above, and the Coturn port, `3478` (as seen above in the `nftables` configuration):
+
+```yaml
+  turnStatic:
+    v1: []
+    v2:
+      - "turn:5.9.84.121:3478"
+      - "turn:5.9.84.121:3478?transport=tcp"
+```
+
+As we have changed our Wire-Server configuration, we must re-deploy the Wire-Server chart to apply the new configuration:
+
+```bash
+
+d helm upgrade --install wire-server ./charts/wire-server --timeout=15m0s --values ./values/wire-server/values.yaml --values ./values/wire-server/secrets.yaml
+
+```
+
+
+## Install Coturn with Helm.
+
+We have now configured our Coturn `value` and `secret` files, and configured `wire-server` to use Coturn.
+
+It is time to deploy Coturn.
+
+To install it, run:
+
+```bash
+d helm install coturn ./charts/coturn --timeout=15m0s --values values/coturn/values.yaml --values values/coturn/secret.yaml
+```
+
+## Verify that Coturn is running.
+
+To verify that Coturn is running, you can run:
+
+```bash
+d kubectl get pods -l app=coturn
+```
+
+Which should give you something like:
+
+```bash
+demo@install-docs:~/wire-server-deploy$ d kubectl get pods -l app=coturn
+NAME       READY   STATUS    RESTARTS   AGE
+coturn-0   1/1     Running   0          1d
+```
+
+## Appendix: Debugging procedure.
+
+If Coturn has already been installed once (for example, if something went wrong and you are re-trying), first uninstall the existing release before running a new deploy of Coturn:
+
+```bash
+d helm uninstall coturn
+```
+
+Also make sure you stop any running Coturn pods:
+
+```bash
+d kubectl delete pod -l app=coturn
+```
+
+And then re-run the `helm install` command:
+
+```bash
+d helm install coturn ./charts/coturn --timeout=15m0s --values values/coturn/values.yaml --values values/coturn/secret.yaml
+```
+
+For further debugging, enable `verboseLogging` in `charts/coturn/values.yaml` and redeploy Coturn:
+
+```yaml
+config:
+  verboseLogging: true
+```
+
+```bash
+d helm uninstall coturn
+d helm install coturn ./charts/coturn --timeout=15m0s --values values/coturn/values.yaml --values values/coturn/secret.yaml
+```
+
+Debug logs should now be visible in the Coturn pod stdout:
+
+```bash
+d kubectl logs coturn-0
+```
+
+Check if the pod has the correct IP configuration in place:
+
+```bash
+d kubectl exec -it coturn-0 -- bash
+grep ip= coturn-config/turnserver.conf
+
+# output will look something like this
+
+listening-ip=xxx.xxx.xxx.xxx
+relay-ip=xxx.xxx.xxx.xxx
+external-ip=xxx.xxx.xxx.xxx
+```
+
+
+## Appendix: Note on migration.
+
+The current guide is written with the assumption that you are setting up Coturn for the first time, on a fresh Wire-Server installation.
+
+If you are migrating from Restund to Coturn on an existing, in-use installation, you cannot disable Restund until all clients have migrated to Coturn. Clients migrate by retrieving a freshly updated calling configuration from Wire-Server that instructs them to use the Coturn IPs instead of the Restund IPs.
+
+This configuration update occurs every 24 hours, so you will have to wait at least 24 hours before you can disable Restund.
+
+These are the additional steps to ensure a smooth transition:
+
+1. Deploy Coturn as described in this guide, without disabling Restund yet.
+2. Change the `turnStatic` call configuration in the `values/wire-server/values.yaml` file to use the Coturn IPs instead of the Restund IPs.
+3. Re-deploy the Wire-Server chart to apply the new configuration.
+4. Wait at least 24 hours for all clients to retrieve the new configuration.
+5. Once you are sure all clients have migrated to Coturn, you can disable Restund as described below.
+
+## Disable Restund.
+
+As we are no longer using Restund, we should now disable it entirely.
+
+We do this by editing the `hosts.ini` file:
+
+Edit `ansible/inventory/offline/hosts.ini`, and comment out the restund section by adding `#` at the beginning of each line:
+
+```
+[restund]
+# ansnode1
+# ansnode2
+```
+
+Then connect to each ansnode and do:
+
+```bash
+sudo service restund stop
+```
+
+And check it is stopped with:
+
+```bash
+sudo service restund status
+```
\ No newline at end of file
diff --git a/offline/docs_ubuntu_22.04.md b/offline/docs_ubuntu_22.04.md
new file mode 100644
index 000000000..76bfe6b6c
--- /dev/null
+++ b/offline/docs_ubuntu_22.04.md
@@ -0,0 +1,992 @@
+# How to install wire (offline cluster)
+
+We have a pipeline in `wire-server-deploy` producing container images, static
+binaries, ansible playbooks, debian package sources and everything required to
+install Wire.
+
+## Demo / Testing installation
+
+To install a self-hosted instance of Wire deployed on one server ("Wire in a box") for testing purposes, we recommend the [autodeploy.sh](../bin/autodeploy.sh) script. See also: the [Automated full install](single_hetzner_machine_installation.md#automated-full-install) section in the Single Hetzner Machine installation readme.
+
+## Installing docker
+
+Note: If you are using a Hetzner machine, docker should already be installed (you can check with `docker version`) and you can skip this section.
+
+On your machine (we call this the "admin host"), you need to have `docker`
+installed (or any other compatible container runtime, though the instructions
+may need to be adapted). See [how to install
+docker](https://docker.com) for instructions.
+
+On Ubuntu 22.04, connected to the internet:
+
+```
+sudo bash -c '
+set -eo pipefail;
+
+apt install docker.io;
+systemctl enable docker;
+systemctl start docker;
+'
+```
+
+Ensure the user you are using for the install has permission to run docker, or add 'sudo' to the docker commands below.
+
+### Ensuring you can run docker without sudo:
+
+Run the following command to add your user to the docker group:
+
+```
+sudo usermod -aG docker $USER
+```
+
+Note: Replace $USER with your actual username as needed.
+
+Log out and log back in to apply the changes.
Alternatively, you can run the following command to activate the changes in your current shell session:
+
+```
+newgrp docker
+```
+
+Verify that you can run Docker without sudo by running the following command:
+
+```
+docker version
+```
+
+If you see the current docker version and no error, Docker is now configured to run without sudo.
+
+
+## Downloading and extracting the artifact
+
+Note: If you have followed the Ubuntu installation instructions (`single_hetzner_machine_installation.md`) before following this page, you already have a wire-server-deploy folder with an artifact extracted into it, and you can simply use that.
+
+Create a fresh workspace to download the artifacts:
+
+```
+$ cd ... # you pick a good location!
+```
+Obtain the latest airgap artifact for wire-server-deploy. Please contact us to get it.
+
+Download and extract the artifact into your workspace:
+
+```
+$ wget https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-.tgz
+$ tar xvzf wire-server-deploy-static-.tgz
+```
+Where `` above is the hash of your deployment artifact, given to you by Wire, or acquired by looking at the above build job.
+
+Make sure that the admin host can `ssh` into all the machines that you want to provision. Our docker container will use the `.ssh` folder and the `ssh-agent` of the user running the scripts.
+
+There's also a docker image containing the tooling inside this repo.
+
+## Making tooling available in your environment.
+
+If you don't intend to develop *on wire-server-deploy itself*, you should source the following shell script:
+```
+source ./bin/offline-env.sh
+```
+
+The shell script will set up a `d` alias, which runs commands passed to it inside the docker container
+with all the tools needed for doing an offline deploy.
+
+E.g.:
+
+```
+$ d ansible --version
+ansible [core 2.15.5]
+  config file = /wire-server-deploy/ansible/ansible.cfg
+  configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
+  ansible python module location = /nix/store/p9kbf1v35r184hwx9p4snny1clkbrvp7-python3.11-ansible-core-2.15.5/lib/python3.11/site-packages/ansible
+  ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
+  executable location = /nix/store/p9kbf1v35r184hwx9p4snny1clkbrvp7-python3.11-ansible-core-2.15.5/bin/ansible
+  python version = 3.11.6 (main, Oct  2 2023, 13:45:54) [GCC 12.3.0] (/nix/store/qp5zys77biz7imbk6yy85q5pdv7qk84j-python3-3.11.6/bin/python3.11)
+  jinja version = 3.1.2
+  libyaml = True
+```
+
+## Artifacts provided in the deployment tarball.
+
+The following artifacts are provided:
+
+ - `containers-adminhost/wire-server-deploy-*.tar`
+   A container image containing ansible, helm, and other tools and their
+   dependencies in versions verified to be compatible with the current wire
+   stack. Published to `quay.io/wire/wire-server-deploy` as well, but shipped
+   in the artifacts tarball for convenience.
+ - `ansible`
+   These contain all the ansible playbooks the rest of the guide refers to, as
+   well as an example inventory, which should be configured according to the
+   environment this is installed into.
+ - `binaries.tar`
+   This contains static binaries, both used during the kubespray-based
+   kubernetes bootstrapping, as well as to provide some binaries that are
+   installed during other ansible playbook runs.
+ - `charts`
+   The charts themselves, as tarballs.
We don't use an external helm
+   repository; every helm chart dependency is already resolved.
+ - `containers-system.tar`
+   These are the container images needed to bootstrap kubernetes itself
+   (currently using kubespray).
+ - `containers-helm.tar`
+   These are the container images our charts (and charts we depend on) refer to.
+   They also come as tarballs, and are seeded like the system containers.
+ - `debs-jammy.tar`
+   This acts as a self-contained dump of all packages required to install
+   kubespray, as well as all other packages that are installed by ansible
+   playbooks on nodes that don't run kubernetes.
+   There's an ansible playbook copying these assets to an "assethost", starting
+   a little webserver there serving it, and configuring all nodes to use it as
+   a package repo.
+ - `values`
+   Contains helm chart values and secrets. Needs to be tweaked to the
+   environment.
+
+## Editing the inventory
+
+Copy `ansible/inventory/offline/99-static` to `ansible/inventory/offline/hosts.ini`, and move the original out of the way:
+
+```
+cp ansible/inventory/offline/99-static ansible/inventory/offline/hosts.ini
+mv ansible/inventory/offline/99-static ansible/inventory/offline/orig.99-static
+```
+
+Edit `ansible/inventory/offline/hosts.ini`.
+Here, you will describe the topology of your offline deploy. There are instructions in the comments on how to set
+everything up. You can also refer to the extra information at https://docs.wire.com/how-to/install/ansible-VMs.html.
+
+Add one entry in the `all` section of this file for each machine you are managing via ansible. This will be all of the machines in your Wire cluster.
+
+If you are using username/password to log into and sudo up, in the `all:vars` section, add:
+```
+ansible_user=
+ansible_password=
+ansible_become_pass=
+```
+#### Editing the ansible inventory
+
+##### Updating Group Membership
+It's recommended to update the lists of which nodes belong to which group, so ansible knows what to install on these nodes.
+
+For our Wire internal offline deployments using seven VMs, we edit the inventory to run all services outside of K8s on three `ansnode` VMs.
+For productive on-prem deployments, these sections can be divided into individual host groups, reflecting the architecture of the target infrastructure.
+Examples with individual nodes for Elastic, MinIO, and Cassandra are commented out below.
+```
+[elasticsearch]
+# elasticsearch1
+# elasticsearch2
+# elasticsearch3
+ansnode1
+ansnode2
+ansnode3
+
+[minio]
+# minio1
+# minio2
+# minio3
+ansnode1
+ansnode2
+ansnode3
+
+[cassandra]
+# cassandra1
+# cassandra2
+# cassandra3
+ansnode1
+ansnode2
+ansnode3
+
+[cassandra_seed]
+# cassandraseed1
+ansnode1
+
+```
+
+### Configuring kubernetes and etcd
+
+To run Kubernetes, at least three nodes are required, which need to be added to the `[kube-master]`, `[etcd]` and `[kube-node]` groups of the inventory file. Any
+additional nodes should only be added to the `[kube-node]` group.
+For our Wire internal offline deployments using seven VMs, we edit the inventory to run all services inside K8s on three `kubenode` VMs.
+For productive on-prem deployments, these sections can be divided into individual host groups, reflecting the architecture of the target infrastructure.
+
+```
+[kube-master]
+# kubemaster1
+# kubemaster2
+# kubemaster3
+kubenode1
+kubenode2
+kubenode3
+
+[etcd]
+# etcd1 etcd_member_name=etcd1
+# etcd2 etcd_member_name=etcd2
+# etcd3 etcd_member_name=etcd3
+kubenode1 etcd_member_name=etcd1
+kubenode2 etcd_member_name=etcd2
+kubenode3 etcd_member_name=etcd3
+
+[kube-node]
+# prodnode1
+# prodnode2
+# prodnode3
+# prodnode4
+# ...
+kubenode1
+kubenode2
+kubenode3
+```
+
+### Setting up databases and kubernetes to talk over the correct (private) interface
+If you are deploying wire on servers that are expected to use one interface to talk to the public, and a separate interface to talk amongst themselves, you will need to add `ip=` declarations for the private interface of each node. For instance, if the first kubenode is expected to talk to the world on 192.168.122.21, but speak to other wire services (kubernetes, databases, etc.) on 192.168.0.2, you should edit its entry like the following:
+```
+kubenode1 ansible_host=192.168.122.21 ip=192.168.0.2
+```
+Do this for all of the instances.
+
+### Setting up Database network interfaces.
+* Make sure that `assethost` is present in the inventory file with the correct `ansible_host` (and `ip` values if required).
+* Make sure that `cassandra_network_interface` is set to the name of the network interface on which the kubenodes should talk to cassandra and on which the cassandra nodes
+  should communicate among each other. Run `ip addr` on one of the cassandra nodes to determine the network interface names, and which networks they correspond to. In Ubuntu 22.04, for example, interface names are predictable and individualized, e.g. `enp41s0`.
+* Similarly, `elasticsearch_network_interface` and `minio_network_interface` should be set to the network interfaces over which elasticsearch and minio should communicate with kubernetes.
+
+
+### Marking kubenode for calling server (SFT)
+
+The SFT calling server should run on kubernetes nodes that are connected to the public internet.
+If not all kubernetes nodes match this criterion, you should specifically label the nodes that do,
+so that you're sure SFT is deployed correctly.
+
+
+By using a `node_label` you can make sure SFT is only deployed on a certain node like `kubenode4`:
+
+```
+kubenode4 node_labels="{'wire.com/role': 'sftd'}" node_annotations="{'wire.com/external-ip': 'a.b.c.d'}"
+```
+
+If the node does not know its own public IP (e.g. because it's behind NAT) then you should also set
+the `wire.com/external-ip` annotation to the public IP of the node.
+
+### Configuring MinIO
+
+In order to automatically generate deeplinks, edit the minio variables in `[minio:vars]` (`prefix`, `domain` and `deeplink_title`) by replacing `example.com` with your own domain.
+
+
+### Example hosts.ini
+
+Here is an example `hosts.ini` file for an internal "Wire in a box" deployment with seven VMs.
+Please note that your on-prem infrastructure requirements likely differ in terms of number of VMs / nodes, IP addresses and ranges, as well as host names.
+
+```
+[all]
+assethost ansible_host=192.168.122.10
+kubenode1 ansible_host=192.168.122.21
+kubenode2 ansible_host=192.168.122.22
+kubenode3 ansible_host=192.168.122.23
+ansnode1 ansible_host=192.168.122.31
+ansnode2 ansible_host=192.168.122.32
+ansnode3 ansible_host=192.168.122.33
+
+[all:vars]
+ansible_user = demo
+
+[cassandra:vars]
+cassandra_network_interface = enp1s0
+cassandra_backup_enabled = False
+cassandra_incremental_backup_enabled = False
+# cassandra_backup_s3_bucket =
+
+[elasticsearch:vars]
+elasticsearch_network_interface = enp1s0
+
+[minio:vars]
+minio_network_interface = enp1s0
+prefix = ""
+domain = "example.com"
+deeplink_title = "wire demo environment, example.com"
+
+[rmq-cluster:vars]
+rabbitmq_network_interface = enp1s0
+
+[kube-master]
+kubenode1
+kubenode2
+kubenode3
+
+[etcd]
+kubenode1 etcd_member_name=etcd1
+kubenode2 etcd_member_name=etcd2
+kubenode3 etcd_member_name=etcd3
+
+[kube-node]
+kubenode1
+kubenode2
+kubenode3
+
+[k8s-cluster:children]
+kube-master
+kube-node
+
+[cassandra]
+ansnode1
+ansnode2
+ansnode3
+
+[cassandra_seed]
+ansnode1
+
+[elasticsearch]
+ansnode1
+ansnode2
+ansnode3
+
+[elasticsearch_master:children]
+elasticsearch
+
+[minio]
+ansnode1
+ansnode2
+ansnode3
+
+[rmq-cluster]
+ansnode1
+ansnode2
+ansnode3
+
+```
+
+## Generating secrets
+
+The Minio and Coturn services share secrets with the `wire-server` helm chart. Run the following script to generate a fresh set of secrets for these components:
+
+```
+./bin/offline-secrets.sh
+```
+
+This should generate two files: `./ansible/inventory/group_vars/all/secrets.yaml` and `values/wire-server/secrets.yaml`.
+
+
+### WORKAROUND: old debian key
+All of our debian archives up to version 4.12.0 used a now-outdated debian repository signature. Some modifications are required to be able to install everything properly.
+
+Edit the `ansible/setup-offline-sources.yml` file.
+
+Open it with your preferred text editor and edit the following:
+* find a big block of comments and uncomment everything in it (`- name: trust anything...`)
+* after the block you will find `- name: Register offline repo key...`. Comment out that segment (do not comment out the part with `- name: Register offline repo`!)
+
+Then disable checking for outdated signatures by editing the following file:
+```
+ansible/roles-external/kubespray/roles/container-engine/docker/tasks/main.yml
+```
+* comment out the block with `- name: ensure docker-ce repository public key is installed...`
+* comment out the next block `- name: ensure docker-ce repository is enabled`
+
+Now you are ready to start deploying services.
+
+#### WORKAROUND: dependency
+
+Some Ubuntu systems do not have GPG installed by default, but Wire assumes it is already present. Ensure you have gpg installed on all of your nodes before continuing to the next step.
+
+You can check if gpg is installed by running:
+
+```
+gpg --version
+```
+
+Which should produce an output resembling:
+
+```
+demo@assethost:~$ gpg --version
+gpg (GnuPG) 2.2.27
+libgcrypt 1.9.4
+Copyright (C) 2021 Free Software Foundation, Inc.
+License GNU GPL-3.0-or-later
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+
+Home: /home/demo/.gnupg
+Supported algorithms:
+Pubkey: RSA, ELG, DSA, ECDH, ECDSA, EDDSA
+Cipher: IDEA, 3DES, CAST5, BLOWFISH, AES, AES192, AES256, TWOFISH,
+        CAMELLIA128, CAMELLIA192, CAMELLIA256
+Hash: SHA1, RIPEMD160, SHA256, SHA384, SHA512, SHA224
+Compression: Uncompressed, ZIP, ZLIB, BZIP2
+```
+
+## Deploying Kubernetes and stateful services
+
+In order to deploy all mentioned services, run:
+```
+d ./bin/offline-cluster.sh
+```
+In case any of the steps in this script fail, see the notes in the comments that accompany each step.
+Comment out steps that have already completed when re-running the script.
+
+#### Ensuring Kubernetes is healthy.
+
+Ensure the cluster comes up healthy. The container also contains `kubectl`, so check the node status:
+
+```
+d kubectl get nodes -owide
+```
+They should all report `Ready`.
+
+### Troubleshooting external services
+Cassandra, Minio and Elasticsearch run outside of the Kubernetes cluster; make sure those machines have the necessary ports open.
+
+On each of the machines running Cassandra, Minio and Elasticsearch, run the following commands to open the necessary ports, if needed:
+```
+sudo bash -c '
+set -eo pipefail;
+
+# cassandra
+ufw allow 9042/tcp;
+ufw allow 9160/tcp;
+ufw allow 7000/tcp;
+ufw allow 7199/tcp;
+
+# elasticsearch
+ufw allow 9300/tcp;
+ufw allow 9200/tcp;
+
+# minio
+ufw allow 9000/tcp;
+ufw allow 9092/tcp;
+
+# rabbitmq
+ufw allow 5671/tcp;
+ufw allow 5672/tcp;
+ufw allow 4369/tcp;
+ufw allow 25672/tcp;
+'
+```
+
+### Deploy RabbitMQ cluster
+Follow the steps mentioned in [offline/rabbitmq_setup.md](./rabbitmq_setup.md) to create a RabbitMQ cluster based on your setup.
+
+### Preparation for Federation
+Enabling Federation requires RabbitMQ to be in place. Please follow the instructions in [offline/federation_preparation.md](./federation_preparation.md) for setting up RabbitMQ.
+
+After that, continue with the next steps below.
+
+### Deploying Wire
+
+It's now time to deploy the helm charts on top of kubernetes, installing the Wire platform.
+
+#### Finding the stateful services
+First, set up interfaces from Kubernetes to the external services by running:
+
+```
+d helm install cassandra-external ./charts/cassandra-external --values ./values/cassandra-external/values.yaml
+d helm install elasticsearch-external ./charts/elasticsearch-external --values ./values/elasticsearch-external/values.yaml
+d helm install minio-external ./charts/minio-external --values ./values/minio-external/values.yaml
+```
+
+#### Deploying stateless dependencies
+
+Also copy the values file for `databases-ephemeral`, as it is required for the next step:
+
+```
+cp values/databases-ephemeral/prod-values.example.yaml values/databases-ephemeral/values.yaml
+# edit values.yaml if necessary
+d helm install databases-ephemeral ./charts/databases-ephemeral/ --values ./values/databases-ephemeral/values.yaml
+```
+
+Next, three more services that need no additional configuration need to be deployed:
+```
+d helm install fake-aws ./charts/fake-aws --values ./values/fake-aws/prod-values.example.yaml
+
+# ensure that the RELAY_NETWORKS value is set to the podCIDR
+SMTP_VALUES_FILE="./values/demo-smtp/prod-values.example.yaml"
+podCIDR=$(d kubectl get configmap -n kube-system kubeadm-config -o yaml | grep -i 'podSubnet' | awk '{print $2}' 2>/dev/null)
+if [[ $? -eq 0 && -n "$podCIDR" ]]; then
+  sed -i "s|RELAY_NETWORKS: \".*\"|RELAY_NETWORKS: \":${podCIDR}\"|" $SMTP_VALUES_FILE
+else
+  echo "Failed to fetch podSubnet. Using the default value: $(grep -i RELAY_NETWORKS $SMTP_VALUES_FILE)"
+fi
+d helm install demo-smtp ./charts/demo-smtp --values $SMTP_VALUES_FILE
+
+d helm install reaper ./charts/reaper
+```
+
+#### Preparing your values
+
+Next, copy `./values/wire-server/prod-values.example.yaml` to `./values/wire-server/values.yaml`.
+
+```
+cp ./values/wire-server/prod-values.example.yaml ./values/wire-server/values.yaml
+```
+
+Inspect all the values and adjust the domains to your domains where needed.
+
+Add the IPs of your `coturn` servers to the `turnStatic.v2` list:
+```yaml
+  turnStatic:
+    v1: []
+    v2:
+      - "turn::3478"
+      - "turn::3478"
+      - "turn::3478?transport=tcp"
+      - "turn::3478?transport=tcp"
+```
+
+Open up `./values/wire-server/secrets.yaml` and inspect the values. In theory
+this file should have only generated secrets, and no additional secrets have to
+be added, unless additional options have been enabled.
+
+Open up `./values/wire-server/values.yaml` and replace example.com and the other domains and subdomains with your domain. You can do it with:
+
+```
+sed -i "s/example.com/YOURDOMAINHERE/g" ./values/wire-server/values.yaml
+```
+
+#### [Optional] Using Kubernetes managed Cassandra (K8ssandra)
+You can deploy K8ssandra by following these docs -
+[offline/k8ssandra_setup.md](./k8ssandra_setup.md)
+
+Once K8ssandra is deployed, change the host address in `values/wire-server/values.yaml` to the K8ssandra service address, i.e.
+```
+sed -i 's/cassandra-external/k8ssandra-cluster-datacenter-1-service.database/g' ./values/wire-server/values.yaml
+```
+
+
+#### Deploying Wire-Server
+
+Now deploy `wire-server`:
+
+```
+d helm install wire-server ./charts/wire-server --timeout=15m0s --values ./values/wire-server/values.yaml --values ./values/wire-server/secrets.yaml
+```
+
+### Deploying webapp
+
+Update the values in `./values/webapp/prod-values.example.yaml`.
+
+Set your domain name with sed:
+```
+sed -i "s/example.com/YOURDOMAINHERE/g" values/webapp/prod-values.example.yaml
+```
+and run
+```
+d helm install webapp ./charts/webapp --values ./values/webapp/prod-values.example.yaml
+```
+
+### Deploying team-settings
+
+Update the values in `./values/team-settings/prod-values.example.yaml` and `./values/team-settings/prod-secrets.example.yaml`.
+
+Set your domain name with sed:
+```
+sed -i "s/example.com/YOURDOMAINHERE/g" values/team-settings/prod-values.example.yaml
+```
+then run
+```
+d helm install team-settings ./charts/team-settings --values ./values/team-settings/prod-values.example.yaml --values ./values/team-settings/prod-secrets.example.yaml
+```
+
+### Deploying account-pages
+
+Update the values in `./values/account-pages/prod-values.example.yaml`.
+
+Set your domain name with sed:
+```
+sed -i "s/example.com/YOURDOMAINHERE/g" values/account-pages/prod-values.example.yaml
+```
+and run
+```
+d helm install account-pages ./charts/account-pages --values ./values/account-pages/prod-values.example.yaml
+```
+
+### Deploying smallstep-accomp
+
+Update the values in `./values/smallstep-accomp/prod-values.example.yaml`
+and then run
+```
+d helm install smallstep-accomp ./charts/smallstep-accomp --values ./values/smallstep-accomp/prod-values.example.yaml
+```
+
+
+## Directing Traffic to Wire
+
+### Deploy ingress-nginx-controller
+
+This component requires no configuration, and is a requirement for all of the methods we support for getting traffic into your cluster:
+
+```
+cp ./values/ingress-nginx-controller/prod-values.example.yaml ./values/ingress-nginx-controller/values.yaml
+d helm install ingress-nginx-controller ./charts/ingress-nginx-controller --values ./values/ingress-nginx-controller/values.yaml
+```
+
+### Forwarding traffic to your cluster
+
+#### Using network services
+
+The goal of this section is to forward traffic on ports 443 and 80 to the kubernetes node(s) running the ingress service.
+Wire expects HTTPS traffic on port 443 to be forwarded to port 31773 and HTTP traffic on port 80 to be forwarded to port 31772.
+
+#### Through an IP Masquerading Firewall
+
+Your IP masquerading firewall must forward port 443 and port 80 to one of the kubernetes nodes (which must always remain online).
+Additionally, if you want to use letsEncrypt CA certificates, hosts behind your firewall must be redirected to your kubernetes node when the cluster attempts to contact the outside IP.
+
+The following instructions are given only as an example. Depending on your network setup, different masquerading rules are required.
+In the following, all traffic destined for your wire cluster goes through a single IP masquerading firewall.
+
+##### Incoming SSL Traffic
+
+To prepare, determine the interface of your outbound IP:
+
+```bash
+export OUTBOUNDINTERFACE=$(ip ro | sed -n "/default/s/.* dev \([enpso0-9]*\) .*/\1/p")
+echo "OUTBOUNDINTERFACE is $OUTBOUNDINTERFACE"
+```
+
+Please check that `OUTBOUNDINTERFACE` is correctly set before continuing.
+
+Supply your outside IP address:
+
+```bash
+export PUBLICIPADDRESS=
+```
+
+You can do this directly with this one-liner command, which inserts into `$PUBLICIPADDRESS` the IP of the interface with name `$OUTBOUNDINTERFACE`:
+
+```bash
+export PUBLICIPADDRESS=$(ip -br addr | awk -v iface="$OUTBOUNDINTERFACE" '$1 == iface {split($3, a, "/"); print a[1]}')
+```
+
+Finally, you can check that the right value is in the environment variable using:
+
+```bash
+echo "PUBLICIPADDRESS is $PUBLICIPADDRESS"
+```
+
+Then:
+
+1. Find out on which node `ingress-nginx` is running:
+```
+d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,IP:.status.hostIP
+```
+2. Use that IP for $KUBENODEIP
+
+```
+export KUBENODEIP=
+```
+
+Or instead of getting the IP manually, you can also do this with a one-liner command:
+
+```bash
+export KUBENODEIP=$(sudo docker run --network=host -v ${SSH_AUTH_SOCK:-nonexistent}:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent -v $HOME/.ssh:/root/.ssh -v $PWD:/wire-server-deploy $WSD_CONTAINER kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,IP:.status.hostIP --no-headers | awk '{print $3}')
+```
+
+Then, in case the server owns the public IP (i.e. you can see the IP in `ip addr`), run the following:
+```
+sudo bash -c "
+set -xeo pipefail;
+
+iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p tcp --dport 80 -j DNAT --to-destination $KUBENODEIP:31772;
+iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p tcp --dport 443 -j DNAT --to-destination $KUBENODEIP:31773;
+"
+```
+
+If your server is being forwarded traffic from another firewall (you do not see the IP in `ip addr`), run the following:
+```
+sudo bash -c "
+set -eo pipefail;
+
+iptables -t nat -A PREROUTING -i $OUTBOUNDINTERFACE -p tcp --dport 80 -j DNAT --to-destination $KUBENODEIP:31772;
+iptables -t nat -A PREROUTING -i $OUTBOUNDINTERFACE -p tcp --dport 443 -j DNAT --to-destination $KUBENODEIP:31773;
+"
+```
+or add the corresponding rules to a config file (for UFW, /etc/ufw/before.rules) so they persist after rebooting.
+
+If you are running a UFW firewall, make sure to allow inbound traffic on 443 and 80:
+```
+sudo bash -c "
+set -eo pipefail;
+
+ufw enable;
+ufw allow in on $OUTBOUNDINTERFACE proto tcp to any port 443;
+ufw allow in on $OUTBOUNDINTERFACE proto tcp to any port 80;
+"
+```
+
+For wire-in-a-box deployments based on single_hetzner_machine_installation.md, an nftables based firewall including a predefined ruleset should already exist.
+By default, the predefined ruleset forwards ingress traffic to kubenode1 (192.168.122.21). To check on which node the ingress controller has been deployed, get the node IP via kubectl:
+```
+d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,IP:.status.hostIP
+```
+
+If the IP returned is 192.168.122.21, you can skip the next few steps.
+Otherwise, execute these commands:
+```
+export KUBENODEIP=
+
+sudo sed -i -e "s/192.168.122.21/$KUBENODEIP/g" /etc/nftables.conf
+
+sudo systemctl restart nftables
+```
+
+###### Mirroring the public IP
+
+`cert-manager` needs to be able to reach the kubernetes cluster on its external IP. This might be problematic, because in security-conscious environments the external IP might not be owned by any of the kubernetes hosts.
+
+On an IP masquerading router, you can redirect outgoing traffic from your cluster, i.e. when the cluster asks to connect to your external IP, it will be routed to the kubernetes node inside the cluster.
+
+Make sure `PUBLICIPADDRESS` is exported (see above).
+
+```
+export INTERNALINTERFACE=virbr0
+sudo bash -c "
+set -xeo pipefail;
+
+iptables -t nat -A PREROUTING -i $INTERNALINTERFACE -d $PUBLICIPADDRESS -p tcp --dport 80 -j DNAT --to-destination $KUBENODEIP:31772;
+iptables -t nat -A PREROUTING -i $INTERNALINTERFACE -d $PUBLICIPADDRESS -p tcp --dport 443 -j DNAT --to-destination $KUBENODEIP:31773;
+"
+```
+
+or add the corresponding rules to a config file (for UFW, /etc/ufw/before.rules) so they persist after rebooting.
+
+
+### Changing the TURN port
+
+FIXME: ansibleize this!
+TURN's connection port for incoming clients is set to 80 by default. To change it, edit /etc/restund.conf on the restund nodes and replace ":80" with your desired port (for instance, 8080 as above).
+
+
+### Acquiring / Deploying SSL Certificates:
+
+SSL certificates are required by the nginx-ingress-services helm chart. You can either register and provide your own, or use cert-manager to request certificates from LetsEncrypt.
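+
+If you bring your own certificate, it is worth a quick sanity check before handing it to helm. The following is a sketch assuming the `certificate.pem` and `key.pem` file names used later in this section:
+
+```bash
+# The two digests must match; otherwise the certificate and key do not belong together.
+openssl x509 -noout -modulus -in certificate.pem | openssl md5
+openssl rsa -noout -modulus -in key.pem | openssl md5
+
+# Check the expiry date and the domains the certificate actually covers.
+openssl x509 -noout -enddate -in certificate.pem
+openssl x509 -noout -text -in certificate.pem | grep -A1 'Subject Alternative Name'
+```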
+
+##### Prepare to deploy nginx-ingress-services
+
+Copy the example values for `nginx-ingress-services`:
+
+```
+cp ./values/nginx-ingress-services/prod-values.example.yaml ./values/nginx-ingress-services/values.yaml
+cp ./values/nginx-ingress-services/prod-secrets.example.yaml ./values/nginx-ingress-services/secrets.yaml
+```
+
+#### Bring your own certificates
+
+If you generated your SSL certificates yourself, there are two ways to provide them to Wire:
+
+##### From the command line
+If you have the certificate and its corresponding key available on the filesystem, copy them into the root of the Wire-Server directory, and:
+
+```
+d helm install nginx-ingress-services ./charts/nginx-ingress-services --values ./values/nginx-ingress-services/values.yaml --set-file secrets.tlsWildcardCert=certificate.pem --set-file secrets.tlsWildcardKey=key.pem
+```
+
+Do not try to use paths to refer to the certificates, as the 'd' command messes with file paths outside of Wire-Server.
+
+##### In your nginx-ingress-services values file
+Change the domains in `values.yaml` to your domain, and add your wildcard or SAN certificate that is valid for all these
+domains to the `secrets.yaml` file.
+
+Now install the service with helm:
+
+```
+d helm install nginx-ingress-services ./charts/nginx-ingress-services --values ./values/nginx-ingress-services/values.yaml --values ./values/nginx-ingress-services/secrets.yaml
+```
+
+#### Use letsencrypt generated certificates
+
+If you are using a single external IP and no route, you need to make sure that the cert-manager pods are not deployed on the same node as the ingress-nginx-controller.
+
+To do that, check which node the ingress-nginx-controller pod is running on, e.g. by running
+
+```
+d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName
+```
+
+If it is, for example, `kubenode1`, cordon the node:
+
+```
+d kubectl cordon kubenode1
+```
+
+First, download cert-manager, and place it in the appropriate location:
+```
+wget https://charts.jetstack.io/charts/cert-manager-v1.13.2.tgz
+tar -C ./charts -xvzf cert-manager-v1.13.2.tgz
+```
+
+In case `values.yaml` and `secrets.yaml` don't exist yet in `./values/nginx-ingress-services`, create them from the templates
+```
+cp ./values/nginx-ingress-services/prod-secrets.example.yaml ./values/nginx-ingress-services/secrets.yaml
+cp ./values/nginx-ingress-services/prod-values.example.yaml ./values/nginx-ingress-services/values.yaml
+```
+and customize.
+
+Edit `values.yaml`:
+
+ * set `useCertManager: true`
+ * set `certmasterEmail: `
+
+Set your domain name with sed:
+```
+sed -i "s/example.com/YOURDOMAINHERE/" values/nginx-ingress-services/values.yaml
+```
+
+Install `cert-manager` into a new namespace `cert-manager-ns`:
+```
+d kubectl create namespace cert-manager-ns
+d helm upgrade --install -n cert-manager-ns --set 'installCRDs=true' cert-manager charts/cert-manager
+```
+
+Uncordon the node you cordoned earlier:
+```
+d kubectl uncordon kubenode1
+```
+
+Then run:
+
+```
+d helm upgrade --install nginx-ingress-services charts/nginx-ingress-services -f values/nginx-ingress-services/values.yaml
+```
+
+In order to acquire SSL certificates from letsencrypt, outgoing traffic from the VMs needs to be enabled temporarily.
+
+With the nftables based Hetzner Server setup, enable this rule and restart nftables:
+
+```
+vi /etc/nftables.conf
+
+iifname virbr0 oifname $INF_WAN counter accept comment "allow internet for internal VMs, enable this rule only for letsencrypt cert issue"
+
+sudo systemctl restart nftables
+```
+
+Watch the output of the following command to know how your request is going:
+```
+d kubectl get certificate
+```
+
+Once the cert has been issued successfully, the rule above can be disabled again, disallowing outgoing traffic from VMs. Restart the firewall after edits.
+
+
+#### Old wire-server releases
+
+On older wire-server releases, nginx-ingress-services may fail to deploy, because some version numbers of services have changed. Make the following changes, and try to re-deploy until it works.
+
+certificate.yaml:
+v1alpha2 -> v1
+remove keyAlgorithm keySize keyEncoding
+
+certificate-federator.yaml:
+v1alpha2 -> v1
+remove keyAlgorithm keySize keyEncoding
+
+issuer:
+v1alpha2 -> v1
+
+## Installing sftd
+
+For full docs with details and explanations please see https://github.com/wireapp/wire-server-deploy/blob/d7a089c1563089d9842aa0e6be4a99f6340985f2/charts/sftd/README.md
+
+First, make sure you have a certificate for `sftd.`, or that you are using a letsencrypt certificate.
+For bring-your-own-certificate, this could be the same wildcard or SAN certificate you used in previous steps.
+
+Next, copy `values/sftd/prod-values.example.yaml` to `values/sftd/values.yaml`, and change the contents accordingly.
+
+ * If your turn servers can be reached on their public IP by the SFT service, Wire recommends you enable cooperation between turn and SFT. Add a line reading `turnDiscoveryEnabled: true` to `values/sftd/values.yaml`.
+
+Edit `values/sftd/values.yaml`, select whether you want lets-encrypt certificates, and ensure the alloworigin and the host point to the appropriate domains.
+
+#### Deploying
+
+##### Node Annotations and External IPs.
+If you want to restrict SFT to certain nodes, make sure that in your inventory file you have annotated all of the nodes that are able to run sftd workloads with a node label indicating they are to be used, and their external IP, if they are behind a 1:1 firewall (Wire recommends this).
+```
+kubenode3 node_labels="{'wire.com/role': 'sftd'}" node_annotations="{'wire.com/external-ip': 'XXXX'}"
+```
+
+If you failed to perform the above step during the ansible deployment of your sft services, you can perform them manually:
+```
+d kubectl annotate node kubenode1 wire.com/external-ip=178.63.60.45
+d kubectl label node kubenode1 wire.com/role=sftd
+```
+
+##### A selected group of kubernetes nodes:
+By default, the replicaCount in `values/sftd/values.yaml` is set to 3. Change it to the number of nodes on which you want to deploy the sftd server.
+
+If you are restricting SFT to certain nodes, use `nodeSelector` to run on specific nodes (**replacing the example.com domains with yours**):
+```
+d helm upgrade --install sftd ./charts/sftd --set 'nodeSelector.wire\.com/role=sftd' --values values/sftd/values.yaml
+```
+
+##### All kubernetes nodes.
+If you are not doing that, omit the `nodeSelector` argument:
+
+```
+d helm upgrade --install sftd ./charts/sftd --values values/sftd/values.yaml
+```
+
+##### Specifying your certificates.
+
+If you bring your own certificates, you can specify them with:
+
+```
+d helm upgrade --install sftd ./charts/sftd \
+  --set-file tls.crt=/path/to/tls.crt \
+  --set-file tls.key=/path/to/tls.key \
+  --values values/sftd/values.yaml
+```
+
+
+## Coturn.
+
+To deploy coturn on your new installation, go to the following link and follow the instructions there:
+
+[Installing Coturn](coturn.md)
+
+
+## Installing fluent-bit
+
+fluent-bit collects and ships logs to databases or log servers such as Elasticsearch, syslog, etc.
+Copy `values/fluent-bit/prod-values.example.yaml` to `values/fluent-bit/values.yaml` and edit the file accordingly. Sample values for Elasticsearch and syslog are provided in the file.
+
+```
+cp values/fluent-bit/prod-values.example.yaml values/fluent-bit/values.yaml
+```
+
+and install the fluent-bit helm chart:
+
+```
+d helm upgrade --install fluent-bit ./charts/fluent-bit --values values/fluent-bit/values.yaml
+```
+
+Make sure that traffic is allowed from your kubernetes nodes to your destination server (elasticsearch or syslog).
+
+
+## Appendixes
+
+### Syncing time on cassandra nodes
+The nodes running cassandra (`ansnode` 1, 2 and 3) require precise synchronization of their clocks.
+
+If the cassandra migration doesn't complete, it is probably due to the clocks not being in sync.
+
+To sync them, run the following ansible playbook -
+```
+d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/sync_time.yml
+```
+
+The above playbook configures NTP on all Cassandra nodes and assigns the first node as the authoritative node. All other nodes sync their time with it.
+
+### Resetting the k8s cluster
+To reset the k8s cluster, run the following command:
+```
+d ansible-playbook -i ansible/inventory/offline/hosts.ini ansible/roles-external/kubespray/reset.yml --skip-tags files
+```
+You can remove the `--skip-tags files` option if you want to remove all the loaded container images as well.
+
+After that, to reinstall the cluster, comment out the steps in the `offline-cluster.sh` script that have already completed, such as setup-offline-sources and seed-offline-containerd (to avoid re-downloading the container images and save time), and run -
+```
+d ./bin/offline-cluster.sh
+```
diff --git a/offline/federation_preparation.md b/offline/federation_preparation.md
new file mode 100644
index 000000000..65ee74287
--- /dev/null
+++ b/offline/federation_preparation.md
@@ -0,0 +1,34 @@
+## DNS
+
+Each Wire instance needs an SRV DNS record for federation to work (in addition to the ingress A record):
+
+```
+dig srv _wire-server-federator._tcp.example.com +short
+
+0 10 443 federator.example.com.
+```
+
+## Ingress
+
+SSL certs for the federator ingress can either be acquired via cert-manager and Letsencrypt if enabled in the [nginx-ingress-service/values.yaml](../values/nginx-ingress-services/prod-values.example.yaml)
+or added manually, see "Bring your own certificates" in [offline/docs_ubuntu_22.04.md](./docs_ubuntu_22.04.md).
+
+
+## Helm chart configuration
+
+Our example [values.yaml](../values/wire-server/prod-values.example.yaml) and [secrets.yaml](../values/wire-server/prod-secrets.example.yaml) for `wire-server` are preconfigured to allow for enabling federation.
+Look for any federation-related settings and enable or adjust them accordingly.
+Important: `brig`, `galley` and `background-worker` need to be able to access RabbitMQ with the same secret.
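+
+For illustration, a minimal sketch of what "the same secret" means in the `wire-server` secrets file. The key paths below are an assumption for illustration only; check `values/wire-server/prod-secrets.example.yaml` in your artifact for the authoritative structure:
+
+```yaml
+# Illustrative sketch only: the same RabbitMQ credentials repeated for every
+# service that talks to RabbitMQ (brig, galley, background-worker).
+brig:
+  secrets:
+    rabbitmq:
+      username: wire-server
+      password: <one-shared-password>
+galley:
+  secrets:
+    rabbitmq:
+      username: wire-server
+      password: <one-shared-password>
+background-worker:
+  secrets:
+    rabbitmq:
+      username: wire-server
+      password: <one-shared-password>
+```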
+ +Adding remote instances to federate with happens in the `brig` subsection in [values.yaml](../values/wire-server/prod-values.example.yaml): + +``` + setFederationDomainConfigs: + - domain: remotebackend1.example.com + search_policy: full_search +``` +Multiple domains with individual search policies can be added. + +## RabbitMQ + +You can refer to [offline/rabbitmq_setup.md](./rabbitmq_setup.md) for creating RabbitMQ cluster, if you haven't yet. diff --git a/offline/k8ssandra_setup.md b/offline/k8ssandra_setup.md new file mode 100644 index 000000000..7472140b9 --- /dev/null +++ b/offline/k8ssandra_setup.md @@ -0,0 +1,159 @@ +# Setting up K8ssandra +Reference - https://docs.k8ssandra.io/install/local/single-cluster-helm/ + +K8ssandra will need the following components to be installed in the cluster - +- Dynamic persistent volume provisioning (e.g with OpenEBS) +- Cert-Manager +- Minio (for backup and restore) +- K8ssandra-operator +- Configure minio bucket for backups + +## [1] Dynamic Persistent Volume Provisioning +Refer to [offline/local_persistent_storage_k8s](./local_persistent_storage_k8s.md) + +## [2] Install cert-manager +cert-manager is a must requirement for k8ssandra - see https://docs.k8ssandra.io/install/local/single-cluster-helm/#deploy-cert-manager for why. + +To install the cert-manager, follow the steps mentioned in `Use letsencrypt generated certificates` section in [offline/docs_ubuntu_22.04.md](./docs_ubuntu_22.04.md) + +## [3] Install Minio +Minio and minio-external chart should have been already installed, if you are following docs_ubuntu_22.04.md + +## [4] Deploy K8ssandra Operator +``` +cp ./values/k8ssandra-operator/prod-values.example.yaml ./values/k8ssandra-operator/values.yaml + +d helm install k8ssandra-operator charts/k8ssandra-operator --values ./values/k8ssandra-operator/values.yaml -n database --create-namespace +``` + +## [5] Configure Minio Bucket for Backups +Create a K8s secret for k8ssandra to access with Minio by applying `minio-secret.yaml` below. + +You can find the value of `aws_access_key_id` and `aws_secret_access_key` from `ansible/inventory/offline/group_vars/all/secrets.yaml` file, they will be named `minio_access_key` and `minio_secret_key` respectively. Replace them in the secret config below. + +``` +apiVersion: v1 +kind: Secret +metadata: + name: medusa-bucket-key + namespace: database +type: Opaque +stringData: + credentials: |- + [default] + aws_access_key_id = UIWEGQZ53qVlLuQ2mkM3 #update this + aws_secret_access_key = dpZqqiR0Bwz6Kc6J8ruPfTC1VqIPI4EM0Id6TLWG83 #update this +``` + +Apply the secret: + +```d kubectl apply -f minio-secret.yaml``` + +Now, put this medusa config directly below the `spec:` section in `charts/k8ssandra-test-cluster/templates/k8ssandra-cluster.yaml`: +``` +medusa: + storageProperties: + storageProvider: s3_compatible + region: eu-west-1 + bucketName: k8ssandra-backups + host: minio-external + port: 9000 + prefix: dc1 + storageSecretRef: + name: medusa-bucket-key + secure: false + maxBackupAge: 7 +``` + +## Install K8ssandra Test Cluster +Create a copy of the provided values file - +``` +cp ./values/k8ssandra-test-cluster/prod-values.example.yaml ./values/k8ssandra-test-cluster/values.yaml +``` + +You can update the values in the `values/k8ssandra-test-cluster/values.yaml` file as per your requirement. 
+
+Now, deploy it -
+
+```
+d helm upgrade --install k8ssandra-test-cluster charts/k8ssandra-test-cluster --values values/k8ssandra-test-cluster/values.yaml --namespace database
+```
+
+After successful deployment, change the `datacenter -> size` to 3 in ```values/k8ssandra-test-cluster/values.yaml``` and upgrade the deployment.
+
+Note: Deploying with size: 3 directly will result in some hostname resolution issues.
+```
+d helm upgrade --install k8ssandra-test-cluster charts/k8ssandra-test-cluster --values values/k8ssandra-test-cluster/values.yaml --namespace database
+```
+
+## Enable Backups
+Reference - https://docs.k8ssandra.io/tasks/backup-restore/
+
+To enable the Medusa backup schedule and a purging schedule for old backups, create a file `k8ssandra-backup.yaml`:
+```
+apiVersion: medusa.k8ssandra.io/v1alpha1
+kind: MedusaBackupSchedule
+metadata:
+  name: medusa-backup-schedule
+  namespace: database
+spec:
+  backupSpec:
+    backupType: differential
+    cassandraDatacenter: datacenter-1
+  cronSchedule: "30 1 * * *"
+  disabled: false
+
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: k8ssandra-medusa-backup
+  namespace: database
+spec:
+  schedule: "0 0 * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          name: k8ssandra-medusa-backup
+        spec:
+          serviceAccountName: medusa-backup
+          containers:
+            - name: medusa-backup-cronjob
+              image: bitnami/kubectl:1.29.3
+              imagePullPolicy: IfNotPresent
+              command:
+                - 'bin/bash'
+                - '-c'
+                - 'printf "apiVersion: medusa.k8ssandra.io/v1alpha1\nkind: MedusaTask\nmetadata:\n  name: purge-backups-timestamp\n  namespace: database\nspec:\n  cassandraDatacenter: datacenter-1\n  operation: purge" | sed "s/timestamp/$(date +%Y%m%d%H%M%S)/g" | kubectl apply -f -'
+          restartPolicy: OnFailure
+```
+
+Note: You can adjust the backup schedule as per your requirements. Apply it with -
+```d kubectl apply -f k8ssandra-backup.yaml```
+
+You can see the backup scheduler via -
+
+```d kubectl get MedusaBackupSchedule -A```
+
+and the past backups via -
+
+```d kubectl get MedusaBackupJob -A```
+
+## Restoring a backup
+Create a `restore-k8ssandra.yaml` file with the content below, replacing the backup name with the one you want to restore. You can get the name from `d kubectl get MedusaBackupJob -A`.
+
+```
+apiVersion: medusa.k8ssandra.io/v1alpha1
+kind: MedusaRestoreJob
+metadata:
+  name: restore-backup1
+  namespace: database
+spec:
+  cassandraDatacenter: dc1
+  backup: medusa-backup1
+```
+
+and apply it
+
+```d kubectl apply -f restore-k8ssandra.yaml```
diff --git a/offline/ldap-scim-bridge.md b/offline/ldap-scim-bridge.md
new file mode 100644
index 000000000..bb8a1ca07
--- /dev/null
+++ b/offline/ldap-scim-bridge.md
@@ -0,0 +1,148 @@
+# How to deploy the ldap-scim-bridge
+
+Create a directory for the values file with `mkdir values/ldap-scim-bridge_team-0`.
+The values file looks like the following:
+```
+# once every five minutes.
+schedule: "*/5 * * * *"
+# https://github.com/wireapp/ldap-scim-bridge
+config:
+  logLevel: "Debug"  # one of Trace,Debug,Info,Warn,Error,Fatal; Fatal is least noisy, Trace most.
+  ldapSource:
+    tls: true
+    host: "dc1.example.com"
+    port: 636
+    dn: "CN=Read Only User,CN=users,DC=example,DC=com"
+    password: "READONLYPASSWORD"
+    search:
+      base: "DC=example,DC=com"
+      objectClass: "person"
+      memberOf: "CN=VIP,OU=Engineering,DC=example,DC=com"
+    codec: "utf8"
+#    deleteOnAttribute:  # optional, related to delete-from-directory.
+#      key: "deleted"
+#      value: "true"
+#    deleteFromDirectory:  # optional; ok to use together with delete-on-attribute if you use both.
+#      base: "ou=DeletedPeople,DC=example,DC=com"
+#      objectClass: "account"
+  scimTarget:
+    tls: false
+    host: "spar"
+    port: 8080
+    path: "/scim/v2"
+    token: "Bearer "
+  mapping:
+    displayName: "displayName"
+    userName: "mailNickname"
+    externalId: "mail"
+    email: "mail"
+```
+
+
+## Get the Active Directory root authority's public certificate
+
+Ask the remote team to provide this.
+
+## Create a configmap for the Public Certificate
+
+See if there's a configmap already in place.
+```
+d kubectl get configmaps
+```
+
+If not, create a configmap for this certificate.
+```
+d kubectl create configmap ca-ad-pemstore --from-file=ad-public-root.crt
+```
+
+## Create a kubernetes patch
+
+Create a patch, which forces the `ldap-scim-bridge` to use the AD public certificate.
+
+```
+cat >> add_ad_ca.patch
+```
+
+Place the following contents in the file:
+
+```
+spec:
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          containers:
+          - name: ldap-scim-bridge
+            volumeMounts:
+            - name: ca-ad-pemstore
+              mountPath: /etc/ssl/certs/ad-public-root.crt
+              subPath: ad-public-root.crt
+              readOnly: true
+          volumes:
+          - name: ca-ad-pemstore
+            configMap:
+              name: ca-ad-pemstore
+```
+
+The cronjob may have run between the time you installed it and the time you patched it.
+In these cases, you will get an `Error_Protocol ("certificate has unknown CA",True,UnknownCA)` error in the kubectl logs.
+
+## Copy the values
+
+Since the `ldap-scim-bridge` needs to be configured at least once per team, we must copy the values.
+```
+cp -r values/ldap-scim-bridge/ values/ldap-scim-bridge_team-
+```
+Edit the values.
+
+Set the schedule to `"*/10 * * * *"` for every 10 minutes.
+
+### Set the ldap source.
+
+For Active Directory:
+
+```
+ldapSource:
+  tls: true
+  host: "dc1.example.com"
+  port: 636
+  dn: "CN=Wire RO,CN=users,DC=com,DC=example"
+  password: "SECRETPASSWORDHERE"
+```
+
+### Pick your users
+
+Select the user group you want to sync. For example, to find all of the people in the engineering department of the example.com AD domain:
+
+```
+search:
+  base: "DC=com,DC=example"
+  objectClass: "person"
+  memberOf: "CN=WireTeam1,OU=engineering,DC=com,DC=example"
+```
+
+### Pick the user mapping
+
+An example mapping for Active Directory is:
+```
+displayName: "displayName"
+userName: "mailNickname"
+externalId: "mail"
+email: "mail"
+```
+
+### Authorize the sync engine
+
+Add a `Bearer ` token as the `scimTarget`'s `token` attribute.
+
+
+### Deploy the sync engine
+```
+d helm install ldap-scim-bridge-team-1 charts/ldap-scim-bridge/ --values values/ldap-scim-bridge_team-1/values.yaml
+```
+
+### Patch the sync engine.
+```
+d kubectl patch cronjob ldap-scim-bridge-team-1 -p "$(cat add_ad_ca.patch)"
+```
diff --git a/offline/local_persistent_storage_k8s.md b/offline/local_persistent_storage_k8s.md
new file mode 100644
index 000000000..b48a7f435
--- /dev/null
+++ b/offline/local_persistent_storage_k8s.md
@@ -0,0 +1,23 @@
+## Dynamic Persistent Volume Provisioning
+If you already have a dynamic persistent volume provisioning setup, you can skip this step. If not, we can use OpenEBS for dynamic persistent volume provisioning.
+
+Reference docs - https://openebs.io/docs/user-guides/local-storage-user-guide/local-pv-hostpath/hostpath-installation
+
+### Deploy OpenEBS
+
+```
+d helm install openebs charts/openebs --namespace openebs --create-namespace
+```
+The above helm chart is available in the offline artifact.
+
+After successful deployment of OpenEBS, you will see these storage classes:
+```
+d kubectl get sc
+NAME               PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+openebs-device     openebs.io/local   Delete          WaitForFirstConsumer   false                  5d20h
+openebs-hostpath   openebs.io/local   Delete          WaitForFirstConsumer   false                  5d20h
+```
+
+### Backup and Restore
+
+For backup and restore of the OpenEBS Local Storage, refer to the official docs at - https://openebs.io/docs/user-guides/local-storage-user-guide/additional-information/backupandrestore
diff --git a/offline/rabbitmq_setup.md b/offline/rabbitmq_setup.md
new file mode 100644
index 000000000..eee3f4cd8
--- /dev/null
+++ b/offline/rabbitmq_setup.md
@@ -0,0 +1,149 @@
+## RabbitMQ
+
+There are two methods to deploy the RabbitMQ cluster:
+
+### Method 1: Install RabbitMQ inside the kubernetes cluster with the help of a helm chart
+
+To install the RabbitMQ service, first copy the values and secrets files:
+```
+cp ./values/rabbitmq/prod-values.example.yaml ./values/rabbitmq/values.yaml
+cp ./values/rabbitmq/prod-secrets.example.yaml ./values/rabbitmq/secrets.yaml
+```
+By default this will create a RabbitMQ deployment with ephemeral storage. To use the local persistent storage of the Kubernetes nodes, please refer to the related documentation in [offline/local_persistent_storage_k8s.md](./local_persistent_storage_k8s.md).
+
+Now, update `./values/rabbitmq/values.yaml` and `./values/rabbitmq/secrets.yaml` with the correct values as needed.
+
+Deploy the `rabbitmq` helm chart:
+```
+d helm upgrade --install rabbitmq ./charts/rabbitmq --values ./values/rabbitmq/values.yaml --values ./values/rabbitmq/secrets.yaml
+```
+
+### Method 2: Install RabbitMQ outside of the Kubernetes cluster with an Ansible playbook
+
+Add the nodes on which you want to run rabbitmq to the `[rmq-cluster]` group in the `ansible/inventory/offline/hosts.ini` file. Also, update the `ansible/roles/rabbitmq-cluster/defaults/main.yml` file with the correct configurations for your environment.
+
+If you need RabbitMQ to listen on a different interface than the default gateway, set `rabbitmq_network_interface`.
+
+You should have the following entries in the `ansible/inventory/offline/hosts.ini` file. For example:
+```
+[rmq-cluster:vars]
+rabbitmq_network_interface = enp1s0
+
+[rmq-cluster]
+ansnode1
+ansnode2
+ansnode3
+```
+
+#### Hostname Resolution
+RabbitMQ nodes address each other using a node name, a combination of a prefix and a domain name, either short or fully-qualified (FQDN), e.g. `rabbitmq@ansnode1`.
+
+Therefore every cluster member must be able to resolve the hostnames of every other cluster member, its own hostname, as well as those of machines on which command line tools such as rabbitmqctl might be used.
+
+Nodes will perform hostname resolution early on node boot. In container-based environments it is important that hostname resolution is ready before the container is started.
+
+Hostname resolution can use any of the standard OS-provided methods, e.g.:
+
+* DNS records
+* Local host files (e.g.
/etc/hosts) +Reference - https://www.rabbitmq.com/clustering.html#cluster-formation-requirements + + +For adding entries to local host file(`/etc/hosts`), run +``` +d ansible-playbook -i ansible/inventory/offline/hosts.ini ansible/roles/rabbitmq-cluster/tasks/configure_dns.yml +``` + +Create the rabbitmq cluster: + +``` +d ansible-playbook -i ansible/inventory/offline/hosts.ini ansible/rabbitmq.yml +``` + +and run the following playbook to create values file for helm charts to look for RabbitMQ IP addresses - + +``` +d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/helm_external.yml --tags=rabbitmq-external +``` + +Make Kubernetes aware of where RabbitMQ external stateful service is running: +``` +d helm install rabbitmq-external ./charts/rabbitmq-external --values ./values/rabbitmq-external/values.yaml +``` + +Configure wire-server to use the external RabbitMQ service: + +Edit the `/values/wire-server/prod-values.example.yaml` file to update the RabbitMQ host +Under `brig` and `galley` section, you will find the `rabbitmq` config, update the host to `rabbitmq-external`, it should look like this: +``` +rabbitmq: + host: rabbitmq-external +``` + +## Backup and Restore + +The following steps describe the backup and restore process for RabbitMQ deployed outside of Kubernetes. + +This can vary based on your security, privacy, and administrative policies. It is also recommended to read and follow the official documentation here - https://www.rabbitmq.com/docs/backup + +## Backup +Make sure to have the nodes on which RabbitMQ is running in the [ansible inventory file](https://github.com/wireapp/wire-server-deploy/blob/master/offline/docs_ubuntu_22.04.md#editing-the-inventory), under the `rmq-cluster` group. +Then run the following command to load your wire utility environment: +``` +source bin/offline-env.sh +``` + +Replace `/path/to/backup` in the command below with the backup target path on the rabbitmq nodes. + +``` +d ansible-playbook -i ansible/inventory/offline/hosts.ini ansible/backup_rabbitmq.yml --extra-vars "backup_dir=/path/to/backup" +``` + +This ansible playbook will create `definitions.json` (Definitions) and `rabbitmq-backup.tgz` (Messages) files on all RabbitMQ nodes at `/path/to/backup`. + +Now, save these files on your host machine with scp command - +``` +mkdir rabbitmq_backups +cd rabbitmq_backups +``` +Fetch the backup files for each node one by one, +``` +scp -r :/path/to/backup/ / +``` + + +## Restore +You should have the definition and data backup files on your host machine for each node, in the specific `node_name` directory. +To restore the RabbitMQ backup, +Copy both files to the specific nodes at `/path/to/restore/from` for each node - +``` +scp -r / :/path/to/restore/from +``` + +### Restore Definitions +ssh into each node and run the following command from the path `/path/to/restore/from` - +``` +rabbitmqadmin import definitions.json +``` + +### Restore Data +To restore the data, we need to stop the rabbitmq service on each node first - +On each nodes, stop the service with - +``` +ssh +sudo systemctl stop rabbitmq-server +``` + +Once the service is stopped, restore the data - + +``` +sudo tar xvf rabbitmq-backup.tgz -C / +sudo chown -R rabbitmq:rabbitmq /var/lib/rabbitmq/mnesia # To ensure the correct permissions +``` + +At the end, restart the RabbitMQ server on each node - +``` +sudo systemctl start rabbitmq-server +``` + +At the end, please make sure that the RabbitMQ is running fine on all the nodes. 
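+
+For example, as a minimal sanity check (assuming the three-node setup described above), you can ask any node for the cluster status and confirm that all members are listed as running:
+
+```bash
+ssh <ansnode>
+
+# All cluster members should appear in the "Running Nodes" section.
+sudo rabbitmqctl cluster_status
+
+# Should list queues without connection errors.
+sudo rabbitmqctl list_queues
+```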
diff --git a/offline/single_hetzner_machine_installation.md b/offline/single_hetzner_machine_installation.md new file mode 100644 index 000000000..55e912b7c --- /dev/null +++ b/offline/single_hetzner_machine_installation.md @@ -0,0 +1,120 @@ +# Scope + +This document gives exact instructions for performing an offline demo installation of Wire on a single dedicated Hetzner server. It uses the KVM based virtual machine system to create all of the required virtual machines. + +Bootstrapping a single dedicated Hetzner server for virtual machine deployment, the wire-server-deploy artifact download as well as the wire-server k8s installation have been fully automated. + +## Use the hetzner robot console to create a new server. + +Select Ubuntu 22.04.2 on an ax101 dedicated server. Make sure you provide a public key in the Hetzner console which can be used for ansible deployment. + +If not using Hetzner, for reference, the specs of the ax101 server are: + +- AMD Ryzen™ 9 5950X +- 128 GB DDR4 ECC RAM +- 2 x 3.84 TB NVMe SSD Datacenter Edition (software RAID 1) +- 1 GBit/s port + +The main public IPv4 address of the Hetzner server to connect to with SSH / ansible can be found in the "Server" tab in the Hetzner Robot console, next to the Server Name. +As soon as the initial Hetzner server deployment is finished, we'll use Ansible to further provision the system. + +## Automated full install + +If you wish to set up "Wire in a box" for demo or testing purposes, use the script [autodeploy.sh](../bin/autodeploy.sh). It supports several config flags, which can be reviewed by calling the script using a helper flag: + +```bash +autodeploy.sh -h +``` + +Running the script against a valid dedicated (Hetzner) server will install a fully functioning "Wire in a box" demo environment, based on the instructions provided in [docs_ubuntu_22.04.md](docs_ubuntu_22.04.md) and [coturn.md](coturn.md). + +This process takes approximately 90 minutes. If this script suits your needs and the installation is a success, there's no need to follow the individualized instructions below. + + +## Adjust ansible playbook vars as needed + +Take a look at the "vars:" section in wire-server-deploy/ansible/hetzner-single-deploy.yml and adjust vars as needed. Example: +``` + vars: + artifact_hash: 452c8d41b519a3b41f22d93110cfbcf269697953 + ubuntu_version: 22.04.3 + ssh_pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDPTGTo1lTqd3Ym/75MRyQvj8xZINO/GI6FzfIadSe5c backend+hetzner-dedicated-operator@wire.com" +``` + +The variable 'artifact_hash' above is the hash of your deployment artifact, given to you by Wire, or acquired by looking at the build job. + +## Run ansible playbook for server bootstrapping + +Navigate to the ansible folder in wire-server-deploy and execute the playbook using valid vars as described above. +``` +~ ❯ cd wire-server-deploy/ansible +~ ❯ ansible-playbook hetzner-single-deploy.yml -i root@$HETZNER_IP, --diff +``` +Please note and include the trailing comma when invoking the playbook. Playbook execution might take a few minutes, especially when downloading and unpacking a new artifact. + +The playbook will install baseline defaults (packages, firewall, SSH config, SSH key(s), user(s)), download & extract wire-server-deploy and download the specified ubuntu ISO. +The playbook is written to be idempotent; eg. files won't be redownloaded as long as they already exist on the target host. 
Deploying a new version of "wire-server-deploy" is as easy as removing the folder from the target host and updating the "artifact_hash" variable in the playbook. + +At this point it's recommended to reboot the server once. + +## Create VMs + +SSH into the target host as demo@$HETZNER_IP and execute wire-server-deploy/bin/offline-vm-setup.sh +``` +demo@Ubuntu-2204-jammy-amd64-base:~$ cd wire-server-deploy/ +demo@Ubuntu-2204-jammy-amd64-base:~/wire-server-deploy$ bin/offline-vm-setup.sh +``` +Without arguments, the script will deploy seven VMs behind the default libvirt network (virbr0, 192.168.122.0/24). + + * assethost - IP: 192.168.122.10 + * kubenode1 - IP: 192.168.122.21 + * kubenode2 - IP: 192.168.122.22 + * kubenode3 - IP: 192.168.122.23 + * ansnode1 - IP: 192.168.122.31 + * ansnode2 - IP: 192.168.122.32 + * ansnode3 - IP: 192.168.122.33 + +This will take up to 15 min (longer if the server still builds its MD RAID in the background). Once all VMs are deployed, they should be shut off. Status can be checked with: +``` +demo@Ubuntu-2204-jammy-amd64-base:~$ sudo virsh list --all +``` + +Hint: If your local machine is running Linux, use "virt-manager" to connect to the Hetzner server and make VM administration more comfortable. + +Start all VMs: + +``` +demo@Ubuntu-2204-jammy-amd64-base:~$ sudo bash -c " +set -e; +virsh start assethost; +virsh start kubenode1; +virsh start kubenode2; +virsh start kubenode3; +virsh start ansnode1; +virsh start ansnode2; +virsh start ansnode3; +" +``` + +## Access VMs + +VMs created with offline-vm-setup.sh are accessible via SSH with two public keys. + * Existing key from ~/.ssh/authorized_keys (externally via ProxyJump) + * Local keypair key from ~/.ssh/id_ed25519 (Keypair on dedicated server) + +To use your own key, use SSH with ProxyJump, as it's the more secure alternative compared to Key Forwarding ("ssh -A"): +``` +~ ❯ ssh demo@192.168.122.XXX -J demo@$HETZNER_IP +``` + +Or just use the local keypair, created by offline-vm-setup.sh inside the dedicated server: +``` +demo@Ubuntu-2204-jammy-amd64-base:~$ ssh assethost +``` + +Hint: resolving VM hostnames from inside the dedicated server should work, since the script is appending entries to /etc/hosts during VM creation. +But this does not work for resolving hostnames between VMs at this point. We'll be using IP addresses only going forward. + +### From this point: + +Switch to [the Ubuntu 22.04 Wire install docs](docs_ubuntu_22.04.md) diff --git a/offline/stackIT-wiab.md b/offline/stackIT-wiab.md new file mode 100644 index 000000000..9a0105b0e --- /dev/null +++ b/offline/stackIT-wiab.md @@ -0,0 +1,117 @@ +# StackIT Deployment and Configuration Guide + +This guide outlines the steps to set up and deploy Wire in a StackIT environment, including DNS configuration, Minikube cluster creation, Docker container setup, and Helm chart deployment. Each task and its associated commands are provided for clarity and customization. 
---

## Prerequisites

- ansible
- ssh, and an ssh key for the ansible user on the StackIT VM
- a StackIT VM with the following requirements:
  - CPU cores >= 32
  - Memory > 64 GiB
  - Disk > 500 GiB (storage_premium_perf12 recommended)
  - OS: Ubuntu 24.04
  - Security group with the following rules:

| Protocol | Direction | Start Port | End Port | Ether Type | IP Range | Reason |
|----------|-----------|------------|----------|------------|------------|---------------------------------------------|
| Any | egress | Any | Any | IPv4 | Any | Allow all outgoing IPv4 traffic |
| Any | egress | Any | Any | IPv6 | Any | Allow all outgoing IPv6 traffic |
| tcp | ingress | 22 | 22 | IPv4 | 0.0.0.0/0 | Allow SSH access |
| tcp | ingress | 443 | 443 | IPv4 | 0.0.0.0/0 | Allow HTTPS traffic |
| tcp | ingress | 80 | 80 | IPv4 | 0.0.0.0/0 | Allow HTTP traffic |
| tcp | ingress | 3478 | 3478 | IPv4 | 0.0.0.0/0 | Allow alternative STUN/TURN traffic over TCP|
| udp | ingress | 3478 | 3478 | IPv4 | Any | Allow STUN/TURN traffic for Coturn |
| udp | ingress | 32768 | 61000 | IPv4 | 0.0.0.0/0 | Allow calling traffic for Coturn over UDP |

## Steps to Deploy WIAB from the local environment (or on the StackIT node)

### 1. Clone the repository
 - `git clone https://github.com/wireapp/wire-server-deploy.git`
 - `cd wire-server-deploy`

### 2. Prepare the variables for Wire deployment
- Prepare the DNS records and the StackIT public IP, and set up a cert manager (for example, letsencrypt) so that it is ready before the next step, as described [here](https://docs.wire.com/how-to/install/helm.html#how-to-set-up-dns-records).
  - Check the file `stackIT/host.ini` for host details; replace example.com with the host machine.
  - Check the file `stackIT/stackit-vm-setup.yml` to define target_domain; replace example.com with the desired base domain of your Wire deployment. The ansible tasks will take care of the other replacement operations.
  - Check the file `stackIT/setting-values.sh` for the DNS records, i.e. TARGET_SYSTEM and CERT_MASTER_EMAIL; replace example.com with the wire host domain. The bash script will take care of the other replacement operations in the helm chart values.
  - We use letsencrypt as the example certificate manager.
  - If you intend to use something other than letsencrypt, please follow the documentation [Acquiring / Deploying SSL Certificates](https://github.com/wireapp/wire-server-deploy/blob/master/offline/docs_ubuntu_22.04.md#acquiring--deploying-ssl-certificates) **after running all the steps in** [3. Commands to Run on the StackIT Node in directory wire-server-deploy](https://github.com/wireapp/wire-server-deploy/blob/master/offline/stackIT-wiab.md#3-commands-to-run-on-the-stackit-node-in-directory-wire-server-deploy), to deploy your own certificates.

### 3. Run the ansible playbook
- **Note**: The deployment of the Wire application uses two layers of Ansible playbooks. The first layer (used in this step) provisions the containers on the StackIT node, downloads the artifact, and configures the iptables rules. The second layer ([bin/offline-cluster.sh](https://github.com/wireapp/wire-server-deploy/blob/master/bin/offline-cluster.sh), used in step 3.2) configures the datastore services on the containers created by the first layer.

- Use the following command to set up the VM:
  ```bash
  ansible-playbook -i stackIT/host.ini stackIT/stackit-vm-setup.yml
  ```

- **Optional Skips:**
  The ansible playbook is segregated into multiple blocks.
The following variables can be used to control the flow of tasks in ansible-playbook, if required: + ```bash + -e skip_install=true + -e skip_ssh=true + -e skip_minikube=true + -e skip_docker=true + -e skip_inventory=true + -e skip_download=true + -e skip_iptables=true + -e skip_disable_kubespray=true + ``` + +- **The above command will accomplish the following tasks:** + - Deploy a Minikube Kubernetes cluster using docker containers as base, and 4 Docker containers to support assethost and datastore requirements. The functionality of different nodes is explained [here](https://docs.wire.com/how-to/install/planning.html#production-installation-persistent-data-high-availability). + - Generate `hosts.ini` based on the IPs of above containers for further ansible operations on containers. Read more [here](https://github.com/wireapp/wire-server-deploy/blob/master/offline/docs_ubuntu_22.04.md#example-hostsini). + - Download wire-server-deploy artifacts in the user's home directory. Read more [here](https://github.com/wireapp/wire-server-deploy/blob/master/offline/docs_ubuntu_22.04.md#artifacts-provided-in-the-deployment-tarball) + - Configure iptables rules to handle the traffic for k8s Nginx Controller and handle DNAT for Coturn (used by Wire applications). Read more [here](https://github.com/wireapp/wire-server-deploy/blob/master/offline/docs_ubuntu_22.04.md#directing-traffic-to-wire). + +--- + +### 3. Commands to Run on the StackIT Node in directory `wire-server-deploy` +#### Note: These commands can be collected to run inside a single script, here we have broken down the deployment into small collective steps. These commands can work only from the stackIT node and in the directory wire-server-deploy. + +- **Load the environment:** + ```bash + source stackIT/offline-env.sh + ``` + It will load WSD_CONTAINER container on your StackIT host and it has all the tools required to further deploy the services using ansible and helm charts on nodes. `d` is an alias to run the container with all the required tools. + + 1. **Generate secrets:** + ```bash + d bash -x bin/offline-secrets.sh + ``` + + 2. **Set up and configure the environment:** + Run the following to set up the AssetHost, loading containers for k8s cluster, sync time, cassandra, elasticsearch and minio: + ```bash + d bash -x bin/offline-cluster.sh + ``` + + 3. **Deploy Helm charts:** + Use the following script to set up Helm chart values and deploy them: + ```bash + d bash -x stackIT/setting-values.sh + ``` + +--- + +### File Structure Highlights + +- **Ansible Playbook Files:** + - `stackIT/stackit-vm-setup.yml` + +- **Environment Scripts:** + - `stackIT/offline-env.sh` + - `../bin/offline-secrets.sh` + +- **Cluster and Helm Setup:** + - `../bin/offline-cluster.sh` + - `stackIT/setting-values.sh` + +--- + +## Notes +- Read all the files involved before executing them to understand defaults. + diff --git a/offline/ubuntu_18_to_ubuntu_22_migration.md b/offline/ubuntu_18_to_ubuntu_22_migration.md new file mode 100644 index 000000000..a08b2f4c4 --- /dev/null +++ b/offline/ubuntu_18_to_ubuntu_22_migration.md @@ -0,0 +1,210 @@ +# Migrating your Ubuntu 18 based deployment to Ubuntu 22. + +NOTE: The following migration process was done as a clean install of wire-server on new Ubuntu 22.04 VMs, then restoring a backed up snapshot of Cassandra and MiniO. + +**IMPORTANT**: You should notify your users of the planned migration and stop all wire-server services before making any backup snapshots and keep it down until upgrade is complete! 
For extra security, have your users back up their conversation history!

On your current Ubuntu 18 based deployment you might be using the following versions of the required tools:

- Kubernetes: 1.19.7
- Ansible: 2.9.6

While on the new Ubuntu 22 based deployment you will be using the following versions:

- Kubernetes: 1.23.7
- Ansible: 2.11.6

### We will be deploying the new environment in parallel to the old one and then migrate the data from the old one to the new one. At the end, we will remove the old environment.
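Before starting, you can confirm the versions actually in use from the respective admin host. A minimal sketch, assuming the `d` alias from `offline-env.sh` is loaded:
```
d kubectl version --short   # server version: v1.19.x on the old cluster, v1.23.x on the new one
d ansible --version         # 2.9.x with the old artifact, 2.11.x with the new one
```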
## On your current Ubuntu 18 based deployment

### Uninstall wire-server deployment

`d helm uninstall wire-server`

### Backup your wire-server-deploy directory.

This is where all of your configurations and secrets live; they will be needed for a successful upgrade.

```
tar -cvf wire-server-deploy-old.tar wire-server-deploy
```

(Here we assume your old workspace directory is named `wire-server-deploy`; adjust the path if yours differs.)

### Backup your cassandra data directory from each node.

Stop Cassandra on all nodes using the command:

```
sudo service cassandra stop
```

Verify that it has stopped with:

```
sudo service cassandra status
```

For each node, create a backup of the `/mnt/cassandra` folder using the tar command. For example, run:

```
tar -cvf ~/mnt-cassandra-1.tar /mnt/cassandra
```

Verify the tar files by listing them with:

```
ls -lh ~/mnt*.tar
```

Repeat the above steps for each node, replacing the number in the file name with the respective node number.

Copy the tar files from the origin machine to your local machine using the scp command:

```
scp demo@origin-machine:~/mnt*.tar /tmp/.
```

Transfer the tar files from your local machine to the destination machine using:

```
scp /tmp/mnt*.tar demo@destination-machine:~/.
```

You can also move the tar files directly from the origin machine to the destination machine; see the sketch below.
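A minimal sketch of that direct transfer, assuming your workstation can reach both machines over SSH (`demo@origin-machine` and `demo@destination-machine` are placeholders):
```
# -3 routes the copy through the local host, so the two servers
# do not need SSH access to each other
scp -3 'demo@origin-machine:~/mnt-cassandra-*.tar' demo@destination-machine:'~/'
```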

### Backup your Minio data directory from each node.

On each node, create a backup of the `/var/lib/minio-server1` and `/var/lib/minio-server2` folders using the tar command. For example, run:

```
sudo tar -cvf minio-server1-node1.tar /var/lib/minio-server1
sudo tar -cvf minio-server2-node1.tar /var/lib/minio-server2
```

Repeat the steps for the other nodes, replacing the number in the file name with the respective node number.

At the end, you should have 6 tar files, 2 for each node.
Get them all into a single folder on the host machine, and combine them into a single tar file:

```
tar -cvf minio-backup.tar minio-server1-node1.tar minio-server2-node1.tar minio-server1-node2.tar minio-server2-node2.tar minio-server1-node3.tar minio-server2-node3.tar
```

Copy the minio-backup tar file to the destination machine with Ubuntu 22 using scp:

```
scp minio-backup.tar demo@destination-machine:~/.
```

## On your new Ubuntu 22 based host machine

As of now, you should have the following on your new Ubuntu 22 based host machine:

- Docker installed on your host machine (follow these instructions to install docker: https://github.com/wireapp/wire-server-deploy/blob/update_to_ubuntu_22/offline/docs_ubuntu_22.04.md#installing-docker)
- Tar file of the backed up wire-server-deploy-old directory.
- Tar file of the backed up cassandra data directory.
- Tar file of the backed up minio data directory.

Get the new offline artifact from Wire, which has all the required binaries and dependencies for the new versions of Ubuntu, Kubernetes and Ansible.

```
wget 
```

Untar the above artifact in a new directory.

```
mkdir wire-server-deploy
cd wire-server-deploy
tar -xvzf ../
cd .. # Go back to the parent directory
```

Now untar the wire-server-deploy-old.tar file in another directory.

```
mkdir wire-server-deploy-old
cd wire-server-deploy-old
tar -xvf ../wire-server-deploy-old.tar
cd .. # Go back to the parent directory
```

Copy the `values/wire-server/secrets.yaml` and `ansible/inventory/offline/group_vars/all/secrets.yaml` from the old wire-server-deploy to the new one.

```
cp wire-server-deploy-old/values/wire-server/secrets.yaml wire-server-deploy/values/wire-server/secrets.yaml
cp wire-server-deploy-old/ansible/inventory/offline/group_vars/all/secrets.yaml wire-server-deploy/ansible/inventory/offline/group_vars/all/secrets.yaml
```

**IMPORTANT**: Skip the configuration and secret-generation parts of the install docs, as we already have those from your previous deployment. Compare the "old" files with the new ones in case anything changed between your old deployment and this one.

Now, we will create a kubernetes cluster on the new machine.
First cd into the wire-server-deploy directory.

```
cd wire-server-deploy
```

Now, follow the instructions from here up to the step `Ensuring kubernetes is healthy`: https://github.com/wireapp/wire-server-deploy/blob/master/offline/docs_ubuntu_22.04.md#ensuring-kubernetes-is-healthy

At this point, you should have a Kubernetes v1.23.7 cluster up and running on your new machine.
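Before restoring any data, it is worth confirming that the new cluster is healthy. A quick sketch, using the `d` alias from the new artifact:
```
d kubectl get nodes -o wide   # all nodes should be Ready, version v1.23.7
d kubectl get pods -A         # kube-system pods should be Running
```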
We need to restore the minio data files on the new machine before proceeding further.
You should have the `minio-backup.tar` file on your new machine. Untar it in a new directory.

```
mkdir minio-backup
cd minio-backup
tar -xvf ../minio-backup.tar
cd .. # Go back to the parent directory
```

Now, we will restore the minio data files on the specific nodes.
Move the respective backup files to the respective nodes using scp.

```
scp minio-backup/minio-server1-node1.tar demo@node1:
scp minio-backup/minio-server2-node1.tar demo@node1:
```

Repeat the above steps for the other nodes, replacing the number in the file name with the respective node number.

Now ssh into each node and restore the minio data files.

```
ssh demo@node1
cd /
sudo tar -xvf /home/demo/minio-server1-node1.tar
sudo tar -xvf /home/demo/minio-server2-node1.tar
```

Repeat the above steps for the other nodes, replacing the number in the file name with the respective node number.

Now run the minio playbook.

**IMPORTANT**: Do not proceed with the wire-server installation until you have restored the backed up minio-server files!

Now, continue with the next steps of the wire installation from here and install the rest of the ansible playbooks (including cassandra!).

After running the cassandra playbook, start the restore process:

- Copy each tar file to the respective node using scp. For example, for node1:
  `scp mnt-cassandra-1.tar node1:~/.`
- SSH into each node (node1 in this example).
- Create a working folder on the node: `mkdir ~/mnt-cassandra-1`
- Navigate to the working folder: `cd ~/mnt-cassandra-1`
- Extract the tar file: `tar -xvf ../mnt-cassandra-1.tar`
- Copy the extracted files to the destination: `sudo cp -rvf ~/mnt-cassandra-1/mnt/cassandra /mnt/.`
- Set the correct ownership for the files: `sudo chown -R cassandra:cassandra /mnt/cassandra/.`
- Start Cassandra on each node: `sudo service cassandra start`
- Verify with: `sudo service cassandra status`
- Check the status of the nodes using: `sudo nodetool status`
- Connect to the Cassandra instance using cqlsh: `sudo docker run -it --rm cassandra:3.11 cqlsh internal.ip.addr 9042`
- Switch to the desired keyspace using: `use brig;`
- Retrieve the user data with: `select * from user;`
- The output should display the user data from the origin machine.

Continue with the rest of the wire-server deployment as usual until done.

Now, you can try to log in or sign up on the new wire-server deployment on your new Ubuntu 22 based host machine.
You should be able to see the old chat history and download the old attachments.

diff --git a/offline/upgrading-SFT_ONLY.md b/offline/upgrading-SFT_ONLY.md
new file mode 100644
index 000000000..63c156039
--- /dev/null
+++ b/offline/upgrading-SFT_ONLY.md
@@ -0,0 +1,39 @@

# How to upgrade wire (SFT only)

You should have received a deployment artifact from the Wire team in order to upgrade your SFT calling service.

Your deployment artifact contains three things: the new chart, the new values file, and an image for sftd.

## Uploading the Image into Kubernetes hosts

The image needs to be imported with `docker load` on each of the kubernetes hosts.

Copy the sft image to the kubernetes hosts, and `docker load` it on each of them.

To load it into docker as root, you can run `cat quay.io_wire_sftd_2.1.19.tar | docker load`. If you are using a non-privileged user and sudo (wire's recommendation), you can use:

```
sudo bash -c "cat quay.io_wire_sftd_2.1.19.tar | docker load"
```

## Replacing your SFT Chart

Move your sft/ chart out of the charts folder in the workspace where you're working with wire, and replace it with the one in the deliverable. Keep the old chart, in case you need to step back.

## Examining your Values

Examine the values file we've provided, comparing it to the one you used when last deploying SFT. Make sure there are no changes to make with the new chart. If there are changes to make, make a backup copy before you make them!

## Deploying:

Use `helm upgrade --install` in the same fashion as the installation process guided you through.

## Verifying your deployment was successful:

In the web client, place a call, then go to 'gear icon' -> Audio / Video -> 'Save the calling debug report'.
When you read that file, search for the line that starts with "a=tool:sftd"; it has your SFT server version on it.

# How to step back, if this has made things worse:

Just move your old SFT chart and values file back into place, then use helm uninstall followed by helm install.

diff --git a/offline/upgrading.md b/offline/upgrading.md
new file mode 100644
index 000000000..be403e734
--- /dev/null
+++ b/offline/upgrading.md
@@ -0,0 +1,423 @@

# How to upgrade wire (services only)

We have a pipeline in `wire-server-deploy` producing container images, static binaries, ansible playbooks, debian package sources and everything required to install Wire.

Create a fresh workspace to download the new artifacts:

```
$ mkdir ... # you pick a good location!
$ cd ...
```

Obtain the latest airgap artifact for wire-server-deploy. Please contact us to get it for now. We are working on publishing a list of airgap artifacts.

## Clean up enough disk space to operate:

### AdminHost
Prune old containers that are generated during our 'd' invocations:
```
df -h
sudo docker container prune
```

Prune old security update deployment archives:
```
sudo apt clean
```

### Kubernetes hosts:

#### Wire Cluster
Remove wire-server images from two releases ago, or images from the current release that we know are unused. For instance:

```
sudo docker image ls
# look at the output of the last command, to find 
VERSION="2.106.0"
sudo docker image ls | grep -E "^quay.io/wire/([bcg]|spar|nginz)" | grep $VERSION | sed "s/.*[ ]*\([0-9a-f]\{12\}\).*/sudo docker image rm \1/"
```

If you are not running SFT in your main cluster (for example, you do not use SFT, or have SFT in a separate DMZ'd cluster),
then remove SFT images from the Wire Kubernetes cluster. +``` +sudo docker image ls | grep -E "^quay.io/wire/sftd" | sed "s/.*[ ]*\([0-9a-f]\{12\}\).*/sudo docker image rm \1/" +``` + +#### SFT Cluster +If you are running a DMZ deployment, prune the old wire-server images and their dependencies on the SFT kubernetes hosts... +``` +sudo docker image ls | grep -E "^quay.io/wire/(team-settings|account|webapp|ixdotai-smtp)" | sed "s/.*[ ]*\([0-9a-f]\{12\}\).*/sudo docker image rm \1/" +sudo docker image ls | grep -E "^(bitnami/redis|airdock/fake-sqs|localstack/localstack)" | sed "s/.*[ ]*\([0-9a-f]\{12\}\).*/sudo docker image rm \1/" +``` + +## Preparing for deployment +Verify you have the container images and configuration for the version of wire you are currently running. + +Extract the latest airgap artifact into a NEW workspace: + +``` +$ wget https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-.tgz +$ mkdir New-Wire-Server +$ cd New-Wire-Server +$ tar xvzf wire-server-deploy-static-.tgz +``` +Where the HASH above is the hash of your deployment artifact, given to you by Wire, or acquired by looking at the above build job. +Extract this tarball. + +There's also a docker image containing the tooling inside this repo. + +Source the following shell script. +``` +source ./bin/offline-env.sh +``` + +The shell script will set up a `d` alias. Which runs commands passed to it inside the docker container +with all the tools needed for doing an offline deploy. + +E.g.: + +``` +$ d ansible --version +ansible 2.9.12 + config file = /home/arian/.ansible.cfg + configured module search path = ['/home/arian/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible python module location = /nix/store/gfrhkj3j53znj0vyvkqkbn56n2mh708k-python3.8-ansible-2.9.12/lib/python3.8/site-packages/ansible + executable location = /nix/store/gfrhkj3j53znj0vyvkqkbn56n2mh708k-python3.8-ansible-2.9.12/bin/ansible + python version = 3.8.7 (default, Dec 21 2020, 17:18:55) [GCC 10.2.0] + +``` + +## Artifacts provided in the deployment tarball. + +The following is a list of important artifacts which are provided: + + - `containers-adminhost/wire-server-deploy-*.tar` + A container image containing ansible, helm, and other tools and their + dependencies in versions verified to be compatible with the current wire + stack. Published to `quay.io/wire/wire-server-deploy` as well, but shipped + in the artifacts tarball for convenience. + - `ansible` + These contain all the ansible playbooks the rest of the guide refers to, as + well as an example inventory, which should be configured according to the + environment this is installed into. + - `binaries.tar` + This contains static binaries, both used during the kubespray-based + kubernetes bootstrapping, as well as to provide some binaries that are + installed during other ansible playbook runs. + - `charts` + The charts themselves, as tarballs. We don't use an external helm + repository, every helm chart dependency is resolved already. + - `containers-system.tar` + These are the container images needed to bootstrap kubernetes itself + (currently using kubespray) + - `containers-helm.tar` + These are the container images our charts (and charts we depend on) refer to. + Also come as tarballs, and are seeded like the system containers. 
+ - `debs-*.tar` + This acts as a self-contained dump of all packages required to install + kubespray, as well as all other packages that are installed by ansible + playbooks on nodes that don't run kubernetes. + There's an ansible playbook copying these assets to an "assethost", starting + a little webserver there serving it, and configuring all nodes to use it as + a package repo. + - `values` + Contains helm chart values and secrets. Needs to be tweaked to the + environment. + +## Comparing the inventory + +Diff outputs differences between the two files. lines that start with `@@` specify a position. lines with `-` are from the old file, lines with `+` are from the new inventory, and lines starting with ` ` are the same in both files. We are going to use diff to compare files from your old install with your new install. + +Copy `ansible/inventory/offline/99-static` to `ansible/inventory/offline/hosts.ini`. + +Compare the inventory from your old install to the inventory of your new install. +``` +diff -u ..//ansible/inventory/offline/99-static ansible/inventory/offline/hosts.ini +``` + +Your old install may use a `hosts.ini` instead of `99-static`. +check to see if a hosts.ini is present: +``` +ls ..//ansible/inventory/offline/hosts.ini +``` + +If you get "cannot access ..... No such file or directory", compare the 99-static from the old install. +``` +diff -u ..//ansible/inventory/offline/99-static ansible/inventory/offline/hosts.ini +``` + +otherwise, compare hosts.ini from both installation directories. +``` +diff -u ..//ansible/inventory/offline/hosts.ini ansible/inventory/offline/hosts.ini +``` + +Using a text editor, make sure your new hosts.ini has all of the work you did on the first installation. + +There are instructions in the comments on how to set everything up. You can also refer to extra information at https://docs.wire.com/how-to/install/ansible-VMs.html . + +### TURN +If you are using restund calling services, make sure your inventory sets: + +``` +# Explicitely specify the restund user id to be "root" to override the default of "997" +restund_uid = root +``` + +### Deeplink +If you are using the old deeplink process (deprecated!), set: +``` +[minio:vars] +minio_deeplink_prefix = domainname.com +minio_deeplink_domain = prefix- +``` + +### SFT +If you have SFT on the same cluster as your wire cluster, read the `Marking kubenode for calling server (SFT)` section below. + +# Migrate the kubeconfig + +Old versions of the package contained the kubeconfig at ansible/kubeconfig. newer ones create a directory at ansible/inventory/offline/artifacts, and place the kubeconfig there, as 'admin.conf' + +If your deployment package uses the old style, then in the place where you are keeping your new package: +``` +mkdir ansible/inventory/offline/artifacts +cp ..//ansible/kubeconfig ansible/inventory/offline/artifacts/admin.conf +``` + +Otherwise: +``` +mkdir ansible/inventory/offline/artifacts +sudo cp ..//ansible/inventory/offline/artifacts/admin.conf ansible/inventory/offline/artifacts/admin.conf +``` + +## Preparing to upgrade kubernetes services + +Log into the assethost, and verify the 'serve-assets' systemd component is running by looking at `sudo lsof -i -P -n | grep LISTEN`, and checking for `8080`. If it's not: +``` +sudo service serve-assets start +``` + +### WORKAROUND: old debian key +All of our debian archives up to version 4.12.0 used a now-outdated debian repository signature. Some modifications are required to be able to install everything properly. 
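To check whether a node is affected, you can run an apt update against the offline repo and look for signature errors (a sketch; the exact error text varies by apt version):
```
sudo apt update 2>&1 | grep -iE 'NO_PUBKEY|EXPIRED|invalid signature' \
  && echo "affected: apply the workaround below" \
  || echo "repository keys look OK"
```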
First, gather a copy of the 'setup-offline-sources.yml' file from https://raw.githubusercontent.com/wireapp/wire-server-deploy/kvm_support/ansible/setup-offline-sources.yml :
```
wget https://raw.githubusercontent.com/wireapp/wire-server-deploy/kvm_support/ansible/setup-offline-sources.yml
```
Back up the original and copy the new file into the ansible/ directory:
```
cp ansible/setup-offline-sources.yml ansible/setup-offline-sources.yml.backup
cp setup-offline-sources.yml ansible/
```

Open it with your preferred text editor and edit the following:
* find a big block of comments and uncomment everything in it (`- name: trust everything...`)
* after the block you will find `- name: Register offline repo key...`. Comment out that segment (do not comment out the part with `- name: Register offline repo`!)

If you are doing anything with kubernetes itself (unlikely!), disable checking for outdated signatures by editing the following file:
```
ansible/roles/external/kubespray/roles/container-engine/docker/tasks/main.yml
```
* comment out the block with `- name: ensure docker-ce repository public key is installed...`
* comment out the next block `- name: ensure docker-ce repository is enabled`

Now you are ready to start deploying services.

#### WORKAROUND: dependency
Some Ubuntu systems do not have GPG by default, while wire assumes it is already present. Ensure you have gpg installed on all of your nodes before continuing to the next step.

#### Populate the assethost, and prepare to install images from it.
Since docker is already installed on all nodes that need it, push the new container images to the assethost, and seed all container images:

```
d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/setup-offline-sources.yml --tags "debs,containers-helm"
d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/seed-offline-docker.yml
```

#### Ensuring kubernetes is healthy.

Ensure the cluster is healthy: use kubectl to check the node health:

```
d kubectl get nodes -owide
```
They should all report `Ready`.

## Upgrading wire-server using helm

### Upgrading non-wire components:

#### External Service Definitions:

Compare your external service definition files, and decide whether you need to change them or not.
```
diff -u ..//values/cassandra-external/values.yaml values/cassandra-external/prod-values.example.yaml
diff -u ..//values/elasticsearch-external/values.yaml values/elasticsearch-external/prod-values.example.yaml
diff -u ..//values/minio-external/values.yaml values/minio-external/prod-values.example.yaml
```

If there are only IP addresses in the diff output, copy these files into your new tree.
```
cp ..//values/cassandra-external/values.yaml values/cassandra-external/values.yaml
cp ..//values/elasticsearch-external/values.yaml values/elasticsearch-external/values.yaml
cp ..//values/minio-external/values.yaml values/minio-external/values.yaml
```

If not, examine the differences between the values files for the old service definitions and the new ones.

When you are satisfied with the results of the above, upgrade the external service definitions.
```
d helm upgrade cassandra-external ./charts/cassandra-external/ --values ./values/cassandra-external/values.yaml
d helm upgrade elasticsearch-external ./charts/elasticsearch-external/ --values ./values/elasticsearch-external/values.yaml
d helm upgrade minio-external ./charts/minio-external/ --values ./values/minio-external/values.yaml
```

#### Non-Wire Services

Compare your non-wire service definition files, and decide whether you need to change them or not.
```
diff -u ..//values/fake-aws/prod-values.example.yaml values/fake-aws/prod-values.example.yaml
diff -u ..//values/databases-ephemeral/values.yaml values/databases-ephemeral/prod-values.example.yaml
```

If there are no differences, copy these files into your new tree.
```
cp ..//values/fake-aws/prod-values.example.yaml values/fake-aws/values.yaml
cp ..//values/databases-ephemeral/values.yaml values/databases-ephemeral/values.yaml
```

Next, upgrade the internal non-wire services.
```
d helm upgrade fake-aws ./charts/fake-aws/ --values ./values/fake-aws/values.yaml
d helm upgrade databases-ephemeral ./charts/databases-ephemeral/ --values ./values/databases-ephemeral/values.yaml
d helm upgrade reaper ./charts/reaper/
```

#### Upgrading the demo SMTP service

Compare your demo-smtp configuration files, and decide whether you need to change them or not.
```
diff -u ..//values/demo-smtp/values.yaml values/demo-smtp/values.yaml
```

If there are no differences, copy these files into your new tree.
```
cp ..//values/demo-smtp/values.yaml values/demo-smtp/values.yaml
```

```
d helm upgrade demo-smtp ./charts/demo-smtp/ --values ./values/demo-smtp/values.yaml
```

#### Upgrading the NginX ingress

Compare your nginx-ingress-services configuration files, and decide whether you need to change them or not.
```
diff -u ..//values/nginx-ingress-services/values.yaml values/nginx-ingress-services/prod-values.example.yaml
```

If there are no differences, copy these files into your new tree.
```
cp ..//values/nginx-ingress-services/values.yaml values/nginx-ingress-services/values.yaml
```

#### Upgrading ingress-nginx-controller

Re-deploy your ingress to direct traffic into your cluster with the new version of nginx.
```
d helm upgrade ingress-nginx-controller ./charts/ingress-nginx-controller/
```

### Upgrading Wire itself

Inspect your `values.yaml` and `secrets.yaml` files with diff, comparing them to the new defaults.

Now upgrade `wire-server`:

```
d helm upgrade wire-server ./charts/wire-server/ --timeout=15m0s --values ./values/wire-server/values.yaml --values ./values/wire-server/secrets.yaml
```

#### Bring your own certificates

If you generated your own SSL certificates, there are two ways to give these to wire:

##### From the command line
If you have the certificate and its corresponding key available on the filesystem, copy them into the root of the Wire-Server directory, and run:

```
d helm install nginx-ingress-services ./charts/nginx-ingress-services --values ./values/nginx-ingress-services/values.yaml --set-file secrets.tlsWildcardCert=certificate.pem --set-file secrets.tlsWildcardKey=key.pem
```

Do not try to use paths to refer to the certificates, as the 'd' command messes with file paths outside of Wire-Server.

##### In your nginx config
This is the more error-prone process, due to having to edit yaml files.

Change the domains in `values.yaml` to your domain.
And add your wildcard or SAN certificate that is valid for all these +domains to the `secrets.yaml` file. + +Now install the service with helm: +``` +d helm install nginx-ingress-services ./charts/nginx-ingress-services --values ./values/nginx-ingress-services/values.yaml --values ./values/nginx-ingress-services/secrets.yaml +``` + +#### Use letsencrypt generated certificates + +UNDER CONSTRUCTION: +If your machine has internet access to letsencrypt's servers, you can configure cert-manager to generate certificates, and load them for you. +``` +d kubectl create namespace cert-manager-ns +d helm upgrade --install -n cert-manager-ns --set 'installCRDs=true' cert-manager charts/cert-manager +d helm upgrade --install nginx-ingress-services charts/nginx-ingress-services -f values/nginx-ingress-services/values.yaml +``` + +### Marking kubenode for calling server (SFT) + +The SFT Calling server should be running on a set of kubernetes nodes that have traffic directed to them from the public internet. +If not all kubernetes nodes match these criteria, you should specifically label the nodes that do match +these criteria, so that we're sure SFT is deployed correctly. + + +By using a `node_label` we can make sure SFT is only deployed on a certain node like `kubenode4` + +``` +kubenode4 node_labels="wire.com/role=sftd" node_annotations="{'wire.com/external-ip': 'XXXX'}" +``` + +If the node does not know its own public IP (e.g. because it's behind NAT) then you should also set +the `wire.com/external-ip` annotation to the public IP of the node. + +### Upgradinging sftd + +For full docs with details and explanations please see https://github.com/wireapp/wire-server-deploy/blob/d7a089c1563089d9842aa0e6be4a99f6340985f2/charts/sftd/README.md + +First, make sure you still have the certificates for `sftd.`. This could be the same wildcard or SAN certificate you used at previous steps. + +If you are restricting SFT to certain nodes, make sure that in your inventory +you have annotated all the nodes that are able to run sftd workloads correctly. +``` +kubenode3 node_labels="{'wire.com/role': 'sftd'}" node_annotations="{'wire.com/external-ip': 'XXXX'}" +``` + +You may also want to look at the output of `d kubectl describe node` for each node, and to see if the node label, attribute and annotations are in order. + +If you are restricting SFT to certain nodes, use `nodeSelector` to run on specific nodes (of course **replace the domains with yours**): +``` +d helm upgrade --install sftd ./charts/sftd \ + --set 'nodeSelector.wire\.com/role=sftd' \ + --set host=sftd.example.com \ + --set allowOrigin=https://webapp.example.com \ + --set-file tls.crt=/path/to/tls.crt \ + --set-file tls.key=/path/to/tls.key +``` + +If you are not doing that, omit the `nodeSelector` argument: +``` +d helm upgrade --install sftd ./charts/sftd \ + --set host=sftd.example.com \ + --set allowOrigin=https://webapp.example.com \ + --set-file tls.crt=/path/to/tls.crt \ + --set-file tls.key=/path/to/tls.key +``` diff --git a/shell.nix b/shell.nix deleted file mode 100644 index 7047a9d83..000000000 --- a/shell.nix +++ /dev/null @@ -1,8 +0,0 @@ -{ pkgs ? 
import {}}: -pkgs.mkShell { - buildInputs = [ - pkgs.kubectl - pkgs.kubernetes-helm - pkgs.python27Packages.poetry - ]; -} diff --git a/stackIT/host.ini b/stackIT/host.ini new file mode 100644 index 000000000..9a7d1e424 --- /dev/null +++ b/stackIT/host.ini @@ -0,0 +1 @@ +example.com ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' diff --git a/stackIT/offline-env.sh b/stackIT/offline-env.sh new file mode 100644 index 000000000..a8ee55fc6 --- /dev/null +++ b/stackIT/offline-env.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +ZAUTH_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/../containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}') +export ZAUTH_CONTAINER + +WSD_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/../containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}') + +alias d="sudo docker run --network=host \ + -v \${SSH_AUTH_SOCK:-nonexistent}:/ssh-agent \ + -e SSH_AUTH_SOCK=/ssh-agent \ + -v \$HOME/.ssh:/root/.ssh \ + -v \$PWD:/wire-server-deploy \ + -v /home/ubuntu/.kube:/root/.kube \ + -v /home/ubuntu/.minikube:/home/ubuntu/.minikube \ + -e KUBECONFIG=/root/.kube/config \ + \$WSD_CONTAINER" diff --git a/stackIT/setting-values.sh b/stackIT/setting-values.sh new file mode 100644 index 000000000..de99e86ee --- /dev/null +++ b/stackIT/setting-values.sh @@ -0,0 +1,193 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2087 +set -Eeuo pipefail + +BASE_DIR="/wire-server-deploy" +TARGET_SYSTEM="example.com" +CERT_MASTER_EMAIL="certmaster@example.com" +# this IP should match the DNS A record for TARGET_SYSTEM +HOST_IP=$(wget -qO- https://api.ipify.org) +SFT_NODE="minikube" +COTURN_NODE="minikube-m03" +COTURN_NODE_IP=$(kubectl get node $COTURN_NODE -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') +CHART_URL="https://charts.jetstack.io/charts/cert-manager-v1.13.2.tgz" +NGINX_K8S_NODE="minikube-m02" + +# it create the values.yaml from prod-values.example.yaml/example.yaml to values.yaml +process_charts() { + + # values for cassandra-external, elasticsearch-external, minio-external are created from offline-cluster.sh - helm_external.yml + # List of Helm charts to process values are here: + charts=( + fake-aws demo-smtp + rabbitmq databases-ephemeral reaper wire-server webapp account-pages + team-settings smallstep-accomp cert-manager-ns + nginx-ingress-services sftd coturn + ) + + for chart in "${charts[@]}"; do + chart_dir="$BASE_DIR/values/$chart" + + if [[ -d "$chart_dir" ]]; then + if [[ -f "$chart_dir/prod-values.example.yaml" ]]; then + if [[ ! -f "$chart_dir/values.yaml" ]]; then + cp "$chart_dir/prod-values.example.yaml" "$chart_dir/values.yaml" + echo "Used template prod-values.example.yaml to create $chart_dir/values.yaml" + fi + fi + fi + + done + + # some manual secrets + if [[ ! -f "$BASE_DIR/values/rabbitmq/secrets.yaml" ]]; then + cp "$BASE_DIR/values/rabbitmq/prod-secrets.example.yaml" "$BASE_DIR/values/rabbitmq/secrets.yaml" + echo "Used template prod-secrets.example.yaml to create $BASE_DIR/values/rabbitmq/secrets.yaml" + fi + if [[ ! -f "$BASE_DIR/values/team-settings/secrets.yaml" ]]; then + cp "$BASE_DIR/values/team-settings/prod-secrets.example.yaml" "$BASE_DIR/values/team-settings/secrets.yaml" + echo "Used template prod-secrets.example.yaml to create $BASE_DIR/values/team-settings/secrets.yaml" + fi + if [[ ! 
-f "$BASE_DIR/values/ingress-nginx-controller/values.yaml" ]]; then + cp "$BASE_DIR/values/ingress-nginx-controller/hetzner-ci.example.yaml" "$BASE_DIR/values/ingress-nginx-controller/values.yaml" + echo "Used template hetzner-ci.example.yaml to create $BASE_DIR/values/ingress-nginx-controller/values.yaml" + fi +} + +process_values() { + TEMP_DIR=$(mktemp -d) + trap 'rm -rf $TEMP_DIR' EXIT + + # Fixing the hosts with TARGET_SYSTEM and setting the turn server + sed -e "s/example.com/$TARGET_SYSTEM/g" \ + -e "s/# - \"turn::80\"/- \"turn:$HOST_IP:3478\"/g" \ + -e "s/# - \"turn::80?transport=tcp\"/- \"turn:$HOST_IP:3478?transport=tcp\"/g" \ + "$BASE_DIR/values/wire-server/values.yaml" > "$TEMP_DIR/wire-server-values.yaml" + + # Fixing the hosts in webapp team-settings and account-pages charts + for chart in webapp team-settings account-pages; do + sed "s/example.com/$TARGET_SYSTEM/g" "$BASE_DIR/values/$chart/values.yaml" > "$TEMP_DIR/$chart-values.yaml" + done + + # Setting certManager and DNS records + sed -e 's/useCertManager: false/useCertManager: true/g' \ + -e "/certmasterEmail:$/s/certmasterEmail:/certmasterEmail: $CERT_MASTER_EMAIL/" \ + -e "s/example.com/$TARGET_SYSTEM/" \ + "$BASE_DIR/values/nginx-ingress-services/values.yaml" > "$TEMP_DIR/nginx-ingress-services-values.yaml" + + # adding nodeSelector for ingress controller as it should run as Deployment in the k8s cluster i.e. lack of external load balancer + sed -e 's/kind: DaemonSet/kind: Deployment/' \ + "$BASE_DIR/values/ingress-nginx-controller/values.yaml" > "$TEMP_DIR/ingress-nginx-controller-values.yaml" + if ! grep -q "kubernetes.io/hostname: $NGINX_K8S_NODE" "$TEMP_DIR/ingress-nginx-controller-values.yaml"; then + echo -e " nodeSelector:\n kubernetes.io/hostname: $NGINX_K8S_NODE" >> "$TEMP_DIR/ingress-nginx-controller-values.yaml" + fi + + # Fixing SFTD hosts and setting the cert-manager to http01 and setting the replicaCount to 1 + sed -e "s/webapp.example.com/webapp.$TARGET_SYSTEM/" \ + -e "s/sftd.example.com/sftd.$TARGET_SYSTEM/" \ + -e 's/name: letsencrypt-prod/name: letsencrypt-http01/' \ + -e "s/replicaCount: 3/replicaCount: 1/" \ + "$BASE_DIR/values/sftd/values.yaml" > "$TEMP_DIR/sftd-values.yaml" + + # Creating coturn values and secrets + ZREST_SECRET=$(grep -A1 turn "$BASE_DIR/values/wire-server/secrets.yaml" | grep secret | tr -d '"' | awk '{print $NF}') + cat >"$TEMP_DIR/coturn-secrets.yaml"<"$TEMP_DIR/coturn-values.yaml"< /etc/apt/sources.list.d/docker.list + args: + executable: /bin/bash + + - name: Update apt package index + apt: + update_cache: yes + + - name: Install Docker packages + apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: present + + - name: Add ubuntu user to the docker group + user: + name: "{{ ansible_user }}" # Replace with the username you want to modify + groups: docker + append: yes + + - name: Enable and start Docker service + systemd: + name: docker + enabled: yes + state: started + + - name: Reset SSH connection to apply docker group membership changes + meta: reset_connection + + - name: Install Minikube + get_url: + url: "https://github.com/kubernetes/minikube/releases/latest/download/minikube-linux-amd64" + dest: /usr/local/bin/minikube + mode: '0755' + + - name: Install kubectl + get_url: + url: "https://dl.k8s.io/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl" + dest: /usr/local/bin/kubectl + mode: '0755' + + when: skip_install | default(false) == false + + - name: Creating ssh key and storing it + # 
storing creds in the {{ ansible_user }} user's home directory + become: yes + become_user: "{{ ansible_user }}" + block: + - name: Ensure the .ssh directory exists + file: + path: "/home/{{ ansible_user }}/.ssh" + state: directory + mode: '0700' + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + + - name: Generate SSH key if it does not exist + shell: | + if [ ! -f "/home/{{ ansible_user }}/.ssh/id_rsa" ]; then + ssh-keygen -t rsa -b 4096 -f "/home/{{ ansible_user }}/.ssh/id_rsa" -N "" -C "ansible-generated-key"; + fi + args: + creates: "/home/{{ ansible_user }}/.ssh/id_rsa" + + - name: Read the public key content + slurp: + src: "/home/{{ ansible_user }}/.ssh/id_rsa.pub" + register: ssh_key_content + + - name: Set the public key as a fact + set_fact: + ssh_public_key: "{{ ssh_key_content['content'] | b64decode }}" + + when: skip_ssh | default(false) == false + + - name: start k8s(minikube) cluster + become: yes + become_user: "{{ ansible_user }}" + block: + - name: Check if Minikube is running + shell: minikube status + register: minikube_status + failed_when: false + changed_when: false + + - name: Start Minikube with specified configurations + shell: | + minikube start \ + --nodes={{ minikube_nodes }} \ + --cpus={{ minikube_cpus }} \ + --memory={{ minikube_memory }} \ + --disk-size={{ minikube_disk_size }} \ + --kubernetes-version="{{ kubernetes_version }}" \ + --container-runtime="{{ container_runtime }}" \ + --driver=docker \ + --extra-config=kubeadm.pod-network-cidr={{ pod_network_cidr }} + when: "'Running' not in minikube_status.stdout" + + - name: Retrieve node names from the cluster + shell: kubectl get nodes -o json | jq -r '.items[].metadata.name' + register: kube_node_names + + - name: Configure Node labels + shell: | + kubectl label node {{ item.1 }} wire.io/node={{ item.0 }} + loop: "{{ k8s_node_names | zip(kube_node_names.stdout_lines) | list }}" + register: label_output + + - name: Get list of running Minikube nodes + shell: minikube node list | awk '{print $1}' + register: minikube_nodes_raw + + - name: Add SSH key to all Minikube nodes + shell: | + minikube ssh --native-ssh=false -n {{ item }} -- "mkdir -p ~/.ssh && echo '{{ ssh_public_key }}' >> ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys" + args: + executable: /bin/bash + with_items: "{{ minikube_nodes_raw.stdout_lines }}" + async: 30 + poll: 5 + + when: skip_minikube | default(false) == false + + - name: Start Container Nodes + become: yes + become_user: "{{ ansible_user }}" + block: + - name: Pull the base Ubuntu image + docker_image: + name: ubuntu:22.04 + source: pull + + - name: Write public key to a file + copy: + dest: /tmp/wire-deploy/id_rsa.pub + content: "{{ ssh_public_key }}" + + - name: Create Dockerfile + copy: + dest: /tmp/wire-deploy/Dockerfile + content: | + FROM ubuntu:22.04 + RUN apt update && apt install -y openssh-server systemd systemd-sysv cron && mkdir /var/run/sshd + RUN systemctl enable ssh + RUN systemctl enable cron + RUN echo "PermitRootLogin yes" >> /etc/ssh/sshd_config + RUN mkdir -p /root/.ssh + COPY id_rsa.pub /root/.ssh/authorized_keys + RUN chmod 600 /root/.ssh/authorized_keys + EXPOSE 22 + STOPSIGNAL SIGRTMIN+3 + CMD ["/sbin/init"] + + - name: Build the Docker image + shell: | + docker build --no-cache -t {{ image_name }} /tmp/wire-deploy + + - name: Create and start containers + docker_container: + name: "{{ item }}" + image: "{{ image_name }}" + state: started + restart_policy: always + hostname: "{{ item }}" + privileged: yes + network_mode: "{{ 
docker_network_name }}" + env: + container: "docker" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: "host" + tmpfs: + - /run + - /run/lock + security_opts: + - seccomp=unconfined + - apparmor=unconfined + loop: "{{ container_node_names }}" + + when: skip_docker | default(false) == false + + - name: Generate hosts.ini with dynamic IPs + become: yes + become_user: "{{ ansible_user }}" + block: + + - name: Display running containers + shell: docker ps + register: docker_ps_output + + - name: Print Docker container information + debug: + var: docker_ps_output.stdout + + - name: Extract IPs of Minikube nodes + shell: | + kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type=="InternalIP").address' + register: kube_ips + + - name: Store Minikube node IPs as variable + set_fact: + kubernetes_node_ips: "{{ kube_ips.stdout_lines }}" + + - name: Extract IPs of Docker containers + shell: | + docker inspect -f '{{ "{{ range.NetworkSettings.Networks }}{{ .IPAddress }}{{ end }}" }}' {{ item }} + loop: "{{ container_node_names }}" + register: docker_ips + + - name: Store Docker container IPs as variable + set_fact: + docker_container_ips: "{{ docker_ips.results | map(attribute='stdout') }}" + + - name: Display Kubernetes node IPs + debug: + msg: "Kubernetes Node IPs: {{ kubernetes_node_ips }}" + + - name: Display Docker container IPs + debug: + msg: "Docker Container IPs: {{ docker_container_ips }}" + + - name: Create dictionary for Kubernetes nodes and container IPs + set_fact: + host_ips: + kubenode1: "{{ kubernetes_node_ips[0] }}" + kubenode2: "{{ kubernetes_node_ips[1] }}" + kubenode3: "{{ kubernetes_node_ips[2] }}" + assethost: "{{ docker_container_ips[0] }}" + ansnode1: "{{ docker_container_ips[1] }}" + ansnode2: "{{ docker_container_ips[2] }}" + ansnode3: "{{ docker_container_ips[3] }}" + + - name: Generate hosts.ini content + set_fact: + hosts_ini_content: | + [all] + kubenode1 ansible_host={{ host_ips.kubenode1 }} ansible_user=docker + kubenode2 ansible_host={{ host_ips.kubenode2 }} ansible_user=docker + kubenode3 ansible_host={{ host_ips.kubenode3 }} ansible_user=docker + assethost ansible_host={{ host_ips.assethost }} ansible_user=root + ansnode1 ansible_host={{ host_ips.ansnode1 }} ansible_user=root + ansnode2 ansible_host={{ host_ips.ansnode2 }} ansible_user=root + ansnode3 ansible_host={{ host_ips.ansnode3 }} ansible_user=root + + [all:vars] + ansible_ssh_common_args = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' + + [cassandra:vars] + cassandra_network_interface = eth0 + cassandra_backup_enabled = False + cassandra_incremental_backup_enabled = False + + [elasticsearch:vars] + elasticsearch_network_interface = eth0 + + [minio:vars] + minio_network_interface = eth0 + prefix = "" + domain = "example.com" + deeplink_title = "wire demo environment, example.com" + + [rmq-cluster:vars] + rabbitmq_network_interface = eth0 + + [kube-master] + kubenode1 + kubenode2 + kubenode3 + + [etcd] + kubenode1 etcd_member_name=etcd1 + kubenode2 etcd_member_name=etcd2 + kubenode3 etcd_member_name=etcd3 + + [kube-node] + kubenode1 + kubenode2 + kubenode3 + + [k8s-cluster:children] + kube-master + kube-node + + [cassandra] + ansnode1 + ansnode2 + ansnode3 + + [cassandra_seed] + ansnode1 + + [elasticsearch] + ansnode1 + ansnode2 + ansnode3 + + [elasticsearch_master:children] + elasticsearch + + [minio] + ansnode1 + ansnode2 + ansnode3 + + [rmq-cluster] + ansnode1 + ansnode2 + ansnode3 + + - name: Replace example.com with the target domain + set_fact: + 
hosts_ini_content: "{{ hosts_ini_content | replace('example.com', target_domain) }}" + + when: skip_inventory | default(false) == false + + - name: Download wire artifact + become: yes + become_user: "{{ ansible_user }}" + block: + - name: create wire-server-deploy directory for {{ ansible_user }} user + file: + path: /home/{{ ansible_user }}/wire-server-deploy + state: directory + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: 0775 + + - name: check if wire-server-deploy-static-{{ artifact_hash }}.tgz exists + stat: + path: /home/{{ ansible_user }}/wire-server-deploy-static-{{ artifact_hash }}.tgz + get_checksum: False + + register: artifact_archive_file_check + - name: download wire-server-deploy archive + shell: + cmd: curl -fsSLo /home/{{ ansible_user }}/wire-server-deploy-static-{{ artifact_hash }}.tgz https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-{{ artifact_hash }}.tgz + creates: /home/{{ ansible_user }}/wire-server-deploy-static-{{ artifact_hash }}.tgz + when: not artifact_archive_file_check.stat.exists + + - name: check if wire-server-deploy folder contents exist + stat: + path: /home/{{ ansible_user }}/wire-server-deploy/containers-helm.tar + get_checksum: False + register: artifact_folder_content_check + + - name: unpack wire-server-deploy archive + unarchive: + src: /home/{{ ansible_user }}/wire-server-deploy-static-{{ artifact_hash }}.tgz + dest: /home/{{ ansible_user }}/wire-server-deploy + remote_src: yes + + when: not artifact_folder_content_check.stat.exists + - name: set permissions inside wire-server-deploy via shell command (fails when using ansible directive) + shell: + cmd: sudo chmod -R 0775 /home/{{ ansible_user }}/wire-server-deploy; sudo chown -R {{ ansible_user }}:{{ ansible_user }} /home/{{ ansible_user }} + + - name: Write updated hosts.ini to file + copy: + dest: /home/{{ ansible_user }}/wire-server-deploy/ansible/inventory/offline/hosts.ini + content: "{{ hosts_ini_content }}" + + when: skip_download | default(false) == false + + - name: Delete /tmp/wire-deploy directory with contents + file: + path: /tmp/wire-deploy + state: absent + + - name: Configure iptables rules + become: yes + block: + - name: Get the default interface for the default route + shell: ip route | awk '/default/ {print $5}' | head -n 1 + register: default_interface + changed_when: false + + - name: Get the IP address of the default interface + shell: ip -4 addr show dev {{ default_interface.stdout }} | awk '/inet / {print $2}' | cut -d/ -f1 + register: default_interface_ip + changed_when: false + + - name: Get the IP address of the k8s_ingress_controller node + shell: | + kubectl get node {{ k8s_ingress_controller_node }} -o json | jq -r '.status.addresses[] | select(.type=="InternalIP").address' + register: k8s_ingress_controller_ip + become: yes + become_user: "{{ ansible_user }}" + changed_when: false + + - name: Configure DNAT rules to send http/https traffic to the k8s ingress controller + iptables: + table: nat + chain: PREROUTING + protocol: "{{ item.protocol }}" + jump: DNAT + in_interface: "{{ default_interface.stdout }}" + destination: "{{ default_interface_ip.stdout }}" + destination_port: "{{ item.port }}" + to_destination: "{{ k8s_ingress_controller_ip.stdout }}:{{ item.to_port }}" + state: present + action: insert + loop: "{{ http_dnat_rules }}" + loop_control: + label: "Setting DNAT rule for port {{ item.port }} -> {{ k8s_ingress_controller_ip.stdout | default('undefined') }}:{{ item.to_port }}" + + - name: Get the 
{{ docker_network_name }} Docker network ID + shell: | + docker network inspect {{ docker_network_name }} | jq -r '.[0].Id' + register: docker_network_id + changed_when: false + + - name: Get all interfaces with bridge interfaces + shell: ip -o addr show | awk '{print $2}' | grep -i 'br-' + register: bridge_interfaces + changed_when: false + + - name: Find the matching bridge interface for {{ docker_network_name }} Docker network + shell: | + for iface in {{ bridge_interfaces.stdout_lines | join(' ') }}; do + iface_id=$(echo "$iface" | cut -d '-' -f2) + if echo "{{ docker_network_id.stdout }}" | grep -q "$iface_id"; then + echo "$iface" + break + fi + done + register: matching_bridge_interface + changed_when: false + + - name: Ensure FORWARD rule for traffic from main interface to ingress controller + iptables: + table: filter + chain: FORWARD + in_interface: "{{ default_interface.stdout }}" + out_interface: "{{ matching_bridge_interface.stdout }}" + jump: ACCEPT + state: present + action: insert + + - name: Ensure FORWARD rule for traffic from ingress controller to main interface + iptables: + table: filter + chain: FORWARD + in_interface: "{{ matching_bridge_interface.stdout }}" + out_interface: "{{ default_interface.stdout }}" + jump: ACCEPT + state: present + action: insert + + - name: Get the IP address of the coturn node + shell: | + kubectl get node {{ coturn_k8s_node }} -o json | jq -r '.status.addresses[] | select(.type=="InternalIP").address' + register: coturn_k8s_node_ip + become: yes + become_user: "{{ ansible_user }}" + changed_when: false + + - name: Configure DNAT rule to send UDP traffic for coturn to coturn server on k8s node + iptables: + table: nat + chain: PREROUTING + protocol: udp + jump: DNAT + destination: "{{ default_interface_ip.stdout }}" + destination_ports: "32768:61000" + in_interface: "{{ default_interface.stdout }}" + to_destination: "{{ coturn_k8s_node_ip.stdout }}" + state: present + action: insert + + - name: Configure DNAT rules to reach turn servers running on k8s node + iptables: + table: nat + chain: PREROUTING + protocol: "{{ item.protocol }}" + jump: DNAT + in_interface: "{{ default_interface.stdout }}" + destination: "{{ default_interface_ip.stdout }}" + destination_port: "{{ item.port }}" + to_destination: "{{ coturn_k8s_node_ip.stdout }}:{{ item.to_port }}" + state: present + action: insert + loop: "{{ turn_dnat_rules }}" + loop_control: + label: "Setting DNAT rule for port {{ item.port }} -> {{ coturn_k8s_node_ip.stdout | default('undefined') }}:{{ item.to_port }}" + + - name: Ensure /etc/iptables directory exists + ansible.builtin.file: + path: /etc/iptables + state: directory + owner: root + group: root + mode: '0755' + + - name: Save iptables rules + shell: iptables-save -f /etc/iptables/rules.v4 + + when: skip_iptables | default(false) == false + + - name: disabling kubespray in offline-cluster.sh + become: yes + become_user: "{{ ansible_user }}" + block: + - name: Comment specific lines in offline-cluster.sh + ansible.builtin.lineinfile: + path: /home/{{ ansible_user }}/wire-server-deploy/bin/offline-cluster.sh + regexp: '^ansible-playbook -i \$INVENTORY_FILE \$ANSIBLE_DIR/kubernetes.yml --tags bastion,bootstrap-os,preinstall,container-engine' + line: '# ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/kubernetes.yml --tags bastion,bootstrap-os,preinstall,container-engine' + state: present + + - name: Comment another specific line in offline-cluster.sh + ansible.builtin.lineinfile: + path: /home/{{ ansible_user 
}}/wire-server-deploy/bin/offline-cluster.sh
+          regexp: '^ansible-playbook -i \$INVENTORY_FILE \$ANSIBLE_DIR/kubernetes.yml --skip-tags bootstrap-os,preinstall,container-engine,multus'
+          line: '# ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/kubernetes.yml --skip-tags bootstrap-os,preinstall,container-engine,multus'
+          state: present
+
+    when: skip_disable_kubespray | default(false) == false
diff --git a/terraform/README.md b/terraform/README.md
new file mode 100644
index 000000000..16aee9302
--- /dev/null
+++ b/terraform/README.md
@@ -0,0 +1,97 @@
+# Terraform for wire-server
+
+This directory contains (aspires to contain) all the Terraform required to set up
+wire-server. The `environment` directory is to be considered the "root"
+directory of Terraform.
+
+## How to create a new environment
+
+Recommended: Use nix-shell from the root of this repository to ensure that you
+have the right version of terraform.
+
+Run all commands from the `terraform/environment` directory.
+
+1. Export the `ENV_DIR` environment variable to a directory where you want to store
+   data specific to an environment. Ensure that this directory exists.
+
+   For Wire employees, please create this directory in `cailleach/environments`.
+   If cailleach is not checked out as a sibling directory to wire-server-deploy,
+   please export `CAILLEACH_DIR` as the absolute path to the cailleach directory.
+   Additionally, export `ENV` as the name of the environment. For the rest of
+   this README, please consider `ENV_DIR` to be
+   `${CAILLEACH_DIR}/environments/${ENV}`.
+1. Create a backend config in `$ENV_DIR/backend.tfvars` which looks like this:
+   ```tf
+   region = "<aws region>"
+   bucket = "<s3 bucket name>"
+   key = "<path to the state file inside the bucket>"
+   dynamodb_table = "<lock table name>"
+   ```
+
+   Please refer to the [s3 backend
+   docs](https://www.terraform.io/docs/backends/types/s3.html) for details.
+1. Create a token in the hetzner cloud console and put the following contents (including the export)
+   in a file called `$ENV_DIR/hcloud-token.dec`[1]:
+   ```
+   export HCLOUD_TOKEN=<token>
+   ```
+1. Create an ssh key-pair and put the private key in a file called
+   `$ENV_DIR/operator-ssh.dec`[1]. Example:
+
+   ```bash
+   ssh-keygen -o -a 100 -t ed25519 -f "$ENV_DIR/operator-ssh.dec" -C "example@example.com"
+   # see footnote 2 if you're a wire employee
+   ```
+1. (optional) encrypt the files if collaborating using SOPS:
+   ```
+   sops -e "$ENV_DIR"/operator-ssh.dec > "$ENV_DIR"/operator-ssh
+   sops -e "$ENV_DIR"/hcloud-token.dec > "$ENV_DIR"/hcloud-token
+   ```
+1. Create variables for the environment in `$ENV_DIR/terraform.tfvars`, example:
+   ```tf
+   environment = "<environment name>"
+   root_domain = "example.com"
+   operator_ssh_public_keys = {
+     terraform_managed = {
+       "<key name>" = "<public key content>"
+     }
+     preuploaded_key_names = []
+   }
+   ```
+   Delete operator-ssh.dec.pub.
+   Please refer to the variable definitions in `environment/*.vars.tf` in order to see which
+   ones are available. Additional examples can be found in the `examples` folder at the
+   top level of this repository.
+1. Initialize Terraform
+   ```
+   make init
+   ```
+1. Apply terraform
+   ```
+   make apply
+   ```
+1. Create the inventory
+   ```
+   make create-inventory
+   ```
+1. To bootstrap the nodes, please refer to the [Ansible README](../ansible/README.md)
+1. To deploy Wire on top, please refer to the [Helm README](../helm/README.md)
+
+[1] For wire employees: Encrypt this file using `sops`. `sops` does not
+work inside the `nix-shell`, so change shells as needed.
+
+[2] For wire employees: Use "backend+${ENV}-operator@wire.com" as a
+convention.
+
+## Decommissioning machines
+
+### SFT
+
+Each SFT server has a unique identifier.
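+These identifiers come from the `sft_server_names_blue` and `sft_server_names_green`
+lists in the environment's `terraform.tfvars` (see `environment/sft.vars.tf`).
+A purely hypothetical sketch, assuming two blue servers and no green ones:
+
+```tf
+sft_server_names_blue  = ["1", "2"]
+sft_server_names_green = []
+```
+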
+Decommissioning is as easy as removing that
+identifier from one of the lists - preferably from the non-active group.
+
+### Kubernetes
+
+Defining Kubernetes machines is done by defining *group(s)* of machines. In order
+to destroy a single machine, one has to decommission the entire group - preferably
+after bringing up another group taking its place.
diff --git a/terraform/environment/Makefile b/terraform/environment/Makefile
new file mode 100644
index 000000000..a432bd45e
--- /dev/null
+++ b/terraform/environment/Makefile
@@ -0,0 +1,59 @@
+SHELL := /usr/bin/env bash
+ROOT_DIR := ${shell dirname ${realpath ${firstword ${MAKEFILE_LIST}}}}
+TOK = hcloud-token
+SSH = operator-ssh
+
+# Please ignore the following line if you're not a wire employee
+CAILLEACH_DIR:=${abspath ${ROOT_DIR}/../../../cailleach}
+
+export TF_DATA_DIR=${ENV_DIR}/.terraform
+
+.PHONY: init
+init: check-env
+	terraform init -backend-config=${ENV_DIR}/backend.tfvars
+
+.PHONY: output
+output: check-env
+	terraform output -json
+
+.PHONY: force-unlock
+force-unlock: check-env
+ifndef LOCK_ID
+	${error please define LOCK_ID}
+endif
+	terraform force-unlock ${LOCK_ID} ${ROOT_DIR}
+
+.PHONY: create-inventory
+create-inventory: check-env
+	mkdir -p ${ENV_DIR}/gen && \
+	terraform output -json inventory > ${ENV_DIR}/gen/terraform-inventory.yml
+
+.PHONY: apply plan console destroy
+apply plan console destroy: check-env
+	source ${ENV_DIR}/hcloud-token.dec && \
+	terraform $@ -var-file=${ENV_DIR}/terraform.tfvars
+
+.PHONY: check-env
+check-env:
+ifndef ENV_DIR
+ifndef ENV
+	${error please define either ENV or ENV_DIR}
+else
+ENV_DIR=${CAILLEACH_DIR}/environments/${ENV}
+endif
+endif
+
+.PHONY: decrypt
+decrypt: ${ENV_DIR}/${TOK}.dec ${ENV_DIR}/${SSH}.dec
+
+${ENV_DIR}/${TOK}.dec: check-env
+	echo ${ENV_DIR}/${TOK}.dec
+	sops -d ${ENV_DIR}/${TOK} > ${ENV_DIR}/${TOK}.dec
+
+${ENV_DIR}/${SSH}.dec: check-env
+	sops -d ${ENV_DIR}/${SSH} > ${ENV_DIR}/${SSH}.dec
+	chmod 0600 ${ENV_DIR}/${SSH}.dec
+
+.PHONY: clean
+clean: check-env
+	rm ${ENV_DIR}/*.dec
diff --git a/terraform/environment/aws.tf b/terraform/environment/aws.tf
new file mode 100644
index 000000000..46595d608
--- /dev/null
+++ b/terraform/environment/aws.tf
@@ -0,0 +1,7 @@
+variable "aws_region" {
+  default = "eu-central-1"
+}
+
+provider "aws" {
+  region = var.aws_region
+}
diff --git a/terraform/environment/hcloud.tf b/terraform/environment/hcloud.tf
new file mode 100644
index 000000000..4b68ff21b
--- /dev/null
+++ b/terraform/environment/hcloud.tf
@@ -0,0 +1,16 @@
+provider "hcloud" {
+  # NOTE: You must have a HCLOUD_TOKEN environment variable set!
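+  # It is sourced from $ENV_DIR/hcloud-token.dec by the apply/plan/console/destroy
+  # targets of the Makefile in this directory.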
+}
+
+resource "hcloud_ssh_key" "operator_ssh" {
+  for_each = var.operator_ssh_public_keys.terraform_managed
+  name = each.key
+  public_key = each.value
+}
+
+locals {
+  hcloud_ssh_keys = concat(
+    [for key in hcloud_ssh_key.operator_ssh: key.name],
+    tolist(var.operator_ssh_public_keys.preuploaded_key_names)
+  )
+}
diff --git a/terraform/environment/hcloud.vars.tf b/terraform/environment/hcloud.vars.tf
new file mode 100644
index 000000000..c88a03760
--- /dev/null
+++ b/terraform/environment/hcloud.vars.tf
@@ -0,0 +1,21 @@
+variable "hcloud_image" {
+  default = "ubuntu-22.04"
+}
+
+variable "hcloud_location" {
+  default = "nbg1"
+}
+
+variable "operator_ssh_public_keys" {
+  type = object({
+    terraform_managed     = map(string) # Map of key name to the public key content
+    preuploaded_key_names = set(string)
+  })
+  validation {
+    condition = (
+      length(var.operator_ssh_public_keys.terraform_managed) > 0 ||
+      length(var.operator_ssh_public_keys.preuploaded_key_names) > 0
+    )
+    error_message = "At least one key must be provided."
+  }
+}
diff --git a/terraform/environment/inventory.tf b/terraform/environment/inventory.tf
new file mode 100644
index 000000000..f051a1e3b
--- /dev/null
+++ b/terraform/environment/inventory.tf
@@ -0,0 +1,10 @@
+# Generates an inventory file to be used by ansible. Ideally, we would generate
+# this outside terraform using outputs, but it is not possible to use 'terraform
+# output' when the init directory is different from the root code directory.
+# Terraform Issue: https://github.com/hashicorp/terraform/issues/17300
+output "inventory" {
+  value = merge(
+    local.sft_inventory,
+    local.k8s_cluster_inventory
+  )
+}
diff --git a/terraform/environment/kubernetes.cluster.tf b/terraform/environment/kubernetes.cluster.tf
new file mode 100644
index 000000000..5ecc3a951
--- /dev/null
+++ b/terraform/environment/kubernetes.cluster.tf
@@ -0,0 +1,25 @@
+locals {
+  machines = flatten([
+    for g in try(var.k8s_cluster.machine_groups, []) : [
+      for mid in range(1, 1 + lookup(g, "machine_count", 1)) : merge(
+        # NOTE: destructure the group configuration, dropping 'machine_count'
+        { for k,v in g : k => v if k != "machine_count" },
+        { machine_id = format("%02d", mid) }
+      )
+    ]
+  ])
+  # NOTE: 'with_load_balancer' defaults to true if 'load_balancer' is undefined but LB ports are defined, thus 'load_balancer' may be omitted
+  load_balancer_is_used = lookup(var.k8s_cluster, "load_balancer", length(lookup(var.k8s_cluster, "load_balancer_ports", [])) > 0)
+}
+
+module "hetzner_k8s_cluster" {
+  for_each = toset(try(var.k8s_cluster.cloud == "hetzner", false) ?
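+    # NOTE: iterating over an at-most-one-element set emulates a conditional module:
+    # it is only instantiated when 'k8s_cluster.cloud' is set to "hetzner".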
+    [var.environment] : [])
+
+  source = "./../modules/hetzner-kubernetes"
+
+  cluster_name = each.key
+  machines = local.machines
+  ssh_keys = local.hcloud_ssh_keys
+  with_load_balancer = local.load_balancer_is_used
+  lb_port_mappings = lookup(var.k8s_cluster, "load_balancer_ports", [])
+}
diff --git a/terraform/environment/kubernetes.cluster.vars.tf b/terraform/environment/kubernetes.cluster.vars.tf
new file mode 100644
index 000000000..9e94a46ad
--- /dev/null
+++ b/terraform/environment/kubernetes.cluster.vars.tf
@@ -0,0 +1,29 @@
+# FUTUREWORK: replace 'any' by implementing https://www.terraform.io/docs/language/functions/defaults.html
+#
+variable "k8s_cluster" {
+  description = "represents the Kubernetes cluster"
+  # type = object({
+  #   cloud = string
+  #   load_balancer = optional(bool)
+  #   load_balancer_ports = optional(list(
+  #     object({
+  #       name = string
+  #       protocol = string
+  #       listen = number
+  #       destination = number
+  #     })
+  #   ))
+  #   machine_groups = list(object({
+  #     group_name = string
+  #     machine_count = optional(number)
+  #     machine_type = string
+  #     component_classes = list(string)
+  #     volume = optional(object({
+  #       size = number
+  #       format = optional(string)
+  #     }))
+  #   }))
+  # })
+  type = any
+  default = {}
+}
diff --git a/terraform/environment/kubernetes.dns.tf b/terraform/environment/kubernetes.dns.tf
new file mode 100644
index 000000000..ae9abbe4f
--- /dev/null
+++ b/terraform/environment/kubernetes.dns.tf
@@ -0,0 +1,22 @@
+module "kubernetes-dns-records" {
+  for_each = toset(var.root_domain != null && length(var.sub_domains) > 0 ? [var.environment] : [])
+
+  source = "../modules/aws-dns-records"
+
+  zone_fqdn = var.root_domain
+  domain = var.environment
+  subdomains = var.sub_domains
+  ips = module.hetzner_k8s_cluster[var.environment].ips
+  # NOTE: this list could have been generated similarly to ./kubernetes.inventory.tf, but
+  # Terraform thinks differently. While building up the dependency tree, it appears
+  # that it is not able to see indirect dependencies, e.g. local.cluster_machines.
+  # It fails at modules/aws-dns-records/resources.route53.tf resource aws_route53_record.spf.count
+  # with:
+  #
+  #   The "count" value depends on resource attributes that cannot be determined until apply
+  #
+  # So, in order to work around this, a second output for public node IPs is being introduced.
+  spf_record_ips = module.hetzner_k8s_cluster[var.environment].node_ips
+
+  srvs = var.srvs
+}
diff --git a/terraform/environment/kubernetes.dns.vars.tf b/terraform/environment/kubernetes.dns.vars.tf
new file mode 100644
index 000000000..31ba35161
--- /dev/null
+++ b/terraform/environment/kubernetes.dns.vars.tf
@@ -0,0 +1,19 @@
+variable "root_domain" {
+  type = string
+  default = null
+}
+
+variable "sub_domains" {
+  type = list(string)
+  default = []
+}
+
+variable "create_spf_record" {
+  type = bool
+  default = false
+}
+
+variable "srvs" {
+  type = map(list(string))
+  default = {}
+}
diff --git a/terraform/environment/kubernetes.inventory.tf b/terraform/environment/kubernetes.inventory.tf
new file mode 100644
index 000000000..d162425f5
--- /dev/null
+++ b/terraform/environment/kubernetes.inventory.tf
@@ -0,0 +1,36 @@
+locals {
+  cluster_machines = try(module.hetzner_k8s_cluster[var.environment].machines, [])
+}
+
+locals {
+  k8s_cluster_inventory = length(local.cluster_machines) > 0 ? {
+    kube-master = { hosts = { for m in local.cluster_machines : m.hostname => {} if contains(m.component_classes, "controlplane") } }
+    kube-node = { hosts = { for m in local.cluster_machines : m.hostname => {} if contains(m.component_classes, "node") } }
+    etcd = { hosts = { for m in local.cluster_machines : m.hostname => {} if contains(keys(m), "etcd_member_name") } }
+    k8s-cluster = {
+      children = {
+        kube-master = {}
+        kube-node = {}
+      }
+      hosts = {for m in local.cluster_machines :
+        m.hostname => merge(
+          {
+            ansible_host = m.public_ipv4
+            ip = m.private_ipv4
+          },
+          contains(keys(m), "etcd_member_name") ? { etcd_member_name = m.etcd_member_name } : {}
+        )
+      }
+      vars = merge(
+        {
+          # NOTE: instead of setting static inventory variables here, please consider
+          # placing them in the inventory of the respective environment
+        },
+        local.load_balancer_is_used ? {
+          apiserver_loadbalancer_domain_name = module.hetzner_k8s_cluster[var.environment].ips[0]
+          loadbalancer_apiserver = { address = module.hetzner_k8s_cluster[var.environment].ips[0] }
+        } : tomap({})
+      )
+    }
+  } : tomap({})
+}
diff --git a/terraform/environment/main.vars.tf b/terraform/environment/main.vars.tf
new file mode 100644
index 000000000..afee37a90
--- /dev/null
+++ b/terraform/environment/main.vars.tf
@@ -0,0 +1,3 @@
+variable "environment" {
+  type = string
+}
diff --git a/terraform/environment/sft.inventory.tf b/terraform/environment/sft.inventory.tf
new file mode 100644
index 000000000..faef50b14
--- /dev/null
+++ b/terraform/environment/sft.inventory.tf
@@ -0,0 +1,29 @@
+locals {
+  sft_instances_blue = flatten(module.sft[*].sft.instances_blue)
+  sft_instances_green = flatten(module.sft[*].sft.instances_green)
+}
+
+locals {
+  sft_inventory = {
+    sft_servers = {
+      hosts = { for instance in concat(local.sft_instances_blue, local.sft_instances_green): instance.hostname => {
+        ansible_host = instance.ipaddress
+        sft_fqdn = instance.fqdn
+        srv_announcer_record_target = instance.fqdn
+        srv_announcer_zone_domain = var.root_domain
+        srv_announcer_aws_key_id = module.sft[0].sft.aws_key_id
+        srv_announcer_aws_access_key = module.sft[0].sft.aws_access_key
+        srv_announcer_aws_region = module.sft[0].sft.aws_region
+        srv_announcer_record_name = "_sft._tcp.${var.environment}"
+        ansible_python_interpreter = "/usr/bin/python3"
+        ansible_ssh_user = "root"
+      }}
+    }
+    sft_servers_blue = {
+      hosts = { for instance in local.sft_instances_blue : instance.hostname => {} }
+    }
+    sft_servers_green = {
+      hosts = { for instance in local.sft_instances_green : instance.hostname => {} }
+    }
+  }
+}
diff --git a/terraform/environment/sft.tf b/terraform/environment/sft.tf
new file mode 100644
index 000000000..f4ba167cb
--- /dev/null
+++ b/terraform/environment/sft.tf
@@ -0,0 +1,21 @@
+module "sft" {
+  count = min(1, length(setunion(var.sft_server_names_blue, var.sft_server_names_green)))
+
+  source = "../modules/sft"
+  root_domain = var.root_domain
+  environment = var.environment
+  a_record_ttl = var.sft_a_record_ttl
+  image = var.hcloud_image
+  location = var.hcloud_location
+  ssh_keys = local.hcloud_ssh_keys
+  server_groups = {
+    blue = {
+      server_names = var.sft_server_names_blue
+      server_type = var.sft_server_type_blue
+    }
+    green = {
+      server_names = var.sft_server_names_green
+      server_type = var.sft_server_type_green
+    }
+  }
+}
diff --git a/terraform/environment/sft.vars.tf b/terraform/environment/sft.vars.tf
new file mode 100644
index 000000000..69409cde9
--- /dev/null
+++ b/terraform/environment/sft.vars.tf
@@ -0,0 +1,23 @@
+variable
"sft_server_names_blue" { + type = set(string) + default = [] +} + +variable "sft_server_type_blue" { + type = string + default = "cx11" +} + +variable "sft_server_names_green" { + type = set(string) + default = [] +} + +variable "sft_server_type_green" { + type = string + default = "cx11" +} + +variable "sft_a_record_ttl" { + default = 60 +} diff --git a/terraform/environment/terraform.tf b/terraform/environment/terraform.tf new file mode 100644 index 000000000..79f3bb7c8 --- /dev/null +++ b/terraform/environment/terraform.tf @@ -0,0 +1,18 @@ +terraform { + required_version = "~> 1.1" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 2.58" + } + hcloud = { + source = "hetznercloud/hcloud" + } + } + + backend s3 { + encrypt = true + } + +} diff --git a/terraform/examples/create-infrastructure.tf b/terraform/examples/create-infrastructure.tf index 2cb8dfc58..94147ddcf 100644 --- a/terraform/examples/create-infrastructure.tf +++ b/terraform/examples/create-infrastructure.tf @@ -1,7 +1,7 @@ # Example terraform script to create virtual machines on the hetzner cloud provider # and an ansible-compatible inventory file terraform { - required_version = ">= 0.12.1" + required_version = "~> 1.1" # Recommended: configure a backend to share terraform state # See terraform documentation @@ -20,10 +20,9 @@ resource "hcloud_ssh_key" "default" { resource "hcloud_server" "node" { count = 3 name = "node${count.index}" - image = "ubuntu-18.04" - server_type = "cx41" + image = "ubuntu-22.04" + server_type = "cx42" ssh_keys = ["hetznerssh-key"] - # Nuremberg (for choices see `hcloud datacenter list`) location = "nbg1" } @@ -31,8 +30,8 @@ resource "hcloud_server" "node" { resource "hcloud_server" "etcd" { count = 3 name = "etcd${count.index}" - image = "ubuntu-18.04" - server_type = "cx41" + image = "ubuntu-22.04" + server_type = "cx42" ssh_keys = ["hetznerssh-key"] # Nuremberg (for choices see `hcloud datacenter list`) @@ -42,19 +41,8 @@ resource "hcloud_server" "etcd" { resource "hcloud_server" "redis" { count = 0 name = "redis${count.index}" - image = "ubuntu-18.04" - server_type = "cx11" - ssh_keys = ["hetznerssh-key"] - - # Nuremberg (for choices see `hcloud datacenter list`) - location = "nbg1" -} - -resource "hcloud_server" "restund" { - count = 2 - name = "restund${count.index}" - image = "ubuntu-18.04" - server_type = "cx11" + image = "ubuntu-22.04" + server_type = "cx22" ssh_keys = ["hetznerssh-key"] # Nuremberg (for choices see `hcloud datacenter list`) @@ -64,8 +52,8 @@ resource "hcloud_server" "restund" { resource "hcloud_server" "minio" { count = 3 name = "minio${count.index}" - image = "ubuntu-18.04" - server_type = "cx11" + image = "ubuntu-22.04" + server_type = "cx22" ssh_keys = ["hetznerssh-key"] # Nuremberg (for choices see `hcloud datacenter list`) @@ -75,8 +63,8 @@ resource "hcloud_server" "minio" { resource "hcloud_server" "cassandra" { count = 3 name = "cassandra${count.index}" - image = "ubuntu-18.04" - server_type = "cx21" + image = "ubuntu-22.04" + server_type = "cx22" ssh_keys = ["hetznerssh-key"] # Nuremberg (for choices see `hcloud datacenter list`) @@ -86,8 +74,8 @@ resource "hcloud_server" "cassandra" { resource "hcloud_server" "elasticsearch" { count = 3 name = "elasticsearch${count.index}" - image = "ubuntu-18.04" - server_type = "cx11" + image = "ubuntu-22.04" + server_type = "cx22" ssh_keys = ["hetznerssh-key"] # Nuremberg (for choices see `hcloud datacenter list`) @@ -153,7 +141,6 @@ data "template_file" "inventory" { 
   connection_strings_elasticsearch = "${join("\n", formatlist("%s ansible_host=%s vpn_ip=%s", hcloud_server.elasticsearch.*.name, hcloud_server.elasticsearch.*.ipv4_address, null_resource.vpnes.*.triggers.ip))}"
   connection_strings_minio = "${join("\n", formatlist("%s ansible_host=%s vpn_ip=%s", hcloud_server.minio.*.name, hcloud_server.minio.*.ipv4_address, null_resource.vpnminio.*.triggers.ip))}"
   connection_strings_redis = "${join("\n", formatlist("%s ansible_host=%s vpn_ip=%s", hcloud_server.redis.*.name, hcloud_server.redis.*.ipv4_address, null_resource.vpnredis.*.triggers.ip))}"
-  connection_strings_restund = "${join("\n", formatlist("%s ansible_host=%s", hcloud_server.restund.*.name, hcloud_server.restund.*.ipv4_address))}"
   list_master = "${join("\n",hcloud_server.node.*.name)}"
   list_etcd = "${join("\n",hcloud_server.etcd.*.name)}"
   list_node = "${join("\n",hcloud_server.node.*.name)}"
@@ -161,7 +148,6 @@ data "template_file" "inventory" {
   list_elasticsearch = "${join("\n",hcloud_server.elasticsearch.*.name)}"
   list_minio = "${join("\n",hcloud_server.minio.*.name)}"
   list_redis = "${join("\n",hcloud_server.redis.*.name)}"
-  list_restund = "${join("\n",hcloud_server.restund.*.name)}"
  }
}
diff --git a/terraform/examples/inventory.tpl b/terraform/examples/inventory.tpl
index 26c5d965a..932e2fb5f 100644
--- a/terraform/examples/inventory.tpl
+++ b/terraform/examples/inventory.tpl
@@ -5,7 +5,6 @@ ${connection_strings_minio}
 ${connection_strings_elasticsearch}
 ${connection_strings_cassandra}
 ${connection_strings_redis}
-${connection_strings_restund}
 
 [vpn:children]
 k8s-cluster
@@ -56,9 +55,6 @@ ${list_minio}
 [redis]
 ${list_redis}
 
-[restund]
-${list_restund}
-
 [all:vars]
 ## path to the ssh private key
 # ansible_ssh_private_key_file =
@@ -78,7 +74,6 @@ ansible_python_interpreter = /usr/bin/python3
 # cassandra_network_interface = vpn0
 # redis_network_interface = vpn0
 # registry_network_interface = vpn0
-# restund_network_interface = vpn0
 
 ## configure a proxy if one is needed to access the Internet
 # http_proxy = ""
diff --git a/terraform/examples/wire-server-deploy-offline-hetzner/.envrc b/terraform/examples/wire-server-deploy-offline-hetzner/.envrc
new file mode 100644
index 000000000..8a29ee6d6
--- /dev/null
+++ b/terraform/examples/wire-server-deploy-offline-hetzner/.envrc
@@ -0,0 +1,5 @@
+[[ -f .envrc.local ]] && source_env .envrc.local
+# You can set this in .envrc.local to keep it out of VCS
+export HCLOUD_TOKEN
+source_up
+
diff --git a/terraform/examples/wire-server-deploy-offline-hetzner/README.md b/terraform/examples/wire-server-deploy-offline-hetzner/README.md
new file mode 100644
index 000000000..8869926b6
--- /dev/null
+++ b/terraform/examples/wire-server-deploy-offline-hetzner/README.md
@@ -0,0 +1,7 @@
+# Wire-server-deploy-offline-hetzner
+
+This environment is set up and destroyed on demand to test our offline story,
+and to function as a reference network diagram for an offline deploy.
+
+This is almost identical to the `wire` environment. We should probably reuse some code.
+I just needed some boxes to test the offline deploy path on-demand.
diff --git a/terraform/examples/wire-server-deploy-offline-hetzner/main.tf b/terraform/examples/wire-server-deploy-offline-hetzner/main.tf new file mode 100644 index 000000000..854a24309 --- /dev/null +++ b/terraform/examples/wire-server-deploy-offline-hetzner/main.tf @@ -0,0 +1,190 @@ +locals { + rfc1918_cidr = "10.0.0.0/8" + kubenode_count = 3 + minio_count = 2 + elasticsearch_count = 2 + cassandra_count = 3 + ssh_keys = [hcloud_ssh_key.adminhost.name] + + # TODO: IPv6 + disable_network_cfg = <<-EOF + #cloud-config + runcmd: + + # Allow DNS + - iptables -A OUTPUT -o eth0 -p udp --dport 53 -j ACCEPT + - ip6tables -A OUTPUT -o eth0 -p udp --dport 53 -j ACCEPT + + # Allow NTP + - iptables -A OUTPUT -o eth0 -p udp --dport 123 -j ACCEPT + - ip6tables -A OUTPUT -o eth0 -p udp --dport 123 -j ACCEPT + + # Drop all other traffic + - iptables -A OUTPUT -o eth0 -j DROP + - ip6tables -A OUTPUT -o eth0 -j DROP + + EOF +} + + +resource "random_pet" "main" { +} + +resource "hcloud_network" "main" { + name = "main-${random_pet.main.id}" + ip_range = cidrsubnet(local.rfc1918_cidr, 8, 1) +} + +resource "hcloud_network_subnet" "main" { + network_id = hcloud_network.main.id + type = "cloud" + network_zone = "eu-central" + ip_range = cidrsubnet(hcloud_network.main.ip_range, 8, 1) +} + + +resource "random_pet" "adminhost" { +} + +resource "tls_private_key" "admin" { + algorithm = "ECDSA" + ecdsa_curve = "P256" +} + +resource "hcloud_ssh_key" "adminhost" { + name = "adminhost-${random_pet.adminhost.id}" + public_key = tls_private_key.admin.public_key_openssh +} + +# Connected to all other servers. Simulates the admin's "laptop" +resource "hcloud_server" "adminhost" { + location = "nbg1" + name = "adminhost-${random_pet.adminhost.id}" + image = "ubuntu-22.04" + ssh_keys = local.ssh_keys + server_type = "cpx41" + user_data = <<-EOF + #cloud-config + apt: + sources: + docker.list: + source: deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable + keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 + packages: + - docker-ce + - docker-ce-cli + users: + - name: admin + groups: + - sudo + shell: /bin/bash + ssh_authorized_keys: + - "${tls_private_key.admin.public_key_openssh}" + EOF +} + +resource "hcloud_server_network" "adminhost" { + server_id = hcloud_server.adminhost.id + subnet_id = hcloud_network_subnet.main.id +} + +resource "random_pet" "assethost" { +} + +# The server hosting all the bootstrap assets +resource "hcloud_server" "assethost" { + location = "nbg1" + name = "assethost-${random_pet.assethost.id}" + image = "ubuntu-22.04" + ssh_keys = local.ssh_keys + server_type = "cpx41" + user_data = local.disable_network_cfg +} + +resource "hcloud_server_network" "assethost" { + server_id = hcloud_server.assethost.id + subnet_id = hcloud_network_subnet.main.id +} + +resource "random_pet" "kubenode" { + count = local.kubenode_count +} + +resource "hcloud_server" "kubenode" { + count = local.kubenode_count + location = "nbg1" + name = "kubenode-${random_pet.kubenode[count.index].id}" + image = "ubuntu-22.04" + ssh_keys = local.ssh_keys + server_type = "cpx41" + user_data = local.disable_network_cfg +} + +resource "hcloud_server_network" "kubenode" { + count = local.kubenode_count + server_id = hcloud_server.kubenode[count.index].id + subnet_id = hcloud_network_subnet.main.id +} + + +resource "random_pet" "cassandra" { + count = local.cassandra_count +} + +resource "hcloud_server" "cassandra" { + count = local.cassandra_count + location = "nbg1" + name = 
"cassandra-${random_pet.cassandra[count.index].id}" + image = "ubuntu-22.04" + ssh_keys = local.ssh_keys + server_type = "cx22" + # user_data = local.disable_network_cfg +} + +resource "hcloud_server_network" "cassandra" { + count = local.cassandra_count + server_id = hcloud_server.cassandra[count.index].id + subnet_id = hcloud_network_subnet.main.id +} + + +resource "random_pet" "elasticsearch" { + count = local.elasticsearch_count +} + +resource "hcloud_server" "elasticsearch" { + count = local.elasticsearch_count + location = "nbg1" + name = "elasticsearch-${random_pet.elasticsearch[count.index].id}" + image = "ubuntu-22.04" + ssh_keys = local.ssh_keys + server_type = "cx22" + # user_data = local.disable_network_cfg +} + +resource "hcloud_server_network" "elasticsearch" { + count = local.elasticsearch_count + server_id = hcloud_server.elasticsearch[count.index].id + subnet_id = hcloud_network_subnet.main.id +} + + +resource "random_pet" "minio" { + count = local.minio_count +} + +resource "hcloud_server" "minio" { + count = local.minio_count + location = "nbg1" + name = "minio-${random_pet.minio[count.index].id}" + image = "ubuntu-22.04" + ssh_keys = local.ssh_keys + server_type = "cx22" + # user_data = local.disable_network_cfg +} + +resource "hcloud_server_network" "minio" { + count = local.minio_count + server_id = hcloud_server.minio[count.index].id + subnet_id = hcloud_network_subnet.main.id +} diff --git a/terraform/examples/wire-server-deploy-offline-hetzner/outputs.tf b/terraform/examples/wire-server-deploy-offline-hetzner/outputs.tf new file mode 100644 index 000000000..f951b28e4 --- /dev/null +++ b/terraform/examples/wire-server-deploy-offline-hetzner/outputs.tf @@ -0,0 +1,100 @@ +output "ssh_private_key" { + sensitive = true + value = tls_private_key.admin.private_key_pem +} +output "adminhost" { + sensitive = true + value = hcloud_server.adminhost.ipv4_address +} +# output format that a static inventory file expects +output "static-inventory" { + sensitive = true + value = { + + assethost = { + hosts = { + "assethost" = { + ansible_host = hcloud_server_network.assethost.ip + ansible_user = "root" + } + } + } + adminhost = { + hosts = { + "adminhost" = { + ansible_host = hcloud_server.adminhost.ipv4_address + ansible_user = "root" + } + } + } + etcd = { + children = { "kube-master" = {} } + } + kube-master = { + children = { "kube-node" = {} } + } + kube-node = { + hosts = { + for index, server in hcloud_server.kubenode : server.name => { + ansible_host = hcloud_server_network.kubenode[index].ip + ip = hcloud_server_network.kubenode[index].ip + ansible_user = "root" + etcd_member_name = server.name + } + } + } + k8s-cluster = { + children = { + "kube-node" = {} + "kube-master" = {} + } + # NOTE: Necessary for the Hetzner Cloud until Calico v3.17 arrives in Kubespray + # Hetzner private networks have an MTU of 1450 instead of 1500 + vars = { + calico_mtu = 1450 + calico_veth_mtu = 1430 + # NOTE: relax handling a list with more than 3 items; required on Hetzner + docker_dns_servers_strict: false + } + } + cassandra = { + hosts = { + for index, server in hcloud_server.cassandra : server.name => { + ansible_host = hcloud_server_network.cassandra[index].ip + ansible_user = "root" + } + } + vars = { + cassandra_network_interface = "eth0" + } + } + cassandra_seed = { + hosts = { (hcloud_server.cassandra[0].name) = {} } + } + elasticsearch = { + hosts = { + for index, server in hcloud_server.elasticsearch : server.name => { + ansible_host = 
hcloud_server_network.elasticsearch[index].ip
+          ansible_user = "root"
+        }
+      }
+      vars = {
+        elasticsearch_network_interface = "eth0"
+      }
+    }
+    elasticsearch_master = {
+      children = { "elasticsearch" = {} }
+    }
+    minio = {
+      hosts = {
+        for index, server in hcloud_server.minio : server.name => {
+          ansible_host = hcloud_server_network.minio[index].ip
+          ansible_user = "root"
+        }
+      }
+      vars = {
+        minio_network_interface = "eth0"
+      }
+    }
+  }
+}
diff --git a/terraform/examples/wire-server-deploy-offline-hetzner/versions.tf b/terraform/examples/wire-server-deploy-offline-hetzner/versions.tf
new file mode 100644
index 000000000..b047ba54e
--- /dev/null
+++ b/terraform/examples/wire-server-deploy-offline-hetzner/versions.tf
@@ -0,0 +1,8 @@
+terraform {
+  required_providers {
+    hcloud = {
+      source = "hetznercloud/hcloud"
+    }
+  }
+  required_version = "~> 1.1"
+}
diff --git a/terraform/modules/aws_ami_ubuntu_search/README.md b/terraform/modules/aws-ami-ubuntu-search/README.md
similarity index 100%
rename from terraform/modules/aws_ami_ubuntu_search/README.md
rename to terraform/modules/aws-ami-ubuntu-search/README.md
diff --git a/terraform/modules/aws_ami_ubuntu_search/main.tf b/terraform/modules/aws-ami-ubuntu-search/main.tf
similarity index 100%
rename from terraform/modules/aws_ami_ubuntu_search/main.tf
rename to terraform/modules/aws-ami-ubuntu-search/main.tf
diff --git a/terraform/modules/aws_ami_ubuntu_search/outputs.tf b/terraform/modules/aws-ami-ubuntu-search/outputs.tf
similarity index 100%
rename from terraform/modules/aws_ami_ubuntu_search/outputs.tf
rename to terraform/modules/aws-ami-ubuntu-search/outputs.tf
diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/README.md b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/README.md
new file mode 100644
index 000000000..49253026b
--- /dev/null
+++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/README.md
@@ -0,0 +1,52 @@
+Terraform module: Brig pre-key locking, event queue (optional: email sending)
+=============================================================================
+
+State: __experimental__
+
+This module allows wire-server's brig service to leverage AWS resources (A) to
+acquire a lock using DynamoDB (used during insertion and retrieval of prekeys
+in Cassandra to avoid race conditions), and (B) to establish a message queue
+for internal events (used e.g. during user deletions).
+
+[Optional] Wire-server's "brig" component needs to send emails. This can either
+be done by configuring an SMTP server (Option 1), or by using AWS resources (Option 2).
+This terraform module can enable brig to send emails using Option 2. In addition, it
+configures *MAIL FROM* for outgoing emails, but does not enable incoming emails
+(possible solution: `aws_ses_receipt_rule`).
+
+AWS resources: SQS, DynamoDB, (optionally: SES, SNS, DNS)
+
+
+#### Important note
+
+This module causes Terraform to store sensitive data in the `.tfstate` file. Hence, encrypting the state should be
+mandatory.
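+
+With the S3 backend used by the `environment` root of this repository, this can be
+achieved by enabling server-side encryption of the state object; a minimal sketch
+(bucket, key and region come from your `backend.tfvars`):
+
+```hcl
+terraform {
+  backend "s3" {
+    encrypt = true
+  }
+}
+```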
+ + +#### How to use the module + +##### With email sending __enabled__ + +```hcl +module "brig_prekey_lock_and_event_queue_emailing" { + source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws-brig-prekey-lock-event-queue-email-sending?ref=CHANGE-ME" + + environment = "staging" + + zone_id = "Z12345678SQWERTYU" + domain = "example.com" +} +``` + +##### With email sending __disabled__ + +```hcl +module "brig_prekey_lock_and_event_queue" { + source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws-brig-prekey-lock-event-queue-email-sending?ref=CHANGE-ME" + + environment = "staging" + enable_email_sending = false # default: true +} +``` + +Outputs are used in [wire-server chart values](https://github.com/wireapp/wire-server-deploy/blob/a55d17afa5ac2f40bd50c5d0b907f60ac028377a/values/wire-server/prod-values.example.yaml#L27) diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/data.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/data.tf new file mode 100644 index 000000000..61bbd5f8b --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/data.tf @@ -0,0 +1,3 @@ +# NOTE: obtains region that is set in providers.tf by given variable +# +data "aws_region" "current" {} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/locals.mailing.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/locals.mailing.tf new file mode 100644 index 000000000..d1fe74bd6 --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/locals.mailing.tf @@ -0,0 +1,3 @@ +locals { + emailing_enabled = var.enable_email_sending ? 1 : 0 +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/main.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/main.tf new file mode 100644 index 000000000..9fc0f400c --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/main.tf @@ -0,0 +1,3 @@ +terraform { + required_version = "~> 1.1" +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/outputs.mailing.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/outputs.mailing.tf new file mode 100644 index 000000000..e683d5fcf --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/outputs.mailing.tf @@ -0,0 +1,5 @@ +# Output required to configure wire-server + +output "ses_endpoint" { + value = "https://email.${data.aws_region.current.name}.amazonaws.com" +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/outputs.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/outputs.tf new file mode 100644 index 000000000..a52fe09e4 --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/outputs.tf @@ -0,0 +1,18 @@ +# Output required to configure wire-server + +output "sqs_endpoint" { + value = "https://sqs.${data.aws_region.current.name}.amazonaws.com" +} + +output "dynamodb_endpoint" { + value = "https://dynamodb.${data.aws_region.current.name}.amazonaws.com" +} + +output "brig_access_key" { + value = aws_iam_access_key.brig.id +} + +output "brig_access_secret" { + value = aws_iam_access_key.brig.secret + sensitive = true +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.dns.mailing.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.dns.mailing.tf new file mode 100644 index 
000000000..f29408c92
--- /dev/null
+++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.dns.mailing.tf
@@ -0,0 +1,72 @@
+resource "aws_route53_record" "ses_domain_verification_record" {
+  count = local.emailing_enabled
+
+  zone_id = var.zone_id
+  name = "_amazonses.${var.domain}"
+  type = "TXT"
+  ttl = "600"
+  records = [aws_ses_domain_identity.brig[0].verification_token]
+}
+
+# Apparently, AWS hands out exactly 3 DKIM tokens, which is why one might find
+# examples that hard-code this number by setting `count = 3`
+# example: https://www.terraform.io/docs/providers/aws/r/ses_domain_dkim.html
+# docs: https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-authentication-dkim-easy-setup-domain.html
+#
+resource "aws_route53_record" "ses_domain_dkim_record" {
+  # FUTUREWORK: try replacing `3` with `length( aws_ses_domain_dkim.brig[0].dkim_tokens )`
+  count = var.enable_email_sending ? 3 : 0
+
+  zone_id = var.zone_id
+  name = "${element(aws_ses_domain_dkim.brig[0].dkim_tokens, count.index)}._domainkey.${var.domain}"
+  type = "CNAME"
+  ttl = "600"
+  records = ["${element(aws_ses_domain_dkim.brig[0].dkim_tokens, count.index)}.dkim.amazonses.com"]
+}
+
+resource "aws_route53_record" "ses_domain_spf" {
+  count = local.emailing_enabled
+
+  zone_id = var.zone_id
+  name = aws_ses_domain_identity.brig[0].domain
+  type = "TXT"
+  ttl = "600"
+  records = ["v=spf1 include:amazonses.com -all"]
+}
+
+# indicate compliance with SPF or DKIM
+# docs: https://dmarc.org/wiki/FAQ
+#       https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-authentication-dmarc.html
+#
+resource "aws_route53_record" "ses_domain_dmarc" {
+  count = local.emailing_enabled
+
+  zone_id = var.zone_id
+  name = "_dmarc.${aws_ses_domain_identity.brig[0].domain}"
+  type = "TXT"
+  ttl = "600"
+  records = ["v=DMARC1; p=quarantine; pct=25; rua=mailto:dmarcreports@${aws_ses_domain_identity.brig[0].domain}"]
+}
+
+# NOTE: in order to configure MAIL FROM
+# docs: https://www.terraform.io/docs/providers/aws/r/ses_domain_mail_from.html
+#
+resource "aws_route53_record" "ses_domain_mail_from_mx" {
+  count = local.emailing_enabled
+
+  zone_id = var.zone_id
+  name = aws_ses_domain_mail_from.brig[0].mail_from_domain
+  type = "MX"
+  ttl = "600"
+  records = ["10 feedback-smtp.${data.aws_region.current.name}.amazonses.com"]
+}
+
+resource "aws_route53_record" "ses_domain_mail_from_spf" {
+  count = local.emailing_enabled
+
+  zone_id = var.zone_id
+  name = aws_ses_domain_mail_from.brig[0].mail_from_domain
+  type = "TXT"
+  ttl = "600"
+  records = ["v=spf1 include:amazonses.com -all"]
+}
diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.dynamodb.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.dynamodb.tf
new file mode 100644
index 000000000..33152f2bc
--- /dev/null
+++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.dynamodb.tf
@@ -0,0 +1,15 @@
+# FUTUREWORK: Potentially look at autoscaling for DynamoDB
+# see: https://www.terraform.io/docs/providers/aws/r/appautoscaling_policy.html
+#
+resource "aws_dynamodb_table" "prekey_locks" {
+  name = "${var.environment}-brig-prekey-locks"
+  billing_mode = "PROVISIONED"
+  read_capacity = var.prekey_table_read_capacity
+  write_capacity = var.prekey_table_write_capacity
+  hash_key = "client"
+
+  attribute {
+    name = "client"
+    type = "S"
+  }
+}
diff --git
a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.iam.mailing.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.iam.mailing.tf new file mode 100644 index 000000000..ba7ab57f3 --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.iam.mailing.tf @@ -0,0 +1,50 @@ +resource "aws_iam_user_policy" "allow_brig_to_queue_email_events" { + count = local.emailing_enabled + + name = "${var.environment}-brig-email-events-queue-policy" + user = aws_iam_user.brig.name + + policy = <<-EOP + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sqs:DeleteMessage", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage" + ], + "Resource": [ + "${aws_sqs_queue.email_events[0].arn}" + ] + } + ] + } + EOP +} + +resource "aws_iam_user_policy" "allow_brig_to_send_emails" { + count = local.emailing_enabled + + name = "${var.environment}-brig-send-emails-policy" + user = aws_iam_user.brig.name + + policy = <<-EOP + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ses:SendEmail", + "ses:SendRawEmail" + ], + "Resource": [ + "*" + ] + } + ] + } + EOP +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.iam.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.iam.tf new file mode 100644 index 000000000..c9fe42a98 --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.iam.tf @@ -0,0 +1,57 @@ +resource "aws_iam_user" "brig" { + name = "${var.environment}-brig-full-access" + force_destroy = true +} + +resource "aws_iam_access_key" "brig" { + user = aws_iam_user.brig.name +} + +resource "aws_iam_user_policy" "allow_brig_to_lock_prekeys" { + name = "${var.environment}-brig-prekeys-policy" + user = aws_iam_user.brig.name + + policy = <<-EOP + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "dynamodb:GetItem", + "dynamodb:PutItem", + "dynamodb:DeleteItem" + ], + "Resource": [ + "${aws_dynamodb_table.prekey_locks.arn}" + ] + } + ] + } + EOP +} + +resource "aws_iam_user_policy" "allow_brig_to_queue_internal_events" { + name = "${var.environment}-brig-internal-events-queue-policy" + user = aws_iam_user.brig.name + + policy = <<-EOP + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sqs:DeleteMessage", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage", + "sqs:SendMessage" + ], + "Resource": [ + "${aws_sqs_queue.internal_events.arn}" + ] + } + ] + } + EOP +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.ses.mailing.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.ses.mailing.tf new file mode 100644 index 000000000..198956979 --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.ses.mailing.tf @@ -0,0 +1,43 @@ +resource "aws_ses_domain_identity" "brig" { + count = local.emailing_enabled + + domain = var.domain +} + +resource "aws_ses_email_identity" "brig" { + count = local.emailing_enabled + + email = "${var.sender_email_username}@${var.domain}" +} + +resource "aws_ses_domain_dkim" "brig" { + count = local.emailing_enabled + + domain = aws_ses_domain_identity.brig[0].domain +} + +resource "aws_ses_domain_mail_from" "brig" { + count = local.emailing_enabled + + domain = aws_ses_domain_identity.brig[0].domain + mail_from_domain = "${var.from_subdomain}.${var.domain}" +} + + +resource 
"aws_ses_identity_notification_topic" "bounce" { + count = local.emailing_enabled + + topic_arn = aws_sns_topic.email_notifications[0].arn + notification_type = "Bounce" + identity = aws_ses_email_identity.brig[0].arn + include_original_headers = false +} + +resource "aws_ses_identity_notification_topic" "complaint" { + count = local.emailing_enabled + + topic_arn = aws_sns_topic.email_notifications[0].arn + notification_type = "Complaint" + identity = aws_ses_email_identity.brig[0].arn + include_original_headers = false +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sns.mailling.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sns.mailling.tf new file mode 100644 index 000000000..7adf294cc --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sns.mailling.tf @@ -0,0 +1,14 @@ +resource "aws_sns_topic" "email_notifications" { + count = local.emailing_enabled + + name = aws_sqs_queue.email_events[0].name +} + +resource "aws_sns_topic_subscription" "notify_via_email" { + count = local.emailing_enabled + + topic_arn = aws_sns_topic.email_notifications[0].arn + protocol = "sqs" + endpoint = aws_sqs_queue.email_events[0].arn + raw_message_delivery = true +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sqs.mailing.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sqs.mailing.tf new file mode 100644 index 000000000..236f44c35 --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sqs.mailing.tf @@ -0,0 +1,35 @@ +resource "aws_sqs_queue" "email_events" { + count = local.emailing_enabled + + name = "${var.environment}-brig-email-events" +} + +# Ensure that the SNS topic is allowed to publish messages to the SQS queue + +resource "aws_sqs_queue_policy" "allow_email_notification_events" { + count = local.emailing_enabled + + queue_url = aws_sqs_queue.email_events[0].id + + policy = <<-EOP + { + "Version": "2012-10-17", + "Id": "${aws_sqs_queue.email_events[0].arn}/SQSDefaultPolicy", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": "SQS:SendMessage", + "Resource": "${aws_sqs_queue.email_events[0].arn}", + "Condition": { + "ArnEquals": { + "aws:SourceArn": "${aws_sns_topic.email_notifications[0].arn}" + } + } + } + ] + } + EOP +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sqs.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sqs.tf new file mode 100644 index 000000000..dae8437ff --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sqs.tf @@ -0,0 +1,5 @@ +# Create queues for internal events + +resource "aws_sqs_queue" "internal_events" { + name = "${var.environment}-brig-events-internal" +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/variables.mailing.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/variables.mailing.tf new file mode 100644 index 000000000..eebb188da --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/variables.mailing.tf @@ -0,0 +1,34 @@ +variable "enable_email_sending" { + type = bool + description = "flag to either hand off email sending to AWS or not" + default = true +} + +# NOTE: setting the default to `null` allows to omit this var when instantiating the module +# while still forcing it to be set, when email sending 
is enabled +# +variable "zone_id" { + type = string + description = "zone ID defined by a 'aws_route53_zone.zone_id' resource (example: Z12345678SQWERTYU)" + default = null +} +variable "domain" { + type = string + description = "FQDN of the email address that is used in 'From' when sending emails (example: example.com)" + default = null +} + +# As to why configuring a MAIL FROM +# docs: https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html#mail-from-overview +# +variable "from_subdomain" { + type = string + description = "subdomain that is prepended to domain and used to configue MAIL FROM for mails being sent" + default = "email" +} + +variable "sender_email_username" { + type = string + description = "username of the email address that is used in 'From' when sending emails (default: 'no-reply'; result: 'no-reply@$domain')" + default = "no-reply" +} diff --git a/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/variables.tf b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/variables.tf new file mode 100644 index 000000000..b7b45de68 --- /dev/null +++ b/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/variables.tf @@ -0,0 +1,25 @@ +variable "region" { + type = string + description = "defines in which region state and lock are being stored (default: 'eu-central-1')" + default = "eu-central-1" +} + +variable "environment" { + type = string + description = "name of the environment as a scope for the created resources (default: 'dev'; example: 'prod', 'staging')" + default = "dev" +} + +# NOTE: tweak to adjust performance/pricng ratio +# see: https://aws.amazon.com/dynamodb/pricing/provisioned/ +# +variable "prekey_table_read_capacity" { + type = number + description = "defines how many reads/sec allowed on the table (default: '10'; example: '100')" + default = 10 +} +variable "prekey_table_write_capacity" { + type = number + description = "defines how many writes/sec allowed on the table (default: '10'; example: '100')" + default = 10 +} diff --git a/terraform/modules/aws-cargohold-asset-storage/README.md b/terraform/modules/aws-cargohold-asset-storage/README.md new file mode 100644 index 000000000..598def577 --- /dev/null +++ b/terraform/modules/aws-cargohold-asset-storage/README.md @@ -0,0 +1,32 @@ +Terraform module: Cargohold Asset Storage +========================================= + +State: __experimental__ + +This module creates an Object Storage on AWS for cargohold to store encrypted assets. + +AWS resources: S3 + + +#### Important note + +This module causes Terraform to store sensitive data in the `.tfstate` file. Hence, encrypting the state should be +mandatory. 
+
+
+#### TODO
+
+* [ ] add cloudfront support
+
+
+#### How to use the module
+
+```hcl
+module "cargohold_asset_storage" {
+  source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws-cargohold-asset-storage?ref=CHANGE-ME"
+
+  environment = "staging"
+  vpc_id = "<the VPC to add the S3 endpoint to>"
+}
+```
+
+Outputs are used in [wire-server chart values](https://github.com/wireapp/wire-server-deploy/blob/a55d17afa5ac2f40bd50c5d0b907f60ac028377a/values/wire-server/prod-values.example.yaml#L95)
diff --git a/terraform/modules/aws-cargohold-asset-storage/main.tf b/terraform/modules/aws-cargohold-asset-storage/main.tf
new file mode 100644
index 000000000..9fc0f400c
--- /dev/null
+++ b/terraform/modules/aws-cargohold-asset-storage/main.tf
@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 1.1"
+}
diff --git a/terraform/modules/aws-cargohold-asset-storage/outputs.tf b/terraform/modules/aws-cargohold-asset-storage/outputs.tf
new file mode 100644
index 000000000..856b68954
--- /dev/null
+++ b/terraform/modules/aws-cargohold-asset-storage/outputs.tf
@@ -0,0 +1,30 @@
+# Output required to configure wire-server
+
+output "bucket_name" {
+  value = aws_s3_bucket.asset_storage.bucket
+}
+
+output "bucket_id" {
+  value = aws_s3_bucket.asset_storage.id
+}
+
+output "s3_endpoint" {
+  value = "https://s3.${aws_s3_bucket.asset_storage.region}.amazonaws.com"
+}
+
+output "s3_endpoint_id" {
+  value = aws_vpc_endpoint.s3.id
+}
+
+output "cargohold_access_key" {
+  value = aws_iam_access_key.cargohold.id
+}
+
+output "cargohold_access_secret" {
+  value = aws_iam_access_key.cargohold.secret
+  sensitive = true
+}
+
+output "talk_to_S3" {
+  value = aws_security_group.talk_to_S3.id
+}
diff --git a/terraform/modules/aws-cargohold-asset-storage/resources.iam.tf b/terraform/modules/aws-cargohold-asset-storage/resources.iam.tf
new file mode 100644
index 000000000..24e1a346d
--- /dev/null
+++ b/terraform/modules/aws-cargohold-asset-storage/resources.iam.tf
@@ -0,0 +1,87 @@
+resource "aws_iam_user" "cargohold" {
+  name = "${var.environment}-cargohold-full-access"
+  force_destroy = true
+}
+
+resource "aws_iam_access_key" "cargohold" {
+  user = aws_iam_user.cargohold.name
+}
+
+# Create a specific user that can be used to access the bucket and the files within
+#
+resource "aws_iam_user_policy" "cargohold" {
+  name = "${var.environment}-cargohold-full-access-policy"
+  user = aws_iam_user.cargohold.name
+
+  policy = <<-EOP
+    {
+      "Version": "2012-10-17",
+      "Statement": [
+        {
+          "Effect": "Allow",
+          "Action": [
+            "s3:GetObject",
+            "s3:ListBucket",
+            "s3:PutObject",
+            "s3:DeleteObject"
+          ],
+          "Resource": [
+            "${aws_s3_bucket.asset_storage.arn}/*",
+            "${aws_s3_bucket.asset_storage.arn}"
+          ]
+        }
+      ]
+    }
+  EOP
+}
+
+# Create a policy that can be applied to a role, and can be used to access the bucket and the files within.
+resource "aws_iam_policy" "cargohold-s3" {
+  name = "${var.environment}-cargohold-s3"
+  policy = <<-EOP
+    {
+      "Version": "2012-10-17",
+      "Statement": [
+        {
+          "Effect": "Allow",
+          "Action": "s3:*",
+          "Resource": "*"
+        }
+      ]
+    }
+  EOP
+}
+
+# Create an IAM role that can be applied to an instance, and can be used to access the bucket and the files within.
+resource "aws_iam_role" "cargohold-s3" {
+  name = "${var.environment}-cargohold-s3"
+  description = "provide access to S3 for cargohold."
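+  # NOTE: the trust policy below allows EC2 instances (and only those) to assume
+  # this role via sts:AssumeRole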
+ assume_role_policy = <<-EOP + { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Effect": "Allow", + "Sid": "" + } + ] + } + EOP + tags = { + Name = "${var.environment}-cargohold-s3", + Environment = "${var.environment}" + Gateway = "cargohold-s3" + } +} + + +# attach our IAM policy to our IAM role. +resource "aws_iam_policy_attachment" "cargohold-s3-attach" { + name = "${var.environment}-cargohold-s3" + roles = [aws_iam_role.cargohold-s3.name] + policy_arn = aws_iam_policy.cargohold-s3.arn +} diff --git a/terraform/modules/aws-cargohold-asset-storage/resources.s3.tf b/terraform/modules/aws-cargohold-asset-storage/resources.s3.tf new file mode 100644 index 000000000..0973186d3 --- /dev/null +++ b/terraform/modules/aws-cargohold-asset-storage/resources.s3.tf @@ -0,0 +1,36 @@ +resource "aws_s3_bucket" "asset_storage" { + bucket = "${random_string.bucket.keepers.env}-${random_string.bucket.keepers.name}-cargohold-${random_string.bucket.result}" + acl = "private" + region = var.region + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["GET", "HEAD"] + allowed_origins = ["*"] + max_age_seconds = 3000 + } + +} + +resource "random_string" "bucket" { + length = 8 + lower = true + upper = false + number = true + special = false + + keepers = { + env = var.environment + name = var.bucket_name + } +} + +resource "aws_vpc_endpoint" "s3" { + vpc_id = var.vpc_id + service_name = "com.amazonaws.${var.region}.s3" + + tags = { + Environment = var.environment + } +} + diff --git a/terraform/modules/aws-cargohold-asset-storage/resources.security_groups.tf b/terraform/modules/aws-cargohold-asset-storage/resources.security_groups.tf new file mode 100644 index 000000000..8692f388c --- /dev/null +++ b/terraform/modules/aws-cargohold-asset-storage/resources.security_groups.tf @@ -0,0 +1,17 @@ +resource "aws_security_group" "talk_to_S3" { + name = "talk_to_S3" + description = "hosts that are allowed to talk to S3." + vpc_id = var.vpc_id + + egress { + description = "" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = aws_vpc_endpoint.s3.cidr_blocks + } + + tags = { + Name = "talk_to_S3" + } +} diff --git a/terraform/modules/aws-cargohold-asset-storage/variables.tf b/terraform/modules/aws-cargohold-asset-storage/variables.tf new file mode 100644 index 000000000..28bb5afcb --- /dev/null +++ b/terraform/modules/aws-cargohold-asset-storage/variables.tf @@ -0,0 +1,24 @@ +variable "region" { + type = string + description = "defines in which region state and lock are being stored (default: 'eu-central-1')" + default = "eu-central-1" +} + +variable "environment" { + type = string + description = "name of the environment as a scope for the created resources (default: 'dev'; example: 'prod', 'staging')" + default = "dev" +} + +variable "bucket_name" { + type = string + description = "Name of the bucket that cargohold uses to store files (default: 'assets'; prefix: $environment) " + default = "assets" +} + +variable "vpc_id" { + type = string + description = "the ID of the VPC to add an S3 endpoint to" +} + + diff --git a/terraform/modules/aws-dns-records/README.md b/terraform/modules/aws-dns-records/README.md new file mode 100644 index 000000000..fbe3c1605 --- /dev/null +++ b/terraform/modules/aws-dns-records/README.md @@ -0,0 +1,57 @@ +Terraform module: DNS records +============================= + +State: __experimental__ + +This module creates a set of DNS entries on AWS. 
As of now it's capable of managing the following types of records:
+
+* A (`ips`)
+* CNAME (`cnames`)
+
+AWS resources: route53
+
+
+#### How to use the module
+
+Assuming you already have a root zone with fqdn `example.com` set up elsewhere in route53, example usage:
+
+```hcl
+module "dns_records" {
+  source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws-dns-records?ref=CHANGE-ME"
+
+  zone_fqdn = "example.com"
+  domain = "staging"
+  subdomains = [
+    "nginz-https",
+    "nginz-ssl",
+    "webapp",
+    "assets",
+    "account",
+    "teams"
+  ]
+  ips = [ "9.9.9.10", "23.42.23.42" ]
+
+  # Optional
+  spf_record_ips = [ "9.9.9.10", "23.42.23.42" ]
+
+  # Optional
+  srvs = { "_wire-server._tcp" = [ "0 10 443 nginz-https" ] }
+}
+```
+
+This creates entries for the following FQDNs:
+
+* `nginz-https.staging.example.com`
+* `nginz-ssl.staging.example.com`
+* `webapp.staging.example.com`
+* `assets.staging.example.com`
+* `account.staging.example.com`
+* `teams.staging.example.com`
+
+It also creates a TXT SPF record for your mail server on `staging.example.com` with a value `"v=spf1 ip4:9.9.9.10 ip4:23.42.23.42 -all"`,
+as well as an SRV record `_wire-server._tcp.staging.example.com` pointing to `0 10 443 nginz-https.staging.example.com`.
+
+These sub-domains represent the primary set of FQDNs used in a
+[`wire-server` installation](https://docs.wire.com/how-to/install/helm-prod.html#how-to-set-up-dns-records),
+to expose all frontend applications as well as necessary HTTP & websocket endpoints.
diff --git a/terraform/modules/aws-dns-records/data.tf b/terraform/modules/aws-dns-records/data.tf
new file mode 100644
index 000000000..5d0642c35
--- /dev/null
+++ b/terraform/modules/aws-dns-records/data.tf
@@ -0,0 +1,3 @@
+data "aws_route53_zone" "rz" {
+  name = "${var.zone_fqdn}."
+}
diff --git a/terraform/modules/aws-dns-records/locals.tf b/terraform/modules/aws-dns-records/locals.tf
new file mode 100644
index 000000000..d9d9f34ad
--- /dev/null
+++ b/terraform/modules/aws-dns-records/locals.tf
@@ -0,0 +1,6 @@
+locals {
+  name_suffix = concat(
+    var.domain != null ? [var.domain] : [],
+    [var.zone_fqdn]
+  )
+}
diff --git a/terraform/modules/aws-dns-records/main.tf b/terraform/modules/aws-dns-records/main.tf
new file mode 100644
index 000000000..9fc0f400c
--- /dev/null
+++ b/terraform/modules/aws-dns-records/main.tf
@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 1.1"
+}
diff --git a/terraform/modules/aws-dns-records/outputs.tf b/terraform/modules/aws-dns-records/outputs.tf
new file mode 100644
index 000000000..288bcbfb7
--- /dev/null
+++ b/terraform/modules/aws-dns-records/outputs.tf
@@ -0,0 +1,6 @@
+output "fqdns" {
+  value = concat(
+    [for record in aws_route53_record.a : record.fqdn],
+    [for record in aws_route53_record.cname : record.fqdn]
+  )
+}
diff --git a/terraform/modules/aws-dns-records/resources.route53.tf b/terraform/modules/aws-dns-records/resources.route53.tf
new file mode 100644
index 000000000..ba70b75e7
--- /dev/null
+++ b/terraform/modules/aws-dns-records/resources.route53.tf
@@ -0,0 +1,47 @@
+resource "aws_route53_record" "a" {
+  for_each = toset(length(var.ips) > 0 ? var.subdomains : [])
+
+  zone_id = data.aws_route53_zone.rz.zone_id
+  name = join(".", concat([each.value], local.name_suffix))
+  type = "A"
+  ttl = var.ttl
+  records = var.ips
+}
+
+
+resource "aws_route53_record" "cname" {
+  for_each = toset(length(var.cnames) > 0 ?
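+    # NOTE: like the A records above, one CNAME record is created per entry in
+    # 'subdomains', but only when a non-empty 'cnames' list is given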
diff --git a/terraform/modules/aws-dns-records/data.tf b/terraform/modules/aws-dns-records/data.tf
new file mode 100644
index 000000000..5d0642c35
--- /dev/null
+++ b/terraform/modules/aws-dns-records/data.tf
@@ -0,0 +1,3 @@
+data "aws_route53_zone" "rz" {
+  name = "${var.zone_fqdn}."
+}
diff --git a/terraform/modules/aws-dns-records/locals.tf b/terraform/modules/aws-dns-records/locals.tf
new file mode 100644
index 000000000..d9d9f34ad
--- /dev/null
+++ b/terraform/modules/aws-dns-records/locals.tf
@@ -0,0 +1,6 @@
+locals {
+  name_suffix = concat(
+    var.domain != null ? [var.domain] : [],
+    [var.zone_fqdn]
+  )
+}
diff --git a/terraform/modules/aws-dns-records/main.tf b/terraform/modules/aws-dns-records/main.tf
new file mode 100644
index 000000000..9fc0f400c
--- /dev/null
+++ b/terraform/modules/aws-dns-records/main.tf
@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 1.1"
+}
diff --git a/terraform/modules/aws-dns-records/outputs.tf b/terraform/modules/aws-dns-records/outputs.tf
new file mode 100644
index 000000000..288bcbfb7
--- /dev/null
+++ b/terraform/modules/aws-dns-records/outputs.tf
@@ -0,0 +1,6 @@
+output "fqdns" {
+  value = concat(
+    [for record in aws_route53_record.a : record.fqdn],
+    [for record in aws_route53_record.cname : record.fqdn]
+  )
+}
diff --git a/terraform/modules/aws-dns-records/resources.route53.tf b/terraform/modules/aws-dns-records/resources.route53.tf
new file mode 100644
index 000000000..ba70b75e7
--- /dev/null
+++ b/terraform/modules/aws-dns-records/resources.route53.tf
@@ -0,0 +1,47 @@
+resource "aws_route53_record" "a" {
+  for_each = toset(length(var.ips) > 0 ? var.subdomains : [])
+
+  zone_id = data.aws_route53_zone.rz.zone_id
+  name    = join(".", concat([each.value], local.name_suffix))
+  type    = "A"
+  ttl     = var.ttl
+  records = var.ips
+}
+
+
+resource "aws_route53_record" "cname" {
+  for_each = toset(length(var.cnames) > 0 ? var.subdomains : [])
+
+  zone_id = data.aws_route53_zone.rz.zone_id
+  name    = join(".", concat([each.value], local.name_suffix))
+  type    = "CNAME"
+  ttl     = var.ttl
+  records = var.cnames
+}
+
+resource "aws_route53_record" "spf" {
+  count = length(var.spf_record_ips) > 0 ? 1 : 0
+
+  zone_id = data.aws_route53_zone.rz.zone_id
+  name    = join(".", local.name_suffix)
+  type    = "TXT"
+  ttl     = "60"
+  records = [
+    join(" ", concat(
+      ["v=spf1"],
+      [for ip in var.spf_record_ips : "ip4:${ip}"],
+      ["-all"]
+    ))
+  ]
+}
+
+resource "aws_route53_record" "srv-server" {
+  for_each = var.srvs
+
+  zone_id = data.aws_route53_zone.rz.zone_id
+  name    = join(".", concat([each.key], local.name_suffix))
+  type    = "SRV"
+  ttl     = "60"
+
+  records = [for t in each.value : join(".", concat([t], local.name_suffix))]
+}
diff --git a/terraform/modules/aws-dns-records/variables.tf b/terraform/modules/aws-dns-records/variables.tf
new file mode 100644
index 000000000..cb2538d55
--- /dev/null
+++ b/terraform/modules/aws-dns-records/variables.tf
@@ -0,0 +1,45 @@
+variable "zone_fqdn" {
+  type        = string
+  description = "FQDN of the DNS zone root (required; example: example.com; will append: '.')"
+}
+
+variable "domain" {
+  type        = string
+  description = "name of the sub-tree all given subdomains are appended to (default: not set; example: $subdomains[0].$domain.$zone_fqdn)"
+  default     = null
+}
+
+variable "subdomains" {
+  type        = list(string)
+  description = "list of sub-domains that will be registered directly under the given zone, or otherwise under domain if defined"
+}
+
+variable "ips" {
+  type        = list(string)
+  description = "a list of IPs used to create A records for the given list of subdomains"
+  default     = []
+}
+
+variable "cnames" {
+  type        = list(string)
+  description = "a list of FQDNs used to create CNAME records for the given list of subdomains"
+  default     = []
+}
+
+variable "ttl" {
+  type        = number
+  description = "time to live for the DNS entries (defaults to 1 minute)"
+  default     = 60
+}
+
+variable "spf_record_ips" {
+  type        = list(string)
+  description = "list of IPs converted into a list of 'ip4' mechanisms"
+  default     = []
+}
+
+variable "srvs" {
+  type        = map(list(string))
+  description = "map of SRV records and their list of targets. All strings (record and targets) get an automatic suffix of '.domain.zone_fqdn'. See module README for an example."
+  default     = {}
+}
diff --git a/terraform/modules/aws-network-load-balancer/README.md b/terraform/modules/aws-network-load-balancer/README.md
new file mode 100644
index 000000000..10f4bcc96
--- /dev/null
+++ b/terraform/modules/aws-network-load-balancer/README.md
@@ -0,0 +1,71 @@
+Terraform module: Network load balancer
+=======================================
+
+State: __experimental__
+
+This module creates a network load balancer for HTTP (port 80) and HTTPS (port 443) traffic.
+It uses a *target group* for each port and attaches all instances that share the given *role*
+to each group. It also uses the given target ports for health checks.
+
+Load balancing happens across availability zones. The VPC is determined by the given environment.
+The subnets used within the VPC are assumed to
+
+a) have an internet gateway
+b) be attached to the machines referred to by IP via the list of `node_ips`
+
+*Please note, in order for this to work, ingress has to be allowed on the given target ports on all target machines.
+Furthermore, since those target machines - referred to by IP - are not part of an auto-scaling group, the instance of
+this module has to be re-applied every time the set of machines changes.*
+
+AWS resources: lb (type: network)
+
+#### How to use the module
+
+```hcl
+module "nlb" {
+  source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws-network-load-balancer?ref=CHANGE-ME"
+
+  environment = "staging"
+
+  node_ips   = ["10.0.23.17", "10.0.42.78", "10.0.222.171"]
+  subnet_ids = ["subnet-0001", "subnet-0002", "subnet-0003"]
+  aws_vpc_id = "vpc-0001"
+
+  node_port_http  = 3000
+  node_port_https = 3001
+}
+```
+
+One way to generate the IP and subnet lists is to refer to the respective resources, or to
+attributes of another resource (e.g. the VPC). Alternatively, you may want to obtain those lists
+with the help of data sources, e.g.
+
+```hcl
+data "aws_subnet_ids" "public" {
+  vpc_id = var.vpc_id
+
+  filter {
+    name   = "tag:Environment"
+    values = ["staging"]
+  }
+
+  filter {
+    name   = "tag:Routability"
+    values = ["public"]
+  }
+}
+
+
+data "aws_instances" "nodes" {
+  filter {
+    name   = "tag:Environment"
+    values = ["staging"]
+  }
+
+  filter {
+    name   = "tag:Role"
+    values = [ "kubenode" ]
+  }
+
+  instance_state_names = ["running"]
+}
+```
\ No newline at end of file
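Tying the two together, those data sources could, hypothetically, feed the module
directly (the `staging` tag values and the `vpc_id` variable are assumptions carried
over from the examples above):

```hcl
module "nlb" {
  source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws-network-load-balancer?ref=CHANGE-ME"

  environment = "staging"

  # `aws_instances` exposes the matching instances' private IPs,
  # `aws_subnet_ids` the IDs of the matching subnets
  node_ips   = data.aws_instances.nodes.private_ips
  subnet_ids = data.aws_subnet_ids.public.ids

  aws_vpc_id = var.vpc_id
}
```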
diff --git a/charts/reaper/values.yaml b/terraform/modules/aws-network-load-balancer/data.tf
similarity index 100%
rename from charts/reaper/values.yaml
rename to terraform/modules/aws-network-load-balancer/data.tf
diff --git a/terraform/modules/aws-network-load-balancer/main.tf b/terraform/modules/aws-network-load-balancer/main.tf
new file mode 100644
index 000000000..9fc0f400c
--- /dev/null
+++ b/terraform/modules/aws-network-load-balancer/main.tf
@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 1.1"
+}
diff --git a/terraform/modules/aws-network-load-balancer/outputs.tf b/terraform/modules/aws-network-load-balancer/outputs.tf
new file mode 100644
index 000000000..dd14ad914
--- /dev/null
+++ b/terraform/modules/aws-network-load-balancer/outputs.tf
@@ -0,0 +1,3 @@
+output "fqdn" {
+  value = aws_lb.nlb.dns_name
+}
diff --git a/terraform/modules/aws-network-load-balancer/resources.lb.tf b/terraform/modules/aws-network-load-balancer/resources.lb.tf
new file mode 100644
index 000000000..d5b65e3bb
--- /dev/null
+++ b/terraform/modules/aws-network-load-balancer/resources.lb.tf
@@ -0,0 +1,107 @@
+resource "aws_lb" "nlb" {
+  name = "${var.environment}-loadbalancer"
+
+  internal                         = false
+  load_balancer_type               = "network"
+  enable_cross_zone_load_balancing = true
+
+  subnets = var.subnet_ids
+
+  tags = {
+    Environment = var.environment
+  }
+}
+
+
+resource "aws_lb_listener" "ingress-http" {
+  load_balancer_arn = aws_lb.nlb.arn
+
+  port     = 80
+  protocol = "TCP"
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.nodes-http.arn
+  }
+}
+
+
+resource "aws_lb_target_group" "nodes-http" {
+  name = "${var.environment}-nodes-http"
+
+  vpc_id = var.aws_vpc_id
+
+  # NOTE: using "instance" - as an alternative type - does not work due to the way security groups are being
+  #       configured (VPC CIDR vs NLB network IP addresses)
+  # SRC: https://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#target-security-groups
+  # DOC: https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html
+  target_type = "ip"
+  port        = var.node_port_http
+  protocol    = "TCP"
+
+  # docs: https://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-health-checks.html
+  #
+  health_check {
+    protocol = "TCP"
+    port     = var.node_port_http
+    interval = 30 # NOTE: 10 or 30 seconds
+    # NOTE: defaults to 10 for TCP and is not allowed to be set when using an NLB
+    # timeout = 10
+  }
+
+  tags = {
+    Environment = var.environment
+  }
+}
+
+
+resource "aws_lb_target_group_attachment" "each-node-http" {
+  count = length(var.node_ips)
+
+  target_group_arn = aws_lb_target_group.nodes-http.arn
+  port             = aws_lb_target_group.nodes-http.port
+  target_id        = var.node_ips[count.index]
+}
+
+
+resource "aws_lb_listener" "ingress-https" {
+  load_balancer_arn = aws_lb.nlb.arn
+
+  port     = 443
+  protocol = "TCP"
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.nodes-https.arn
+  }
+}
+
+
+resource "aws_lb_target_group" "nodes-https" {
+  name = "${var.environment}-nodes-https"
+
+  vpc_id = var.aws_vpc_id
+
+  target_type = "ip"
+  port        = var.node_port_https
+  protocol    = "TCP"
+
+  health_check {
+    protocol = "TCP"
+    port     = var.node_port_https
+    interval = 30
+  }
+
+  tags = {
+    Environment = var.environment
+  }
+}
+
+
+resource "aws_lb_target_group_attachment" "each-node-https" {
+  count = length(var.node_ips)
+
+  target_group_arn = aws_lb_target_group.nodes-https.arn
+  port             = aws_lb_target_group.nodes-https.port
+  target_id        = var.node_ips[count.index]
+}
diff --git a/terraform/modules/aws-network-load-balancer/variables.tf b/terraform/modules/aws-network-load-balancer/variables.tf
new file mode 100644
index 000000000..3594af862
--- /dev/null
+++ b/terraform/modules/aws-network-load-balancer/variables.tf
@@ -0,0 +1,32 @@
+variable "environment" {
+  type        = string
+  description = "name of the environment as a scope for the created resources (default: 'dev'; example: 'prod', 'staging')"
+  default     = "dev"
+}
+
+variable "node_port_http" {
+  type        = number
+  description = "HTTP port on the target machines that the LB forwards ingress from port 80 to"
+  default     = 8080
+}
+
+variable "node_port_https" {
+  type        = number
+  description = "HTTPS port on the target machines that the LB forwards ingress from port 443 to"
+  default     = 8443
+}
+
+variable "node_ips" {
+  type        = list(string)
+  description = "a list of private IPs from all nodes the load balancer forwards traffic to"
+}
+
+variable "subnet_ids" {
+  type        = list(string)
+  description = "a list of IDs from subnets where the nodes are part of, and the load balancer egress is attached to"
+}
+
+variable "aws_vpc_id" {
+  type        = string
+  description = "the ID of the VPC we are adding our targets to."
+} diff --git a/terraform/modules/aws_terraform-state-share/README.md b/terraform/modules/aws-terraform-state-share/README.md similarity index 97% rename from terraform/modules/aws_terraform-state-share/README.md rename to terraform/modules/aws-terraform-state-share/README.md index 5b6e0ad96..ef7fbaf31 100644 --- a/terraform/modules/aws_terraform-state-share/README.md +++ b/terraform/modules/aws-terraform-state-share/README.md @@ -14,7 +14,7 @@ It makes use of the following AWS services: The module can be used in the following way ``` module "initiate-tf-state-sharing" { - source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws_terraform-state-share" + source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws-terraform-state-share" bucket_name = "myBucketName" table_name = "myTableName" } diff --git a/terraform/modules/aws-terraform-state-share/main.tf b/terraform/modules/aws-terraform-state-share/main.tf new file mode 100644 index 000000000..9fc0f400c --- /dev/null +++ b/terraform/modules/aws-terraform-state-share/main.tf @@ -0,0 +1,3 @@ +terraform { + required_version = "~> 1.1" +} diff --git a/terraform/modules/aws_terraform-state-share/resources.tf b/terraform/modules/aws-terraform-state-share/resources.tf similarity index 100% rename from terraform/modules/aws_terraform-state-share/resources.tf rename to terraform/modules/aws-terraform-state-share/resources.tf diff --git a/terraform/modules/aws_terraform-state-share/variables.tf b/terraform/modules/aws-terraform-state-share/variables.tf similarity index 100% rename from terraform/modules/aws_terraform-state-share/variables.tf rename to terraform/modules/aws-terraform-state-share/variables.tf diff --git a/terraform/modules/aws_vpc/README.md b/terraform/modules/aws-vpc-security-groups/README.md similarity index 100% rename from terraform/modules/aws_vpc/README.md rename to terraform/modules/aws-vpc-security-groups/README.md diff --git a/terraform/modules/aws_vpc_security_groups/main.tf b/terraform/modules/aws-vpc-security-groups/main.tf similarity index 79% rename from terraform/modules/aws_vpc_security_groups/main.tf rename to terraform/modules/aws-vpc-security-groups/main.tf index 65c3424d6..87ec45794 100644 --- a/terraform/modules/aws_vpc_security_groups/main.tf +++ b/terraform/modules/aws-vpc-security-groups/main.tf @@ -11,7 +11,7 @@ resource "aws_security_group" "world_ssh_in" { to_port = 22 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - } + } tags = { Name = "world_ssh_in" @@ -29,13 +29,13 @@ resource "aws_security_group" "world_web_out" { to_port = 80 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - } + } egress { from_port = 443 to_port = 443 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - } + } tags = { Name = "world_web_out" @@ -67,9 +67,9 @@ resource "aws_security_group" "has_ssh" { vpc_id = var.vpc_id ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" + from_port = 22 + to_port = 22 + protocol = "tcp" security_groups = ["${aws_security_group.ssh_from.id}"] } @@ -106,7 +106,7 @@ resource "aws_security_group" "talk_to_assets" { to_port = 123 protocol = "udp" cidr_blocks = ["172.17.0.0/20"] - } + } # HTTP egress { @@ -114,7 +114,7 @@ resource "aws_security_group" "talk_to_assets" { to_port = 80 protocol = "tcp" cidr_blocks = ["172.17.0.0/20"] - } + } # HTTPS egress { @@ -122,7 +122,7 @@ resource "aws_security_group" "talk_to_assets" { to_port = 443 protocol = "tcp" cidr_blocks = ["172.17.0.0/20"] - } + } tags = { Name = "talk_to_assets" @@ -136,61 +136,72 @@ resource 
"aws_security_group" "has_assets" { vpc_id = var.vpc_id ingress { - from_port = 53 - to_port = 53 - protocol = "tcp" + from_port = 53 + to_port = 53 + protocol = "tcp" security_groups = ["${aws_security_group.talk_to_assets.id}"] } ingress { - from_port = 53 - to_port = 53 - protocol = "udp" + from_port = 53 + to_port = 53 + protocol = "udp" security_groups = ["${aws_security_group.talk_to_assets.id}"] } # Time ingress { - from_port = 123 - to_port = 123 - protocol = "udp" + from_port = 123 + to_port = 123 + protocol = "udp" security_groups = ["${aws_security_group.talk_to_assets.id}"] - } + } # HTTP ingress { - from_port = 80 - to_port = 80 - protocol = "tcp" + from_port = 80 + to_port = 80 + protocol = "tcp" security_groups = ["${aws_security_group.talk_to_assets.id}"] - } + } # HTTPS ingress { - from_port = 443 - to_port = 443 - protocol = "tcp" + from_port = 443 + to_port = 443 + protocol = "tcp" security_groups = ["${aws_security_group.talk_to_assets.id}"] - } + } tags = { Name = "has_assets" } } -# A security group for access to kubernetes nodes. should be added to the admin host only. +# A security group for administrative access to kubernetes nodes. should be added to the admin host only. resource "aws_security_group" "talk_to_k8s" { name = "talk_to_k8s" description = "hosts that are allowed to speak to kubernetes." vpc_id = var.vpc_id + # kubectl egress { + description = "" from_port = 6443 to_port = 6443 protocol = "tcp" cidr_blocks = ["172.17.0.0/20"] } + # the application itsself. + egress { + description = "" + from_port = 31772 + to_port = 31773 + protocol = "tcp" + cidr_blocks = ["172.17.0.0/20"] + } + tags = { Name = "talk_to_k8s" } @@ -204,28 +215,41 @@ resource "aws_security_group" "k8s_node" { # incoming from the admin node (kubectl) ingress { - from_port = 6443 - to_port = 6443 - protocol = "tcp" + description = "" + from_port = 6443 + to_port = 6443 + protocol = "tcp" security_groups = ["${aws_security_group.talk_to_k8s.id}"] } # FIXME: tighten this up. ingress { - from_port = 0 - to_port = 65535 - protocol = "tcp" + description = "" + from_port = 0 + to_port = 65535 + protocol = "tcp" security_groups = ["${aws_security_group.k8s_private.id}"] } # FIXME: tighten this up. need UDP for flannel. ingress { - from_port = 0 - to_port = 65535 - protocol = "udp" + description = "" + from_port = 0 + to_port = 65535 + protocol = "udp" security_groups = ["${aws_security_group.k8s_private.id}"] } + # incoming traffic to the application. + ingress { + from_port = 31772 + to_port = 31773 + protocol = "tcp" + # NOTE: NLBs dont allow security groups to be set on them, which is why + # we go with the CIDR for now, which is hard-coded and needs fixing + cidr_blocks = ["172.17.0.0/20"] + } + tags = { Name = "k8s_node" } @@ -295,7 +319,7 @@ resource "aws_security_group" "talk_to_stateful" { to_port = 9092 protocol = "tcp" cidr_blocks = ["172.17.0.0/20"] - } + } tags = { Name = "talk_to_stateful" @@ -346,7 +370,7 @@ resource "aws_security_group" "stateful_private" { to_port = 9092 protocol = "tcp" cidr_blocks = ["172.17.0.0/20"] - } + } tags = { Name = "stateful_private" @@ -361,73 +385,73 @@ resource "aws_security_group" "stateful_node" { # incoming cassandra clients ingress { - from_port = 9042 - to_port = 9042 - protocol = "tcp" + from_port = 9042 + to_port = 9042 + protocol = "tcp" security_groups = ["${aws_security_group.talk_to_stateful.id}"] } # incoming elasticsearch clients. 
ingress { - from_port = 9200 - to_port = 9200 - protocol = "tcp" + from_port = 9200 + to_port = 9200 + protocol = "tcp" security_groups = ["${aws_security_group.talk_to_stateful.id}"] } # incoming minio clients. ingress { - from_port = 9000 - to_port = 9000 - protocol = "tcp" + from_port = 9000 + to_port = 9000 + protocol = "tcp" security_groups = ["${aws_security_group.talk_to_stateful.id}"] } # incoming minio clients. ingress { - from_port = 9092 - to_port = 9092 - protocol = "tcp" + from_port = 9092 + to_port = 9092 + protocol = "tcp" security_groups = ["${aws_security_group.talk_to_stateful.id}"] } # other cassandra nodes (non-TLS) ingress { - from_port = 7000 - to_port = 7000 - protocol = "tcp" + from_port = 7000 + to_port = 7000 + protocol = "tcp" security_groups = ["${aws_security_group.stateful_private.id}"] } # other cassandra nodes (TLS) ingress { - from_port = 9160 - to_port = 9160 - protocol = "tcp" + from_port = 9160 + to_port = 9160 + protocol = "tcp" security_groups = ["${aws_security_group.stateful_private.id}"] } # other elasticsearch nodes ingress { - from_port = 9300 - to_port = 9300 - protocol = "tcp" + from_port = 9300 + to_port = 9300 + protocol = "tcp" security_groups = ["${aws_security_group.stateful_private.id}"] } # other minio nodes ingress { - from_port = 9000 - to_port = 9000 - protocol = "tcp" + from_port = 9000 + to_port = 9000 + protocol = "tcp" security_groups = ["${aws_security_group.stateful_private.id}"] } # other minio nodes ingress { - from_port = 9092 - to_port = 9092 - protocol = "tcp" + from_port = 9092 + to_port = 9092 + protocol = "tcp" security_groups = ["${aws_security_group.stateful_private.id}"] } diff --git a/terraform/modules/aws-vpc-security-groups/outputs.tf b/terraform/modules/aws-vpc-security-groups/outputs.tf new file mode 100644 index 000000000..d45763528 --- /dev/null +++ b/terraform/modules/aws-vpc-security-groups/outputs.tf @@ -0,0 +1,51 @@ +# the world can ssh to this instance. +output "world_ssh_in" { + value = aws_security_group.world_ssh_in.id +} + +# this instance can SSH into other boxes in the VPC. +output "ssh_from" { + value = aws_security_group.ssh_from.id +} + +# apply to boxes you want "ssh_from" hosts to be able to talk to. +output "has_ssh" { + value = aws_security_group.has_ssh.id +} + +output "world_web_out" { + value = aws_security_group.world_web_out.id +} + +output "talk_to_assets" { + value = aws_security_group.talk_to_assets.id +} + +output "has_assets" { + value = aws_security_group.has_assets.id +} + +output "talk_to_stateful" { + value = aws_security_group.talk_to_stateful.id +} + +output "stateful_node" { + value = aws_security_group.stateful_node.id +} + +output "stateful_private" { + value = aws_security_group.stateful_private.id +} + +output "talk_to_k8s" { + value = aws_security_group.talk_to_k8s.id +} + +output "k8s_private" { + value = aws_security_group.k8s_private.id +} + +output "k8s_node" { + value = aws_security_group.k8s_node.id +} + diff --git a/terraform/modules/aws-vpc-security-groups/variables.tf b/terraform/modules/aws-vpc-security-groups/variables.tf new file mode 100644 index 000000000..106bab921 --- /dev/null +++ b/terraform/modules/aws-vpc-security-groups/variables.tf @@ -0,0 +1,5 @@ +variable "vpc_id" { + type = string + description = "ID of VPC these security groups are for." 
+} + diff --git a/terraform/modules/aws_vpc_security_groups/README.md b/terraform/modules/aws-vpc/README.md similarity index 100% rename from terraform/modules/aws_vpc_security_groups/README.md rename to terraform/modules/aws-vpc/README.md diff --git a/terraform/modules/aws_vpc/main.tf b/terraform/modules/aws-vpc/main.tf similarity index 57% rename from terraform/modules/aws_vpc/main.tf rename to terraform/modules/aws-vpc/main.tf index 04563c3f7..ac4136d65 100644 --- a/terraform/modules/aws_vpc/main.tf +++ b/terraform/modules/aws-vpc/main.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 0.12.0" + required_version = "~> 1.1" } # In AWS, (eu-central-1) @@ -7,12 +7,6 @@ provider "aws" { region = "eu-central-1" } -# Used for the in-VPC EC2 endpoint. -data "aws_security_group" "default" { - name = "default" - vpc_id = module.vpc.vpc_id -} - module "vpc" { source = "github.com/terraform-aws-modules/terraform-aws-vpc?ref=v2.33.0" @@ -24,23 +18,13 @@ module "vpc" { private_subnets = ["172.17.0.0/22", "172.17.4.0/22", "172.17.8.0/22"] public_subnets = ["172.17.12.0/24", "172.17.13.0/24", "172.17.14.0/24"] - enable_dns_hostnames = true + enable_dns_hostnames = false enable_dns_support = true - enable_dhcp_options = true - dhcp_options_domain_name = var.dhcp_options_domain_name - # dhcp_options_domain_name_servers = - # In case we run terraform from within the environment. # VPC endpoint for DynamoDB enable_dynamodb_endpoint = true - # In case we run terraform from within the environment. - # VPC Endpoint for EC2 - enable_ec2_endpoint = true - ec2_endpoint_private_dns_enabled = true - ec2_endpoint_security_group_ids = [data.aws_security_group.default.id] - enable_nat_gateway = true one_nat_gateway_per_az = false # Use this only in productionish environments. 
@@ -51,7 +35,13 @@ module "vpc" { Environment = var.environment } vpc_tags = { - Owner = "Backend Team" - Name = var.name + Owner = "Backend Team" + Name = var.name + } + private_subnet_tags = { + Routability = "private" + } + public_subnet_tags = { + Routability = "public" } } diff --git a/terraform/modules/aws-vpc/outputs.tf b/terraform/modules/aws-vpc/outputs.tf new file mode 100644 index 000000000..decd6540d --- /dev/null +++ b/terraform/modules/aws-vpc/outputs.tf @@ -0,0 +1,15 @@ +output "vpc_id" { + value = module.vpc.vpc_id +} + +output "public_subnets" { + value = module.vpc.public_subnets +} + +output "private_subnets" { + value = module.vpc.private_subnets +} + +output "private_route_table_ids" { + value = module.vpc.private_route_table_ids +} diff --git a/terraform/modules/aws_vpc/variables.tf b/terraform/modules/aws-vpc/variables.tf similarity index 62% rename from terraform/modules/aws_vpc/variables.tf rename to terraform/modules/aws-vpc/variables.tf index aa4830ae1..e34a3beb7 100644 --- a/terraform/modules/aws_vpc/variables.tf +++ b/terraform/modules/aws-vpc/variables.tf @@ -1,13 +1,16 @@ variable "name" { + type = string description = "VPC name as appearing in AWS" } variable "environment" { - default = "dev" + type = string description = "Environment name, as appears in the environment definition" + default = "dev" } variable "dhcp_options_domain_name" { + type = string + description = "the default domain given to hosts in this VPC by the AWS DHCP servers" default = "internal.vpc" - description = "TODO" } diff --git a/terraform/modules/aws_terraform-state-share/main.tf b/terraform/modules/aws_terraform-state-share/main.tf deleted file mode 100644 index ec900db26..000000000 --- a/terraform/modules/aws_terraform-state-share/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -terraform { - required_version = "~> 0.12" -} diff --git a/terraform/modules/aws_terraform-state-share/providers.tf b/terraform/modules/aws_terraform-state-share/providers.tf deleted file mode 100644 index c46b0b75c..000000000 --- a/terraform/modules/aws_terraform-state-share/providers.tf +++ /dev/null @@ -1,8 +0,0 @@ -# NOTE: the provider assums that the respective environemnt variables, -# reuqired for authentication, already being set -# -provider "aws" { - version = "~> 2.58" - - region = var.region -} diff --git a/terraform/modules/aws_vpc/outputs.tf b/terraform/modules/aws_vpc/outputs.tf deleted file mode 100644 index 5a75cc584..000000000 --- a/terraform/modules/aws_vpc/outputs.tf +++ /dev/null @@ -1,5 +0,0 @@ -output "vpc_id" { - - value = module.vpc.vpc_id - -} diff --git a/terraform/modules/aws_vpc_security_groups/variables.tf b/terraform/modules/aws_vpc_security_groups/variables.tf deleted file mode 100644 index cefab3bbe..000000000 --- a/terraform/modules/aws_vpc_security_groups/variables.tf +++ /dev/null @@ -1,4 +0,0 @@ -variable "vpc_id" { - description = "ID of VPC these security groups are for" -} - diff --git a/terraform/modules/hetzner-kubernetes/load-balancer.locals.tf b/terraform/modules/hetzner-kubernetes/load-balancer.locals.tf new file mode 100644 index 000000000..f7e880cf1 --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/load-balancer.locals.tf @@ -0,0 +1,22 @@ +locals { + LB_PORT_MAPPINGS = [ + { + name = "http" + protocol = "tcp" + listen = 80 + destination = 8080 + }, + { + name = "https" + protocol = "tcp" + listen = 443 + destination = 8443 + }, + { + name = "kube-api" + protocol = "tcp" + listen = 6443 + destination = 6443 + } + ] +} diff --git 
a/terraform/modules/hetzner-kubernetes/load-balancer.resources.tf b/terraform/modules/hetzner-kubernetes/load-balancer.resources.tf
new file mode 100644
index 000000000..3dfeb55cc
--- /dev/null
+++ b/terraform/modules/hetzner-kubernetes/load-balancer.resources.tf
@@ -0,0 +1,80 @@
+resource "hcloud_load_balancer" "lb" {
+  count = var.with_load_balancer ? 1 : 0
+
+  name     = "${var.cluster_name}-lb"
+  location = var.default_location
+
+  load_balancer_type = (
+    length(var.machines) <= 5 ? "lb11" : (
+      length(var.machines) <= 75 ? "lb21" : (
+        length(var.machines) <= 150 ? "lb31" : null))
+  )
+
+  # NOTE: it is unclear what impact it has that one LB targets two possibly
+  #       disjoint groups of machines, and what role the algorithm plays in that
+  algorithm {
+    type = "round_robin"
+  }
+}
+
+
+resource "hcloud_load_balancer_network" "lb-nw" {
+  count = var.with_load_balancer ? 1 : 0
+
+  load_balancer_id = hcloud_load_balancer.lb[0].id
+  network_id       = hcloud_network.nw.id
+}
+
+
+resource "hcloud_load_balancer_service" "svcs" {
+  for_each = var.with_load_balancer ? merge(
+    { for pm in local.LB_PORT_MAPPINGS : pm.name => pm },
+    { for pm in var.lb_port_mappings : pm.name => pm }
+  ) : {}
+
+  load_balancer_id = hcloud_load_balancer.lb[0].id
+
+  protocol         = each.value.protocol
+  listen_port      = each.value.listen
+  destination_port = each.value.destination
+
+  health_check {
+    port     = each.value.destination
+    protocol = "tcp"
+    interval = 15
+    retries  = 3
+    timeout  = 6
+  }
+}
+
+
+resource "hcloud_load_balancer_target" "controlplanes" {
+  count = var.with_load_balancer ? 1 : 0
+
+  type = "label_selector"
+
+  label_selector = "component-class.${local.LABEL_PREFIX}/controlplane"
+  use_private_ip = true
+
+  load_balancer_id = hcloud_load_balancer.lb[0].id
+
+  # NOTE: prevent race condition
+  # ISSUE: https://github.com/hetznercloud/terraform-provider-hcloud/issues/170
+  depends_on = [hcloud_load_balancer_network.lb-nw]
+}
+
+
+resource "hcloud_load_balancer_target" "nodes" {
+  count = var.with_load_balancer ?
1 : 0 + + type = "label_selector" + + label_selector = "component-class.${local.LABEL_PREFIX}/node" + use_private_ip = true + + load_balancer_id = hcloud_load_balancer.lb[0].id + + # NOTE: prevent race condition + # ISSUE: https://github.com/hetznercloud/terraform-provider-hcloud/issues/170 + depends_on = [hcloud_load_balancer_network.lb-nw] +} diff --git a/terraform/modules/hetzner-kubernetes/load-balancer.variables.tf b/terraform/modules/hetzner-kubernetes/load-balancer.variables.tf new file mode 100644 index 000000000..692513396 --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/load-balancer.variables.tf @@ -0,0 +1,10 @@ +variable "lb_port_mappings" { + description = "list of ports the load balancer is being configured with" + type = list(object({ + name = string + protocol = string + listen = number + destination = number + })) + default = [] +} diff --git a/terraform/modules/hetzner-kubernetes/locals.tf b/terraform/modules/hetzner-kubernetes/locals.tf new file mode 100644 index 000000000..53ee7ca65 --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/locals.tf @@ -0,0 +1,3 @@ +locals { + LABEL_PREFIX = "wire.infra" +} diff --git a/terraform/modules/hetzner-kubernetes/machines.outputs.tf b/terraform/modules/hetzner-kubernetes/machines.outputs.tf new file mode 100644 index 000000000..2334aa7d0 --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/machines.outputs.tf @@ -0,0 +1,28 @@ +locals { + servers_private_ip = { for _, snw in hcloud_server_network.snw : snw.server_id => snw.ip } + servers_volume_device_path = { for _, vol in hcloud_volume.volumes : vol.server_id => vol.linux_device } +} + + +output "machines" { + value = [ for _, machine in hcloud_server.machines : + merge( + { + hostname = machine.name + private_ipv4 = local.servers_private_ip[machine.id] + public_ipv4 = machine.ipv4_address + component_classes = [ + for label_name, _ in machine.labels : + split("/", label_name)[1] + if replace(label_name, "component-class.${local.LABEL_PREFIX}", "") != label_name + ] + }, + contains( keys(machine.labels), "etcd_member_name" ) + ? { etcd_member_name = machine.labels.etcd_member_name } + : {}, + contains( keys(local.servers_volume_device_path), machine.id ) + ? { volume = { device_path = local.servers_volume_device_path[machine.id] } } + : {}, + ) + ] +} diff --git a/terraform/modules/hetzner-kubernetes/machines.resources.tf b/terraform/modules/hetzner-kubernetes/machines.resources.tf new file mode 100644 index 000000000..36b9956ec --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/machines.resources.tf @@ -0,0 +1,54 @@ +resource "hcloud_server" "machines" { + for_each = { for m in var.machines: "${m.group_name}-${m.machine_id}" => m } + + name = "${var.cluster_name}-${each.key}" + location = var.default_location + image = var.default_image + server_type = lookup(each.value, "machine_type", var.default_server_type) + ssh_keys = var.ssh_keys + + # NOTE: string is the only accepted type + # DOCS: for possible characters, see https://docs.hetzner.cloud/#labels + labels = merge( + { + cluster = var.cluster_name + group_name = each.value.group_name + machine_id = each.value.machine_id + }, + contains( each.value.component_classes, "controlplane" ) ? 
{ etcd_member_name = "etcd-${ each.value.machine_id }" } : {}, + { for class in each.value.component_classes : "component-class.${local.LABEL_PREFIX}/${class}" => true } + ) +} + + +resource "hcloud_server_network" "snw" { + for_each = toset([ for m in var.machines: "${m.group_name}-${m.machine_id}" ]) + + server_id = hcloud_server.machines[each.key].id + subnet_id = hcloud_network_subnet.sn.id +} + + +resource "hcloud_volume" "volumes" { + for_each = { + for m in var.machines: + "${m.group_name}-${m.machine_id}" => m + if contains(keys(m), "volume") + } + + name = "vol-${ var.cluster_name }-${ each.value.group_name }-${ each.value.machine_id }" + size = each.value.volume.size + automount = contains(keys(each.value.volume), "format") + format = try(each.value.volume.format, null) + + server_id = hcloud_server.machines[each.key].id + + labels = merge( + { + cluster = var.cluster_name + group_name = each.value.group_name + attached_to = each.value.machine_id + }, + contains( each.value.component_classes, "controlplane" ) ? { etcd_member_name = "etcd-${ each.value.machine_id }" } : {} + ) +} diff --git a/terraform/modules/hetzner-kubernetes/machines.variables.tf b/terraform/modules/hetzner-kubernetes/machines.variables.tf new file mode 100644 index 000000000..b1b74fd62 --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/machines.variables.tf @@ -0,0 +1,35 @@ +variable "default_location" { + default = "nbg1" +} + +variable "default_server_type" { + default = "cx51" +} + +variable "default_image" { + default = "ubuntu-22.04" +} + + +# FUTUREWORK: replace 'any' by implementing https://www.terraform.io/docs/language/functions/defaults.html +# +variable "machines" { + description = "list of machines" + # type = list(object({ + # group_name = string + # machine_id = string + # machine_type = string + # component_classes = list(string) + # volume = optional(object({ + # size = number + # format = optional(string) + # })) + # })) + type = any + default = [] + + validation { + condition = length(var.machines) > 0 + error_message = "At least one machine must be defined." + } +} diff --git a/terraform/modules/hetzner-kubernetes/network.resources.tf b/terraform/modules/hetzner-kubernetes/network.resources.tf new file mode 100644 index 000000000..efaa49e7e --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/network.resources.tf @@ -0,0 +1,17 @@ +resource "hcloud_network" "nw" { + name = "k8s-${ var.cluster_name }" + + ip_range = "192.168.0.0/16" +} + + +resource "hcloud_network_subnet" "sn" { + network_id = hcloud_network.nw.id + + ip_range = "192.168.1.0/24" + + # NOTE: No other sensible values available at this time + # DOCS: https://docs.hetzner.cloud/#subnets + type = "cloud" + network_zone = "eu-central" +} diff --git a/terraform/modules/hetzner-kubernetes/outputs.tf b/terraform/modules/hetzner-kubernetes/outputs.tf new file mode 100644 index 000000000..e2efa7bd7 --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/outputs.tf @@ -0,0 +1,15 @@ +output "ips" { + value = var.with_load_balancer ? [ hcloud_load_balancer.lb[0].ipv4 ] : [ + for _, machine in hcloud_server.machines : machine.ipv4_address + if contains( keys(machine.labels), "component-class.${local.LABEL_PREFIX}/node" ) + ] +} + +# NOTE: the existence of this output feels indeed odd. What is generated here could and actually should +# be done on the outside since 'machines' is already exposed. 
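+# A caller could, hypothetically, rebuild the same list from 'machines', e.g.:
+#   node_ips = [for m in module.cluster.machines : m.public_ipv4
+#               if contains(m.component_classes, "node")]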
See ./../../environment/kubernetes.dns.tf +output "node_ips" { + value = [ + for _, machine in hcloud_server.machines : machine.ipv4_address + if contains( keys(machine.labels), "component-class.${local.LABEL_PREFIX}/node" ) + ] +} diff --git a/terraform/modules/hetzner-kubernetes/variables.tf b/terraform/modules/hetzner-kubernetes/variables.tf new file mode 100644 index 000000000..12a7e7f05 --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/variables.tf @@ -0,0 +1,13 @@ +variable "cluster_name" { + type = string +} + +variable "ssh_keys" { + type = set(string) +} + +variable "with_load_balancer" { + description = "indicates whether a load balancer is being created and placed in front of all K8s machines" + type = bool + default = false +} diff --git a/terraform/modules/hetzner-kubernetes/versions.tf b/terraform/modules/hetzner-kubernetes/versions.tf new file mode 100644 index 000000000..c80dfa990 --- /dev/null +++ b/terraform/modules/hetzner-kubernetes/versions.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + hcloud = { + source = "hetznercloud/hcloud" + } + } + required_version = "~> 1.1" +} diff --git a/terraform/modules/sft/dns.tf b/terraform/modules/sft/dns.tf new file mode 100644 index 000000000..1e5080293 --- /dev/null +++ b/terraform/modules/sft/dns.tf @@ -0,0 +1,21 @@ +data "aws_route53_zone" "sft_zone" { + name = var.root_domain +} + +resource "aws_route53_record" "sft_a" { + for_each = setunion(var.server_groups.blue.server_names, var.server_groups.green.server_names) + + zone_id = data.aws_route53_zone.sft_zone.zone_id + name = "sft${each.value}.sft.${var.environment}" + type = "A" + ttl = var.a_record_ttl + records = [hcloud_server.sft[each.key].ipv4_address] +} + +resource "aws_route53_record" "metrics_srv" { + zone_id = data.aws_route53_zone.sft_zone.zone_id + name = "_sft-metrics._tcp.${var.environment}" + type = "SRV" + ttl = var.metrics_srv_record_ttl + records = [for a_record in aws_route53_record.sft_a : "0 10 8443 ${a_record.fqdn}"] +} diff --git a/terraform/modules/sft/outputs.tf b/terraform/modules/sft/outputs.tf new file mode 100644 index 000000000..088f6674b --- /dev/null +++ b/terraform/modules/sft/outputs.tf @@ -0,0 +1,26 @@ +# TODO: It is absurd that srv-announcer requires this. All route53 resources are +# scoped globally, figure out if we really need to do this. +data "aws_region" "current" {} + +output "sft" { + value = { + sft_srv = "_sft._tcp.${var.environment}" + aws_key_id = aws_iam_access_key.srv-announcer.id + aws_access_key = aws_iam_access_key.srv-announcer.secret + aws_region = data.aws_region.current.name + instances_blue = [ for server_name, _ in var.server_groups.blue.server_names : + { + hostname = hcloud_server.sft[server_name].name + ipaddress = hcloud_server.sft[server_name].ipv4_address + fqdn = aws_route53_record.sft_a[server_name].fqdn + } + ] + instances_green = [ for server_name, _ in var.server_groups.green.server_names : + { + hostname = hcloud_server.sft[server_name].name + ipaddress = hcloud_server.sft[server_name].ipv4_address + fqdn = aws_route53_record.sft_a[server_name].fqdn + } + ] + } +} diff --git a/terraform/modules/sft/server.tf b/terraform/modules/sft/server.tf new file mode 100644 index 000000000..361b8a897 --- /dev/null +++ b/terraform/modules/sft/server.tf @@ -0,0 +1,17 @@ +locals { + // This duplication is bad, but terraform doesn't allow defining functions. 
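+  // e.g., hypothetically, green = { server_names = ["1", "2"], server_type = "cx21" }
+  // collapses to { "1" = "cx21", "2" = "cx21" } before being merged with the blue map.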
+  map_server_name_to_type_green = {for _, server_name in var.server_groups.green.server_names: server_name => var.server_groups.green.server_type}
+  map_server_name_to_type_blue  = {for _, server_name in var.server_groups.blue.server_names : server_name => var.server_groups.blue.server_type}
+  map_server_name_to_type       = merge(local.map_server_name_to_type_blue, local.map_server_name_to_type_green)
+}
+
+
+resource "hcloud_server" "sft" {
+  for_each = local.map_server_name_to_type
+
+  name        = "${var.environment}-sft-${each.key}"
+  server_type = each.value
+  image       = var.image
+  location    = var.location
+  ssh_keys    = var.ssh_keys
+}
diff --git a/terraform/modules/sft/srv-announcer-iam.tf b/terraform/modules/sft/srv-announcer-iam.tf
new file mode 100644
index 000000000..c14045fd2
--- /dev/null
+++ b/terraform/modules/sft/srv-announcer-iam.tf
@@ -0,0 +1,60 @@
+resource "aws_iam_user" "srv-announcer" {
+  name          = "${var.environment}-srv-announcer"
+  force_destroy = true # TODO: Add a comment explaining this. Does this mean
+                       # changing this user will make existing srv announcements
+                       # fail?
+}
+
+resource "aws_iam_access_key" "srv-announcer" {
+  user = aws_iam_user.srv-announcer.name
+}
+
+# NOTE: Does not configure permissions for GeoLocation, because they are not
+#       needed by the srv-announcer
+# DOCS: https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/r53-api-permissions-ref.html#required-permissions-resource-record-sets
+#
+resource "aws_iam_user_policy" "srv-announcer-recordsets" {
+  name = "${var.environment}-srv-announcer-route53-recordsets-policy"
+  user = aws_iam_user.srv-announcer.name
+
+  policy = <<-EOP
+  {
+    "Version": "2012-10-17",
+    "Statement": [
+      {
+        "Effect": "Allow",
+        "Action": [
+          "route53:ChangeResourceRecordSets",
+          "route53:ListResourceRecordSets"
+        ],
+        "Resource": [
+          "arn:aws:route53:::hostedzone/${data.aws_route53_zone.sft_zone.zone_id}"
+        ]
+      }
+    ]
+  }
+  EOP
+}
+
+resource "aws_iam_user_policy" "srv-announcer-getrecordchanges" {
+  name = "${var.environment}-srv-announcer-route53-getrecordchanges-policy"
+  user = aws_iam_user.srv-announcer.name
+
+  policy = <<-EOP
+  {
+    "Version": "2012-10-17",
+    "Statement": [
+      {
+        "Effect": "Allow",
+        "Action": [
+          "route53:GetChange",
+          "route53:ListHostedZonesByName"
+        ],
+        "Resource": [
+          "*"
+        ]
+      }
+    ]
+  }
+  EOP
+}
diff --git a/terraform/modules/sft/variables.tf b/terraform/modules/sft/variables.tf
new file mode 100644
index 000000000..f01e14f96
--- /dev/null
+++ b/terraform/modules/sft/variables.tf
@@ -0,0 +1,63 @@
+variable "root_domain" {
+  type = string
+}
+
+variable "environment" {
+  type = string
+}
+
+variable "server_groups" {
+  description = <<-EOD
+    ..
+  EOD
+
+  type = object({
+    //Arbitrary name for the first group
+    blue = object({
+      server_names = set(string)
+      server_type  = string
+    })
+
+    //Arbitrary name for the second group
+    green = object({
+      server_names = set(string)
+      server_type  = string
+    })
+  })
+
+  validation {
+    condition     = length(setintersection(var.server_groups.blue.server_names, var.server_groups.green.server_names)) == 0
+    error_message = "The server_names in the blue and green server_groups must not intersect."
+  }
+}
+
+variable "a_record_ttl" {
+  type = number
+}
+
+variable "metrics_srv_record_ttl" {
+  default = 60
+}
+
+variable "server_type" {
+  default = "cx11"
+}
+
+variable "server_type_stale" {
+  default = "cx11"
+}
+
+variable "image" {
+  default = "ubuntu-18.04"
+}
+
+variable "location" {
+  default = "nbg1"
+}
+
+variable "ssh_keys" {
+  type = list
+}
diff --git a/terraform/modules/sft/versions.tf b/terraform/modules/sft/versions.tf
new file mode 100644
index 000000000..c80dfa990
--- /dev/null
+++ b/terraform/modules/sft/versions.tf
@@ -0,0 +1,11 @@
+terraform {
+  required_providers {
+    aws = {
+      source = "hashicorp/aws"
+    }
+    hcloud = {
+      source = "hetznercloud/hcloud"
+    }
+  }
+  required_version = "~> 1.1"
+}
diff --git a/utils/generate_graph.pl b/utils/generate_graph.pl
new file mode 100755
index 000000000..a064ebe91
--- /dev/null
+++ b/utils/generate_graph.pl
@@ -0,0 +1,41 @@
+#!/usr/bin/env -S gnuplot -c
+
+####################################################################
+# GNUPlot script to display reports on packet captured RTP streams #
+####################################################################
+
+##############################
+# General Usage
+#
+# once you have a report from rtpstreams_graph.py saved to a file,
+# provide it to this utility, and get a graphical output.
+
+##############################
+# Requirements
+#
+# If you're not using wire-server-deploy's direnv and nix setup,
+# you will need to install a version of gnuplot greater than version 5.
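+
+##############################
+# Input format
+#
+# The report is the whitespace-separated block printed by rtpstreams_graph.py:
+# per packet-size bucket, one row with the mean pairwise delay (plotted at x)
+# and one row with the max delay (plotted at x.3). A hypothetical sample:
+#
+#   60-120 1 2345.0
+#   60-120 1.3 8000.0
+#   121-240 2 1800.0
+#   121-240 2.3 6500.0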
+if (ARGC != 2) { print "usage: ", ARG0, " <report> <output.png>";
+  exit -1
+}
+
+set boxwidth 0.3
+set style fill solid
+
+set style line 1 lc rgb "blue"
+set style line 2 lc rgb "red"
+
+set term pngcairo size 1024,768 enhance font 'Verdana,10'
+
+set title "Packet size against mean pairwise transmission delay"
+
+set xlabel "Packet size ranges per bucket (bytes)"
+set xrange [0:]
+set ylabel "Packet-pairwise transmission delay (microseconds)"
+set yrange [0:]
+
+set output ARG2
+
+plot sprintf("
diff --git a/utils/rtpdelay_graph.py b/utils/rtpdelay_graph.py
+# kubenode1> tcpdump -i ens160 -s 0 -w testnumber.pcap host and udp
+#
+# *place call from host here*
+#
+# Next, copy this pcap file to a place where you have these tools, and run this command on a pcap file to find out what udp ports were seen during the capture:
+#
+# adminhost> ./rtpstreams_summary.py testnumber.pcap
+# usage: ./rtpstreams_graph.py
+# finding source ports for you, be patient...
+# pcap contains 21 packets with source port 37462
+# pcap contains 29 packets with source port 38654
+# pcap contains 67 packets with source port 80
+# pcap contains 13 packets with source port 56899
+# pcap contains 58 packets with source port 44279
+# pcap contains 8340 packets with source port 50996
+# pcap contains 5650 packets with source port 34096
+# adminhost>
+#
+# Pick the port that has a lot of packets captured, as those are probably your calls.
+#
+# If you want to graph a single session, use rtpstreams_summary.py to get your session numbers. Otherwise, skip to the next step.
+#
+# adminhost> ./rtpstreams_summary.py testnumber.pcap 50996
+# Capture file found. Generating summary..
+# SSRC 220450815: 4180 packets
+# packet 27697 delayed by 0:00:00.137442
+# packet 27705 delayed by 0:00:00.310505
+# 4180 packets recved, 0 lost (0 %) and 0 with same seq
+# max delay between packets 0:00:00.310505
+# SSRC 2008506802: 3422 packets
+# packet 257 delayed by 0:00:00.142737
+# packet 271 delayed by 0:00:00.160726
+# packet 491 delayed by 0:00:00.169627
+# packet 640 delayed by 0:00:00.182204
+# packet 1261 delayed by 0:00:00.121933
+# packet 1614 delayed by 0:00:00.200193
+# packet 1945 delayed by 0:00:00.168273
+# packet 2059 delayed by 0:00:00.127896
+# packet 2639 delayed by 0:00:00.169698
+# packet 2761 delayed by 0:00:00.132851
+# packet 2781 delayed by 0:00:00.160073
+# 3422 packets recved, 64 lost (1 %) and 0 with same seq
+# max delay between packets 0:00:00.200193
+#
+# The values you're looking for are on the lines that begin with SSRC.
+#
+# Now use this program to graph a single session, or multiple sessions.
+#
+# adminhost> ./rtpdelay_graph.py testnumber.pcap 50996 220450815
+# Capture file found. Generating graph..
+# SSRC 220450815: 4180 packets
+#
+# You should now have a file named testnumber.pcap.png with your graph in it.
+
+
+import datetime
+import matplotlib.pyplot as plt
+import pyshark
+import sys
+import time
+
+# colours for the lines
+colors = ['blue', 'red', 'green', 'cyan', 'magenta']
+
+if len(sys.argv) < 3 or len(sys.argv) > 4:
+    print('usage: {} <pcap> <port> [ssrc]'.format(sys.argv[0]))
+
+if len(sys.argv) == 1:
+    exit (-1)
+
+fname = sys.argv[1]
+
+if len(sys.argv) == 2:
+    ss = dict()
+    cap = pyshark.FileCapture (fname)
+    print('Finding source ports for you, be patient...')
+    for pkt in cap:
+        if 'udp' in pkt:
+            id = int(pkt.udp.srcport)
+            if id not in ss:
+                ss[id] = list()
+            ss[id].append(pkt.udp.dstport)
+    for id in ss:
+        print ('pcap contains {} packets with source port {}'.format(len(ss[id]), id))
+    exit (0)
+
+port = sys.argv[2]
+if len(sys.argv) == 4:
+    selssrc = int(sys.argv[3])
+else:
+    selssrc = None
+
+# get the packets from tshark
+cap = pyshark.FileCapture(fname,
+                          display_filter='udp',
+                          decode_as={'udp.port=={}'.format(port):'rtp'})
+
+seqs = {}
+
+print('Capture file found. 
Generating graph..') +for packet in cap: + if 'rtp' in packet and packet.rtp.get('p_type') == '100': + r = packet.rtp + # video p_type=100, audio 111, video via TURN 98 + ssrc = int(r.ssrc, 16) + if ssrc not in seqs: + seqs[ssrc] = [] + + # store the relevant info for later + seqs[ssrc].append({'seq': int(r.seq), + 'ts': int(r.timestamp), + 'sniffts': packet.sniff_time}) + +c = 0 + +for ssrc in seqs: + + # if an SSRC is given, skip the others + if selssrc != None and ssrc != selssrc: + continue + + print('SSRC {}: {} packets'.format(ssrc, len(seqs[ssrc]))) + + # sort by the RTP packet ts (source ts) + pid = sorted(seqs[ssrc], key=lambda x: x['seq']) + s = 0 + + # use first packet for offsets + firstseq = pid[0]['seq'] + firstts = pid[0]['ts'] + firstsniffts = pid[0]['sniffts'] + + x = [] + y = [] + + for pkt in pid: + # calculate ts diff from first packet + # video RTP packet ts is in 1/90000s so do ts*1000/90 for us + pts = int((pkt['ts'] - firstts) * 1000 / 90) + + # calculate sniffed ts from first packet + sniffts = (pkt['sniffts'] - firstsniffts) + psniffts = sniffts.seconds * 1000000 + sniffts.microseconds + + tsdiff = psniffts - pts + + #print('{} {}'.format(pkt['seq'] - firstseq, tsdiff)) + x.append(pkt['seq'] - firstseq) + y.append(tsdiff / 1000) + + plt.plot(x, y, color=colors[c], linestyle='solid', linewidth=1, label='{}'.format(ssrc)) + + # next colour + c += 1 + +plt.xlabel('Packet seqNo') +plt.ylabel('Delay relative to first packet (ms)') + +plt.savefig('{}.png'.format(fname), pad_inches=0.2) + diff --git a/utils/rtpstreams_graph.py b/utils/rtpstreams_graph.py new file mode 100755 index 000000000..1a67de6bd --- /dev/null +++ b/utils/rtpstreams_graph.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 + +############################################################### +# Utility to derive statistics on packet captured RTP streams # +############################################################### + +###################### +# General Usage: +# +# First, capture a call's packets with tcpdump: +# +# kubenode1> tcpdump -i ens160 -s 0 -w testnumber.pcap host and udp +# +# *place call from host here* +# +# Next, copy this pcap file to a place where you have these tools, and run this command on a pcap file to find out what udp ports were seen during the capture: +# +# adminhost> ./rtpstreams_graph.py testnumber.pcap +# usage: ./analyse_rtp_streams.py +# finding source ports for you, be patient... +# pcap contains 21 packets with source port 37462 +# pcap contains 29 packets with source port 38654 +# pcap contains 67 packets with source port 80 +# pcap contains 13 packets with source port 56899 +# pcap contains 58 packets with source port 44279 +# pcap contains 8340 packets with source port 50996 +# pcap contains 5650 packets with source port 34096 +# adminhost> +# +# Pick the port that has a lot of packets captured, as those are probably your calls. +# +# adminhost> ./rtpstreams_graph.py testnumber.pcap 50996 +# capture file found. generating reports.. +# Processing session 220450815 with 4180 packets +# +# ... +# +# Processing session 2008506802 with 3422 packets +# +# ... +# +# +# Copy everything between the start report, and the end report marker, and place it in a text file. +# +# Use generate_graph.pl to create a graph from your report! +# +# adminhost> ./generate_graph.pl report1.txt report1.png + +############################## +# Interpreting these results: +# +# TL;dr: any packet delayed by more than 0:00:00.12 is problems. these will show as the red bars. 
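+# (the 0:00:00.12 figure matches the 120 ms `limitts` threshold hard-coded in
+# rtpstreams_summary.py; gaps between consecutive packets above it get flagged.)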
+# delayed packets can cause SFT to lose track of the stream, and wait for the next keyframe. +# If there is no traffic shaping, the blue bars should be delayed corresponding to their packet sizes. + +################## +# Requirements: +# +# If you're not using nix and direnv in our wire-server-deploy directory, you'll need: +# Python 3 +# pyshark +# wireshark + +import datetime +import sys +import time +import pyshark +import functools +import collections + +BUCKETS = 10 + +if len(sys.argv) < 3: + print('usage: {} '.format(sys.argv[0])) + +if len(sys.argv) == 1: + exit (-1) + +fname = sys.argv[1] +ss = dict() + +if len(sys.argv) == 2: + cap = pyshark.FileCapture (fname) + print('Finding source ports for you, be patient...') + for pkt in cap: + if 'udp' in pkt: + id = int(pkt.udp.srcport) + if id not in ss: + ss[id] = list() + ss[id].append(pkt.udp.dstport) + for id in ss: + print ('pcap contains {} packets with source port {}'.format(len(ss[id]), id)) + exit (0) + +port = sys.argv[2] +cap = pyshark.FileCapture (fname, + display_filter='udp', + decode_as={'udp.port=={}'.format(port):'rtp'}) + +print('Capture file found. Generating reports..') +for pkt in cap: + # only keep rtp packets of type 100 + if 'rtp' in pkt and pkt.rtp.get('p_type') == '100': + id = int(pkt.rtp.ssrc, 16) + # bucket packets by which rtp session they belong to + if id not in ss: + ss[id] = list() + ss[id].append(pkt) + +for id in ss: + print('Processing session {} with {} packets'.format(id, len(ss[id]))) + + # sort packets by the time they were recorded by the filter program + pkts = sorted(ss[id], key=lambda p: p.sniff_time) + + # retrieve the length of each packet, and the pairwise delay between + # each packet and its predecessor. caution: this uses the length of the IP + # datagram, not the length of the inner udp datagram. + szdel = map(lambda i: { + 'size': int(pkts[i].length), + 'delay': pkts[i].sniff_time - pkts[i-1].sniff_time + }, range(1, len(pkts))) + + # flatten timestamps into microseconds + szdel = map(lambda i: { + 'size': i['size'], + 'delay': i['delay'].microseconds + (i['delay'].seconds * 1000000) + }, szdel) + + # sort the list by packet size + szdel = sorted(szdel, key=lambda p: p['size']) + + # split the list into N buckets by packet size + bksz = len(szdel) / BUCKETS + bknum = 0 + buckets = list() + buckets.append(list()) + + for i in range(0, len(szdel)): + if i >= ((bknum + 1) * bksz) and (bknum + 1) < BUCKETS: + bknum += 1 + buckets.append(list()) + + buckets[bknum].append(szdel[i]) + + # calculate the mean and max pairwise delay for each packet size bucket, + # and retrieve the min and max size for labelling. + avgs = map(lambda b: { + 'smin': min(map(lambda x: x['size'], b)), + 'smax': max(map(lambda x: x['size'], b)), + 'davg': functools.reduce(lambda x, y: x + y['delay'], b, 0) / len(b), + 'dmax': max(map(lambda x: x['delay'], b)) + }, buckets) + + avgs = list(avgs) + + print('') + # report + for i in range(0, len(avgs)): + a = avgs[i] + lo = a['smin'] + hi = a['smax'] + print('{}-{} {} {}'.format(lo, hi, i+1, a['davg'])) + # v-- gnuplot magic hacks. 
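+        # (per bucket, the mean-delay row above is plotted at x = i+1; this row
+        # puts the max delay at x = i+1.3, matching generate_graph.pl's
+        # `set boxwidth 0.3`, so the two boxes presumably render side by side)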
+ print('{}-{} {}.3 {}'.format(lo, hi, i+1, a['dmax'])) + print() + print('') diff --git a/utils/rtpstreams_summary.py b/utils/rtpstreams_summary.py new file mode 100755 index 000000000..553c8c1fb --- /dev/null +++ b/utils/rtpstreams_summary.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python3 + +############################################################## +# Utility to generate summary of packet captured RTP streams # +############################################################## + +###################### +# General Usage: +# +# capture packets with tcpdump: +# +# kubenode1> tcpdump -i ens160 -s 0 -w testnumber.pcap host and udp +# +# run this command on a pcap file to find out what udp ports were seen during the capture +# +# Copy this pcap file to a place where you have these tools. +# +# adminhost> ./rtpstreams_summary.py testnumber.pcap +# usage: ./analyse_rtp_streams.py +# finding source ports for you, be patient... +# pcap contains 21 packets with source port 37462 +# pcap contains 29 packets with source port 38654 +# pcap contains 67 packets with source port 80 +# pcap contains 13 packets with source port 56899 +# pcap contains 58 packets with source port 44279 +# pcap contains 8340 packets with source port 50996 +# pcap contains 5650 packets with source port 34096 +# adminhost> +# +# Pick the big ones, as those are probably your calls. +# +# adminhost> ./rtpstreams_summary.py testnumber.pcap 50996 +# Capture file found. Generating summary.. +# SSRC 220450815: 4180 packets +# packet 27697 delayed by 0:00:00.137442 +# packet 27705 delayed by 0:00:00.310505 +# 4180 packets recved, 0 lost (0 %) and 0 with same seq +# max delay between packets 0:00:00.310505 +# SSRC 2008506802: 3422 packets +# packet 257 delayed by 0:00:00.142737 +# packet 271 delayed by 0:00:00.160726 +# packet 491 delayed by 0:00:00.169627 +# packet 640 delayed by 0:00:00.182204 +# packet 1261 delayed by 0:00:00.121933 +# packet 1614 delayed by 0:00:00.200193 +# packet 1945 delayed by 0:00:00.168273 +# packet 2059 delayed by 0:00:00.127896 +# packet 2639 delayed by 0:00:00.169698 +# packet 2761 delayed by 0:00:00.132851 +# packet 2781 delayed by 0:00:00.160073 +# 3422 packets recved, 64 lost (1 %) and 0 with same seq +# max delay between packets 0:00:00.200193 +# + +############################## +# Interpreting these results: +# +# TL;dr: any packet delayed by more than 0:00:00.12 is problems, and packet loss of above 0.1% can also be problematic. +# Both of these situations can cause SFT to lose track of the stream, and wait for the next keyframe. + +###### Requirements: +# If you're not using nix and direnv in our wire-server-deploy directory, you'll need: +# Python 3 +# pyshark +# wireshark + +import datetime +import pyshark +import sys +import time + +if len(sys.argv) < 3: + print('usage: {} '.format(sys.argv[0])) + +if len(sys.argv) == 1: + exit (-1) + +fname = sys.argv[1] +ss = dict() + +if len(sys.argv) == 2: + cap = pyshark.FileCapture (fname) + + print('Finding source ports for you, be patient...') + for pkt in cap: + if 'udp' in pkt: + id = int(pkt.udp.srcport) + if id not in ss: + ss[id] = list() + ss[id].append(pkt.udp.dstport) + for id in ss: + print ('pcap contains {} packets with source port {}'.format(len(ss[id]), id)) + + exit (0) + +port = sys.argv[2] +cap = pyshark.FileCapture(fname, + display_filter='udp', + decode_as={'udp.port=={}'.format(port):'rtp'}) +seqs = {} +print('Capture file found. 
Generating summary..') + +for packet in cap: + if 'rtp' in packet: + r = packet.rtp + if r.get('p_type') == '100': + ssrc = int(r.ssrc, 16) + if ssrc not in seqs: + seqs[ssrc] = [] + seqs[ssrc].append({'seq': int(r.seq), + 'ts': int(r.timestamp), + 'sts': packet.sniff_time}) + +for ssrc in seqs: + print('SSRC {}: {} packets'.format(ssrc, len(seqs[ssrc]))) + pid = sorted(seqs[ssrc], key=lambda x: x['ts']) + s = 0 + lastts = None + maxts = datetime.timedelta(0) + limitts = datetime.timedelta(seconds=0.12) + lost = 0 + recv = 0 + rsnd = 0 + + for pkt in pid: + idx = pkt['seq'] + ts = pkt['sts'] + + if lastts != None and ts - lastts > limitts: + print('packet {} delayed by {}'.format(idx, ts-lastts)) + + if lastts != None and ts - lastts > maxts: + maxts = ts - lastts + + if s != 0 and idx >= s+1: + lost += idx - s - 1 + elif s != 0 and idx == s: + rsnd += 1 + + lastts = ts + s = idx + recv += 1 + + print('{} packets recved, {} lost ({} %) and {} with same seq'.format(recv, lost, int(lost * 100 / recv), rsnd)) + print('max delay between packets {}'.format(maxts)) diff --git a/values/account-pages/prod-values.example.yaml b/values/account-pages/prod-values.example.yaml new file mode 100644 index 000000000..6085e444d --- /dev/null +++ b/values/account-pages/prod-values.example.yaml @@ -0,0 +1,31 @@ +replicaCount: 1 +# image: +# tag: some-tag (only override if you want a newer/different version than what is in the chart) +config: + externalUrls: + backendRest: nginz-https.example.com + backendDomain: example.com + appHost: account.example.com +# See full list of available environment variables: https://github.com/wireapp/wire-account/blob/dev/server/config.ts +envVars: + APP_NAME: "Wire Account Management" + COMPANY_NAME: "YourCompany" + FEATURE_ENFORCE_HTTPS: "true" + FEATURE_ENABLE_DEBUG: "false" + URL_SUPPORT_BASE: "https://www.example.com/support" + URL_TEAMS_BASE: "https://teams.example.com" + URL_WEBAPP_BASE: "https://webapp.example.com" + URL_WEBSITE_BASE: "https://www.example.com" + CSP_EXTRA_CONNECT_SRC: "https://*.example.com, wss://*.example.com" + CSP_EXTRA_IMG_SRC: "https://*.example.com" + CSP_EXTRA_SCRIPT_SRC: "https://*.example.com" + CSP_EXTRA_DEFAULT_SRC: "https://*.example.com" + CSP_EXTRA_FONT_SRC: "https://*.example.com" + CSP_EXTRA_FRAME_SRC: "https://*.example.com" + CSP_EXTRA_MANIFEST_SRC: "https://*.example.com" + CSP_EXTRA_OBJECT_SRC: "https://*.example.com" + CSP_EXTRA_MEDIA_SRC: "https://*.example.com" + CSP_EXTRA_PREFETCH_SRC: "https://*.example.com" + CSP_EXTRA_STYLE_SRC: "https://*.example.com" + CSP_EXTRA_WORKER_SRC: "https://*.example.com" + IS_SELF_HOSTED: "true" diff --git a/values/coturn/prod-values.example.yaml b/values/coturn/prod-values.example.yaml new file mode 100644 index 000000000..973d868de --- /dev/null +++ b/values/coturn/prod-values.example.yaml @@ -0,0 +1,3 @@ +secrets: + zrestSecrets: + - "" diff --git a/values/databases-ephemeral/prod-values.example.yaml b/values/databases-ephemeral/prod-values.example.yaml index a0bff09cb..3b3bb9f75 100644 --- a/values/databases-ephemeral/prod-values.example.yaml +++ b/values/databases-ephemeral/prod-values.example.yaml @@ -3,3 +3,32 @@ tags: cassandra-ephemeral: false elasticsearch-ephemeral: false + +redis-ephemeral: + redis-ephemeral: + usePassword: false + cluster: + enabled: true + # https://artifacthub.io/packages/helm/bitnami-aks/redis/11.3.4#production-configuration + # default slaveCount is 2 + slaveCount: 3 + master: + persistence: + enabled: false + resources: + limits: + cpu: "1000m" + memory: 
"1024Mi" + requests: + cpu: "500m" + memory: "512Mi" + slave: + persistence: + enabled: false + resources: + limits: + cpu: "1000m" + memory: "1024Mi" + requests: + cpu: "500m" + memory: "512Mi" diff --git a/values/demo-smtp/prod-values.example.yaml b/values/demo-smtp/prod-values.example.yaml index 9a8950f08..87cc4a162 100644 --- a/values/demo-smtp/prod-values.example.yaml +++ b/values/demo-smtp/prod-values.example.yaml @@ -1,6 +1,23 @@ # CHANGEME-PROD: This is often a good default when using calico's default CIDR # https://github.com/kubernetes-sigs/kubespray/blob/master/docs/calico.md#optional--define-the-default-pool-cidr # or flannel's https://github.com/kubernetes-sigs/kubespray/blob/master/docs/flannel.md#flannel -# If you overrride those values, etc., then verify that this CIDR still makes sense +# If you override those values, etc., then verify that this CIDR still makes sense +# For all variables the "ixdotai/smtp" image supports see: https://github.com/ix-ai/smtp#readme envVars: RELAY_NETWORKS: ":10.233.0.0/16" +# +# PORT: "25" +# NET_DEV: eth0 +# OTHER_HOSTNAMES: other.example.com +# DISABLE_IPV6: 1 +# BIND_IP: 0.0.0.0 +# BIND_IP6: ::0 +# MAILNAME: mail.example.com +# DKIM_KEY_PATH: /etc/exim4/dkim.key +# KEY_PATH: /path/to/key.crt +# CERTIFICATE_PATH: /path/to/certificate.crt +# SMARTHOST_ADDRESS: mail.example.com +# SMARTHOST_PORT: "587" +# SMARTHOST_USER: exampleuser +# SMARTHOST_PASSWORD: secret +# SMARTHOST_ALIASES: "*.example.com" diff --git a/values/fluent-bit/prod-values.example.yaml b/values/fluent-bit/prod-values.example.yaml new file mode 100644 index 000000000..ae0ad0390 --- /dev/null +++ b/values/fluent-bit/prod-values.example.yaml @@ -0,0 +1,78 @@ +# See defaults in https://github.com/fluent/helm-charts/tree/main/charts/fluent-bit +fluent-bit: + config: + inputs: | + [INPUT] + Name tail + Path /var/log/containers/*.log + Parser cri + Tag kube.* + Mem_Buf_Limit 5MB + Skip_Long_Lines On + + [INPUT] + Name systemd + Tag host.* + Systemd_Filter _SYSTEMD_UNIT=kubelet.service + Read_From_Tail On + + outputs: | + ###### Uncomment and update values in this part to enable output to Elasticsearch + # [OUTPUT] + # Name es + # Match kube.* + # Host elasticsearch-ephemeral + # Generate_ID On + # Logstash_Format On + # Logstash_Prefix pod + # Retry_Limit False + # Trace_Error On + # Replace_Dots On + # [OUTPUT] + # Name es + # Match host.* + # Host elasticsearch-ephemeral + # Generate_ID On + # Logstash_Format On + # Logstash_Prefix node + # Retry_Limit False + # Trace_Error On + # Replace_Dots On + + ###### Uncomment and update values in this part to enable output to Syslog + # syslog output reference - https://docs.fluentbit.io/manual/pipeline/outputs/syslog + # Uncomment this section to enable syslog output + # [OUTPUT] + # name syslog + # match * + # host 127.0.0.1 # IP address of the syslog server + # port 514 + # mode udp + # syslog_format rfc5424 + # syslog_maxsize 2048 + # syslog_severity_preset 7 # Between 0-7 for log severity levels + # syslog_message_key message + + ## https://docs.fluentbit.io/manual/pipeline/parsers + customParsers: | + [PARSER] + # http://rubular.com/r/tjUt3Awgg4 + Name cri + Format regex + Regex ^(?