From c0b0e8f9800ca04c319a7c82ad559e4f2fc38c00 Mon Sep 17 00:00:00 2001
From: Anjan Nath
Date: Wed, 9 Oct 2024 18:41:51 +0530
Subject: [PATCH] add systemd services for configuration after start

the services perform the various tasks needed to set up the ocp or
microshift cluster; these systemd units run small shell scripts which
are based on:
https://github.com/crc-org/crc-cloud/blob/main/pkg/bundle/setup/clustersetup.sh

and do the following tasks:
- create crc specific configurations for dnsmasq
- set a new uuid as cluster id
- create the pod for routes-controller
- try to grow the disk and filesystem
- check if the cluster operators are ready
- add the pull secret to the cluster
- set kubeadmin and developer user passwords
- set a custom ca for authentication
- set custom nip.io cluster domain
---
 .gitignore                            |  1 +
 createdisk-library.sh                 | 35 +++++++++++
 createdisk.sh                         | 13 ++++
 docs/self-sufficient-bundle.md        | 34 ++++++++++
 systemd/crc-cluster-status.service    | 13 ++++
 systemd/crc-cluster-status.sh         | 43 +++++++++++++
 systemd/crc-dnsmasq.service           | 18 ++++++
 systemd/crc-pullsecret.service        | 12 ++++
 systemd/crc-pullsecret.sh             | 21 +++++++
 systemd/crc-routes-controller.service | 12 ++++
 systemd/crc-routes-controller.sh      | 16 +++++
 systemd/crc-systemd-common.sh         | 12 ++++
 systemd/dnsmasq.sh.template           | 26 ++++++++
 systemd/ocp-cluster-ca.service        | 11 ++++
 systemd/ocp-cluster-ca.sh             | 91 +++++++++++++++++++++++++++
 systemd/ocp-clusterid.service         | 11 ++++
 systemd/ocp-clusterid.sh              | 11 ++++
 systemd/ocp-custom-domain.service     | 12 ++++
 systemd/ocp-custom-domain.sh          | 54 ++++++++++++++++
 systemd/ocp-growfs.service            |  9 +++
 systemd/ocp-growfs.sh                 | 11 ++++
 systemd/ocp-mco-sshkey.service        | 14 +++++
 systemd/ocp-mco-sshkey.sh             | 19 ++++++
 systemd/ocp-userpasswords.service     | 12 ++++
 systemd/ocp-userpasswords.sh          | 38 +++++++++++
 25 files changed, 549 insertions(+)
 create mode 100644 docs/self-sufficient-bundle.md
 create mode 100644 systemd/crc-cluster-status.service
 create mode 100644 systemd/crc-cluster-status.sh
 create mode 100644 systemd/crc-dnsmasq.service
 create mode 100644 systemd/crc-pullsecret.service
 create mode 100644 systemd/crc-pullsecret.sh
 create mode 100644 systemd/crc-routes-controller.service
 create mode 100644 systemd/crc-routes-controller.sh
 create mode 100644 systemd/crc-systemd-common.sh
 create mode 100644 systemd/dnsmasq.sh.template
 create mode 100644 systemd/ocp-cluster-ca.service
 create mode 100644 systemd/ocp-cluster-ca.sh
 create mode 100644 systemd/ocp-clusterid.service
 create mode 100644 systemd/ocp-clusterid.sh
 create mode 100644 systemd/ocp-custom-domain.service
 create mode 100644 systemd/ocp-custom-domain.sh
 create mode 100644 systemd/ocp-growfs.service
 create mode 100644 systemd/ocp-growfs.sh
 create mode 100644 systemd/ocp-mco-sshkey.service
 create mode 100644 systemd/ocp-mco-sshkey.sh
 create mode 100644 systemd/ocp-userpasswords.service
 create mode 100644 systemd/ocp-userpasswords.sh

diff --git a/.gitignore b/.gitignore
index 3782fda9..66877be9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,4 @@ podman-remote/
 .sw[a-p]
 crc-cluster-kube-apiserver-operator
 crc-cluster-kube-controller-manager-operator
+systemd/crc-dnsmasq.sh
diff --git a/createdisk-library.sh b/createdisk-library.sh
index da45684d..d1fa4c4a 100755
--- a/createdisk-library.sh
+++ b/createdisk-library.sh
@@ -223,6 +223,7 @@ function prepare_hyperV() {
     echo 'CONST{virt}=="microsoft", RUN{builtin}+="kmod load hv_sock"' > /etc/udev/rules.d/90-crc-vsock.rules
 EOF
 }
+
 function prepare_qemu_guest_agent() {
     local vm_ip=$1
 
@@ -392,3 +393,37 @@ function remove_pull_secret_from_disk() {
     esac
 }
 
+function copy_systemd_units() {
+    case "${BUNDLE_TYPE}" in
+        "snc"|"okd")
+            export APPS_DOMAIN="apps-crc.testing"
+            envsubst '${APPS_DOMAIN}' < systemd/dnsmasq.sh.template > systemd/crc-dnsmasq.sh
+            unset APPS_DOMAIN
+            ;;
+        "microshift")
+            export APPS_DOMAIN="apps.crc.testing"
+            envsubst '${APPS_DOMAIN}' < systemd/dnsmasq.sh.template > systemd/crc-dnsmasq.sh
+            unset APPS_DOMAIN
+            ;;
+    esac
+
+    ${SSH} core@${VM_IP} -- 'mkdir -p /home/core/systemd-units && mkdir -p /home/core/systemd-scripts'
+    ${SCP} systemd/crc-*.service core@${VM_IP}:/home/core/systemd-units/
+    ${SCP} systemd/crc-*.sh core@${VM_IP}:/home/core/systemd-scripts/
+
+    case "${BUNDLE_TYPE}" in
+        "snc"|"okd")
+            ${SCP} systemd/ocp-*.service core@${VM_IP}:/home/core/systemd-units/
+            ${SCP} systemd/ocp-*.sh core@${VM_IP}:/home/core/systemd-scripts/
+            ;;
+    esac
+
+    ${SSH} core@${VM_IP} -- 'sudo cp /home/core/systemd-units/* /etc/systemd/system/ && sudo cp /home/core/systemd-scripts/* /usr/local/bin/'
+    ${SSH} core@${VM_IP} -- 'ls /home/core/systemd-scripts/ | xargs -t -I % sudo chmod +x /usr/local/bin/%'
+    ${SSH} core@${VM_IP} -- 'sudo restorecon -rv /usr/local/bin'
+
+    # enable the copied .service units
+    ${SSH} core@${VM_IP} -- 'ls /home/core/systemd-units/*.service | xargs basename -a | xargs sudo systemctl enable'
+
+    ${SSH} core@${VM_IP} -- 'rm -rf /home/core/systemd-units /home/core/systemd-scripts'
+}
diff --git a/createdisk.sh b/createdisk.sh
index 66bd7560..88fb82a4 100755
--- a/createdisk.sh
+++ b/createdisk.sh
@@ -140,6 +140,8 @@ fi
 # Beyond this point, packages added to the ADDITIONAL_PACKAGES variable won’t be installed in the guest
 install_additional_packages ${VM_IP}
 
+copy_systemd_units
+
 cleanup_vm_image ${VM_NAME} ${VM_IP}
 
 # Enable cloud-init service
@@ -162,6 +164,17 @@ fi
 
 podman_version=$(${SSH} core@${VM_IP} -- 'rpm -q --qf %{version} podman')
 
+# Disable cloud-init network config
+${SSH} core@${VM_IP} 'sudo bash -x -s' << EOF
+cat << EFF > /etc/cloud/cloud.cfg.d/05_disable-network.cfg
+network:
+  config: disabled
+EFF
+EOF
+
+# Disable cloud-init hostname update
+${SSH} core@${VM_IP} -- 'sudo sed -i "s/^preserve_hostname: false$/preserve_hostname: true/" /etc/cloud/cloud.cfg'
+
 # Cleanup cloud-init config
 ${SSH} core@${VM_IP} -- "sudo cloud-init clean --logs"
diff --git a/docs/self-sufficient-bundle.md b/docs/self-sufficient-bundle.md
new file mode 100644
index 00000000..5e97d455
--- /dev/null
+++ b/docs/self-sufficient-bundle.md
@@ -0,0 +1,34 @@
+# Self-sufficient bundles
+
+Since release 4.19.0 of OpenShift Local, the bundles generated by `snc` contain additional systemd services that provision the cluster themselves, removing
+the need for an outside entity to do so. An outside process still needs to create a few files at pre-defined locations inside the VM for the systemd
+services to do their work.
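+
+For example, an outside process (`crc` itself, or `crc-cloud` for cloud images) could seed these files over SSH before the cluster is started for the first
+time. The sketch below is only an illustration: the destination paths are the ones listed in the table that follows, while the VM address, the SSH key and
+the password values are placeholders.
+
+```bash
+# Illustration: seed the provisioning inputs over SSH before first start.
+# VM_IP, the key path and the password values below are placeholders.
+VM="core@${VM_IP}"
+scp -i id_ecdsa pull-secret.json "${VM}":/tmp/pull-secret
+ssh -i id_ecdsa "${VM}" 'sudo mkdir -p /opt/crc && sudo mv /tmp/pull-secret /opt/crc/pull-secret'
+ssh -i id_ecdsa "${VM}" "echo 'kubeadmin-secret' | sudo tee /opt/crc/pass_kubeadmin"
+ssh -i id_ecdsa "${VM}" "echo 'developer-secret' | sudo tee /opt/crc/pass_developer"
+# optional marker env file, read by the units that declare EnvironmentFile=/etc/systemd/system/crc-env
+ssh -i id_ecdsa "${VM}" "printf 'CRC_CLOUD=1\nCRC_NETWORK_MODE_USER=0\n' | sudo tee /etc/systemd/system/crc-env"
+```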
+
+## Systemd services and their input files
+
+The following table lists the systemd services and the location of the files they need to provision the cluster; users of SNC need to create those files.
+
+| Systemd unit | Runs for (ocp, MicroShift, both) | Input files location | Marker env variables |
+| :----------------------------: | :------------------------------: | :----------------------------------: | :------------------: |
+| `crc-cluster-status.service` | both | none | none |
+| `crc-pullsecret.service` | both | /opt/crc/pull-secret | none |
+| `crc-dnsmasq.service` | both | none | none |
+| `crc-routes-controller.service` | both | none | none |
+| `ocp-cluster-ca.service` | ocp | /opt/crc/custom-ca.crt | CRC_CLOUD=1 |
+| `ocp-clusterid.service` | ocp | none | none |
+| `ocp-custom-domain.service` | ocp | none | CRC_CLOUD=1 |
+| `ocp-growfs.service` | ocp | none | none |
+| `ocp-userpasswords.service` | ocp | /opt/crc/pass_{kubeadmin, developer} | none |
+
+In addition to the above services, `ocp-cluster-ca.path`, `crc-pullsecret.path` and `ocp-userpasswords.path` monitor the filesystem paths
+related to their `*.service` counterparts and start the corresponding service when those paths become available.
+
+> [!NOTE]
+> A "marker env variable" is set using an env file; if the required env variable is not set, the unit is skipped.
+> Some units run only when CRC_CLOUD=1 is set; these are only needed when the bundles are used with crc-cloud.
+
+The systemd services are heavily based on the [`clustersetup.sh`](https://github.com/crc-org/crc-cloud/blob/main/pkg/bundle/setup/clustersetup.sh) script found in the `crc-cloud` project.
+
+## Naming convention for the systemd unit files
+
+Systemd units that are needed for both 'OpenShift' and 'MicroShift' are named `crc-*.service`, and units that are needed only for 'OpenShift' are named
+`ocp-*.service`; units that are only needed for 'MicroShift' should be named `ucp-*.service` when they are added.
+
diff --git a/systemd/crc-cluster-status.service b/systemd/crc-cluster-status.service
new file mode 100644
index 00000000..5ebcbdfd
--- /dev/null
+++ b/systemd/crc-cluster-status.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=CRC Unit checking if cluster is ready
+After=kubelet.service ocp-clusterid.service ocp-cluster-ca.service ocp-custom-domain.service
+After=crc-pullsecret.service
+
+[Service]
+Type=oneshot
+Restart=on-failure
+ExecStart=/usr/local/bin/crc-cluster-status.sh
+RemainAfterExit=true
+
+[Install]
+WantedBy=multi-user.target
diff --git a/systemd/crc-cluster-status.sh b/systemd/crc-cluster-status.sh
new file mode 100644
index 00000000..91163789
--- /dev/null
+++ b/systemd/crc-cluster-status.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -x
+
+export KUBECONFIG=/opt/kubeconfig
+
+function check_cluster_healthy() {
+    WAIT="authentication|console|etcd|ingress|openshift-apiserver"
+
+    until `oc get co > /dev/null 2>&1`
+    do
+        sleep 2
+    done
+
+    for i in $(oc get co | grep -P "$WAIT" | awk '{ print $3 }')
+    do
+        if [[ $i == "False" ]]
+        then
+            return 1
+        fi
+    done
+    return 0
+}
+
+rm -rf /tmp/.crc-cluster-ready
+
+COUNTER=0
+CLUSTER_HEALTH_SLEEP=8
+CLUSTER_HEALTH_RETRIES=500
+
+while ! check_cluster_healthy
+do
+    sleep $CLUSTER_HEALTH_SLEEP
+    if [[ $COUNTER == $CLUSTER_HEALTH_RETRIES ]]
+    then
+        exit 1
+    fi
+    ((COUNTER++))
+done
+
+# need to set a marker to let `crc` know the cluster is ready
+touch /tmp/.crc-cluster-ready
+
diff --git a/systemd/crc-dnsmasq.service b/systemd/crc-dnsmasq.service
new file mode 100644
index 00000000..32f6032c
--- /dev/null
+++ b/systemd/crc-dnsmasq.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=CRC Unit for configuring dnsmasq
+Wants=ovs-configuration.service
+After=ovs-configuration.service
+Before=kubelet-dependencies.target
+StartLimitIntervalSec=30
+
+[Service]
+Type=oneshot
+Restart=on-failure
+EnvironmentFile=/etc/systemd/system/crc-env
+ExecStartPre=/bin/systemctl start ovs-configuration.service
+ExecStart=/usr/local/bin/crc-dnsmasq.sh
+ExecStartPost=/usr/bin/systemctl restart NetworkManager.service
+ExecStartPost=/usr/bin/systemctl restart dnsmasq.service
+
+[Install]
+WantedBy=kubelet-dependencies.target
diff --git a/systemd/crc-pullsecret.service b/systemd/crc-pullsecret.service
new file mode 100644
index 00000000..d6e2c5a7
--- /dev/null
+++ b/systemd/crc-pullsecret.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=CRC Unit for adding pull secret to cluster
+After=kubelet.service
+StartLimitIntervalSec=90sec
+
+[Service]
+Type=oneshot
+Restart=on-failure
+ExecStart=/usr/local/bin/crc-pullsecret.sh
+
+[Install]
+WantedBy=multi-user.target
diff --git a/systemd/crc-pullsecret.sh b/systemd/crc-pullsecret.sh
new file mode 100644
index 00000000..7aebfae4
--- /dev/null
+++ b/systemd/crc-pullsecret.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -x
+
+source /usr/local/bin/crc-systemd-common.sh
+export KUBECONFIG="/opt/kubeconfig"
+
+wait_for_resource secret
+
+# check if the existing pull-secret is valid; if not, add the one from /opt/crc/pull-secret
+existingPsB64=$(oc get secret pull-secret -n openshift-config -o jsonpath="{['data']['\.dockerconfigjson']}")
+existingPs=$(echo "${existingPsB64}" | base64 -d)
+
+echo "${existingPs}" | jq -e '.auths'
+
+if [[ $?
!= 0 ]]; then + pullSecretB64=$(cat /opt/crc/pull-secret | base64 -w0) + oc patch secret pull-secret -n openshift-config --type merge -p "{\"data\":{\".dockerconfigjson\":\"${pullSecretB64}\"}}" + rm -f /opt/crc/pull-secret +fi + diff --git a/systemd/crc-routes-controller.service b/systemd/crc-routes-controller.service new file mode 100644 index 00000000..4280c57d --- /dev/null +++ b/systemd/crc-routes-controller.service @@ -0,0 +1,12 @@ +[Unit] +Description=CRC Unit starting routes controller +Wants=network-online.target gvisor-tap-vsock.service sys-class-net-tap0.device +After=sys-class-net-tap0.device network-online.target kubelet.service gvisor-tap-vsock.service + +[Service] +Type=oneshot +EnvironmentFile=/etc/systemd/system/crc-env +ExecStart=/usr/local/bin/crc-routes-controller.sh + +[Install] +WantedBy=multi-user.target diff --git a/systemd/crc-routes-controller.sh b/systemd/crc-routes-controller.sh new file mode 100644 index 00000000..7aa2c331 --- /dev/null +++ b/systemd/crc-routes-controller.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -x + +if [[ ${CRC_NETWORK_MODE_USER} -eq 0 ]]; then + echo -n "network-mode 'system' detected: skipping routes-controller pod deployment" + exit 0 +fi + +source /usr/local/bin/crc-systemd-common.sh +export KUBECONFIG=/opt/kubeconfig + +wait_for_resource pods + +oc apply -f /opt/crc/routes-controller.yaml + diff --git a/systemd/crc-systemd-common.sh b/systemd/crc-systemd-common.sh new file mode 100644 index 00000000..3ccec925 --- /dev/null +++ b/systemd/crc-systemd-common.sh @@ -0,0 +1,12 @@ +# $1 is the resource to check +# $2 is an optional maximum retry count; default 20 +function wait_for_resource() { + local retry=0 + local max_retry=${2:-20} + until `oc get "$1" > /dev/null 2>&1` + do + [ $retry == $max_retry ] && exit 1 + sleep 5 + ((retry++)) + done +} diff --git a/systemd/dnsmasq.sh.template b/systemd/dnsmasq.sh.template new file mode 100644 index 00000000..f0168fd9 --- /dev/null +++ b/systemd/dnsmasq.sh.template @@ -0,0 +1,26 @@ +#!/bin/bash + +set -x + +if [[ ${CRC_NETWORK_MODE_USER} -eq 1 ]]; then + echo -n "network-mode 'user' detected: skipping dnsmasq configuration" + exit 0 +fi + +hostName=$(hostname) +hostIp=$(hostname --all-ip-addresses | awk '{print $1}') + +cat << EOF > /etc/dnsmasq.d/crc-dnsmasq.conf +listen-address=$hostIp +expand-hosts +log-queries +local=/crc.testing/ +domain=crc.testing +address=/${APPS_DOMAIN}/$hostIp +address=/api.crc.testing/$hostIp +address=/api-int.crc.testing/$hostIp +address=/$hostName.crc.testing/$hostIp +EOF + +/bin/systemctl enable --now dnsmasq.service +/bin/nmcli conn modify --temporary ovs-if-br-ex ipv4.dns $hostIp,1.1.1.1 diff --git a/systemd/ocp-cluster-ca.service b/systemd/ocp-cluster-ca.service new file mode 100644 index 00000000..7294c2c8 --- /dev/null +++ b/systemd/ocp-cluster-ca.service @@ -0,0 +1,11 @@ +[Unit] +Description=CRC Unit setting custom cluster ca +After=kubelet.service ocp-clusterid.service + +[Service] +Type=oneshot +Restart=on-failure +ExecStart=/usr/local/bin/ocp-cluster-ca.sh + +[Install] +WantedBy=multi-user.target diff --git a/systemd/ocp-cluster-ca.sh b/systemd/ocp-cluster-ca.sh new file mode 100644 index 00000000..9cf14e87 --- /dev/null +++ b/systemd/ocp-cluster-ca.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +# The steps followed to generate CA and replace system:admin cert are from: +# https://access.redhat.com/solutions/5286371 +# https://access.redhat.com/solutions/6054981 + +set -x + +source /usr/local/bin/crc-systemd-common.sh +export KUBECONFIG="/opt/kubeconfig" + 
+wait_for_resource configmap + +custom_ca_path=/opt/crc/custom-ca.crt +external_ip_path=/opt/crc/eip + +if [ ! -f ${custom_ca_path} ]; then + echo "Cert bundle /opt/crc/custom-ca.crt not found, generating one..." + # generate a ca bundle and use it, overwrite custom_ca_path + CA_SUBJ="/OU=openshift/CN=admin-kubeconfig-signer-custom" + openssl genrsa -out /tmp/custom-ca.key 4096 + openssl req -x509 -new -nodes -key /tmp/custom-ca.key -sha256 -days 365 -out "${custom_ca_path}" -subj "${CA_SUBJ}" +fi + +if [ ! -f /opt/crc/pass_kubeadmin ]; then + echo "kubeadmin password file not found" + exit 1 +fi + +PASS_KUBEADMIN="$(cat /opt/crc/pass_kubeadmin)" +oc create configmap client-ca-custom -n openshift-config --from-file=ca-bundle.crt=${custom_ca_path} +oc patch apiserver cluster --type=merge -p '{"spec": {"clientCA": {"name": "client-ca-custom"}}}' +oc create configmap admin-kubeconfig-client-ca -n openshift-config --from-file=ca-bundle.crt=${custom_ca_path} \ + --dry-run=client -o yaml | oc replace -f - + +rm -f /opt/crc/custom-ca.crt + +# create CSR +openssl req -new -newkey rsa:4096 -nodes -keyout /tmp/newauth-access.key -out /tmp/newauth-access.csr -subj "/CN=system:admin" + +cat << EOF >> /tmp/newauth-access-csr.yaml +apiVersion: certificates.k8s.io/v1 +kind: CertificateSigningRequest +metadata: + name: newauth-access +spec: + signerName: kubernetes.io/kube-apiserver-client + groups: + - system:authenticated + request: $(cat /tmp/newauth-access.csr | base64 -w0) + usages: + - client auth +EOF + +oc create -f /tmp/newauth-access-csr.yaml + +until `oc adm certificate approve newauth-access > /dev/null 2>&1` +do + echo "Unable to approve the csr newauth-access" + sleep 5 +done + +cluster_name=$(oc config view -o jsonpath='{.clusters[0].name}') +apiserver_url=$(oc config view -o jsonpath='{.clusters[0].cluster.server}') + +if [ -f "${external_ip_path}" ]; then + apiserver_url=api.$(cat "${external_ip_path}").nip.io +fi + +updated_kubeconfig_path=/opt/crc/kubeconfig + +oc get csr newauth-access -o jsonpath='{.status.certificate}' | base64 -d > /tmp/newauth-access.crt +oc config set-credentials system:admin --client-certificate=/tmp/newauth-access.crt --client-key=/tmp/newauth-access.key --embed-certs --kubeconfig="${updated_kubeconfig_path}" +oc config set-context system:admin --cluster="${cluster_name}" --namespace=default --user=system:admin --kubeconfig="${updated_kubeconfig_path}" +oc get secret localhost-recovery-client-token -n openshift-kube-controller-manager -ojsonpath='{.data.ca\.crt}'| base64 -d > /tmp/bundle-ca.crt +oc config set-cluster "${cluster_name}" --server="${apiserver_url}" --certificate-authority=/tmp/bundle-ca.crt \ + --kubeconfig="${updated_kubeconfig_path}" --embed-certs + +echo "Logging in again to update $KUBECONFIG with kubeadmin token" +COUNTER=0 +MAXIMUM_LOGIN_RETRY=500 +until `oc login --insecure-skip-tls-verify=true -u kubeadmin -p "$PASS_KUBEADMIN" https://api.crc.testing:6443 --kubeconfig /opt/crc/newkubeconfig > /dev/null 2>&1` +do + if [ $COUNTER == $MAXIMUM_LOGIN_RETRY ]; then + echo "Unable to login to the cluster..., installation failed." + exit 1 + fi + echo "Logging into OpenShift with updated credentials try $COUNTER, hang on...." 
+    sleep 5
+    ((COUNTER++))
+done
diff --git a/systemd/ocp-clusterid.service b/systemd/ocp-clusterid.service
new file mode 100644
index 00000000..8882491b
--- /dev/null
+++ b/systemd/ocp-clusterid.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=CRC Unit setting random cluster ID
+After=kubelet.service
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/ocp-clusterid.sh
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/systemd/ocp-clusterid.sh b/systemd/ocp-clusterid.sh
new file mode 100644
index 00000000..686deaa5
--- /dev/null
+++ b/systemd/ocp-clusterid.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -x
+
+source /usr/local/bin/crc-systemd-common.sh
+export KUBECONFIG="/opt/kubeconfig"
+uuid=$(uuidgen)
+
+wait_for_resource clusterversion
+
+oc patch clusterversion version -p "{\"spec\":{\"clusterID\":\"${uuid}\"}}" --type merge
diff --git a/systemd/ocp-custom-domain.service b/systemd/ocp-custom-domain.service
new file mode 100644
index 00000000..dd4ba397
--- /dev/null
+++ b/systemd/ocp-custom-domain.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=CRC Unit setting nip.io domain for cluster
+After=kubelet.service ocp-clusterid.service ocp-cluster-ca.service
+
+[Service]
+Type=oneshot
+Restart=on-failure
+EnvironmentFile=/etc/systemd/system/crc-env
+ExecStart=/usr/local/bin/ocp-custom-domain.sh
+
+[Install]
+WantedBy=multi-user.target
diff --git a/systemd/ocp-custom-domain.sh b/systemd/ocp-custom-domain.sh
new file mode 100644
index 00000000..fe39486e
--- /dev/null
+++ b/systemd/ocp-custom-domain.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+set -x
+
+if [ -z "${CRC_CLOUD}" ]; then
+    echo "Not running in crc-cloud mode"
+    exit 0
+fi
+
+source /usr/local/bin/crc-systemd-common.sh
+export KUBECONFIG="/opt/kubeconfig"
+
+if [ ! -f /opt/crc/eip ]; then
+    echo "external ip not found"
+    exit 1
+fi
+
+EIP=$(cat /opt/crc/eip)
+
+STEPS_SLEEP_TIME=30
+
+wait_for_resource secret
+
+# create cert and add as secret
+openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout /tmp/nip.key -out /tmp/nip.crt -subj "/CN=$EIP.nip.io" -addext "subjectAltName=DNS:apps.$EIP.nip.io,DNS:*.apps.$EIP.nip.io,DNS:api.$EIP.nip.io"
+oc delete secret nip-secret -n openshift-config || true
+oc create secret tls nip-secret --cert=/tmp/nip.crt --key=/tmp/nip.key -n openshift-config
+sleep $STEPS_SLEEP_TIME
+
+# patch ingress
+cat << EOF > /tmp/ingress-patch.yaml
+spec:
+  appsDomain: apps.$EIP.nip.io
+  componentRoutes:
+  - hostname: console-openshift-console.apps.$EIP.nip.io
+    name: console
+    namespace: openshift-console
+    servingCertKeyPairSecret:
+      name: nip-secret
+  - hostname: oauth-openshift.apps.$EIP.nip.io
+    name: oauth-openshift
+    namespace: openshift-authentication
+    servingCertKeyPairSecret:
+      name: nip-secret
+EOF
+oc patch ingresses.config.openshift.io cluster --type=merge --patch-file=/tmp/ingress-patch.yaml
+
+# patch API server to use new CA secret
+oc patch apiserver cluster --type=merge -p '{"spec":{"servingCerts": {"namedCertificates":[{"names":["api.'$EIP'.nip.io"],"servingCertificate": {"name": "nip-secret"}}]}}}'
+
+# patch image registry route
+oc patch -p '{"spec": {"host": "default-route-openshift-image-registry.'$EIP'.nip.io"}}' route default-route -n openshift-image-registry --type=merge
+
+#wait_cluster_become_healthy "authentication|console|etcd|ingress|openshift-apiserver"
diff --git a/systemd/ocp-growfs.service b/systemd/ocp-growfs.service
new file mode 100644
index 00000000..ce771ed6
--- /dev/null
+++ b/systemd/ocp-growfs.service
@@ -0,0 +1,9 @@
+[Unit]
+Description=CRC Unit to grow the root filesystem
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/ocp-growfs.sh
+
+[Install]
+WantedBy=multi-user.target
diff --git a/systemd/ocp-growfs.sh b/systemd/ocp-growfs.sh
new file mode 100644
index 00000000..c637a7c0
--- /dev/null
+++ b/systemd/ocp-growfs.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -x
+
+root_partition=$(/usr/sbin/blkid -t TYPE=xfs -o device)
+/usr/bin/growpart "${root_partition%?}" "${root_partition#/dev/???}"
+
+rootFS="/sysroot"
+mount -o remount,rw "${rootFS}"
+xfs_growfs "${rootFS}"
+#mount -o remount,ro "${rootFS}"
diff --git a/systemd/ocp-mco-sshkey.service b/systemd/ocp-mco-sshkey.service
new file mode 100644
index 00000000..cfcd3557
--- /dev/null
+++ b/systemd/ocp-mco-sshkey.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=CRC Unit patching the MachineConfig to add new ssh key
+After=kubelet.service
+StartLimitIntervalSec=1min
+StartLimitBurst=1
+
+[Service]
+Type=oneshot
+Restart=on-failure
+ExecStart=/usr/local/bin/ocp-mco-sshkey.sh
+RemainAfterExit=true
+
+[Install]
+WantedBy=multi-user.target
diff --git a/systemd/ocp-mco-sshkey.sh b/systemd/ocp-mco-sshkey.sh
new file mode 100644
index 00000000..5df2062e
--- /dev/null
+++ b/systemd/ocp-mco-sshkey.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -x
+
+source /usr/local/bin/crc-systemd-common.sh
+export KUBECONFIG="/opt/kubeconfig"
+
+pub_key_path="/opt/crc/id_rsa.pub"
+
+if [ ! -f "${pub_key_path}" ]; then
+    echo "No pubkey file found"
+    exit 1
+fi
+
+echo "Updating the public key resource for machine config operator"
+pub_key=$(tr -d '\n\r' < ${pub_key_path})
+wait_for_resource machineconfig
+oc patch machineconfig 99-master-ssh -p "{\"spec\": {\"config\": {\"passwd\": {\"users\": [{\"name\": \"core\", \"sshAuthorizedKeys\": [\"${pub_key}\"]}]}}}}" --type merge
+[ "$?" != 0 ] && echo "failed to update public key to machine config operator" && exit 1
diff --git a/systemd/ocp-userpasswords.service b/systemd/ocp-userpasswords.service
new file mode 100644
index 00000000..e1ef18fd
--- /dev/null
+++ b/systemd/ocp-userpasswords.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=CRC Unit setting the developer and kubeadmin user password
+After=kubelet.service
+
+[Service]
+Type=oneshot
+Restart=on-failure
+ExecStartPre=/usr/bin/sleep 5
+ExecStart=/usr/local/bin/ocp-userpasswords.sh
+
+[Install]
+WantedBy=multi-user.target
diff --git a/systemd/ocp-userpasswords.sh b/systemd/ocp-userpasswords.sh
new file mode 100644
index 00000000..c42170d8
--- /dev/null
+++ b/systemd/ocp-userpasswords.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -x
+
+source /usr/local/bin/crc-systemd-common.sh
+export KUBECONFIG="/opt/kubeconfig"
+
+function gen_htpasswd() {
+    if [ ! -z "${1}" ] && [ ! -z "${2}" ]; then
+        podman run --rm -ti xmartlabs/htpasswd $1 $2 >> /tmp/htpasswd.txt
+    fi
+}
+
+wait_for_resource secret
+
+if [ ! -f /opt/crc/pass_developer ]; then
+    echo "developer password does not exist"
+    exit 1
+fi
+
+if [ ! -f /opt/crc/pass_kubeadmin ]; then
+    echo "kubeadmin password does not exist"
+    exit 1
+fi
+
+PASS_DEVELOPER=$(cat /opt/crc/pass_developer)
+PASS_KUBEADMIN=$(cat /opt/crc/pass_kubeadmin)
+
+rm -f /tmp/htpasswd.txt
+gen_htpasswd developer "${PASS_DEVELOPER}"
+gen_htpasswd kubeadmin "${PASS_KUBEADMIN}"
+
+if [ -f /tmp/htpasswd.txt ]; then
+    sed -i '/^\s*$/d' /tmp/htpasswd.txt
+
+    oc create secret generic htpass-secret --from-file=htpasswd=/tmp/htpasswd.txt -n openshift-config --dry-run=client -o yaml > /tmp/htpass-secret.yaml
+    oc replace -f /tmp/htpass-secret.yaml
+fi
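
Once these units have run inside the VM, the consumer of the bundle can verify the provisioning from the outside. A minimal sketch of such a check, assuming
the password files created earlier and using placeholders for the VM address, key and password, might look like:

```bash
# crc-cluster-status.sh touches this marker once the watched cluster operators report ready
ssh -i id_ecdsa core@<vm-ip> 'until test -f /tmp/.crc-cluster-ready; do sleep 10; done'
# the htpass-secret replaced by ocp-userpasswords.sh should now accept the new developer password
oc login -u developer -p 'developer-secret' https://api.crc.testing:6443
```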