#!/bin/bash

set -o errexit

test_dir=$(realpath "$(dirname "$0")")
. "${test_dir}/../functions"
set_debug

vault_name="vault-service"

VOLUME_SNAPSHOT_CLASS=$(deploy_volume_snapshot_class)

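# create_tls_secret generates a self-signed certificate for a Vault service
# and stores key, cert, and CA in a Kubernetes secret. Note that setup_vault
# below deploys Vault with global.tlsDisable=true, so this helper is not
# invoked in the flow below.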
create_tls_secret() {
	local name=$1
	local service_name=$2

	local tmp_dir
	tmp_dir=$(mktemp -d)

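	# 2048-bit RSA key for the server certificate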
	openssl genrsa -out "${tmp_dir}/vault.key" 2048

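	# the CSR config lists every in-cluster DNS name the service may be
	# reached by (the wildcards cover per-pod headless entries) plus loopback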
	cat <<EOF >"${tmp_dir}/csr.conf"
[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_req
prompt = no
[req_distinguished_name]
CN = ${service_name}.${namespace}.svc
[v3_req]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = ${service_name}
DNS.2 = *.${service_name}
DNS.3 = *.${service_name}.${namespace}
DNS.4 = *.${service_name}.${namespace}.svc
DNS.5 = ${service_name}.${namespace}.svc
DNS.6 = ${service_name}.${namespace}.svc.cluster.local
DNS.7 = *.${service_name}.${namespace}.svc.cluster.local
IP.1 = 127.0.0.1
EOF
	openssl req -x509 -new -nodes \
		-key "${tmp_dir}"/vault.key \
		-sha256 \
		-days 3650 \
		-out "${tmp_dir}"/vault.crt \
		-config "${tmp_dir}"/csr.conf \
		-extensions v3_req

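	# fail fast if the certificate was not written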
	if [ ! -s "${tmp_dir}"/vault.crt ]; then
		echo "failed to generate vault tls certificate"
		exit 1
	fi

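	# the certificate is self-signed, so it doubles as its own CA bundle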
	cp "${tmp_dir}"/vault.crt "${tmp_dir}"/vault.ca

	kubectl create secret generic "$name" \
		--from-file=tls.key="${tmp_dir}"/vault.key \
		--from-file=tls.crt="${tmp_dir}"/vault.crt \
		--from-file=ca.crt="${tmp_dir}"/vault.ca

	rm -rf "$tmp_dir"
}

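# setup_vault deploys a standalone Vault with TLS disabled, enables the
# Kubernetes auth method, writes a read-only "operator" policy, and exposes a
# token for the operator through the vault-sync-secret secret.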
setup_vault() {
	local sa_namespace=$namespace
	if [ -n "$OPERATOR_NS" ]; then
		sa_namespace=$OPERATOR_NS
	fi

	deploy_vault $vault_name \
		--set "global.enabled=true" \
		--set "global.tlsDisable=true" \
		--set "server.standalone.enabled=true"
	sleep 10

	wait_pod $vault_name-0

	sleep 20 # give Vault time to become ready before configuring it

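	# enable Kubernetes auth and apply the RBAC from conf/role-binding.yml,
	# retargeted at the namespaces used in this run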
	kubectl_bin exec $vault_name-0 -- vault auth enable kubernetes
	cat "$test_dir"/conf/role-binding.yml \
		| yq ".metadata.namespace=\"$namespace\"" \
		| yq ".subjects[0].namespace=\"$sa_namespace\"" \
		| kubectl_bin apply -f -

	# create a token for the operator to sync secrets with; the "operator"
	# policy is attached by name here and defined just below
	token=$(kubectl_bin exec $vault_name-0 -- vault token create -policy=operator -format=json | jq -r '.auth.client_token')
	kubectl_bin create secret generic vault-sync-secret --from-literal=token="${token}"

	# shellcheck disable=SC2016
	kubectl_bin exec $vault_name-0 -- sh -c 'vault write auth/kubernetes/config kubernetes_host=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT'

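	# read-only access for the operator on both KV v1 and KV v2 path layouts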
	kubectl_bin exec $vault_name-0 -- sh -c 'vault policy write operator - <<EOF
	# KVv1
	path "secret/psmdb/operator/*" {
	  capabilities = ["read"]
	}

	# KVv2
	path "secret/data/psmdb/operator/*" {
	  capabilities = ["read"]
	}
EOF
'

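	# bind the operator's service account to the policy above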
	kubectl_bin exec $vault_name-0 -- vault write auth/kubernetes/role/operator \
		bound_service_account_names=percona-server-mongodb-operator \
		bound_service_account_namespaces="$sa_namespace" \
		policies=operator \
		ttl=1h
}

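# vault_append merges a single key/value pair into the cluster users secret in
# Vault without clobbering existing keys (read-modify-write via jq).
# usage: vault_append "MONGODB_BACKUP_PASSWORD" "backup123456#"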
vault_append() {
	local key=$1
	local value=$2

	local tmp_json
	tmp_json=$(mktemp)
	local new_tmp_json
	new_tmp_json=$(mktemp)

	kubectl_bin exec $vault_name-0 -- sh -c "vault kv get -format=json -mount=secret psmdb/operator/$namespace/$cluster/users" | jq '.data.data' >"$tmp_json"

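	# on the first call the secret does not exist yet; start from an empty object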
	if [ ! -s "$tmp_json" ]; then
		echo '{}' >"$tmp_json"
	fi

	jq --arg key "$key" --arg value "$value" \
		'(. // {}) + {($key): $value}' \
		"$tmp_json" >"$new_tmp_json"

	kubectl_bin cp "$new_tmp_json" $vault_name-0:/tmp/data_new.json
	kubectl_bin exec $vault_name-0 -- sh -c "vault kv put -mount=secret psmdb/operator/$namespace/$cluster/users @\"/tmp/data_new.json\""

	rm -f "$tmp_json" "$new_tmp_json"
}

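# run_snapshot_backup applies conf/backup.yml with the backup name and the
# volume snapshot class deployed above substituted in.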
run_snapshot_backup() {
	local backup_name=$1

	log "running snapshot backup $backup_name"

	yq eval ".metadata.name = \"${backup_name}\" | .spec.volumeSnapshotClass = \"${VOLUME_SNAPSHOT_CLASS}\"" \
		"${test_dir}/conf/backup.yml" \
		| kubectl_bin apply -f -
}

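# run_snapshot_recovery_check waits for the restore to become ready, asserts
# that the operator annotated the cluster with percona.com/resync-pbm, and
# verifies the test data on all three replica set members.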
run_snapshot_recovery_check() {
	local backup_name=$1

	wait_restore "${backup_name}" "${cluster}" "ready" "0" "3000"

	if [ "$(kubectl_bin get psmdb "${cluster}" -o jsonpath='{.metadata.annotations.percona\.com/resync-pbm}')" != "true" ]; then
		echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a snapshot restore"
		exit 1
	fi

	wait_cluster_consistency "${cluster}"
	wait_for_pbm_operations "${cluster}"

	compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" "" "" "" "" "" "true"
	compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" "" "" "" "" "" "true"
	compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" "" "" "" "" "" "true"
}

cluster="some-name"

create_infra "${namespace}"
deploy_minio
apply_s3_storage_secrets

desc 'Setting up Vault'
setup_vault

desc 'Seeding initial credentials in Vault'
vault_append "MONGODB_USER_ADMIN_PASSWORD" "userAdmin123456"
vault_append "MONGODB_BACKUP_PASSWORD" "backup123456#"

desc 'Deploying PSMDB cluster with Vault and snapshot backup'
kubectl_bin apply -f "${test_dir}/conf/secrets.yml"
apply_cluster "${test_dir}/conf/${cluster}.yml"
kubectl_bin apply -f "${conf_dir}/client_with_tls.yml"

echo 'check if all pods started'
wait_for_running "${cluster}-rs0" 3
wait_cluster_consistency "${cluster}"

sleep 60 # give time for resync to start
wait_for_pbm_operations "${cluster}"

desc 'Writing test data'
run_mongo_tls \
	'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \
	"userAdmin:userAdmin123456@${cluster}-rs0.${namespace}"
sleep 1
run_mongo_tls \
	'use myApp\n db.test.insert({ x: 100500 })' \
	"myApp:myPass@${cluster}-rs0.${namespace}"
sleep 5
compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" "" "" "" "" "" "true"
compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}" "" "" "" "" "" "true"
compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}" "" "" "" "" "" "true"

desc 'Running snapshot backup'
backup_name="backup-snapshot-vault"
run_snapshot_backup "${backup_name}"
wait_backup "${backup_name}"

desc 'Drop collection and restore from snapshot'
run_mongo_tls 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}"
run_restore "${backup_name}"
run_snapshot_recovery_check "${backup_name}"

desc 'Verifying Vault sync still works after snapshot restore'
newpass="vault-test-password-after-restore"
vault_append "MONGODB_BACKUP_PASSWORD" "$newpass"
sleep 25 # give the operator time to notice the new secret version
wait_cluster_consistency "${cluster}"
sleep 15 # let the password change propagate to mongod
backup_user=$(getUserData "some-users" "MONGODB_BACKUP_USER")
ping=$(run_mongo_tls "db.runCommand({ ping: 1 }).ok" "$backup_user:$newpass@${cluster}-rs0-0.${cluster}-rs0.${namespace}" "mongodb" "" "--quiet")
if [ "${ping}" != "1" ]; then
	echo "Vault sync check failed: could not authenticate as $backup_user with new password"
	exit 1
fi

log "Vault sync after restore: OK"

destroy_vault "${vault_name}"
destroy "${namespace}"

desc 'test passed'