 /* Copyright (C) 2024 NooBaa */
+/* eslint-disable max-statements */
 'use strict';
 
 const path = require('path');
 const _ = require('lodash');
+const fs = require('fs');
 const P = require('../../util/promise');
 const mocha = require('mocha');
 const assert = require('assert');
 const fs_utils = require('../../util/fs_utils');
-const { TMP_PATH, generate_nsfs_account, get_new_buckets_path_by_test_env, generate_s3_client, get_coretest_path } = require('../system_tests/test_utils');
+const { TMP_PATH, generate_nsfs_account, get_new_buckets_path_by_test_env, generate_s3_client,
+    get_coretest_path, exec_manage_cli } = require('../system_tests/test_utils');
+const { TYPES, ACTIONS } = require('../../manage_nsfs/manage_nsfs_constants');
+const ManageCLIResponse = require('../../manage_nsfs/manage_nsfs_cli_responses').ManageCLIResponse;
 
 const coretest_path = get_coretest_path();
 const coretest = require(coretest_path);
 const setup_options = { forks: 2, debug: 5 };
 coretest.setup(setup_options);
-const { rpc_client, EMAIL, get_current_setup_options, stop_nsfs_process, start_nsfs_process } = coretest;
+const { rpc_client, EMAIL, get_current_setup_options, stop_nsfs_process, start_nsfs_process, config_dir_name } = coretest;
 
 const CORETEST_ENDPOINT = coretest.get_http_address();
 
|
@@ -74,4 +79,72 @@ mocha.describe('operations with a couple of forks', async function() {
         // cleanup
         await s3_admin.deleteBucket({ Bucket: bucket_name });
     });
+
+    mocha.it('list buckets after regenerate access keys', async function() {
+        // create an additional account
+        const account_name = 'James';
+        const account_options_create = { account_name, uid: 5, gid: 5, config_root: config_dir_name };
+        await fs_utils.create_fresh_path(new_bucket_path_param);
+        await fs.promises.chown(new_bucket_path_param, account_options_create.uid, account_options_create.gid);
+        await fs.promises.chmod(new_bucket_path_param, 0o700);
+        const access_details = await generate_nsfs_account(rpc_client, EMAIL, new_bucket_path_param, account_options_create);
+        // check the account status
+        const account_options_status = { config_root: config_dir_name, name: account_name };
+        const res_account_status = await exec_manage_cli(TYPES.ACCOUNT, ACTIONS.STATUS, account_options_status);
+        assert.equal(JSON.parse(res_account_status).response.code, ManageCLIResponse.AccountStatus.code);
+        // generate the s3 client
+        const s3_uid5_before_access_keys_update = generate_s3_client(access_details.access_key,
+            access_details.secret_key, CORETEST_ENDPOINT);
+        // check the connection for the new account (the request can land on any of the forks)
+        const res_list_buckets = await s3_uid5_before_access_keys_update.listBuckets({});
+        assert.equal(res_list_buckets.$metadata.httpStatusCode, 200);
+        // create a bucket
+        const bucket_name2 = 'bucket2';
+        const res_bucket_create = await s3_uid5_before_access_keys_update.createBucket({ Bucket: bucket_name2 });
+        assert.equal(res_bucket_create.$metadata.httpStatusCode, 200);
+        // update the account (regenerate its access keys)
+        const account_options_update = { config_root: config_dir_name, name: account_name, regenerate: true };
+        const res_account_update = await exec_manage_cli(TYPES.ACCOUNT, ACTIONS.UPDATE, account_options_update);
+        const access_key_id_updated = JSON.parse(res_account_update).response.reply.access_keys[0].access_key;
+        const secret_key_updated = JSON.parse(res_account_update).response.reply.access_keys[0].secret_key;
+        const s3_uid5_after_access_keys_update = generate_s3_client(access_key_id_updated,
+            secret_key_updated, CORETEST_ENDPOINT);
+        // check the connection for the account with the updated access keys (the request can land on any of the forks)
+        const res_list_buckets3 = await s3_uid5_after_access_keys_update.listBuckets({});
+        assert.equal(res_list_buckets3.$metadata.httpStatusCode, 200);
+
+        // a couple of requests with the previous access keys (all should fail)
+        // without the stat check, the cached account would expire only after OBJECT_SDK_ACCOUNT_CACHE_EXPIRY_MS
+        let failed_operations = 0;
+        let successful_operations = 0;
+        const number_of_requests = 5;
+        for (let i = 0; i < number_of_requests; i++) {
+            try {
+                await s3_uid5_before_access_keys_update.listBuckets({});
+                successful_operations += 1;
+            } catch (err) {
+                failed_operations += 1;
+            }
+        }
+        assert.equal(successful_operations, 0);
+        assert.equal(failed_operations, number_of_requests);
+
+        // a couple of requests with the updated access keys (all should succeed)
+        let failed_operations2 = 0;
+        let successful_operations2 = 0;
+        const number_of_requests2 = 5;
+        for (let i = 0; i < number_of_requests2; i++) {
+            try {
+                await s3_uid5_after_access_keys_update.listBuckets({});
+                successful_operations2 += 1;
+            } catch (err) {
+                failed_operations2 += 1;
+            }
+        }
+        assert.equal(successful_operations2, number_of_requests2);
+        assert.equal(failed_operations2, 0);
+
+        // cleanup
+        await s3_uid5_after_access_keys_update.deleteBucket({ Bucket: bucket_name2 });
+    });
 });
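
Side note on the two counting loops in the new test: they are identical except for the S3 client they exercise, so they could be factored into a small shared helper. A minimal sketch, assuming an AWS SDK v3 style client like the one returned by generate_s3_client; the helper name count_list_buckets_outcomes is hypothetical and not part of this change:

// Hypothetical helper (not part of this PR): issue `number_of_requests` ListBuckets
// calls with the given client and count how many succeed vs. fail.
async function count_list_buckets_outcomes(s3_client, number_of_requests) {
    let successful_operations = 0;
    let failed_operations = 0;
    for (let i = 0; i < number_of_requests; i++) {
        try {
            await s3_client.listBuckets({});
            successful_operations += 1;
        } catch (err) {
            failed_operations += 1;
        }
    }
    return { successful_operations, failed_operations };
}

// Example usage inside the test:
// const res_old = await count_list_buckets_outcomes(s3_uid5_before_access_keys_update, 5);
// assert.equal(res_old.successful_operations, 0);
// assert.equal(res_old.failed_operations, 5);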