From 6c5a42ebf3a419f8f94164de69277e460a638f00 Mon Sep 17 00:00:00 2001 From: Aayush Chouhan Date: Wed, 9 Apr 2025 18:42:45 +0530 Subject: [PATCH] Added x-amz-expiration, missing HTTP header in the response of object GET/PUT/HEAD Signed-off-by: Aayush Chouhan --- src/endpoint/s3/ops/s3_get_object.js | 1 + src/endpoint/s3/ops/s3_head_object.js | 1 + src/endpoint/s3/ops/s3_put_object.js | 8 + src/manage_nsfs/nc_lifecycle.js | 65 +------ src/test/system_tests/test_utils.js | 74 ++++++++ .../jest_tests/test_nc_lifecycle.test.js | 93 +++++----- ...est_nc_lifecycle_posix_integration.test.js | 16 +- src/test/unit_tests/nc_index.js | 1 + src/test/unit_tests/test_lifecycle.js | 88 +++++++++ .../test_nc_lifecycle_expiration.js | 150 ++++++++++++++++ src/util/http_utils.js | 23 +++ src/util/lifecycle_utils.js | 167 ++++++++++++++++++ 12 files changed, 564 insertions(+), 123 deletions(-) create mode 100644 src/test/unit_tests/test_nc_lifecycle_expiration.js diff --git a/src/endpoint/s3/ops/s3_get_object.js b/src/endpoint/s3/ops/s3_get_object.js index 6a183fa336..69c2391817 100644 --- a/src/endpoint/s3/ops/s3_get_object.js +++ b/src/endpoint/s3/ops/s3_get_object.js @@ -49,6 +49,7 @@ async function get_object(req, res) { } } http_utils.set_response_headers_from_request(req, res); + if (!version_id) await http_utils.set_expiration_header(req, res, object_md); // setting expiration header for bucket lifecycle const obj_size = object_md.size; const params = { object_md, diff --git a/src/endpoint/s3/ops/s3_head_object.js b/src/endpoint/s3/ops/s3_head_object.js index 1cd3543c3a..c34db17d6a 100644 --- a/src/endpoint/s3/ops/s3_head_object.js +++ b/src/endpoint/s3/ops/s3_head_object.js @@ -29,6 +29,7 @@ async function head_object(req, res) { s3_utils.set_response_object_md(res, object_md); s3_utils.set_encryption_response_headers(req, res, object_md.encryption); http_utils.set_response_headers_from_request(req, res); + if (!params.version_id) await http_utils.set_expiration_header(req, 
res, object_md); // setting expiration header for bucket lifecycle } module.exports = { diff --git a/src/endpoint/s3/ops/s3_put_object.js b/src/endpoint/s3/ops/s3_put_object.js index d8bbdddd1e..17389e05c7 100644 --- a/src/endpoint/s3/ops/s3_put_object.js +++ b/src/endpoint/s3/ops/s3_put_object.js @@ -81,6 +81,14 @@ async function put_object(req, res) { } res.setHeader('ETag', `"${reply.etag}"`); + const object_info = { + key: req.params.key, + create_time: new Date().getTime(), + size: size, + tagging: tagging, + }; + await http_utils.set_expiration_header(req, res, object_info); // setting expiration header for bucket lifecycle + if (reply.seq) { res.seq = reply.seq; delete reply.seq; diff --git a/src/manage_nsfs/nc_lifecycle.js b/src/manage_nsfs/nc_lifecycle.js index f679aab3c3..83a1ccc0cb 100644 --- a/src/manage_nsfs/nc_lifecycle.js +++ b/src/manage_nsfs/nc_lifecycle.js @@ -271,7 +271,7 @@ class NCLifecycle { if (candidates.delete_candidates?.length > 0) { const expiration = lifecycle_rule.expiration ? this._get_expiration_time(lifecycle_rule.expiration) : 0; - const filter_func = this._build_lifecycle_filter({filter: lifecycle_rule.filter, expiration}); + const filter_func = lifecycle_utils.build_lifecycle_filter({filter: lifecycle_rule.filter, expiration}); dbg.log0('process_rule: calling delete_multiple_objects, num of objects to be deleted', candidates.delete_candidates.length); const delete_res = await this._call_op_and_update_status({ bucket_name, @@ -478,7 +478,7 @@ class NCLifecycle { if (rule_state.is_finished) return []; const expiration = this._get_expiration_time(lifecycle_rule.expiration); if (expiration < 0) return []; - const filter_func = this._build_lifecycle_filter({filter: lifecycle_rule.filter, expiration}); + const filter_func = lifecycle_utils.build_lifecycle_filter({filter: lifecycle_rule.filter, expiration}); const filtered_objects = []; // TODO list_objects does not accept a filter and works in batch sizes of 1000. 
should handle batching @@ -537,7 +537,7 @@ class NCLifecycle { const versions_list = params.versions_list; const candidates = []; const expiration = lifecycle_rule.expiration?.days ? this._get_expiration_time(lifecycle_rule.expiration) : 0; - const filter_func = this._build_lifecycle_filter({filter: lifecycle_rule.filter, expiration}); + const filter_func = lifecycle_utils.build_lifecycle_filter({filter: lifecycle_rule.filter, expiration}); for (let i = 0; i < versions_list.objects.length - 1; i++) { if (this.filter_expired_delete_marker(versions_list.objects[i], versions_list.objects[i + 1], filter_func)) { candidates.push(versions_list.objects[i]); @@ -640,7 +640,7 @@ class NCLifecycle { } const versions_list = params.versions_list; - const filter_func = this._build_lifecycle_filter({filter: lifecycle_rule.filter, expiration: 0}); + const filter_func = lifecycle_utils.build_lifecycle_filter({filter: lifecycle_rule.filter, expiration: 0}); const num_newer_versions = lifecycle_rule.noncurrent_version_expiration.newer_noncurrent_versions; const num_non_current_days = lifecycle_rule.noncurrent_version_expiration.noncurrent_days; const delete_candidates = []; @@ -674,7 +674,7 @@ class NCLifecycle { const expiration = lifecycle_rule.abort_incomplete_multipart_upload.days_after_initiation; const res = []; - const filter_func = this._build_lifecycle_filter({filter, expiration}); + const filter_func = lifecycle_utils.build_lifecycle_filter({filter, expiration}); let dir_handle; //TODO this is almost identical to list_uploads except for error handling and support for pagination. 
should modify list-upload and use it in here instead try { @@ -720,29 +720,6 @@ class NCLifecycle { ///////// FILTER HELPERS //////// //////////////////////////////////// - /** - * @typedef {{ - * filter: Object - * expiration: Number - * }} filter_params - * - * @param {filter_params} params - * @returns - */ - _build_lifecycle_filter(params) { - /** - * @param {Object} object_info - */ - return function(object_info) { - if (params.filter?.prefix && !object_info.key.startsWith(params.filter.prefix)) return false; - if (params.expiration && object_info.age < params.expiration) return false; - if (params.filter?.tags && !_file_contain_tags(object_info, params.filter.tags)) return false; - if (params.filter?.object_size_greater_than && object_info.size < params.filter.object_size_greater_than) return false; - if (params.filter?.object_size_less_than && object_info.size > params.filter.object_size_less_than) return false; - return true; - }; - } - /** * get the expiration time in days of an object * if rule is set with date, then rule is applied for all objects after that date @@ -1468,38 +1445,6 @@ class NCLifecycle { } } -////////////////// -// TAGS HELPERS // -////////////////// - -/** - * checks if tag query_tag is in the list tag_set - * @param {Object} query_tag - * @param {Array} tag_set - */ -function _list_contain_tag(query_tag, tag_set) { - for (const t of tag_set) { - if (t.key === query_tag.key && t.value === query_tag.value) return true; - } - return false; -} - -/** - * checks if object has all the tags in filter_tags - * @param {Object} object_info - * @param {Array} filter_tags - * @returns - */ -function _file_contain_tags(object_info, filter_tags) { - if (object_info.tags === undefined) return false; - for (const tag of filter_tags) { - if (!_list_contain_tag(tag, object_info.tags)) { - return false; - } - } - return true; -} - // EXPORTS exports.NCLifecycle = NCLifecycle; exports.ILM_POLICIES_TMP_DIR = ILM_POLICIES_TMP_DIR; diff --git 
a/src/test/system_tests/test_utils.js b/src/test/system_tests/test_utils.js index 32c4a2f947..378f7c1429 100644 --- a/src/test/system_tests/test_utils.js +++ b/src/test/system_tests/test_utils.js @@ -790,6 +790,80 @@ const run_or_skip_test = cond => { } else return it.skip; }; +/** + * update_file_mtime updates the mtime of the target path + * Warnings: + * - This operation would change the mtime of the file to 5 days ago - which means that it changes the etag / obj_id of the object + * - Please do not use on versioned objects (version_id will not be changed, but the mtime will be changed) - might cause issues. + * @param {String} target_path + * @returns {Promise} + */ +async function update_file_mtime(target_path) { + const update_file_mtime_cmp = os_utils.IS_MAC ? `touch -t $(date -v -5d +"%Y%m%d%H%M.%S") ${target_path}` : `touch -d "5 days ago" ${target_path}`; + await os_utils.exec(update_file_mtime_cmp, { return_stdout: true }); +} + +///////////////////////////////// +////// LIFECYCLE UTILS /////// +///////////////////////////////// + +/** + * generate_lifecycle_rule generate an S3 lifecycle rule with optional filters and expiration currently (can be extend to support more lifecycle rule params) + * + * @param {number} expiration_days + * @param {string} id + * @param {string} [prefix] + * @param {Array} [tags] + * @param {number} [size_gt] + * @param {number} [size_lt] + * @returns {Object} + */ +function generate_lifecycle_rule(expiration_days, id, prefix, tags, size_gt, size_lt) { + const filters = {}; + if (prefix) filters.Prefix = prefix; + if (Array.isArray(tags) && tags.length) filters.Tags = tags; + if (size_gt !== undefined) filters.ObjectSizeGreaterThan = size_gt; + if (size_lt !== undefined) filters.ObjectSizeLessThan = size_lt; + + const filter = Object.keys(filters).length > 1 ? 
{ And: filters } : filters; + + return { + ID: id, + Status: 'Enabled', + Filter: filter, + Expiration: { Days: expiration_days }, + }; +} + +/** + * validate_expiration_header validates the `x-amz-expiration` header against the object creation time, expected rule ID and expiration days + * + * The header is expected to have the format: + * expiry-date="YYYY-MM-DDTHH:MM:SS.SSSZ", rule-id="RULE_ID" + * + * @param {string} expiration_header - expiration header value + * @param {string|Date} start_time - start/create time (string or Date) of the object + * @param {string} expected_rule_id - expected rule ID to match in the header + * @param {number} delta_days - expected number of days between start_time and expiry-date + * @returns {boolean} true if the header is valid and matches the expected_rule_id and delta_days otherwise false + */ +function validate_expiration_header(expiration_header, start_time, expected_rule_id, delta_days) { + const match = expiration_header.match(/expiry-date="(.+)", rule-id="(.+)"/); + if (!match) return false; + + const [, expiry_str, rule_id] = match; + const expiration = new Date(expiry_str); + const start = new Date(start_time); + start.setUTCHours(0, 0, 0, 0); // adjusting to midnight UTC otherwise the tests will fail - fix for ceph-s3 tests + + const days_diff = Math.floor((expiration.getTime() - start.getTime()) / (24 * 60 * 60 * 1000)); + + return days_diff === delta_days && rule_id === expected_rule_id; +} + +exports.update_file_mtime = update_file_mtime; +exports.generate_lifecycle_rule = generate_lifecycle_rule; +exports.validate_expiration_header = validate_expiration_header; exports.run_or_skip_test = run_or_skip_test; exports.blocks_exist_on_cloud = blocks_exist_on_cloud; exports.create_hosts_pool = create_hosts_pool; diff --git a/src/test/unit_tests/jest_tests/test_nc_lifecycle.test.js b/src/test/unit_tests/jest_tests/test_nc_lifecycle.test.js index cc2c5ce4d2..a61c36e77f 100644 --- 
a/src/test/unit_tests/jest_tests/test_nc_lifecycle.test.js +++ b/src/test/unit_tests/jest_tests/test_nc_lifecycle.test.js @@ -9,10 +9,9 @@ const path = require('path'); const crypto = require('crypto'); const config = require('../../../../config'); const fs_utils = require('../../../util/fs_utils'); -const { ConfigFS } = require('../../../sdk/config_fs'); const NamespaceFS = require('../../../sdk/namespace_fs'); const buffer_utils = require('../../../util/buffer_utils'); -const { NCLifecycle } = require('../../../manage_nsfs/nc_lifecycle'); +const lifecycle_utils = require('../../../../src/util/lifecycle_utils'); const endpoint_stats_collector = require('../../../sdk/endpoint_stats_collector'); const { TMP_PATH, set_nc_config_dir_in_config, TEST_TIMEOUT } = require('../../system_tests/test_utils'); @@ -21,9 +20,7 @@ const config_root = path.join(TMP_PATH, 'config_root_nc_lifecycle'); const root_path = path.join(TMP_PATH, 'root_path_nc_lifecycle/'); const bucket_name = 'lifecycle_bucket'; const bucket_path = path.join(root_path, bucket_name); -const config_fs = new ConfigFS(config_root); const dummy_object_sdk = make_dummy_object_sdk(); -const nc_lifecycle = new NCLifecycle(config_fs); const key = 'obj1.txt'; const data = crypto.randomBytes(100); @@ -90,7 +87,7 @@ describe('delete_multiple_objects + filter', () => { it('delete_multiple_objects - filter should fail on wrong prefix - versioning DISABLED bucket', async () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -98,7 
+95,7 @@ describe('delete_multiple_objects + filter', () => { it('delete_multiple_objects - filter should fail on wrong object_size_less_than - versioning DISABLED bucket', async () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -106,7 +103,7 @@ describe('delete_multiple_objects + filter', () => { it('delete_multiple_objects - filter should fail on wrong object_size_greater_than - versioning DISABLED bucket', async () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -114,7 +111,7 @@ describe('delete_multiple_objects + filter', () => { it('delete_multiple_objects - filter should fail on wrong tags - versioning DISABLED bucket', async () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, 
expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -122,7 +119,7 @@ describe('delete_multiple_objects + filter', () => { it('delete_multiple_objects - filter should fail on expiration - versioning DISABLED bucket', async () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -133,7 +130,7 @@ describe('delete_multiple_objects + filter', () => { it('delete_multiple_objects - filter should pass on wrong prefix - versioning DISABLED bucket', async () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res); }); @@ -141,7 +138,7 @@ describe('delete_multiple_objects + filter', () => { it('delete_multiple_objects - filter should pass on wrong object_size_less_than - versioning DISABLED bucket', async () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: 
bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res); }); @@ -149,7 +146,7 @@ describe('delete_multiple_objects + filter', () => { it('delete_multiple_objects - filter should pass on wrong object_size_greater_than - versioning DISABLED bucket', async () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res); }); @@ -158,7 +155,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); const tagging = [{ key: 'a', value: 'b' }]; await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer, tagging }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res); }); @@ -166,7 +163,7 @@ describe('delete_multiple_objects + filter', () => { it('delete_multiple_objects 
- filter should pass on expiration - versioning DISABLED bucket', async () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res); }); @@ -178,7 +175,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -187,7 +184,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -196,7 +193,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer 
= buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -205,7 +202,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -214,7 +211,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); }); @@ -226,7 +223,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = 
buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { should_create_a_delete_marker: true }); }); @@ -235,7 +232,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { should_create_a_delete_marker: true }); }); @@ -244,7 +241,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { should_create_a_delete_marker: true }); }); @@ -254,7 +251,7 @@ 
describe('delete_multiple_objects + filter', () => { const data_buffer = buffer_utils.buffer_to_read_stream(data); const tagging = [{ key: 'a', value: 'b' }]; await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer, tagging }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { should_create_a_delete_marker: true }); }); @@ -263,7 +260,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 0 }); const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { should_create_a_delete_marker: true }); }); @@ -275,7 +272,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, 
dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -285,7 +282,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -295,7 +292,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -305,7 +302,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 
'b'}] }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -315,7 +312,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -329,7 +326,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res); @@ -339,7 +336,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); const upload_res = 
await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res); @@ -349,7 +346,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res); @@ -361,7 +358,7 @@ describe('delete_multiple_objects + filter', () => { const tagging = [{ key: 'a', value: 'b' }]; const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer, tagging }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); 
await assert_object_deleted(delete_res); @@ -371,7 +368,7 @@ describe('delete_multiple_objects + filter', () => { nsfs.versioning = 'ENABLED'; const data_buffer = buffer_utils.buffer_to_read_stream(data); const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res); @@ -386,7 +383,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -398,7 +395,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { 
object_size_less_than: 99 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -410,7 +407,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -422,7 +419,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await 
nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -434,7 +431,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res); @@ -449,7 +446,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id}); @@ -461,7 +458,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: 
bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id}); @@ -473,7 +470,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id}); @@ -486,7 +483,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: 
data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id}); @@ -498,7 +495,7 @@ describe('delete_multiple_objects + filter', () => { const data_buffer2 = buffer_utils.buffer_to_read_stream(data); const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 0 }); const objects_to_delete = [{ key, version_id: upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id}); @@ -513,7 +510,7 @@ describe('delete_multiple_objects + filter', () => { await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const delete_res1 = await nsfs.delete_object({ bucket: bucket_name, key: key }, dummy_object_sdk); expect(delete_res1.created_delete_marker).toBe(true); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); const objects_to_delete = [{ key, version_id: delete_res1.created_version_id }]; const 
delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res, { latest_delete_marker: true }); @@ -525,7 +522,7 @@ describe('delete_multiple_objects + filter', () => { await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const delete_res1 = await nsfs.delete_object({ bucket: bucket_name, key: key }, dummy_object_sdk); expect(delete_res1.created_delete_marker).toBe(true); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: delete_res1.created_version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res, { latest_delete_marker: true }); @@ -537,7 +534,7 @@ describe('delete_multiple_objects + filter', () => { await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const delete_res1 = await nsfs.delete_object({ bucket: bucket_name, key: key }, dummy_object_sdk); expect(delete_res1.created_delete_marker).toBe(true); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b' }] }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b' }] }, expiration: 0 }); const objects_to_delete = [{ key, version_id: delete_res1.created_version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res, { latest_delete_marker: true }); @@ -549,7 +546,7 @@ describe('delete_multiple_objects + filter', () => { await 
nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const delete_res1 = await nsfs.delete_object({ bucket: bucket_name, key: key }, dummy_object_sdk); expect(delete_res1.created_delete_marker).toBe(true); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 }); const objects_to_delete = [{ key, version_id: delete_res1.created_version_id }]; const delete_res2 = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res2, { latest_delete_marker: true }); diff --git a/src/test/unit_tests/jest_tests/test_nc_lifecycle_posix_integration.test.js b/src/test/unit_tests/jest_tests/test_nc_lifecycle_posix_integration.test.js index 4e9e3fc9b7..fe578e0d07 100644 --- a/src/test/unit_tests/jest_tests/test_nc_lifecycle_posix_integration.test.js +++ b/src/test/unit_tests/jest_tests/test_nc_lifecycle_posix_integration.test.js @@ -11,11 +11,10 @@ const fs = require('fs'); const config = require('../../../../config'); const fs_utils = require('../../../util/fs_utils'); const { ConfigFS } = require('../../../sdk/config_fs'); -const { TMP_PATH, set_nc_config_dir_in_config, TEST_TIMEOUT, exec_manage_cli, create_system_json } = require('../../system_tests/test_utils'); +const { TMP_PATH, set_nc_config_dir_in_config, TEST_TIMEOUT, exec_manage_cli, create_system_json, update_file_mtime } = require('../../system_tests/test_utils'); const { TYPES, ACTIONS } = require('../../../manage_nsfs/manage_nsfs_constants'); const NamespaceFS = require('../../../sdk/namespace_fs'); const endpoint_stats_collector = require('../../../sdk/endpoint_stats_collector'); -const os_utils = require('../../../util/os_utils'); const { ManageCLIResponse } = require('../../../manage_nsfs/manage_nsfs_cli_responses'); const { ManageCLIError } = 
require('../../../manage_nsfs/manage_nsfs_cli_errors'); const buffer_utils = require('../../../util/buffer_utils'); @@ -2187,19 +2186,6 @@ async function create_object(object_sdk, bucket, key, size, is_old, tagging) { return res; } -/** - * update_file_mtime updates the mtime of the target path - * Warnings: - * - This operation would change the mtime of the file to 5 days ago - which means that it changes the etag / obj_id of the object - * - Please do not use on versioned objects (version_id will not be changed, but the mtime will be changed) - might cause issues. - * @param {String} target_path - * @returns {Promise} - */ -async function update_file_mtime(target_path) { - const update_file_mtime_cmp = os_utils.IS_MAC ? `touch -t $(date -v -5d +"%Y%m%d%H%M.%S") ${target_path}` : `touch -d "5 days ago" ${target_path}`; - await os_utils.exec(update_file_mtime_cmp, { return_stdout: true }); -} - /** * updates the number of noncurrent days xattr of target path to be 5 days older. use only on noncurrent objects. 
* is use this function on latest object the xattr will be changed when the object turns noncurrent diff --git a/src/test/unit_tests/nc_index.js b/src/test/unit_tests/nc_index.js index 027adc60f3..d11bd050bb 100644 --- a/src/test/unit_tests/nc_index.js +++ b/src/test/unit_tests/nc_index.js @@ -21,6 +21,7 @@ require('./test_bucketspace_versioning'); require('./test_nc_bucket_logging'); require('./test_nc_online_upgrade_s3_integrations'); require('./test_public_access_block'); +require('./test_nc_lifecycle_expiration'); // running with iam port require('./test_nc_iam_basic_integration.js'); // please notice that we use a different setup diff --git a/src/test/unit_tests/test_lifecycle.js b/src/test/unit_tests/test_lifecycle.js index 512e790fef..0087be6ffe 100644 --- a/src/test/unit_tests/test_lifecycle.js +++ b/src/test/unit_tests/test_lifecycle.js @@ -22,6 +22,7 @@ const MDStore = require('../../server/object_services/md_store').MDStore; const coretest = require('./coretest'); const lifecycle = require('../../server/bg_services/lifecycle'); const http_utils = require('../../util/http_utils'); +const test_utils = require('../../../src/test/system_tests/test_utils'); const commonTests = require('../lifecycle/common'); const seed = crypto.randomBytes(16); const generator = crypto.createCipheriv('aes-128-gcm', seed, Buffer.alloc(12)); @@ -799,6 +800,93 @@ mocha.describe('lifecycle', () => { } }); + mocha.describe('bucket-lifecycle-expiration-header', function() { + const bucket = Bucket; + + const run_expiration_test = async ({ rules, expected_id, expected_days, key, tagging = undefined, size = 1000}) => { + const putLifecycleParams = { + Bucket: bucket, + LifecycleConfiguration: { Rules: rules } + }; + await s3.putBucketLifecycleConfiguration(putLifecycleParams); + + const putObjectParams = { + Bucket: bucket, + Key: key, + Body: 'x'.repeat(size) // default 1KB if size not specified + }; + if (tagging) { + putObjectParams.Tagging = tagging; + } + const start_time = new 
Date(); + let res = await s3.putObject(putObjectParams); + assert.ok(res.Expiration, 'expiration header missing in putObject response'); + + res = await s3.headObject({ Bucket: bucket, Key: key }); + assert.ok(res.Expiration, 'expiration header missing in headObject response'); + + const valid = test_utils.validate_expiration_header(res.Expiration, start_time, expected_id, expected_days); + assert.ok(valid, `expected rule ${expected_id} to match`); + }; + + mocha.it('should select rule with longest prefix', async () => { + const rules = [ + test_utils.generate_lifecycle_rule(10, 'short-prefix', 'test1/', [], undefined, undefined), + test_utils.generate_lifecycle_rule(17, 'long-prefix', 'test1/logs/', [], undefined, undefined), + ]; + await run_expiration_test({ + rules, + key: 'test1/logs//file.txt', + expected_id: 'long-prefix', + expected_days: 17 + }); + }); + + mocha.it('should select rule with more tags when prefix is same', async () => { + const rules = [ + test_utils.generate_lifecycle_rule(5, 'one-tag', 'test2/', [{ Key: 'env', Value: 'prod' }], undefined, undefined), + test_utils.generate_lifecycle_rule(9, 'two-tags', 'test2/', [ + { Key: 'env', Value: 'prod' }, + { Key: 'team', Value: 'backend' } + ], undefined, undefined), + ]; + await run_expiration_test({ + rules, + key: 'test2/file2.txt', + tagging: 'env=prod&team=backend', + expected_id: 'two-tags', + expected_days: 9 + }); + }); + + mocha.it('should select rule with narrower size span when prefix and tags are matching', async () => { + const rules = [ + test_utils.generate_lifecycle_rule(4, 'wide-range', 'test3/', [], 100, 10000), + test_utils.generate_lifecycle_rule(6, 'narrow-range', 'test3/', [], 1000, 5000), + ]; + await run_expiration_test({ + rules, + key: 'test3/file3.txt', + size: 1500, + expected_id: 'narrow-range', + expected_days: 6 + }); + }); + + mocha.it('should fallback to first matching rule if all filters are equal', async () => { + const rules = [ + 
test_utils.generate_lifecycle_rule(7, 'rule-a', 'test4/', [], 0, 10000), + test_utils.generate_lifecycle_rule(11, 'rule-b', 'test4/', [], 0, 10000), + ]; + await run_expiration_test({ + rules, + key: 'test4/file4.txt', + expected_id: 'rule-a', + expected_days: 7 + }); + }); + }); + function readable_buffer(data, split = 1, finish = 'end') { const max = Math.ceil(data.length / split); let pos = 0; diff --git a/src/test/unit_tests/test_nc_lifecycle_expiration.js b/src/test/unit_tests/test_nc_lifecycle_expiration.js new file mode 100644 index 0000000000..7f73b0d626 --- /dev/null +++ b/src/test/unit_tests/test_nc_lifecycle_expiration.js @@ -0,0 +1,150 @@ +/* Copyright (C) 2024 NooBaa */ +'use strict'; + +const path = require('path'); +const mocha = require('mocha'); +const assert = require('assert'); +const fs_utils = require('../../util/fs_utils'); +const { TYPES, ACTIONS } = require('../../manage_nsfs/manage_nsfs_constants'); +const { TMP_PATH, set_path_permissions_and_owner, invalid_nsfs_root_permissions, generate_s3_client, get_coretest_path, + generate_lifecycle_rule, validate_expiration_header, update_file_mtime, exec_manage_cli } = require('../system_tests/test_utils'); + +const coretest_path = get_coretest_path(); +const coretest = require(coretest_path); +const { rpc_client, EMAIL, get_admin_mock_account_details } = coretest; +coretest.setup({}); + +let s3_admin; + +const tmp_fs_root = path.join(TMP_PATH, 'test_nc_lifecycle_expiration/'); + +/** + * is_nc_coretest returns true when the test runs on NC env + */ +const is_nc_coretest = process.env.NC_CORETEST === 'true'; + +mocha.describe('nc lifecycle - check expiration header', async function() { + const bucket_path = path.join(tmp_fs_root, 'test-bucket/'); + const bucket_name = 'test-bucket'; + + mocha.before(async function() { + this.timeout(0); // eslint-disable-line no-invalid-this + if (invalid_nsfs_root_permissions()) this.skip(); // eslint-disable-line no-invalid-this + // create paths + await 
fs_utils.create_fresh_path(tmp_fs_root, 0o777); + await fs_utils.create_fresh_path(bucket_path, 0o770); + await fs_utils.file_must_exist(bucket_path); + + // set permissions + if (is_nc_coretest) { + const { uid, gid } = get_admin_mock_account_details(); + await set_path_permissions_and_owner(bucket_path, { uid, gid }, 0o700); + } + + // create s3_admin client + const admin = (await rpc_client.account.read_account({ email: EMAIL, })); + const admin_keys = admin.access_keys; + s3_admin = generate_s3_client(admin_keys[0].access_key.unwrap(), + admin_keys[0].secret_key.unwrap(), + coretest.get_http_address()); + + // create test bucket + const cli_bucket_options = { + name: bucket_name, + owner: admin.name, + path: bucket_path, + }; + await exec_manage_cli(TYPES.BUCKET, ACTIONS.ADD, cli_bucket_options); + }); + + mocha.after(async function() { + this.timeout(0); // eslint-disable-line no-invalid-this + await fs_utils.folder_delete(tmp_fs_root); + }); + + const run_expiration_test = async ({ rules, expected_id, expected_days, key, tagging = undefined, size = 1000}) => { + const putLifecycleParams = { + Bucket: bucket_name, + LifecycleConfiguration: { Rules: rules } + }; + await s3_admin.putBucketLifecycleConfiguration(putLifecycleParams); + + const putObjectParams = { + Bucket: bucket_name, + Key: key, + Body: 'x'.repeat(size) // default 1KB if size not specified + }; + if (tagging) { + putObjectParams.Tagging = tagging; + } + const start_time = new Date(); + let res = await s3_admin.putObject(putObjectParams); + assert.ok(res.Expiration, 'expiration header missing in putObject response'); + + // update file mtime to simulate a 5-days old object + await update_file_mtime(path.join(bucket_path, key)); + + res = await s3_admin.headObject({ Bucket: bucket_name, Key: key }); + assert.ok(res.Expiration, 'expiration header missing in headObject response'); + + const valid = validate_expiration_header(res.Expiration, start_time, expected_id, expected_days - 5); + assert.ok(valid, 
`expected rule ${expected_id} to match`); + }; + + mocha.it('should select rule with longest prefix', async () => { + const rules = [ + generate_lifecycle_rule(10, 'short-prefix', 'lifecycle-test1/', [], undefined, undefined), + generate_lifecycle_rule(17, 'long-prefix', 'lifecycle-test1/logs/', [], undefined, undefined), + ]; + await run_expiration_test({ + rules, + key: 'lifecycle-test1/logs//file.txt', + expected_id: 'long-prefix', + expected_days: 17 + }); + }); + + mocha.it('should select rule with more tags when prefix is same', async () => { + const rules = [ + generate_lifecycle_rule(5, 'one-tag', 'lifecycle-test2/', [{ Key: 'env', Value: 'prod' }], undefined, undefined), + generate_lifecycle_rule(9, 'two-tags', 'lifecycle-test2/', [ + { Key: 'env', Value: 'prod' }, + { Key: 'team', Value: 'backend' } + ], undefined, undefined), + ]; + await run_expiration_test({ + rules, + key: 'lifecycle-test2/file2.txt', + tagging: 'env=prod&team=backend', + expected_id: 'two-tags', + expected_days: 9 + }); + }); + + mocha.it('should select rule with narrower size span when prefix and tags are matching', async () => { + const rules = [ + generate_lifecycle_rule(4, 'wide-range', 'lifecycle-test3/', [], 100, 10000), + generate_lifecycle_rule(6, 'narrow-range', 'lifecycle-test3/', [], 1000, 5000), + ]; + await run_expiration_test({ + rules, + key: 'lifecycle-test3/file3.txt', + size: 1500, + expected_id: 'narrow-range', + expected_days: 6 + }); + }); + + mocha.it('should fallback to first matching rule if all filters are equal', async () => { + const rules = [ + generate_lifecycle_rule(7, 'rule-a', 'lifecycle/test4/', [], 0, 10000), + generate_lifecycle_rule(11, 'rule-b', 'lifecycle/test4/', [], 0, 10000), + ]; + await run_expiration_test({ + rules, + key: 'lifecycle/test4/file4.txt', + expected_id: 'rule-a', + expected_days: 7 + }); + }); +}); diff --git a/src/util/http_utils.js b/src/util/http_utils.js index 387db25a5a..670f4783da 100644 --- a/src/util/http_utils.js +++ 
b/src/util/http_utils.js @@ -23,6 +23,7 @@ const net_utils = require('./net_utils'); const time_utils = require('./time_utils'); const cloud_utils = require('./cloud_utils'); const ssl_utils = require('../util/ssl_utils'); +const lifecycle_utils = require('../../src/util/lifecycle_utils'); const RpcError = require('../rpc/rpc_error'); const S3Error = require('../endpoint/s3/s3_errors').S3Error; @@ -664,6 +665,27 @@ function set_amz_headers(req, res) { res.setHeader('x-amz-id-2', req.request_id); } +/** + * set_expiration_header sets the `x-amz-expiration` response header for GET, PUT, or HEAD object requests + * if the object matches any enabled bucket lifecycle rule + * + * @param {Object} req + * @param {http.ServerResponse} res + * @param {Object} object_info + */ +async function set_expiration_header(req, res, object_info) { + const rules = req.params.bucket && await req.object_sdk.get_bucket_lifecycle_configuration_rules({ name: req.params.bucket }); + + const matched_rule = lifecycle_utils.get_lifecycle_rule_for_object(rules, object_info); + if (matched_rule) { + const expiration_header = lifecycle_utils.build_expiration_header(matched_rule, object_info.create_time); + if (expiration_header) { + dbg.log1('set x_amz_expiration header from applied rule: ', matched_rule); + res.setHeader('x-amz-expiration', expiration_header); + } + } +} + /** * @typedef {{ * allow_origin: string; @@ -945,6 +967,7 @@ exports.set_keep_alive_whitespace_interval = set_keep_alive_whitespace_interval; exports.parse_xml_to_js = parse_xml_to_js; exports.check_headers = check_headers; exports.set_amz_headers = set_amz_headers; +exports.set_expiration_header = set_expiration_header; exports.set_cors_headers = set_cors_headers; exports.set_cors_headers_s3 = set_cors_headers_s3; exports.set_cors_headers_sts = set_cors_headers_sts; diff --git a/src/util/lifecycle_utils.js b/src/util/lifecycle_utils.js index dc7d22434c..239ea8116e 100644 --- a/src/util/lifecycle_utils.js +++ 
b/src/util/lifecycle_utils.js @@ -69,7 +69,174 @@ function file_matches_filter({obj_info, filter_func = undefined}) { return true; } +/** + * get_lifecycle_rule_for_object determines the most specific matching lifecycle rule for the given object metadata + * + * @param {Array} rules + * @param {Object} object_info + * @returns {Object|undefined} + */ +function get_lifecycle_rule_for_object(rules, object_info) { + if (!object_info?.key || !Array.isArray(rules) || rules.length < 1) return; + + let matched_rule; + let curr_priority = { + prefix_len: -1, + tag_count: -1, + size_span: Infinity, + }; + + for (const rule of rules) { + if (rule?.status !== 'Enabled') continue; + + const filter_func = build_lifecycle_filter(rule); + if (!filter_func(object_info)) continue; + + const new_priority = get_rule_priority(rule.filter); + + if (compare_rule_priority(curr_priority, new_priority)) { + matched_rule = rule; + curr_priority = new_priority; + } + } + return matched_rule; +} + +/** + * build_expiration_header converts an expiration rule (either with `date` or `days`) + * into an s3 style `x-amz-expiration` header value + * + * @param {Object} rule + * @param {Object} create_time + * @returns {string|undefined} + * + * Example output: + * expiry-date="Thu, 10 Apr 2025 00:00:00 GMT", rule-id="rule_id" + */ +function build_expiration_header(rule, create_time) { + const expiration = rule.expiration; + const rule_id = rule.id; + + if (!expiration || (!expiration.date && !expiration.days)) return undefined; + + const expiration_date = expiration.date ? 
+ new Date(expiration.date) : + new Date(create_time + expiration.days * 24 * 60 * 60 * 1000); + + expiration_date.setUTCHours(0, 0, 0, 0); // adjust expiration to midnight UTC + + return `expiry-date="${expiration_date.toUTCString()}", rule-id="${rule_id}"`; +} + +////////////////// +// FILTERS HELPERS // +////////////////// + +/** + * @typedef {{ + * filter: Object + * expiration: Number + * }} filter_params + * + * builds lifecycle filter function + * + * @param {filter_params} params + * @returns + */ +function build_lifecycle_filter(params) { + /** + * @param {Object} object_info + */ + return function(object_info) { + if (params.filter?.prefix && !object_info.key.startsWith(params.filter.prefix)) return false; + if (params.expiration && object_info.age < params.expiration) return false; + if (params.filter?.tags && !file_contain_tags(object_info, params.filter.tags)) return false; + if (params.filter?.object_size_greater_than && object_info.size < params.filter.object_size_greater_than) return false; + if (params.filter?.object_size_less_than && object_info.size > params.filter.object_size_less_than) return false; + return true; + }; +} + +/** + * get_rule_priority calculates the priority of a lifecycle rule's filter + * + * @param {Object} filter + * @returns {Object} priority object + */ +function get_rule_priority(filter) { + return { + prefix_len: (filter?.prefix || '').length, + tag_count: Array.isArray(filter?.tags) ? filter.tags.length : 0, + size_span: (filter?.object_size_less_than ?? Infinity) - (filter?.object_size_greater_than ?? 
0) + }; +} + +/** + * compare_rule_priority determines if a new rule has higher priority + * + * priority is based on: + * - longest matching prefix + * - most matching tags + * - narrowest object size range + * + * @param {Object} curr_priority + * @param {Object} new_priority + * @returns {boolean} + */ +function compare_rule_priority(curr_priority, new_priority) { + // compare prefix length + if (new_priority.prefix_len > curr_priority.prefix_len) return true; + + if (new_priority.prefix_len === curr_priority.prefix_len) { + // compare tag count (if prefixes are equal) + if (new_priority.tag_count > curr_priority.tag_count) return true; + + if (new_priority.tag_count === curr_priority.tag_count) { + // compare size span (if prefixes and tags are equal) + if (new_priority.size_span < curr_priority.size_span) return true; + } + } + + return false; +} + +////////////////// +// TAGS HELPERS // +////////////////// + +/** + * checks if tag query_tag is in the list tag_set + * @param {Object} query_tag + * @param {Array} tag_set + */ +function list_contain_tag(query_tag, tag_set) { + for (const t of tag_set) { + if (t.key === query_tag.key && t.value === query_tag.value) return true; + } + return false; +} + +/** + * checks if object has all the tags in filter_tags + * @param {Object} object_info + * @param {Array} filter_tags + * @returns + */ +function file_contain_tags(object_info, filter_tags) { + const object_tags = object_info.tags || object_info.tagging; + if (!object_tags) return false; + for (const tag of filter_tags) { + if (!list_contain_tag(tag, object_tags)) { + return false; + } + } + return true; +} + exports.get_latest_nc_lifecycle_run_status = get_latest_nc_lifecycle_run_status; exports.file_matches_filter = file_matches_filter; exports.get_lifecycle_object_info_for_filter = get_lifecycle_object_info_for_filter; exports.get_file_age_days = get_file_age_days; +exports.get_lifecycle_rule_for_object = get_lifecycle_rule_for_object; 
+exports.build_expiration_header = build_expiration_header; +exports.build_lifecycle_filter = build_lifecycle_filter;