
Commit 4b1fdab

Merge pull request #8388 from romayalon/romy-threaded-multi-object-delete
NC | GPFS | Threaded Multiple Object Delete fix
2 parents 504f3b4 + 110abc2 commit 4b1fdab

4 files changed: +56 / -5 lines changed

docs/dev_guide/ceph_s3_tests/ceph_s3_tests_pending_list_status.md

Lines changed: 1 addition & 0 deletions
@@ -50,3 +50,4 @@ Attached a table with tests that where investigated and their status (this table
 change in our repo) - stopped passing between the update of commit hash 6861c3d81081a6883fb90d66cb60392e1abdf3ca to da91ad8bbf899c72199df35b69e9393c706aabee |
 | test_get_object_ifmodifiedsince_failed | Internal Component | | It used to pass in the past (not related to code
 change in our repo) - stopped passing between the update of commit hash 6861c3d81081a6883fb90d66cb60392e1abdf3ca to da91ad8bbf899c72199df35b69e9393c706aabee |
+| test_versioning_concurrent_multi_object_delete | Faulty Test | [588](https://github.com/ceph/s3-tests/issues/588) |

src/sdk/namespace_fs.js

Lines changed: 8 additions & 1 deletion
@@ -2766,9 +2766,16 @@ class NamespaceFS {
             }
             return version_info;
         } catch (err) {
+            dbg.warn(`NamespaceFS._delete_single_object_versioned: retrying retries=${retries} file_path=${file_path}`, err);
             retries -= 1;
+            // there are a few concurrency scenarios that might happen we should retry for -
+            // 1. the version id is the latest, concurrent put will might move the version id from being the latest to .versions/ -
+            //    will throw safe unlink failed on non matching fd (on GPFS) or inode/mtime (on POSIX).
+            // 2. the version id is the second latest and stays under .versions/ - on concurrent delete of the latest,
+            //    the version id might move to be the latest and we will get ENOENT
+            // 3. concurrent delete of this version - will get ENOENT, doing a retry will return successfully
+            //    after we will see that the version was already deleted
             if (retries <= 0 || !native_fs_utils.should_retry_link_unlink(is_gpfs, err)) throw err;
-            dbg.warn(`NamespaceFS._delete_single_object_versioned: retrying retries=${retries} file_path=${file_path}`, err);
         } finally {
             if (gpfs_options) await this._close_files_gpfs(fs_context, gpfs_options.delete_version, undefined, true);
         }
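
The comments added in this hunk explain the races that make a delete attempt fail transiently (a concurrent put moving the latest version into .versions/, or a concurrent delete racing on the same version), and the warn is now logged before the decision whether to rethrow. For context, below is a minimal sketch of the retry-loop shape this code follows. It is an illustration only, not the actual NamespaceFS._delete_single_object_versioned implementation; delete_version_once, the require path, and the retry budget of 3 are assumptions made for the sketch, while should_retry_link_unlink is the real predicate from native_fs_utils shown further down.

// Sketch only: delete_version_once is a hypothetical helper standing in for the real
// link/unlink work; the require path and retry budget are assumed for this example.
const native_fs_utils = require('../util/native_fs_utils');

async function delete_version_with_retries(fs_context, file_path, is_gpfs, delete_version_once) {
    let retries = 3; // assumed retry budget
    for (;;) {
        try {
            // each attempt can race with concurrent puts/deletes of the same version
            return await delete_version_once(fs_context, file_path);
        } catch (err) {
            console.warn(`delete_version_with_retries: retrying retries=${retries} file_path=${file_path}`, err);
            retries -= 1;
            // give up when the budget is exhausted or the error is not a known transient race
            if (retries <= 0 || !native_fs_utils.should_retry_link_unlink(is_gpfs, err)) throw err;
        }
    }
}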

src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js

Lines changed: 43 additions & 0 deletions
@@ -7,6 +7,7 @@ const fs_utils = require('../../../util/fs_utils');
 const NamespaceFS = require('../../../sdk/namespace_fs');
 const buffer_utils = require('../../../util/buffer_utils');
 const { TMP_PATH } = require('../../system_tests/test_utils');
+const { crypto_random_string } = require('../../../util/string_utils');
 const endpoint_stats_collector = require('../../../sdk/endpoint_stats_collector');
 
 function make_dummy_object_sdk(nsfs_config, uid, gid) {
@@ -81,4 +82,46 @@ describe('test versioning concurrency', () => {
         await P.delay(1000);
         expect(number_of_successful_operations.length).toBe(15);
     });
+
+    // same as s3tests_boto3/functional/test_s3.py::test_versioning_concurrent_multi_object_delete,
+    // this test has a bug, it tries to create the bucket twice and fails
+    // https://github.com/ceph/s3-tests/blob/master/s3tests_boto3/functional/test_s3.py#L1642
+    // see - https://github.com/ceph/s3-tests/issues/588
+    it('concurrent multi object delete', async () => {
+        const bucket = 'bucket1';
+        const concurrency_num = 10;
+        const delete_objects_arr = [];
+        for (let i = 0; i < concurrency_num; i++) {
+            const key = `key${i}`;
+            const random_data = Buffer.from(String(crypto_random_string(7)));
+            const body = buffer_utils.buffer_to_read_stream(random_data);
+            const res = await nsfs.upload_object({ bucket: bucket, key: key, source_stream: body }, DUMMY_OBJECT_SDK);
+            delete_objects_arr.push({ key: key, version_id: res.version_id });
+        }
+        const versions = await nsfs.list_object_versions({ bucket: bucket }, DUMMY_OBJECT_SDK);
+
+        for (const { key, version_id } of delete_objects_arr) {
+            const found = versions.objects.find(object => object.key === key && object.version_id === version_id);
+            expect(found).toBeDefined();
+        }
+
+        const delete_responses = [];
+        const delete_errors = [];
+
+        for (let i = 0; i < concurrency_num; i++) {
+            nsfs.delete_multiple_objects({ bucket, objects: delete_objects_arr }, DUMMY_OBJECT_SDK)
+                .then(res => delete_responses.push(res))
+                .catch(err => delete_errors.push(err));
+        }
+        await P.delay(5000);
+        expect(delete_responses.length).toBe(concurrency_num);
+        for (const res of delete_responses) {
+            expect(res.length).toBe(concurrency_num);
+            for (const single_delete_res of res) {
+                expect(single_delete_res.err_message).toBe(undefined);
+            }
+        }
+        const list_res = await nsfs.list_objects({ bucket: bucket }, DUMMY_OBJECT_SDK);
+        expect(list_res.objects.length).toBe(0);
+    }, 8000);
 });

src/util/native_fs_utils.js

Lines changed: 4 additions & 4 deletions
@@ -268,10 +268,10 @@ async function safe_unlink_gpfs(fs_context, to_delete_path, to_delete_file, dir_
 }
 
 function should_retry_link_unlink(is_gpfs, err) {
-    return is_gpfs ?
-        [gpfs_link_unlink_retry_err, gpfs_unlink_retry_catch].includes(err.code) :
-        [posix_link_retry_err, posix_unlink_retry_err].includes(err.message) ||
-        ['EEXIST'].includes(err.code);
+    const should_retry_general = ['ENOENT', 'EEXIST'].includes(err.code);
+    const should_retry_gpfs = [gpfs_link_unlink_retry_err, gpfs_unlink_retry_catch].includes(err.code);
+    const should_retry_posix = [posix_link_retry_err, posix_unlink_retry_err].includes(err.message);
+    return should_retry_general || is_gpfs ? should_retry_gpfs : should_retry_posix;
 }
 
 ////////////////////////
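
One reading note on the new return statement: in JavaScript, || binds tighter than the conditional operator, so the committed expression parses as shown in the standalone sketch below. The snippet is a restatement for clarity, not part of the commit; the second function is a hypothetical variant that would apply if the intent is "general ENOENT/EEXIST retries on both GPFS and POSIX, otherwise the platform-specific check".

// Sketch only: the boolean parameters stand in for the three predicates computed
// inside should_retry_link_unlink; no NooBaa internals are used here.
function as_committed(should_retry_general, is_gpfs, should_retry_gpfs, should_retry_posix) {
    // || binds before ?:, so this is the parse of the committed return statement
    return (should_retry_general || is_gpfs) ? should_retry_gpfs : should_retry_posix;
}

function general_or_platform_specific(should_retry_general, is_gpfs, should_retry_gpfs, should_retry_posix) {
    // hypothetical alternative: general retry OR the platform-specific check
    return should_retry_general || (is_gpfs ? should_retry_gpfs : should_retry_posix);
}

// Example, assuming a plain ENOENT on POSIX matches none of the GPFS/POSIX-specific constants:
console.log(as_committed(true, false, false, false));                 // false
console.log(general_or_platform_specific(true, false, false, false)); // true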
