Skip to content

Commit 0588fad

Browse files
committed
block delete_bucket requests by an OBC account
* Prevent the deletion of an OBC bucket by the OBC account. * A regular account can delete an OBC bucket according to the access policy or via RPC Signed-off-by: Danny Zaken <dannyzaken@gmail.com>
1 parent bc77785 commit 0588fad

File tree

2 files changed

+59
-9
lines changed

2 files changed

+59
-9
lines changed

src/endpoint/s3/s3_rest.js

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -230,9 +230,16 @@ async function authorize_request_policy(req) {
230230
const account = req.object_sdk.requesting_account;
231231
const account_identifier_name = req.object_sdk.nsfs_config_root ? account.name.unwrap() : account.email.unwrap();
232232
const account_identifier_id = req.object_sdk.nsfs_config_root ? account._id : undefined;
233-
const is_system_owner = Boolean(system_owner) && system_owner.unwrap() === account_identifier_name;
233+
234+
// deny delete_bucket permissions from bucket_claim_owner accounts (accounts that were created by OBC from openshift/k8s)
235+
// the OBC bucket can still be deleted by normal accounts according to the access policy which is checked below
236+
if (req.op_name === 'delete_bucket' && account.bucket_claim_owner) {
237+
dbg.error(`delete bucket request by an OBC account is restricted. an attempt to delete bucket by account ${account_identifier_name}`);
238+
throw new S3Error(S3Error.AccessDenied);
239+
}
234240

235241
// @TODO: System owner as a construct should be removed - Temporary
242+
const is_system_owner = Boolean(system_owner) && system_owner.unwrap() === account_identifier_name;
236243
if (is_system_owner) return;
237244

238245
const is_owner = (function() {

src/test/unit_tests/test_s3_ops.js

Lines changed: 51 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -153,6 +153,48 @@ mocha.describe('s3_ops', function() {
153153
const res = await s3.listBuckets({});
154154
assert(!res.Buckets.find(bucket => bucket.Name === BKT1));
155155
});
156+
157+
// Test that bucket deletion is not allowed for an OBC account
158+
mocha.it('OBC account should not be allowed to delete a bucket', async function() {
159+
// create the bucket to be deleted. use rpc to create it, which is the same flow the operator uses
160+
await rpc_client.bucket.create_bucket({ name: "obc-bucket" });
161+
// create an obc account that is the bucket_claim_owner of the bucket
162+
const obc_account = await rpc_client.account.create_account({
163+
name: "obc-account",
164+
email: "obc-account@noobaa.io",
165+
has_login: false,
166+
s3_access: true,
167+
bucket_claim_owner: "obc-bucket",
168+
});
169+
const obc_s3_client = new S3({
170+
endpoint: coretest.get_http_address(),
171+
credentials: {
172+
accessKeyId: obc_account.access_keys[0].access_key.unwrap(),
173+
secretAccessKey: obc_account.access_keys[0].secret_key.unwrap(),
174+
},
175+
forcePathStyle: true,
176+
region: config.DEFAULT_REGION,
177+
requestHandler: new NodeHttpHandler({
178+
httpAgent: http_utils.get_unsecured_agent(coretest.get_http_address()),
179+
}),
180+
});
181+
try {
182+
await obc_s3_client.deleteBucket({ Bucket: "obc-bucket" });
183+
assert.fail('expected AccessDenied error. bucket deletion should not be allowed for obc account');
184+
} catch (err) {
185+
assert.strictEqual(err.Code, 'AccessDenied');
186+
assert.strictEqual(err.$metadata.httpStatusCode, 403);
187+
}
188+
189+
try {
190+
// bucket deletion should be allowed for regular accounts (not obc)
191+
await s3.deleteBucket({ Bucket: "obc-bucket" });
192+
} catch (err) {
193+
assert.fail('expected bucket deletion to be successful for regular accounts');
194+
}
195+
// cleanup
196+
await rpc_client.account.delete_account({ email: "obc-account@noobaa.io" });
197+
});
156198
});
157199

158200
async function test_object_ops(bucket_name, bucket_type, caching, remote_endpoint_options) {
@@ -201,7 +243,7 @@ mocha.describe('s3_ops', function() {
201243
const ENDPOINT_TYPE = USE_REMOTE_ENDPOINT ? process.env.ENDPOINT_TYPE : 'S3_COMPATIBLE';
202244
const AWS_ACCESS_KEY_ID = USE_REMOTE_ENDPOINT ? process.env.AWS_ACCESS_KEY_ID : s3_client_params.credentials.accessKeyId;
203245
const AWS_SECRET_ACCESS_KEY = USE_REMOTE_ENDPOINT ? process.env.AWS_SECRET_ACCESS_KEY :
204-
s3_client_params.credentials.secretAccessKey;
246+
s3_client_params.credentials.secretAccessKey;
205247
coretest.log("before creating azure target bucket: ", source_namespace_bucket, target_namespace_bucket);
206248
if (is_azure_mock) {
207249
coretest.log("creating azure target bucket: ", source_namespace_bucket, target_namespace_bucket);
@@ -341,7 +383,7 @@ mocha.describe('s3_ops', function() {
341383
s3.middlewareStack.add(next => args => {
342384
args.request.query.get_from_cache = 'true'; // represents query_params = { get_from_cache: true }
343385
return next(args);
344-
}, {step: 'build', name: 'getFromCache'});
386+
}, { step: 'build', name: 'getFromCache' });
345387
res = await s3.getObjectTagging(params_req);
346388
s3.middlewareStack.remove('getFromCache');
347389
} else {
@@ -404,7 +446,7 @@ mocha.describe('s3_ops', function() {
404446
s3.middlewareStack.add(next => args => {
405447
args.request.query.get_from_cache = 'true'; // represents query_params = { get_from_cache: true }
406448
return next(args);
407-
}, {step: 'build', name: 'getFromCache'});
449+
}, { step: 'build', name: 'getFromCache' });
408450
res = await s3.getObjectTagging(params_req);
409451
s3.middlewareStack.remove('getFromCache');
410452
} else {
@@ -604,7 +646,7 @@ mocha.describe('s3_ops', function() {
604646
CopySource: `/${bucket_name}/${text_file1}`,
605647
});
606648

607-
const res = await s3.getObjectTagging({Bucket: bucket_name, Key: text_file2});
649+
const res = await s3.getObjectTagging({ Bucket: bucket_name, Key: text_file2 });
608650

609651
assert.strictEqual(res.TagSet.length, params.Tagging.TagSet.length, 'Should be 1');
610652
assert.strictEqual(res.TagSet[0].Value, params.Tagging.TagSet[0].Value);
@@ -631,7 +673,7 @@ mocha.describe('s3_ops', function() {
631673
Key: text_file2,
632674
CopySource: `/${other_platform_bucket}/${text_file1}`,
633675
});
634-
const res = await s3.getObjectTagging({Bucket: bucket_name, Key: text_file2});
676+
const res = await s3.getObjectTagging({ Bucket: bucket_name, Key: text_file2 });
635677
assert.strictEqual(res.TagSet.length, 1, 'Should be 1');
636678
assert.strictEqual(res.TagSet[0].Value, params.Tagging.TagSet[0].Value);
637679
assert.strictEqual(res.TagSet[0].Key, params.Tagging.TagSet[0].Key);
@@ -808,7 +850,7 @@ mocha.describe('s3_ops', function() {
808850
s3.middlewareStack.add(next => args => {
809851
args.request.query.get_from_cache = 'true'; // represents query_params = { get_from_cache: true }
810852
return next(args);
811-
}, {step: 'build', name: 'getFromCache'});
853+
}, { step: 'build', name: 'getFromCache' });
812854
const res2 = await s3.listObjects(params_req);
813855
s3.middlewareStack.remove('getFromCache');
814856

@@ -825,7 +867,7 @@ mocha.describe('s3_ops', function() {
825867
mocha.it('should delete non existing object without failing', async function() {
826868
this.timeout(60000);
827869
const key_to_delete = 'non-existing-obj';
828-
const res = await s3.deleteObject({ Bucket: bucket_name, Key: key_to_delete});
870+
const res = await s3.deleteObject({ Bucket: bucket_name, Key: key_to_delete });
829871
const res_without_metadata = _.omit(res, '$metadata');
830872
assert.deepEqual(res_without_metadata, {});
831873

@@ -838,7 +880,8 @@ mocha.describe('s3_ops', function() {
838880
{ Key: 'non-existing-obj2' },
839881
{ Key: 'non-existing-obj3' }
840882
];
841-
const res = await s3.deleteObjects({ Bucket: bucket_name,
883+
const res = await s3.deleteObjects({
884+
Bucket: bucket_name,
842885
Delete: {
843886
Objects: keys_to_delete
844887
}

0 commit comments

Comments
 (0)