Skip to content

Commit

Permalink
Require oplog update only when archived
Browse files Browse the repository at this point in the history
An oplog update is only required when the object is archived, not merely
when lifecycle is enabled on the bucket: fix the condition accordingly to
avoid extra load on mongo.

The update is also required when bucket notification is enabled on the
bucket, no change there.

Issue: BB-590
  • Loading branch information
francoisferrand committed Dec 27, 2024
1 parent f59e7ca commit 5e429ef
Show file tree
Hide file tree
Showing 2 changed files with 36 additions and 7 deletions.
10 changes: 5 additions & 5 deletions extensions/mongoProcessor/MongoQueueProcessor.js
Original file line number Diff line number Diff line change
Expand Up @@ -430,9 +430,9 @@ class MongoQueueProcessor {
return done();
}

return cb();
return cb(null, zenkoObjMd);
},
cb => {
(zenkoObjMd, cb) => {
const options = {};

// Calling deleteObject with undefined options to use deleteObjectNoVer which is used for
Expand All @@ -446,9 +446,9 @@ class MongoQueueProcessor {
options.versionId = versionId;
}

// If the bucket has no lifecycle or notification configuration, we don't need the
// oplog update, and can skip it to lower the load on mongo
if (!bucketInfo.lifecycleConfiguration && !bucketInfo.notificationConfiguration) {
// If the object is not archived and the bucket has no notification
// configuration, we don't need the oplog update, and can skip it to lower the load on mongo
if (!zenkoObjMd.archive && !bucketInfo.notificationConfiguration) {
options.doesNotNeedOpogUpdate = true;
}

Expand Down
33 changes: 31 additions & 2 deletions tests/functional/ingestion/MongoQueueProcessor.js
Original file line number Diff line number Diff line change
Expand Up @@ -849,7 +849,7 @@ describe('MongoQueueProcessor', function mqp() {
...bucketInfo,
lifecycleConfiguration: new LifecycleConfiguration(null, { replicationEndpoints: [] }),
}),
options: { versionId: VERSION_ID },
options: { doesNotNeedOpogUpdate: true, versionId: VERSION_ID },
},
].forEach(({
title, patchBucketInfo, options,
Expand Down Expand Up @@ -922,6 +922,8 @@ describe('MongoQueueProcessor', function mqp() {
dataStoreVersionId: encode(NEW_VERSION_ID),
dataStoreName: LOCATION,
}])
.setAmzStorageClass('cold')
.setArchive(mockArchive)
.getValue());
const deleteObject = sinon.stub(mongoClient, 'deleteObject').callThrough();
async.waterfall([
Expand All @@ -943,14 +945,39 @@ describe('MongoQueueProcessor', function mqp() {
assert.deepStrictEqual(deleteObject.getCall(0).args[0], BUCKET);
assert.deepStrictEqual(deleteObject.getCall(0).args[1], KEY);
assert.deepStrictEqual(deleteObject.getCall(0).args[2], {
doesNotNeedOpogUpdate: true,
versionId: VERSION_ID
});

done();
});
});

it('should not delete object from mongo when object is archived', done => {
// use existing version id
const versionKey = `${KEY}${VID_SEP}${VERSION_ID}`;
const entry = new DeleteOpQueueEntry(BUCKET, versionKey);
sinon.stub(mqp._mongoClient, 'getObject').yields(null, new ObjectMD()
.setKey(KEY)
.setVersionId(VERSION_ID)
.setDataStoreName('cold')
.setAmzStorageClass('cold')
.setArchive(mockArchive)
.setLocation([])
.getValue());
async.waterfall([
next => mongoClient.getBucketAttributes(BUCKET, fakeLogger,
next),
(bucketInfo, next) => mqp._processDeleteOpQueueEntry(fakeLogger,
entry, LOCATION, bucketInfo, next),
], err => {
assert.ifError(err);

const deleted = mqp.getDeleted();
assert.strictEqual(deleted.length, 0);
done();
});
});

it('should not delete object from mongo when object is in another location', done => {
// use existing version id
const versionKey = `${KEY}${VID_SEP}${VERSION_ID}`;
Expand All @@ -964,6 +991,8 @@ describe('MongoQueueProcessor', function mqp() {
dataStoreVersionId: encode(VERSION_ID),
dataStoreName: LOCATION,
}])
.setAmzStorageClass('cold')
.setArchive(mockArchive)
.getValue());
async.waterfall([
next => mongoClient.getBucketAttributes(BUCKET, fakeLogger,
Expand Down

0 comments on commit 5e429ef

Please sign in to comment.