-
Notifications
You must be signed in to change notification settings - Fork 253
CLDSRV-805: Fix flaky GCP tests due to rate limit #6111
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
bert-e
merged 8 commits into
development/9.3
from
improvement/CLDSRV-805-gcp-rate-limit
Mar 18, 2026
+884
−1,318
Merged
Changes from all commits
Commits
Show all changes
8 commits
Select commit
Hold shift + click to select a range
0766a1f
CLDSRV-805: Increase GCP default retry attempts
BourgoisMickael 47ffd1f
CLDSRV-805: Encapsulate GCP command in retry
BourgoisMickael 2dd90ff
CLDSRV-805: Increase GCP test timeout to 120s
BourgoisMickael d1b760d
CLDSRV-805: Regroup and dedup bucket Head / Get
BourgoisMickael ed70012
CLDSRV-805: Regroup and dedup versioning Get/Put
BourgoisMickael 4b83110
CLDSRV-805: Regroup object related tests
BourgoisMickael 7279058
CLDSRV-805: Factorize setup and cleanup function
BourgoisMickael 5f1edfe
CLDSRV-805: Regroup object tagging into 1 test
BourgoisMickael File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,169 @@ | ||
| const assert = require('assert'); | ||
| const async = require('async'); | ||
| const util = require('util'); | ||
| const arsenal = require('arsenal'); | ||
| const { | ||
| HeadBucketCommand, | ||
| ListObjectsCommand, | ||
| CreateBucketCommand, | ||
| DeleteBucketCommand, | ||
| PutObjectCommand, | ||
| DeleteObjectCommand, | ||
| } = require('@aws-sdk/client-s3'); | ||
| const { GCP } = arsenal.storage.data.external.GCP; | ||
| const { genUniqID, genBucketName, gcpRetry } = require('../../../utils/gcpUtils'); | ||
| const { getRealAwsConfig } = | ||
| require('../../../../aws-node-sdk/test/support/awsConfig'); | ||
| const { listingHardLimit } = require('../../../../../../constants'); | ||
|
|
||
// Name of the credential profile holding the GCP backend keys.
const credentialOne = 'gcpbackend';
// Resolve the profile into an AWS-SDK-compatible config (endpoint, creds).
const config = getRealAwsConfig(credentialOne);
// Single shared GCP client used by every test in this file.
const gcpClient = new GCP(config);
|
|
||
describe('GCP: Bucket', function testSuite() {
    // Long timeout: every GCP call may be retried several times when the
    // backend rate-limits us, which is the whole point of CLDSRV-805.
    this.timeout(180000);

    const bucketName = genBucketName('bucket');

    before(async () => {
        process.stdout.write(`Creating test bucket ${bucketName}\n`);
        await gcpRetry(gcpClient, new CreateBucketCommand({ Bucket: bucketName }));
    });

    after(async () => {
        await gcpRetry(gcpClient, new DeleteBucketCommand({ Bucket: bucketName }));
    });

    describe('HEAD Bucket', () => {
        it('should return 404 for non-existing bucket', async () => {
            const badBucketName = `cldsrvci-bucket-${genUniqID()}`;
            try {
                await gcpClient.send(new HeadBucketCommand({ Bucket: badBucketName }));
                assert.fail('Expected 404 error, but got success');
            } catch (err) {
                assert(err);
                assert.strictEqual(err.$metadata?.httpStatusCode, 404);
                // SDK v3 reports a HEAD 404 as the generic 'NotFound' error;
                // normalize it to the S3 error name before comparing.
                const errorName = err.name === 'NotFound' ? 'NoSuchBucket' : err.name;
                assert.strictEqual(errorName, 'NoSuchBucket');
            }
        });

        it('should return 200 and bucket metadata', async () => {
            // Need to use the helper headBucket function for middleware with MetaVersionId
            const res = await util.promisify(gcpClient.headBucket.bind(gcpClient))({ Bucket: bucketName });
            const { $metadata, ...data } = res;
            assert.strictEqual($metadata?.httpStatusCode, 200);
            // Ensure MetaVersionId is present and non-empty
            assert.ok(
                typeof data.MetaVersionId === 'string'
                && data.MetaVersionId.length > 0
            );
        });
    });

    describe('GET Bucket (List Objects)', () => {
        const smallSize = 20;
        const bigSize = listingHardLimit + 1;

        // Put every key of `createdObjects` into the test bucket, at most 10
        // requests in flight. Each put goes through gcpRetry so a transient
        // SlowDown/429 during setup does not fail the suite — previously these
        // used gcpClient.send() directly, which defeated the rate-limit fix.
        function populateBucket(createdObjects, callback) {
            process.stdout.write(
                `Putting ${createdObjects.length} objects into bucket\n`);
            async.mapLimit(
                createdObjects,
                10,
                async object => gcpRetry(gcpClient, new PutObjectCommand({
                    Bucket: bucketName,
                    Key: object,
                })),
                err => {
                    if (err) {
                        process.stdout.write(`err putting objects ${err}\n`);
                    }
                    return callback(err);
                }
            );
        }

        // Teardown mirror of populateBucket: delete every key, also wrapped in
        // gcpRetry to stay rate-limit safe.
        function removeObjects(createdObjects, callback) {
            process.stdout.write(
                `Deleting ${createdObjects.length} objects from bucket\n`);
            async.mapLimit(
                createdObjects,
                10,
                async object => gcpRetry(gcpClient, new DeleteObjectCommand({
                    Bucket: bucketName,
                    Key: object,
                })),
                err => {
                    if (err) {
                        process.stdout.write(`err deleting objects ${err}\n`);
                    }
                    return callback(err);
                }
            );
        }

        it('should return 200', async () => {
            const res = await gcpClient.send(
                new ListObjectsCommand({ Bucket: bucketName }));
            assert.strictEqual(res.$metadata?.httpStatusCode, 200);
        });

        describe('with less than listingHardLimit number of objects', () => {
            const createdObjects = Array.from(
                Array(smallSize).keys()).map(i => `someObject-${i}`);

            before(done => populateBucket(createdObjects, done));
            after(done => removeObjects(createdObjects, done));

            it(`should list all ${smallSize} created objects`, async () => {
                const res = await gcpClient.send(
                    new ListObjectsCommand({ Bucket: bucketName }));
                assert.strictEqual(res.Contents.length, smallSize);
            });

            it('should list MaxKeys number of objects with MaxKeys at 10', async () => {
                const res = await gcpClient.send(new ListObjectsCommand({
                    Bucket: bucketName,
                    MaxKeys: 10,
                }));
                assert.strictEqual(res.Contents.length, 10);
            });
        });

        describe('with more than listingHardLimit number of objects', () => {
            const createdObjects = Array.from(
                Array(bigSize).keys()).map(i => `someObject-${i}`);

            before(done => populateBucket(createdObjects, done));
            after(done => removeObjects(createdObjects, done));

            it('should list at max 1000 of objects created', async () => {
                const res = await gcpClient.send(
                    new ListObjectsCommand({ Bucket: bucketName }));
                assert.strictEqual(res.Contents.length, listingHardLimit);
            });

            describe('with MaxKeys at 1001', () => {
                // TODO: S3C-5445
                // Note: this test is testing GCP behaviour, not the Cloudserver one.
                // It tests that GET https://<GCP_BUCKET_NAME>.storage.googleapis.com/?max-keys=1001
                // returns only the first 1000 objects.
                //
                // Expected behavior: the GCP XML API should not return a list longer
                // than 1000 objects, even if max-keys is greater than 1000:
                // https://cloud.google.com/storage/docs/xml-api/reference-headers#maxkeys
                //
                // Actual behavior: it returns a list longer than 1000 objects when
                // max-keys is greater than 1000
                it.skip('should list at max 1000, ignoring MaxKeys', async () => {
                    const res = await gcpClient.send(new ListObjectsCommand({
                        Bucket: bucketName,
                        MaxKeys: 1001,
                    }));
                    assert.strictEqual(res.Contents.length, listingHardLimit);
                });
            });
        });
    });
});
This file was deleted.
Oops, something went wrong.
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The populateBucket and removeObjects helpers call gcpClient.send() directly without gcpRetry. If a PutObjectCommand or DeleteObjectCommand hits a SlowDown/429 during setup/teardown, the test will still fail. Consider wrapping these calls with gcpRetry to match the PR goal of fixing rate-limit flakiness.
--- Claude Code
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is a copy of existing code to another file