Skip to content

Commit

Permalink
Change assertion and timeout for kafka stress test (#31089)
Browse files Browse the repository at this point in the history
  • Loading branch information
akashorabek authored Apr 24, 2024
1 parent 495e9b2 commit 8454cc9
Showing 1 changed file with 3 additions and 3 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ public void setup() {
Configuration.class),
"large",
Configuration.fromJsonString(
"{\"rowsPerSecond\":50000,\"numRecords\":5000000,\"valueSizeBytes\":1000,\"minutes\":60,\"pipelineTimeout\":120,\"runner\":\"DataflowRunner\"}",
"{\"rowsPerSecond\":50000,\"numRecords\":5000000,\"valueSizeBytes\":1000,\"minutes\":60,\"pipelineTimeout\":240,\"runner\":\"DataflowRunner\"}",
Configuration.class));
} catch (IOException e) {
throw new RuntimeException(e);
Expand Down Expand Up @@ -199,9 +199,9 @@ public void testWriteAndRead() throws IOException, ParseException, InterruptedEx
readInfo.jobId(),
getBeamMetricsName(PipelineMetricsType.COUNTER, READ_ELEMENT_METRIC_NAME));

// Assert that writeNumRecords equals or greater than readNumRecords since there might be
// Assert that readNumRecords is equal to or greater than writeNumRecords since there might be
// duplicates when testing a large amount of data
assertTrue(writeNumRecords >= readNumRecords);
assertTrue(readNumRecords >= writeNumRecords);
} finally {
// clean up pipelines
if (pipelineLauncher.getJobStatus(project, region, writeInfo.jobId())
Expand Down

0 comments on commit 8454cc9

Please sign in to comment.