Use of com.amazon.dataprepper.model.CheckpointState in project data-prepper by opensearch-project.
The class BlockingBuffer, method doRead:
/**
 * Retrieves and removes a batch of records from the head of the queue. The batch size is bounded by
 * the configuration attribute {@link #ATTRIBUTE_BATCH_SIZE} and by the {@code timeoutInMillis}, whichever
 * limit is reached first. The {@code timeoutInMillis} is also used as the wait limit when polling for each record.
 *
 * @param timeoutInMillis how long to wait before giving up
 * @return the earliest batch of records in the buffer that have not yet been read
 */
@Override
public Map.Entry<Collection<T>, CheckpointState> doRead(int timeoutInMillis) {
    final List<T> records = new ArrayList<>();
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < timeoutInMillis && records.size() < batchSize) {
            final T record = blockingQueue.poll(timeoutInMillis, TimeUnit.MILLISECONDS);
            // poll() returns null if the timeout elapses before a record arrives; skip nulls
            if (record != null) {
                records.add(record);
            }
            // drain any records already queued without waiting on further polls
            if (records.size() < batchSize) {
                blockingQueue.drainTo(records, batchSize - records.size());
            }
        }
    } catch (InterruptedException ex) {
        LOG.info("Pipeline [{}] - Interrupt received while reading from buffer", pipelineName);
        throw new RuntimeException(ex);
    }
    final CheckpointState checkpointState = new CheckpointState(records.size());
    return new AbstractMap.SimpleEntry<>(records, checkpointState);
}
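For context, here is a minimal consumer-side sketch of the read/checkpoint cycle this method supports. The buffer variable, the 500 ms timeout, and the processRecords helper are assumptions for illustration, and the sketch assumes the Buffer#checkpoint(CheckpointState) method from the same model package acknowledges the records counted by the returned CheckpointState:

    // Illustrative only: processRecords is a hypothetical downstream step.
    final Map.Entry<Collection<Record<String>>, CheckpointState> readResult =
            buffer.read(500); // wait up to 500 ms for a batch
    final Collection<Record<String>> batch = readResult.getKey();
    final CheckpointState checkpointState = readResult.getValue();
    processRecords(batch); // hypothetical processing of the batch
    // Acknowledge the batch so the buffer can mark those records as checked.
    buffer.checkpoint(checkpointState);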
Use of com.amazon.dataprepper.model.CheckpointState in project data-prepper by opensearch-project.
The class BlockingBufferTests, method testBatchRead:
@Test
public void testBatchRead() throws Exception {
    final PluginSetting completePluginSetting = completePluginSettingForBlockingBuffer();
    final BlockingBuffer<Record<String>> blockingBuffer = new BlockingBuffer<>(completePluginSetting);
    assertThat(blockingBuffer, notNullValue());
    // Write more records than a single batch can hold
    final int testSize = 5;
    for (int i = 0; i < testSize; i++) {
        Record<String> record = new Record<>("TEST" + i);
        blockingBuffer.write(record, TEST_WRITE_TIMEOUT);
    }
    // The first read should return exactly one full batch
    final Map.Entry<Collection<Record<String>>, CheckpointState> partialReadResult = blockingBuffer.read(TEST_BATCH_READ_TIMEOUT);
    final Collection<Record<String>> partialRecords = partialReadResult.getKey();
    final CheckpointState partialCheckpointState = partialReadResult.getValue();
    final int expectedBatchSize = (Integer) completePluginSetting.getAttributeFromSettings(ATTRIBUTE_BATCH_SIZE);
    assertThat(partialRecords.size(), is(expectedBatchSize));
    assertEquals(expectedBatchSize, partialCheckpointState.getNumRecordsToBeChecked());
    int i = 0;
    for (Record<String> record : partialRecords) {
        assertThat(record.getData(), equalTo("TEST" + i));
        i++;
    }
    // The second read should return the remaining records
    final Map.Entry<Collection<Record<String>>, CheckpointState> finalReadResult = blockingBuffer.read(TEST_BATCH_READ_TIMEOUT);
    final Collection<Record<String>> finalBatch = finalReadResult.getKey();
    final CheckpointState finalCheckpointState = finalReadResult.getValue();
    assertThat(finalBatch.size(), is(testSize - expectedBatchSize));
    assertEquals(testSize - expectedBatchSize, finalCheckpointState.getNumRecordsToBeChecked());
    for (Record<String> record : finalBatch) {
        assertThat(record.getData(), equalTo("TEST" + i));
        i++;
    }
}
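The helper completePluginSettingForBlockingBuffer is not shown above. A plausible sketch follows, assuming the bounded_blocking plugin name, the buffer_size/batch_size attribute keys, and a PluginSetting(String, Map) constructor; the concrete values are assumptions, chosen so that batch_size is smaller than testSize and the test exercises both a full and a partial batch:

    // Hypothetical reconstruction of the test helper; names and values are assumptions.
    private static PluginSetting completePluginSettingForBlockingBuffer() {
        final Map<String, Object> settings = new HashMap<>();
        settings.put("buffer_size", 256); // assumed total queue capacity
        settings.put("batch_size", 3);    // assumed; must be below testSize (5) to force two reads
        final PluginSetting pluginSetting = new PluginSetting("bounded_blocking", settings);
        pluginSetting.setPipelineName("test-pipeline");
        return pluginSetting;
    }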