Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
In class RecordWriterTest, the method testBroadcastEventBufferReferenceCounting:
/**
 * Tests that event buffers are properly recycled when broadcasting events to multiple channels.
 */
@Test
public void testBroadcastEventBufferReferenceCounting() throws Exception {
    int bufferSize = 32 * 1024;
    int numSubpartitions = 2;

    ResultPartition partition = createResultPartition(bufferSize, numSubpartitions);
    RecordWriter<?> writer = createRecordWriter(partition);
    writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

    // get references to buffer consumers (copies from the original event buffer consumer)
    Buffer[] buffers = new Buffer[numSubpartitions];

    // process all collected events (recycles the buffer)
    for (int i = 0; i < numSubpartitions; i++) {
        assertEquals(1, partition.getNumberOfQueuedBuffers(i));
        ResultSubpartitionView view =
                partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
        buffers[i] = view.getNextBuffer().buffer();
        assertTrue(parseBuffer(buffers[i], i).isEvent());
    }

    for (int i = 0; i < numSubpartitions; ++i) {
        assertTrue(buffers[i].isRecycled());
    }
}
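The test relies on a createResultPartition helper that is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming it is backed by the ResultPartitionBuilder that appears in the LocalInputChannelTest snippet further down; the pool sizing and the particular builder options are illustrative assumptions, not the actual RecordWriterTest code.

// Hypothetical helper, for illustration only: builds and sets up a ResultPartition
// with the given buffer size and number of subpartitions.
private static ResultPartition createResultPartition(int bufferSize, int numSubpartitions)
        throws Exception {
    // 16 network segments is an arbitrary budget chosen for this sketch.
    NetworkBufferPool networkBuffers = new NetworkBufferPool(16, bufferSize);
    ResultPartition partition =
            new ResultPartitionBuilder()
                    .setNumberOfSubpartitions(numSubpartitions)
                    .setBufferPoolFactory(() ->
                            networkBuffers.createBufferPool(numSubpartitions + 1, Integer.MAX_VALUE))
                    .build();
    // setup() requests the partition's local BufferPool from the factory above.
    partition.setup();
    return partition;
}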
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
In class RecordWriterTest, the method verifyBroadcastBufferOrEventIndependence:
private void verifyBroadcastBufferOrEventIndependence(boolean broadcastEvent) throws Exception {
    ResultPartition partition = createResultPartition(4096, 2);
    RecordWriter<IntValue> writer = createRecordWriter(partition);

    if (broadcastEvent) {
        writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);
    } else {
        writer.broadcastEmit(new IntValue(0));
    }

    // verify added to all queues
    assertEquals(1, partition.getNumberOfQueuedBuffers(0));
    assertEquals(1, partition.getNumberOfQueuedBuffers(1));

    ResultSubpartitionView view0 =
            partition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
    ResultSubpartitionView view1 =
            partition.createSubpartitionView(1, new NoOpBufferAvailablityListener());

    // these two buffers may share the memory but not the indices!
    Buffer buffer1 = view0.getNextBuffer().buffer();
    Buffer buffer2 = view1.getNextBuffer().buffer();
    assertEquals(0, buffer1.getReaderIndex());
    assertEquals(0, buffer2.getReaderIndex());

    buffer1.setReaderIndex(1);
    assertEquals("Buffer 2 shares the same reader index as buffer 1", 0, buffer2.getReaderIndex());
}
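Since verifyBroadcastBufferOrEventIndependence is a private helper, it only runs when invoked from test methods, presumably once per branch of the broadcastEvent flag. A sketch of the two call sites one would expect follows; the test method names are assumptions for illustration and do not appear on this page.

// Hypothetical callers of the helper above: one exercises the event path,
// the other the record (broadcastEmit) path.
@Test
public void testBroadcastEventBufferIndependence() throws Exception {
    verifyBroadcastBufferOrEventIndependence(true);
}

@Test
public void testBroadcastEmitBufferIndependence() throws Exception {
    verifyBroadcastBufferOrEventIndependence(false);
}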
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
In class LocalInputChannelTest, the method testConcurrentConsumeMultiplePartitions:
/**
 * Tests the consumption of multiple subpartitions via local input channels.
 *
 * <p>Multiple producer tasks produce pipelined partitions, which are consumed by multiple tasks
 * via local input channels.
 */
@Test
public void testConcurrentConsumeMultiplePartitions() throws Exception {
    // Config
    final int parallelism = 32;
    final int producerBufferPoolSize = parallelism + 1;
    final int numberOfBuffersPerChannel = 1024;

    // Setup
    // One thread per produced partition and one per consumer
    final ExecutorService executor = Executors.newFixedThreadPool(2 * parallelism);

    final NetworkBufferPool networkBuffers =
            new NetworkBufferPool(
                    (parallelism * producerBufferPoolSize) + (parallelism * parallelism),
                    TestBufferFactory.BUFFER_SIZE);

    final ResultPartitionManager partitionManager = new ResultPartitionManager();
    final ResultPartitionID[] partitionIds = new ResultPartitionID[parallelism];
    final TestPartitionProducer[] partitionProducers = new TestPartitionProducer[parallelism];

    // Create all partitions
    for (int i = 0; i < parallelism; i++) {
        partitionIds[i] = new ResultPartitionID();

        final ResultPartition partition =
                new ResultPartitionBuilder()
                        .setResultPartitionId(partitionIds[i])
                        .setNumberOfSubpartitions(parallelism)
                        .setNumTargetKeyGroups(parallelism)
                        .setResultPartitionManager(partitionManager)
                        .setBufferPoolFactory(() -> networkBuffers.createBufferPool(
                                producerBufferPoolSize, producerBufferPoolSize, parallelism, Integer.MAX_VALUE))
                        .build();

        // Create a buffer pool for this partition
        partition.setup();

        // Create the producer
        partitionProducers[i] = new TestPartitionProducer(
                (BufferWritingResultPartition) partition,
                false,
                new TestPartitionProducerBufferSource(
                        parallelism, TestBufferFactory.BUFFER_SIZE, numberOfBuffersPerChannel));
    }

    // Test
    try {
        // Submit producer tasks
        List<CompletableFuture<?>> results = Lists.newArrayListWithCapacity(parallelism + 1);
        for (int i = 0; i < parallelism; i++) {
            results.add(CompletableFuture.supplyAsync(
                    CheckedSupplier.unchecked(partitionProducers[i]::call), executor));
        }

        // Submit consumer
        for (int i = 0; i < parallelism; i++) {
            final TestLocalInputChannelConsumer consumer = new TestLocalInputChannelConsumer(
                    i,
                    parallelism,
                    numberOfBuffersPerChannel,
                    networkBuffers.createBufferPool(parallelism, parallelism),
                    partitionManager,
                    new TaskEventDispatcher(),
                    partitionIds);

            results.add(CompletableFuture.supplyAsync(
                    CheckedSupplier.unchecked(consumer::call), executor));
        }

        FutureUtils.waitForAll(results).get();
    } finally {
        networkBuffers.destroyAllBufferPools();
        networkBuffers.destroy();
        executor.shutdown();
    }
}
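The NetworkBufferPool above is sized so that every producer and every consumer can hold its full buffer pool at the same time. A quick accounting of the segment budget, worked out only from the numbers configured in the test:

// parallelism = 32, producerBufferPoolSize = parallelism + 1 = 33
int producerSegments = 32 * 33;  // one bounded pool of 33 buffers per producer    = 1056
int consumerSegments = 32 * 32;  // each consumer pool is createBufferPool(32, 32) = 1024
int totalSegments = producerSegments + consumerSegments;  // 2080 segments handed to the NetworkBufferPool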
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
In class StreamTaskFinalCheckpointsTest, the method testWaitingForFinalCheckpointNotTheFirsNotifiedComplete:
@Test
public void testWaitingForFinalCheckpointNotTheFirsNotifiedComplete() throws Exception {
    ResultPartition[] partitionWriters = new ResultPartition[2];
    try {
        for (int i = 0; i < partitionWriters.length; ++i) {
            partitionWriters[i] =
                    PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            partitionWriters[i].setup();
        }

        CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder();
        try (StreamTaskMailboxTestHarness<String> testHarness =
                createTestHarness(partitionWriters, checkpointResponder, false)) {
            // complete only the third checkpoint
            checkpointResponder.completeCheckpoints(Collections.singletonList(3L));

            // finish data on all channels
            testHarness.waitForTaskCompletion();

            // trigger the first checkpoint
            CompletableFuture<Boolean> firstCheckpoint = triggerCheckpoint(testHarness, 1);

            // Notifies the result partition that all records are processed after the
            // first checkpoint is triggered.
            firstCheckpoint.thenAccept((ignored) -> {
                for (ResultPartition resultPartition : partitionWriters) {
                    resultPartition.onSubpartitionAllDataProcessed(0);
                }
            });
            testHarness.processAll();
            testHarness.getTaskStateManager().getWaitForReportLatch().await();

            // trigger a second checkpoint
            triggerCheckpoint(testHarness, 2L);
            testHarness.processAll();
            testHarness.getTaskStateManager().getWaitForReportLatch().await();

            // trigger the third checkpoint
            triggerCheckpoint(testHarness, 3L);
            testHarness.processAll();
            testHarness.finishProcessing();
            testHarness.getTaskStateManager().getWaitForReportLatch().await();

            assertEquals(3L, testHarness.getTaskStateManager().getReportedCheckpointId());
            assertEquals(3L, testHarness.getTaskStateManager().getNotifiedCompletedCheckpointId());

            // Each result partition should have emitted 3 barriers and 1 EndOfUserRecordsEvent.
            for (ResultPartition resultPartition : partitionWriters) {
                assertEquals(4, resultPartition.getNumberOfQueuedBuffers());
            }
        }
    } finally {
        for (ResultPartitionWriter writer : partitionWriters) {
            if (writer != null) {
                writer.close();
            }
        }
    }
}
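The final assertion can be read as a simple accounting of what each ResultPartition has queued by the end of the test; the breakdown below is inferred from the comment and the checkpoints triggered above, not from additional instrumentation.

// Per ResultPartition, in emission order:
int barriers = 3;               // checkpoint barriers for checkpoints 1, 2 and 3
int endOfUserRecordsEvents = 1; // the single EndOfUserRecordsEvent mentioned in the comment above
int expectedQueuedBuffers = barriers + endOfUserRecordsEvents; // = 4, matching the assertion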
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
In class StreamTaskFinalCheckpointsTest, the method testTriggerStopWithSavepointWhenWaitingForFinalCheckpoint:
@Test
public void testTriggerStopWithSavepointWhenWaitingForFinalCheckpoint() throws Exception {
    ResultPartition[] partitionWriters = new ResultPartition[2];
    try {
        for (int i = 0; i < partitionWriters.length; ++i) {
            partitionWriters[i] =
                    PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            partitionWriters[i].setup();
        }

        int finalCheckpointId = 6;
        int syncSavepointId = 7;
        CompletingCheckpointResponder checkpointResponder =
                new CompletingCheckpointResponder() {

                    @Override
                    public void acknowledgeCheckpoint(
                            JobID jobID,
                            ExecutionAttemptID executionAttemptID,
                            long checkpointId,
                            CheckpointMetrics checkpointMetrics,
                            TaskStateSnapshot subtaskState) {
                        if (syncSavepointId == checkpointId) {
                            // Complete the final checkpoint once the sync savepoint is acknowledged;
                            // we should wait for the sync savepoint to complete.
                            super.acknowledgeCheckpoint(
                                    jobID, executionAttemptID, finalCheckpointId, checkpointMetrics, subtaskState);
                            try {
                                // Give the task some time to finish before the
                                // savepoint is notified complete.
                                Thread.sleep(CONCURRENT_EVENT_WAIT_PERIOD_MS);
                            } catch (InterruptedException e) {
                                throw new FlinkRuntimeException(e);
                            }
                            super.acknowledgeCheckpoint(
                                    jobID, executionAttemptID, syncSavepointId, checkpointMetrics, subtaskState);
                        }
                    }
                };

        try (StreamTaskMailboxTestHarness<String> testHarness =
                createTestHarness(partitionWriters, checkpointResponder, false)) {
            // Tests triggering a checkpoint after all the inputs have received EndOfPartition.
            testHarness.waitForTaskCompletion();

            // trigger the final checkpoint
            CompletableFuture<Boolean> checkpointFuture =
                    triggerCheckpoint(testHarness, finalCheckpointId);

            // Notifies the result partition that all records are processed after the
            // last checkpoint is triggered.
            checkpointFuture.thenAccept((ignored) -> {
                for (ResultPartition resultPartition : partitionWriters) {
                    resultPartition.onSubpartitionAllDataProcessed(0);
                }
            });

            // trigger the synchronous savepoint
            CompletableFuture<Boolean> savepointFuture =
                    triggerStopWithSavepointDrain(testHarness, syncSavepointId);

            // Checkpoint 6 should be triggered successfully.
            testHarness.finishProcessing();
            assertTrue(checkpointFuture.isDone());
            assertTrue(savepointFuture.isDone());
            testHarness.getTaskStateManager().getWaitForReportLatch().await();
            assertEquals(syncSavepointId, testHarness.getTaskStateManager().getReportedCheckpointId());
            assertEquals(syncSavepointId, testHarness.getTaskStateManager().getNotifiedCompletedCheckpointId());

            // Each result partition should have emitted 2 barriers and 1 EndOfUserRecordsEvent.
            for (ResultPartition resultPartition : partitionWriters) {
                assertEquals(3, resultPartition.getNumberOfQueuedBuffers());
            }
        }
    } finally {
        for (ResultPartitionWriter writer : partitionWriters) {
            if (writer != null) {
                writer.close();
            }
        }
    }
}
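The overridden responder is what forces the interesting ordering: the final checkpoint may only complete once the synchronous savepoint has been acknowledged. A condensed timeline of what the override above produces, reconstructed from the code:

// 1. The task acknowledges the sync savepoint (checkpointId == 7) and enters the override.
// 2. The responder first forwards the acknowledgement of the final checkpoint (id 6),
//    so checkpoint 6 can complete while the savepoint is still pending.
// 3. Thread.sleep(CONCURRENT_EVENT_WAIT_PERIOD_MS) opens a window in which the task must
//    not finish on its own.
// 4. The responder then forwards the savepoint acknowledgement (id 7); the savepoint is the
//    last checkpoint reported and notified complete, which is what the two assertions on
//    getReportedCheckpointId() and getNotifiedCompletedCheckpointId() check.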