Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class RecordWriterDelegateTest, method verifyAvailability.
private void verifyAvailability(RecordWriterDelegate writerDelegate) throws Exception {
    // writer is available at the beginning
    assertTrue(writerDelegate.isAvailable());
    assertTrue(writerDelegate.getAvailableFuture().isDone());

    // request one buffer from the local pool to make it unavailable
    RecordWriter recordWriter = writerDelegate.getRecordWriter(0);
    for (int i = 0; i < memorySegmentSize / recordSize; ++i) {
        recordWriter.emit(new IntValue(i));
    }
    assertFalse(writerDelegate.isAvailable());
    CompletableFuture future = writerDelegate.getAvailableFuture();
    assertFalse(future.isDone());

    // recycle the buffer to make the local pool available again
    ResultSubpartitionView readView = recordWriter.getTargetPartition().createSubpartitionView(0, new NoOpBufferAvailablityListener());
    Buffer buffer = readView.getNextBuffer().buffer();
    buffer.recycleBuffer();
    assertTrue(future.isDone());
    assertTrue(writerDelegate.isAvailable());
    assertTrue(writerDelegate.getAvailableFuture().isDone());
}
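For context, the listener handed to createSubpartitionView above deliberately ignores data-availability notifications, so the test can poll the view directly. A minimal sketch of such a no-op implementation, assuming BufferAvailabilityListener exposes a single notifyDataAvailable() callback (the exact signature varies across Flink versions):

import org.apache.flink.runtime.io.network.partition.BufferAvailabilityListener;

/** Sketch of a listener that swallows data-availability notifications; the real class lives in Flink's test utilities. */
public class NoOpBufferAvailablityListener implements BufferAvailabilityListener {
    @Override
    public void notifyDataAvailable() {
        // intentionally empty: tests poll the view instead of reacting to callbacks
    }
}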
Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class RecordWriterTest, method testBroadcastEventMixedRecords.
/**
 * Tests broadcasting events when records have been emitted.
 */
@Test
public void testBroadcastEventMixedRecords() throws Exception {
    Random rand = new XORShiftRandom();
    int numberOfChannels = 4;
    int bufferSize = 32;
    // serialized length prefix
    int lenBytes = 4;
    ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
    RecordWriter<ByteArrayIO> writer = createRecordWriter(partition);
    CheckpointBarrier barrier = new CheckpointBarrier(Integer.MAX_VALUE + 1292L, Integer.MAX_VALUE + 199L, CheckpointOptions.forCheckpointWithDefaultLocation());

    // Emit records on some channels first (requesting buffers), then
    // broadcast the event. The record buffers should be emitted first, then
    // the event. After the event, no new buffer should be requested.

    // (i) Smaller than the buffer size
    byte[] bytes = new byte[bufferSize / 2];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (ii) Larger than the buffer size
    bytes = new byte[bufferSize + 1];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (iii) Exactly the buffer size
    bytes = new byte[bufferSize - lenBytes];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (iv) Broadcast the event
    writer.broadcastEvent(barrier);

    if (isBroadcastWriter) {
        assertEquals(3, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
        for (int i = 0; i < numberOfChannels; i++) {
            // 3 buffers + 1 event
            assertEquals(4, partition.getNumberOfQueuedBuffers(i));
            ResultSubpartitionView view = partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
            for (int j = 0; j < 3; j++) {
                assertTrue(parseBuffer(view.getNextBuffer().buffer(), 0).isBuffer());
            }
            BufferOrEvent boe = parseBuffer(view.getNextBuffer().buffer(), i);
            assertTrue(boe.isEvent());
            assertEquals(barrier, boe.getEvent());
        }
    } else {
        assertEquals(4, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
        ResultSubpartitionView[] views = new ResultSubpartitionView[4];

        // 1 buffer + 1 event
        assertEquals(2, partition.getNumberOfQueuedBuffers(0));
        views[0] = partition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
        assertTrue(parseBuffer(views[0].getNextBuffer().buffer(), 0).isBuffer());

        // 2 buffers + 1 event
        assertEquals(3, partition.getNumberOfQueuedBuffers(1));
        views[1] = partition.createSubpartitionView(1, new NoOpBufferAvailablityListener());
        assertTrue(parseBuffer(views[1].getNextBuffer().buffer(), 1).isBuffer());
        assertTrue(parseBuffer(views[1].getNextBuffer().buffer(), 1).isBuffer());

        // 1 buffer + 1 event
        assertEquals(2, partition.getNumberOfQueuedBuffers(2));
        views[2] = partition.createSubpartitionView(2, new NoOpBufferAvailablityListener());
        assertTrue(parseBuffer(views[2].getNextBuffer().buffer(), 2).isBuffer());

        views[3] = partition.createSubpartitionView(3, new NoOpBufferAvailablityListener());
        // 0 buffers + 1 event
        assertEquals(1, partition.getNumberOfQueuedBuffers(3));

        // every queue's last element should be the event
        for (int i = 0; i < numberOfChannels; i++) {
            BufferOrEvent boe = parseBuffer(views[i].getNextBuffer().buffer(), i);
            assertTrue(boe.isEvent());
            assertEquals(barrier, boe.getEvent());
        }
    }
}
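The queued-buffer counts asserted above follow from the 4-byte length prefix the serializer prepends to each record, combined with records landing on consecutive channels (which the assertions themselves reflect). A small illustrative helper, not part of the Flink test, makes the arithmetic explicit:

// Number of buffers a serialized record occupies: payload plus a 4-byte
// length prefix, rounded up to whole buffers. (Illustrative helper only.)
static int buffersNeeded(int payloadBytes, int bufferSize, int lenBytes) {
    int serialized = payloadBytes + lenBytes;
    return (serialized + bufferSize - 1) / bufferSize;
}

// buffersNeeded(16, 32, 4) == 1  -> channel 0: 1 buffer + 1 event
// buffersNeeded(33, 32, 4) == 2  -> channel 1: 2 buffers + 1 event
// buffersNeeded(28, 32, 4) == 1  -> channel 2: 1 buffer + 1 event
//                                -> channel 3: the event only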
Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class RecordWriterTest, method testBroadcastEventBufferReferenceCounting.
/**
 * Tests that event buffers are properly recycled when broadcasting events to multiple channels.
 */
@Test
public void testBroadcastEventBufferReferenceCounting() throws Exception {
    int bufferSize = 32 * 1024;
    int numSubpartitions = 2;
    ResultPartition partition = createResultPartition(bufferSize, numSubpartitions);
    RecordWriter<?> writer = createRecordWriter(partition);
    writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

    // get references to the buffers (copies from the original event buffer consumer)
    Buffer[] buffers = new Buffer[numSubpartitions];

    // process all collected events (recycles the buffer)
    for (int i = 0; i < numSubpartitions; i++) {
        assertEquals(1, partition.getNumberOfQueuedBuffers(i));
        ResultSubpartitionView view = partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
        buffers[i] = view.getNextBuffer().buffer();
        assertTrue(parseBuffer(buffers[i], i).isEvent());
    }
    for (int i = 0; i < numSubpartitions; ++i) {
        assertTrue(buffers[i].isRecycled());
    }
}
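The final isRecycled() checks pass because broadcasting serializes the event once and hands each subpartition a retained reference to the same underlying buffer; the memory returns to the pool only when the last reference is released. A toy sketch of that reference-counting idea (illustrative only, not Flink's NetworkBuffer):

// Toy reference-counted buffer: each retain() adds a reference, each recycle()
// releases one; the memory is "returned" only when the count hits zero.
final class RefCountedBuffer {
    private int refCount = 1;
    private boolean recycled;

    synchronized RefCountedBuffer retain() {
        refCount++;
        return this;
    }

    synchronized void recycle() {
        if (--refCount == 0) {
            recycled = true; // a real implementation would return the memory segment here
        }
    }

    synchronized boolean isRecycled() {
        return recycled;
    }
}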
Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class RecordWriterTest, method verifyBroadcastBufferOrEventIndependence.
private void verifyBroadcastBufferOrEventIndependence(boolean broadcastEvent) throws Exception {
    ResultPartition partition = createResultPartition(4096, 2);
    RecordWriter<IntValue> writer = createRecordWriter(partition);

    if (broadcastEvent) {
        writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);
    } else {
        writer.broadcastEmit(new IntValue(0));
    }

    // verify that one buffer was added to every queue
    assertEquals(1, partition.getNumberOfQueuedBuffers(0));
    assertEquals(1, partition.getNumberOfQueuedBuffers(1));
    ResultSubpartitionView view0 = partition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
    ResultSubpartitionView view1 = partition.createSubpartitionView(1, new NoOpBufferAvailablityListener());

    // the two buffers may share the underlying memory, but not the reader indices!
    Buffer buffer1 = view0.getNextBuffer().buffer();
    Buffer buffer2 = view1.getNextBuffer().buffer();
    assertEquals(0, buffer1.getReaderIndex());
    assertEquals(0, buffer2.getReaderIndex());

    // advance one buffer's reader index and check that the other is unaffected
    buffer1.setReaderIndex(1);
    assertEquals("Buffer 2 shares the same reader index as buffer 1", 0, buffer2.getReaderIndex());
}
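The share-the-bytes-but-not-the-cursor behavior asserted here has a close analogue in plain java.nio buffers, which makes for a convenient mental model (analogy only, not Flink's implementation):

import java.nio.ByteBuffer;

ByteBuffer shared = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
ByteBuffer readerA = shared.duplicate(); // same backing array...
ByteBuffer readerB = shared.duplicate(); // ...but independent read positions
readerA.get();                           // advances only readerA's position
assert readerB.position() == 0;          // readerB is unaffected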
Use of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in project flink by apache.
The class ChannelPersistenceITCase, method upstreamBlocksAfterRecoveringState.
private void upstreamBlocksAfterRecoveringState(ResultPartitionType type) throws Exception {
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(4, 1024);
    byte[] dataAfterRecovery = randomBytes(1024);
    try {
        BufferWritingResultPartition resultPartition = buildResultPartition(networkBufferPool, type, 0, 1);
        new SequentialChannelStateReaderImpl(new TaskStateSnapshot()).readOutputData(new BufferWritingResultPartition[] { resultPartition }, true);
        resultPartition.emitRecord(ByteBuffer.wrap(dataAfterRecovery), 0);

        ResultSubpartitionView view = resultPartition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
        if (type != ResultPartitionType.PIPELINED_APPROXIMATE) {
            // the first buffer signals that recovery has completed; consumption
            // stays blocked until it is explicitly resumed
            assertEquals(RECOVERY_COMPLETION, view.getNextBuffer().buffer().getDataType());
            assertNull(view.getNextBuffer());
            view.resumeConsumption();
        }
        assertArrayEquals(dataAfterRecovery, collectBytes(view.getNextBuffer().buffer()));
    } finally {
        networkBufferPool.destroy();
    }
}
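The blocked-then-resumed pattern exercised above generalizes to any ResultSubpartitionView reader: after the RECOVERY_COMPLETION marker, getNextBuffer() returns null until consumption is explicitly resumed. A hedged sketch of such a reader loop (handle(...) is a hypothetical callback; exact Flink signatures vary by version):

// Drain a view, resuming consumption when the upstream blocks after the
// recovery-completion marker. Exits when no buffer is currently available.
static void drain(ResultSubpartitionView view) throws Exception {
    ResultSubpartition.BufferAndBacklog next;
    while ((next = view.getNextBuffer()) != null) {
        Buffer buffer = next.buffer();
        if (buffer.getDataType() == Buffer.DataType.RECOVERY_COMPLETION) {
            view.resumeConsumption(); // upstream stays blocked until this call
        } else {
            handle(buffer); // hypothetical: process a data or event buffer
        }
        buffer.recycleBuffer();
    }
}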