Example usage of org.apache.flink.runtime.io.network.partition.ResultPartition in the Apache Flink project:
class InputBuffersMetricsTest, method buildInputGate.
/**
 * Builds a {@link SingleInputGate} with the given numbers of remote and local input
 * channels, each channel fed by its own PIPELINED_BOUNDED result partition.
 *
 * <p>Every created partition is registered with {@code closeableRegistry} so it is
 * released on test teardown even if the test fails before cleanup.
 *
 * @param network the shuffle environment used to create partitions and buffer pools
 * @param numberOfRemoteChannels number of remote input channels to create
 * @param numberOfLocalChannels number of local input channels to create
 * @return a tuple of the gate and the list of its remote channels (local channels are
 *     created and installed in the gate but not returned, since callers only inspect
 *     remote-channel metrics)
 * @throws Exception if partition setup fails
 */
private Tuple2<SingleInputGate, List<RemoteInputChannel>> buildInputGate(NettyShuffleEnvironment network, int numberOfRemoteChannels, int numberOfLocalChannels) throws Exception {
    final int totalChannels = numberOfRemoteChannels + numberOfLocalChannels;
    SingleInputGate inputGate = new SingleInputGateBuilder()
            .setNumberOfChannels(totalChannels)
            .setResultPartitionType(ResultPartitionType.PIPELINED_BOUNDED)
            .setupBufferPoolFactory(network)
            .build();

    InputChannel[] inputChannels = new InputChannel[totalChannels];
    Tuple2<SingleInputGate, List<RemoteInputChannel>> res = Tuple2.of(inputGate, new ArrayList<>());

    int channelIdx = 0;
    for (int i = 0; i < numberOfRemoteChannels; i++) {
        ResultPartition partition = PartitionTestUtils.createPartition(network, ResultPartitionType.PIPELINED_BOUNDED, 1);
        closeableRegistry.registerCloseable(partition::close);
        partition.setup();

        RemoteInputChannel remoteChannel = buildRemoteChannel(channelIdx, inputGate, network, partition);
        inputChannels[channelIdx] = remoteChannel;
        res.f1.add(remoteChannel);
        channelIdx++;
    }

    for (int i = 0; i < numberOfLocalChannels; i++) {
        ResultPartition partition = PartitionTestUtils.createPartition(network, ResultPartitionType.PIPELINED_BOUNDED, 1);
        closeableRegistry.registerCloseable(partition::close);
        partition.setup();

        // BUG FIX: the original never advanced channelIdx in this loop, so every local
        // channel was built with the same stale index (numberOfRemoteChannels). Each
        // channel must get a distinct, consecutive index within the gate.
        inputChannels[channelIdx] = buildLocalChannel(channelIdx, inputGate, network, partition);
        channelIdx++;
    }

    inputGate.setInputChannels(inputChannels);
    return res;
}
Example usage of org.apache.flink.runtime.io.network.partition.ResultPartition in the Apache Flink project:
class RecordWriterDelegateTest, method testSingleRecordWriterBroadcastEvent.
@Test
@SuppressWarnings("unchecked")
public void testSingleRecordWriterBroadcastEvent() throws Exception {
    // Build one partition whose single record writer is wrapped in a SingleRecordWriter
    // delegate, then check that a broadcast event reaches every subpartition.
    final ResultPartition resultPartition =
            RecordWriterTest.createResultPartition(memorySegmentSize, 2);
    final RecordWriterDelegate writerDelegate =
            new SingleRecordWriter(new RecordWriterBuilder<>().build(resultPartition));

    verifyBroadcastEvent(writerDelegate, Collections.singletonList(resultPartition));
}
Example usage of org.apache.flink.runtime.io.network.partition.ResultPartition in the Apache Flink project:
class RecordWriterDelegateTest, method verifyBroadcastEvent.
/**
 * Broadcasts a {@link CancelCheckpointMarker} through the delegate and asserts that
 * every subpartition of every given partition queued exactly that one event.
 */
private void verifyBroadcastEvent(RecordWriterDelegate writerDelegate, List<ResultPartition> partitions) throws Exception {
    final CancelCheckpointMarker marker = new CancelCheckpointMarker(1);
    writerDelegate.broadcastEvent(marker);

    // Each subpartition queue must contain the broadcast marker and nothing else.
    for (ResultPartition partition : partitions) {
        final int numSubpartitions = partition.getNumberOfSubpartitions();
        for (int subpartition = 0; subpartition < numSubpartitions; subpartition++) {
            assertEquals(1, partition.getNumberOfQueuedBuffers(subpartition));

            final ResultSubpartitionView view =
                    partition.createSubpartitionView(subpartition, new NoOpBufferAvailablityListener());
            final BufferOrEvent bufferOrEvent =
                    RecordWriterTest.parseBuffer(view.getNextBuffer().buffer(), subpartition);
            assertTrue(bufferOrEvent.isEvent());
            assertEquals(marker, bufferOrEvent.getEvent());
        }
    }
}
Example usage of org.apache.flink.runtime.io.network.partition.ResultPartition in the Apache Flink project:
class RecordWriterDelegateTest, method testMultipleRecordWritersBroadcastEvent.
@Test
@SuppressWarnings("unchecked")
public void testMultipleRecordWritersBroadcastEvent() throws Exception {
    // Build two partitions, each with its own record writer, and wrap the writers in a
    // MultipleRecordWriters delegate before broadcasting.
    final int writerCount = 2;
    final List<RecordWriter> writers = new ArrayList<>(writerCount);
    final List<ResultPartition> resultPartitions = new ArrayList<>(writerCount);

    int remaining = writerCount;
    while (remaining-- > 0) {
        final ResultPartition resultPartition =
                RecordWriterTest.createResultPartition(memorySegmentSize, 2);
        resultPartitions.add(resultPartition);
        writers.add(new RecordWriterBuilder<>().build(resultPartition));
    }

    verifyBroadcastEvent(new MultipleRecordWriters(writers), resultPartitions);
}
Example usage of org.apache.flink.runtime.io.network.partition.ResultPartition in the Apache Flink project:
class RecordWriterTest, method testBroadcastEventMixedRecords.
/**
 * Tests broadcasting events when records have been emitted.
 *
 * <p>Emits three records of different sizes (smaller than one buffer, larger than one
 * buffer, exactly one buffer), then broadcasts a checkpoint barrier and verifies the
 * per-subpartition queue contents for both the broadcast-writer and the
 * channel-selecting-writer variants (chosen by the {@code isBroadcastWriter} field).
 */
@Test
public void testBroadcastEventMixedRecords() throws Exception {
Random rand = new XORShiftRandom();
int numberOfChannels = 4;
int bufferSize = 32;
// serialized length prefix per record (one int), so a record payload of
// bufferSize - lenBytes fills a buffer exactly
int lenBytes = 4;
ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
RecordWriter<ByteArrayIO> writer = createRecordWriter(partition);
// Checkpoint id and timestamp deliberately exceed Integer.MAX_VALUE to exercise
// the long-valued fields of the barrier.
CheckpointBarrier barrier = new CheckpointBarrier(Integer.MAX_VALUE + 1292L, Integer.MAX_VALUE + 199L, CheckpointOptions.forCheckpointWithDefaultLocation());
// Emit records on some channels first (requesting buffers), then
// broadcast the event. The record buffers should be emitted first, then
// the event. After the event, no new buffer should be requested.
// (i) Smaller than the buffer size
byte[] bytes = new byte[bufferSize / 2];
rand.nextBytes(bytes);
writer.emit(new ByteArrayIO(bytes));
// (ii) Larger than the buffer size
bytes = new byte[bufferSize + 1];
rand.nextBytes(bytes);
writer.emit(new ByteArrayIO(bytes));
// (iii) Exactly the buffer size
bytes = new byte[bufferSize - lenBytes];
rand.nextBytes(bytes);
writer.emit(new ByteArrayIO(bytes));
// (iv) Broadcast the event
writer.broadcastEvent(barrier);
if (isBroadcastWriter) {
// Broadcast writer replicates every record to all channels, so each of the
// three emitted records occupies buffers shared across subpartitions.
assertEquals(3, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
for (int i = 0; i < numberOfChannels; i++) {
// 3 buffer + 1 event
assertEquals(4, partition.getNumberOfQueuedBuffers(i));
ResultSubpartitionView view = partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
for (int j = 0; j < 3; j++) {
// NOTE(review): channel index 0 is passed here while the event check below
// uses i; only isBuffer() is asserted, so the index appears immaterial — confirm.
assertTrue(parseBuffer(view.getNextBuffer().buffer(), 0).isBuffer());
}
// The event must come after all record buffers on every channel.
BufferOrEvent boe = parseBuffer(view.getNextBuffer().buffer(), i);
assertTrue(boe.isEvent());
assertEquals(barrier, boe.getEvent());
}
} else {
// Channel-selecting writer: records (i)-(iii) landed on channels 0..2 (record (ii)
// spans two buffers on channel 1); channel 3 received only the broadcast event.
assertEquals(4, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
ResultSubpartitionView[] views = new ResultSubpartitionView[4];
// 1 buffer + 1 event
assertEquals(2, partition.getNumberOfQueuedBuffers(0));
views[0] = partition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
assertTrue(parseBuffer(views[0].getNextBuffer().buffer(), 0).isBuffer());
// 2 buffers + 1 event
assertEquals(3, partition.getNumberOfQueuedBuffers(1));
views[1] = partition.createSubpartitionView(1, new NoOpBufferAvailablityListener());
assertTrue(parseBuffer(views[1].getNextBuffer().buffer(), 1).isBuffer());
assertTrue(parseBuffer(views[1].getNextBuffer().buffer(), 1).isBuffer());
// 1 buffer + 1 event
assertEquals(2, partition.getNumberOfQueuedBuffers(2));
views[2] = partition.createSubpartitionView(2, new NoOpBufferAvailablityListener());
assertTrue(parseBuffer(views[2].getNextBuffer().buffer(), 2).isBuffer());
views[3] = partition.createSubpartitionView(3, new NoOpBufferAvailablityListener());
// 0 buffers + 1 event
assertEquals(1, partition.getNumberOfQueuedBuffers(3));
// every queue's last element should be the event
for (int i = 0; i < numberOfChannels; i++) {
BufferOrEvent boe = parseBuffer(views[i].getNextBuffer().buffer(), i);
assertTrue(boe.isEvent());
assertEquals(barrier, boe.getEvent());
}
}
}
Aggregations