use of org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent in project flink by apache.
the class SpilledBufferOrEventSequenceTest method testMultipleSequences.
@Test
public void testMultipleSequences() {
    File secondFile = null;
    FileChannel secondChannel = null;
    try {
        // create the second file channel
        secondFile = File.createTempFile("testdata", "tmp");
        secondChannel = new RandomAccessFile(secondFile, "rw").getChannel();
        final Random rnd = new Random();
        final Random bufferRnd = new Random();
        final long bufferSeed = rnd.nextLong();
        bufferRnd.setSeed(bufferSeed);
        final int numEventsAndBuffers1 = 272;
        final int numEventsAndBuffers2 = 151;
        final int numChannels = 1656;
        final ArrayList<BufferOrEvent> events1 = new ArrayList<BufferOrEvent>(128);
        final ArrayList<BufferOrEvent> events2 = new ArrayList<BufferOrEvent>(128);
        for (int i = 0; i < numEventsAndBuffers1; i++) {
            boolean isEvent = rnd.nextDouble() < 0.05d;
            if (isEvent) {
                events1.add(generateAndWriteEvent(fileChannel, rnd, numChannels));
            } else {
                writeBuffer(fileChannel, bufferRnd.nextInt(pageSize) + 1, bufferRnd.nextInt(numChannels));
            }
        }
        for (int i = 0; i < numEventsAndBuffers2; i++) {
            boolean isEvent = rnd.nextDouble() < 0.05d;
            if (isEvent) {
                events2.add(generateAndWriteEvent(secondChannel, rnd, numChannels));
            } else {
                writeBuffer(secondChannel, bufferRnd.nextInt(pageSize) + 1, bufferRnd.nextInt(numChannels));
            }
        }
        // reset and create reader
        fileChannel.position(0L);
        secondChannel.position(0L);
        bufferRnd.setSeed(bufferSeed);
        SpilledBufferOrEventSequence seq1 =
                new SpilledBufferOrEventSequence(tempFile, fileChannel, buffer, pageSize);
        SpilledBufferOrEventSequence seq2 =
                new SpilledBufferOrEventSequence(secondFile, secondChannel, buffer, pageSize);
        // read and validate sequence 1
        seq1.open();
        int numEvent = 0;
        for (int i = 0; i < numEventsAndBuffers1; i++) {
            BufferOrEvent next = seq1.getNext();
            if (next.isEvent()) {
                BufferOrEvent expected = events1.get(numEvent++);
                assertEquals(expected.getEvent(), next.getEvent());
                assertEquals(expected.getChannelIndex(), next.getChannelIndex());
            } else {
                validateBuffer(next, bufferRnd.nextInt(pageSize) + 1, bufferRnd.nextInt(numChannels));
            }
        }
        assertNull(seq1.getNext());
        assertEquals(events1.size(), numEvent);
        // read and validate sequence 2
        seq2.open();
        numEvent = 0;
        for (int i = 0; i < numEventsAndBuffers2; i++) {
            BufferOrEvent next = seq2.getNext();
            if (next.isEvent()) {
                BufferOrEvent expected = events2.get(numEvent++);
                assertEquals(expected.getEvent(), next.getEvent());
                assertEquals(expected.getChannelIndex(), next.getChannelIndex());
            } else {
                validateBuffer(next, bufferRnd.nextInt(pageSize) + 1, bufferRnd.nextInt(numChannels));
            }
        }
        assertNull(seq2.getNext());
        assertEquals(events2.size(), numEvent);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (secondChannel != null) {
            try {
                secondChannel.close();
            } catch (IOException e) {
                // ignore here
            }
        }
        if (secondFile != null) {
            //noinspection ResultOfMethodCallIgnored
            secondFile.delete();
        }
    }
}
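The validation in this test works because a java.util.Random reset to the same seed replays exactly the same sequence of values, so the buffer sizes and channel indices generated while writing can be regenerated while reading. A minimal, self-contained sketch of that pattern (plain Java, independent of Flink's test helpers):

import java.util.Random;

public class SeededReplay {
    public static void main(String[] args) {
        Random rnd = new Random();
        long seed = rnd.nextLong();

        rnd.setSeed(seed);
        int writtenSize = rnd.nextInt(4096) + 1;     // size generated while "writing"

        rnd.setSeed(seed);                           // reset to replay the identical sequence
        int expectedSize = rnd.nextInt(4096) + 1;    // size regenerated during "validation"

        if (writtenSize != expectedSize) {
            throw new AssertionError("seeded sequences must match");
        }
        System.out.println("replayed size: " + expectedSize);
    }
}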
use of org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent in project flink by apache.
the class CheckpointedInputGate method pollNext.
@Override
public Optional<BufferOrEvent> pollNext() throws IOException, InterruptedException {
    Optional<BufferOrEvent> next = inputGate.pollNext();
    if (!next.isPresent()) {
        return handleEmptyBuffer();
    }
    BufferOrEvent bufferOrEvent = next.get();
    if (bufferOrEvent.isEvent()) {
        return handleEvent(bufferOrEvent);
    } else if (bufferOrEvent.isBuffer()) {
        /*
         * https://issues.apache.org/jira/browse/FLINK-19537 This is not entirely accurate, as it
         * ignores the buffers/bytes accumulated in the record deserializers. If a buffer is
         * processed here, that does not mean it was fully processed (so we can overestimate the
         * amount of processed bytes). On the other hand, some records/bytes might be processed
         * without polling anything from this {@link CheckpointedInputGate} (underestimating the
         * amount of processed bytes). All in all, this should be calculated at the {@link
         * StreamTaskNetworkInput} level, where we have access to the record deserializers.
         * However, the current approach is accurate on average and might be just good enough
         * (at least for the time being).
         */
        barrierHandler.addProcessedBytes(bufferOrEvent.getBuffer().getSize());
    }
    return next;
}
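For orientation, a caller such as StreamTaskNetworkInput drains the gate and branches on the buffer/event distinction. The following is only an illustrative sketch, assuming a CheckpointedInputGate named gate and hypothetical processBuffer/processEvent handlers; it is not the actual Flink caller:

// Illustrative polling loop; an empty Optional means "nothing available right now",
// not end of input, so real callers re-register for availability instead of busy-looping.
Optional<BufferOrEvent> polled;
while ((polled = gate.pollNext()).isPresent()) {
    BufferOrEvent bufferOrEvent = polled.get();
    if (bufferOrEvent.isBuffer()) {
        processBuffer(bufferOrEvent.getBuffer());                                // hypothetical handler
    } else {
        processEvent(bufferOrEvent.getEvent(), bufferOrEvent.getChannelIndex()); // hypothetical handler
    }
}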
use of org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent in project flink by apache.
the class CheckpointedInputGate method processPriorityEvents.
/**
* Eagerly pulls and processes all priority events. Must be called from task thread.
*
* <p>Basic assumption is that no priority event needs to be handled by the {@link
* StreamTaskNetworkInput}.
*/
private void processPriorityEvents() throws IOException, InterruptedException {
    // check if the priority event is still not processed (could have been pulled before mail
    // was being executed)
    boolean hasPriorityEvent = inputGate.getPriorityEventAvailableFuture().isDone();
    while (hasPriorityEvent) {
        // process as many priority events as possible
        final Optional<BufferOrEvent> bufferOrEventOpt = pollNext();
        if (!bufferOrEventOpt.isPresent()) {
            break;
        }
        final BufferOrEvent bufferOrEvent = bufferOrEventOpt.get();
        checkState(bufferOrEvent.hasPriority(), "Should only poll priority events");
        hasPriorityEvent = bufferOrEvent.morePriorityEvents();
    }
    // re-enqueue mail to process future priority events
    waitForPriorityEvents(inputGate, mailboxExecutor);
}
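waitForPriorityEvents itself is not shown here; conceptually it chains a mailbox action onto the gate's priority-event availability future so that processPriorityEvents runs again on the task thread once another priority event arrives. A rough sketch under that assumption (names follow the code above, but this is not Flink's actual implementation):

// Hypothetical sketch: when the priority-event future completes, enqueue a mail
// that calls processPriorityEvents() on the task (mailbox) thread.
private void waitForPriorityEvents(InputGate inputGate, MailboxExecutor mailboxExecutor) {
    inputGate
            .getPriorityEventAvailableFuture()
            .thenRun(
                    () ->
                            mailboxExecutor.execute(
                                    this::processPriorityEvents,
                                    "CheckpointedInputGate priority event"));
}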
use of org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent in project flink by apache.
the class StreamTaskNetworkInputTest method createDataBuffer.
private BufferOrEvent createDataBuffer() throws IOException {
    try (BufferBuilder bufferBuilder =
            BufferBuilderTestUtils.createEmptyBufferBuilder(PAGE_SIZE)) {
        BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
        serializeRecord(42L, bufferBuilder);
        serializeRecord(44L, bufferBuilder);
        return new BufferOrEvent(bufferConsumer.build(), new InputChannelInfo(0, 0));
    }
}
use of org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent in project flink by apache.
the class StreamTaskNetworkInputTest method testNoDataProcessedAfterCheckpointBarrier.
/**
 * On a CheckpointBarrier, the InputGate can enqueue a mailbox action, and
 * StreamTaskNetworkInput must allow this action to execute before processing any
 * following record.
 */
@Test
public void testNoDataProcessedAfterCheckpointBarrier() throws Exception {
    CheckpointBarrier barrier =
            new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation());
    List<BufferOrEvent> buffers = new ArrayList<>(2);
    buffers.add(new BufferOrEvent(barrier, new InputChannelInfo(0, 0)));
    buffers.add(createDataBuffer());
    VerifyRecordsDataOutput<Long> output = new VerifyRecordsDataOutput<>();
    StreamTaskNetworkInput<Long> input = createStreamTaskNetworkInput(buffers);
    assertHasNextElement(input, output);
    assertEquals(0, output.getNumberOfEmittedRecords());
}
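The assertion relies on VerifyRecordsDataOutput counting emitted records, so that zero records are observed until the barrier's mailbox action has run. Its implementation is not shown here; as a rough illustration of the counting idea only (the real test class implements Flink's internal DataOutput interface):

// Hypothetical counting output, for illustration only.
class CountingOutput<T> {
    private int numberOfEmittedRecords;

    void emitRecord(T record) {
        numberOfEmittedRecords++;
    }

    int getNumberOfEmittedRecords() {
        return numberOfEmittedRecords;
    }
}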