Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class CreditBasedPartitionRequestClientHandlerTest, method testReceiveEmptyBuffer.
/**
 * Tests a fix for FLINK-1761.
 *
 * <p>FLINK-1761 discovered an IndexOutOfBoundsException when receiving buffers of size 0.
 */
@Test
public void testReceiveEmptyBuffer() throws Exception {
    // Minimal mock of a remote input channel
    final BufferProvider bufferProvider = mock(BufferProvider.class);
    when(bufferProvider.requestBuffer()).thenReturn(TestBufferFactory.createBuffer(0));
    final RemoteInputChannel inputChannel = mock(RemoteInputChannel.class);
    when(inputChannel.getInputChannelId()).thenReturn(new InputChannelID());
    when(inputChannel.getBufferProvider()).thenReturn(bufferProvider);

    // An empty buffer of size 0
    final Buffer emptyBuffer = TestBufferFactory.createBuffer(0);

    final CreditBasedPartitionRequestClientHandler client =
            new CreditBasedPartitionRequestClientHandler();
    client.addInputChannel(inputChannel);

    final int backlog = 2;
    final BufferResponse receivedBuffer = createBufferResponse(
            emptyBuffer, 0, inputChannel.getInputChannelId(), backlog, new NetworkBufferAllocator(client));

    // Read the empty buffer
    client.channelRead(mock(ChannelHandlerContext.class), receivedBuffer);

    // This should not have caused an exception; the channel is notified of the empty buffer instead
    verify(inputChannel, never()).onError(any(Throwable.class));
    verify(inputChannel, times(1)).onEmptyBuffer(0, backlog);
}
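For reference, an empty buffer here is an ordinary data buffer whose readable region is simply zero bytes. A minimal sketch of that, using only classes and methods already referenced in this listing (TestBufferFactory is a flink-runtime test utility):

    // Sketch: what a zero-size buffer looks like to the handler.
    Buffer empty = TestBufferFactory.createBuffer(0);
    assertTrue(empty.isBuffer());           // still a data buffer, not an event
    assertEquals(0, empty.readableBytes()); // but with nothing to read
    empty.recycleBuffer();                  // hand it back to its recycler when done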
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class SortMergeResultPartitionTest, method testWriteAndRead.
@Test
public void testWriteAndRead() throws Exception {
    int numBuffers = useHashDataBuffer ? 100 : 15;
    int numSubpartitions = 10;
    int numRecords = 1000;
    Random random = new Random();

    BufferPool bufferPool = globalPool.createBufferPool(numBuffers, numBuffers);
    SortMergeResultPartition partition = createSortMergedPartition(numSubpartitions, bufferPool);

    // Per-subpartition bookkeeping of what was written and what was read back
    Queue<DataBufferTest.DataAndType>[] dataWritten = new Queue[numSubpartitions];
    Queue<Buffer>[] buffersRead = new Queue[numSubpartitions];
    for (int i = 0; i < numSubpartitions; ++i) {
        dataWritten[i] = new ArrayDeque<>();
        buffersRead[i] = new ArrayDeque<>();
    }
    int[] numBytesWritten = new int[numSubpartitions];
    int[] numBytesRead = new int[numSubpartitions];
    Arrays.fill(numBytesWritten, 0);
    Arrays.fill(numBytesRead, 0);

    // Write random records; roughly half are broadcast to all subpartitions
    for (int i = 0; i < numRecords; ++i) {
        ByteBuffer record = generateRandomData(random.nextInt(2 * bufferSize) + 1, random);
        boolean isBroadcast = random.nextBoolean();
        if (isBroadcast) {
            partition.broadcastRecord(record);
            for (int subpartition = 0; subpartition < numSubpartitions; ++subpartition) {
                recordDataWritten(record, dataWritten, subpartition, numBytesWritten, DataType.DATA_BUFFER);
            }
        } else {
            int subpartition = random.nextInt(numSubpartitions);
            partition.emitRecord(record, subpartition);
            recordDataWritten(record, dataWritten, subpartition, numBytesWritten, DataType.DATA_BUFFER);
        }
    }
    partition.finish();
    partition.close();

    // Finishing the partition appends an EndOfPartitionEvent to every subpartition,
    // so record one expected event buffer per subpartition as well
    for (int subpartition = 0; subpartition < numSubpartitions; ++subpartition) {
        ByteBuffer record = EventSerializer.toSerializedEvent(EndOfPartitionEvent.INSTANCE);
        recordDataWritten(record, dataWritten, subpartition, numBytesWritten, DataType.EVENT_BUFFER);
    }

    // Read everything back, deep-copying each buffer so it survives recycling
    ResultSubpartitionView[] views = createSubpartitionViews(partition, numSubpartitions);
    readData(views, bufferWithChannel -> {
        Buffer buffer = bufferWithChannel.getBuffer();
        int subpartition = bufferWithChannel.getChannelIndex();
        int numBytes = buffer.readableBytes();
        numBytesRead[subpartition] += numBytes;

        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(numBytes);
        segment.put(0, buffer.getNioBufferReadable(), numBytes);
        buffersRead[subpartition].add(new NetworkBuffer(segment, buf -> {
        }, buffer.getDataType(), numBytes));
    });
    DataBufferTest.checkWriteReadResult(numSubpartitions, numBytesWritten, numBytesRead, dataWritten, buffersRead);
}
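The read callback above deep-copies each polled buffer into an unpooled segment so its contents remain available after the original buffer is recycled. The same copy idiom in isolation, built only from calls that already appear in the test; the helper name copyBuffer is illustrative:

    // Sketch: deep-copy a Buffer's readable bytes into a standalone NetworkBuffer.
    private static Buffer copyBuffer(Buffer buffer) {
        int numBytes = buffer.readableBytes();
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(numBytes);
        segment.put(0, buffer.getNioBufferReadable(), numBytes);
        // No-op recycler: the copy owns its unpooled segment, so there is nothing to return.
        return new NetworkBuffer(segment, buf -> {}, buffer.getDataType(), numBytes);
    }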
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class SortMergeResultPartitionTest, method testWriteLargeRecord.
@Test
public void testWriteLargeRecord() throws Exception {
    int numBuffers = useHashDataBuffer ? 100 : 15;
    int numWriteBuffers = useHashDataBuffer ? 0 : numBuffers / 2;
    BufferPool bufferPool = globalPool.createBufferPool(numBuffers, numBuffers);
    SortMergeResultPartition partition = createSortMergedPartition(10, bufferPool);

    ByteBuffer recordWritten = generateRandomData(bufferSize * numBuffers, new Random());
    partition.emitRecord(recordWritten, 0);
    assertEquals(useHashDataBuffer ? numBuffers : numWriteBuffers, bufferPool.bestEffortGetNumOfUsedBuffers());

    partition.finish();
    partition.close();

    ResultSubpartitionView view = partition.createSubpartitionView(0, listener);
    ByteBuffer recordRead = ByteBuffer.allocate(bufferSize * numBuffers);
    readData(new ResultSubpartitionView[] {view}, bufferWithChannel -> {
        Buffer buffer = bufferWithChannel.getBuffer();
        if (buffer.isBuffer()) {
            recordRead.put(buffer.getNioBufferReadable());
        }
    });
    recordWritten.rewind();
    recordRead.flip();
    assertEquals(recordWritten, recordRead);
}
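The final assertEquals relies on plain java.nio.ByteBuffer semantics: rewind() resets recordWritten's position so its full content is readable again, flip() bounds recordRead to the bytes actually put into it, and equals() then compares the remaining bytes of both. A self-contained illustration:

    // Sketch: why rewind() and flip() make the two buffers comparable.
    ByteBuffer written = ByteBuffer.allocate(8);
    written.putLong(42L);         // position = 8, limit = 8
    ByteBuffer read = ByteBuffer.allocate(8);
    read.putLong(42L);            // pretend these bytes were read back
    written.rewind();             // position = 0, limit unchanged (8)
    read.flip();                  // limit = 8 (the old position), position = 0
    assertEquals(written, read);  // equals() compares the remaining [position, limit) bytes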
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class ResultPartitionTest, method testWaitForAllRecordProcessed.
@Test
public void testWaitForAllRecordProcessed() throws IOException {
    // Creates a result partition with 2 channels.
    BufferWritingResultPartition bufferWritingResultPartition =
            createResultPartition(ResultPartitionType.PIPELINED_BOUNDED);
    bufferWritingResultPartition.notifyEndOfData(StopMode.DRAIN);
    CompletableFuture<Void> allRecordsProcessedFuture =
            bufferWritingResultPartition.getAllDataProcessedFuture();
    assertFalse(allRecordsProcessedFuture.isDone());

    // Each subpartition should hold exactly one buffer: the serialized EndOfData event
    for (ResultSubpartition resultSubpartition : bufferWritingResultPartition.subpartitions) {
        assertEquals(1, resultSubpartition.getTotalNumberOfBuffersUnsafe());
        Buffer nextBuffer = ((PipelinedSubpartition) resultSubpartition).pollBuffer().buffer();
        assertFalse(nextBuffer.isBuffer());
        assertEquals(
                new EndOfData(StopMode.DRAIN),
                EventSerializer.fromBuffer(nextBuffer, getClass().getClassLoader()));
    }

    // The future completes only once the last subpartition has acknowledged
    for (int i = 0; i < bufferWritingResultPartition.subpartitions.length; ++i) {
        ((PipelinedSubpartition) bufferWritingResultPartition.subpartitions[i]).acknowledgeAllDataProcessed();
        if (i < bufferWritingResultPartition.subpartitions.length - 1) {
            assertFalse(allRecordsProcessedFuture.isDone());
        } else {
            assertTrue(allRecordsProcessedFuture.isDone());
            assertFalse(allRecordsProcessedFuture.isCompletedExceptionally());
        }
    }
}
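The all-data-processed future behaves like a countdown over subpartitions: each acknowledgeAllDataProcessed() call decrements it, and the future completes on the last one. A hypothetical stand-alone sketch of that pattern (the class and method names here are illustrative, not Flink's internals):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative countdown tracker, not Flink's implementation.
    class AllProcessedTracker {
        private final AtomicInteger remaining;
        private final CompletableFuture<Void> allProcessed = new CompletableFuture<>();

        AllProcessedTracker(int numSubpartitions) {
            this.remaining = new AtomicInteger(numSubpartitions);
        }

        void acknowledgeOne() {
            // Complete the future exactly once, when the last subpartition acknowledges.
            if (remaining.decrementAndGet() == 0) {
                allProcessed.complete(null);
            }
        }

        CompletableFuture<Void> getAllProcessedFuture() {
            return allProcessed;
        }
    }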
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class RemoteInputChannelTest, method testExceptionOnPersisting.
@Test
public void testExceptionOnPersisting() throws Exception {
    // Setup
    final SingleInputGate inputGate = createSingleInputGate(1);
    final RemoteInputChannel inputChannel = InputChannelBuilder.newBuilder()
            .setStateWriter(new ChannelStateWriter.NoOpChannelStateWriter() {
                @Override
                public void addInputData(
                        long checkpointId, InputChannelInfo info, int startSeqNum, CloseableIterator<Buffer> data) {
                    try {
                        data.close();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                    throw new ExpectedTestException();
                }
            })
            .buildRemoteChannel(inputGate);
    inputChannel.checkpointStarted(new CheckpointBarrier(
            42, System.currentTimeMillis(), CheckpointOptions.unaligned(CheckpointType.CHECKPOINT, getDefault())));

    final Buffer buffer = createBuffer(TestBufferFactory.BUFFER_SIZE);
    assertFalse(buffer.isRecycled());
    try {
        inputChannel.onBuffer(buffer, 0, -1);
        fail("This should have failed");
    } catch (ExpectedTestException ex) {
        // ignore
    }

    // This check is not strictly speaking necessary. Generally speaking, if an exception happens
    // during persisting, there are two potentially correct outcomes:
    // 1. The buffer is recycled only once, in the #onBuffer call, when handling the exception.
    // 2. The buffer is stored inside the RemoteInputChannel and recycled on releaseAllResources.
    // What's not acceptable is that it would be released twice, in both places. Without the
    // check below, we would just be relying on Buffer throwing IllegalReferenceCountException.
    // I've added this check just to be sure. It freezes the current implementation, which is
    // unlikely to change; on the other hand, thanks to it we don't need to rely on
    // IllegalReferenceCountException being thrown from the Buffer.
    //
    // In other words, if you end up reading this after refactoring RemoteInputChannel, it might
    // be safe to remove this assertion. Just make sure double recycling of the same buffer
    // still throws IllegalReferenceCountException.
    assertFalse(buffer.isRecycled());
    inputChannel.releaseAllResources();
    assertTrue(buffer.isRecycled());
}
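The comment above leans on Buffer's reference counting: a recycle decrements the count, the memory is freed at zero, and a second recycle of an already-freed buffer must fail fast (Flink's NetworkBuffer gets this behavior from Netty's reference counting, which throws IllegalReferenceCountException). A hypothetical sketch of that contract, with illustrative names rather than Flink's actual classes:

    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative reference-counted resource; not Flink's NetworkBuffer.
    class RefCountedBuffer {
        private final AtomicInteger refCount = new AtomicInteger(1);

        void recycle() {
            int count = refCount.decrementAndGet();
            if (count < 0) {
                // Double recycling: mirrors Netty's IllegalReferenceCountException.
                throw new IllegalStateException("Buffer was recycled more than once");
            }
            if (count == 0) {
                // Here the real implementation returns the memory segment to its pool.
            }
        }

        boolean isRecycled() {
            return refCount.get() <= 0;
        }
    }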