Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class RemoteInputChannelTest, method testBuffersInUseCount.
@Test
public void testBuffersInUseCount() throws Exception {
    // Setup
    RemoteInputChannel remoteInputChannel = buildInputGateAndGetChannel();
    final Buffer buffer = createBuffer(TestBufferFactory.BUFFER_SIZE);
    // Receiving the buffer with backlog.
    remoteInputChannel.onBuffer(buffer.retainBuffer(), 0, 1);
    // 1 buffer + 1 backlog.
    assertEquals(2, remoteInputChannel.getBuffersInUseCount());
    remoteInputChannel.onBuffer(buffer.retainBuffer(), 1, 3);
    // 2 buffers + 3 backlog.
    assertEquals(5, remoteInputChannel.getBuffersInUseCount());
    // 1 buffer + 3 backlog.
    remoteInputChannel.getNextBuffer();
    assertEquals(4, remoteInputChannel.getBuffersInUseCount());
    // 0 buffers + 3 backlog.
    remoteInputChannel.getNextBuffer();
    assertEquals(3, remoteInputChannel.getBuffersInUseCount());
    // 0 buffers + 3 backlog. Nothing changes from the previous case because receivedBuffers was
    // already empty.
    remoteInputChannel.getNextBuffer();
    assertEquals(3, remoteInputChannel.getBuffersInUseCount());
}
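The quantity asserted above is, conceptually, the number of queued received buffers plus the most recently announced sender backlog. Below is a minimal, Flink-independent sketch of that invariant; the class and its fields are hypothetical illustrations, not Flink's RemoteInputChannel implementation.

import java.util.ArrayDeque;
import java.util.Deque;

// Hypothetical sketch of the counting invariant exercised by the test above.
public class BuffersInUseSketch {

    private final Deque<byte[]> receivedBuffers = new ArrayDeque<>();
    private int senderBacklog;

    // Mirrors onBuffer(buffer, sequenceNumber, backlog): queue the buffer, remember the backlog.
    void onBuffer(byte[] buffer, int backlog) {
        receivedBuffers.add(buffer);
        senderBacklog = backlog;
    }

    // Mirrors getNextBuffer(): dequeue if anything is queued.
    byte[] getNextBuffer() {
        return receivedBuffers.poll();
    }

    // The asserted quantity: queued buffers plus the last announced backlog.
    int getBuffersInUseCount() {
        return receivedBuffers.size() + senderBacklog;
    }

    public static void main(String[] args) {
        BuffersInUseSketch channel = new BuffersInUseSketch();
        channel.onBuffer(new byte[32], 1);  // 1 buffer + 1 backlog -> 2
        channel.onBuffer(new byte[32], 3);  // 2 buffers + 3 backlog -> 5
        channel.getNextBuffer();            // 1 buffer + 3 backlog -> 4
        System.out.println(channel.getBuffersInUseCount());
    }
}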
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class RemoteInputChannelTest, method testFairDistributionFloatingBuffers.
/**
 * Tests to verify that the buffer pool will distribute available floating buffers among all the
 * channel listeners in a fair way.
 */
@Test
public void testFairDistributionFloatingBuffers() throws Exception {
    // Setup
    final int numExclusiveBuffers = 2;
    final NetworkBufferPool networkBufferPool = new NetworkBufferPool(12, 32);
    final int numFloatingBuffers = 3;
    final SingleInputGate inputGate = createSingleInputGate(3, networkBufferPool);
    final RemoteInputChannel[] inputChannels = new RemoteInputChannel[3];
    inputChannels[0] = createRemoteInputChannel(inputGate);
    inputChannels[1] = createRemoteInputChannel(inputGate);
    inputChannels[2] = createRemoteInputChannel(inputGate);
    inputGate.setInputChannels(inputChannels);
    Throwable thrown = null;
    try {
        final BufferPool bufferPool = spy(networkBufferPool.createBufferPool(numFloatingBuffers, numFloatingBuffers));
        inputGate.setBufferPool(bufferPool);
        inputGate.setupChannels();
        inputGate.requestPartitions();
        for (RemoteInputChannel inputChannel : inputChannels) {
            inputChannel.requestSubpartition();
        }
        // Exhaust all the floating buffers
        final List<Buffer> floatingBuffers = new ArrayList<>(numFloatingBuffers);
        for (int i = 0; i < numFloatingBuffers; i++) {
            Buffer buffer = bufferPool.requestBuffer();
            assertNotNull(buffer);
            floatingBuffers.add(buffer);
        }
        // Announce a backlog larger than the available buffers so that each channel registers as a listener
        for (RemoteInputChannel inputChannel : inputChannels) {
            inputChannel.onSenderBacklog(8);
            verify(bufferPool, times(1)).addBufferListener(inputChannel.getBufferManager());
            assertEquals("There should be " + numExclusiveBuffers + " buffers available in the channel", numExclusiveBuffers, inputChannel.getNumberOfAvailableBuffers());
        }
        // Recycle the three floating buffers to trigger buffer-available notifications
        for (Buffer buffer : floatingBuffers) {
            buffer.recycleBuffer();
        }
        for (RemoteInputChannel inputChannel : inputChannels) {
            assertEquals("There should be 3 buffers available in the channel", 3, inputChannel.getNumberOfAvailableBuffers());
            assertEquals("There should be 1 unannounced credit in the channel", 1, inputChannel.getUnannouncedCredit());
        }
    } catch (Throwable t) {
        thrown = t;
    } finally {
        cleanup(networkBufferPool, null, null, thrown, inputChannels);
    }
}
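The fairness described in the javadoc amounts to handing recycled floating buffers to the registered listeners one at a time in rotation, so the three recycled buffers above show up as exactly one extra buffer (and one unannounced credit) per channel. Below is a minimal sketch of such a round-robin hand-out, assuming plain queues in place of Flink's BufferPool listener machinery; all names are illustrative.

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

// Hypothetical round-robin distribution of recycled floating buffers among channel queues.
class FairDistributionSketch {

    static void distribute(Queue<byte[]> recycledBuffers, List<Queue<byte[]>> channelQueues) {
        int next = 0;
        while (!recycledBuffers.isEmpty()) {
            // Hand each recycled buffer to the next listener in turn.
            channelQueues.get(next).add(recycledBuffers.poll());
            next = (next + 1) % channelQueues.size();
        }
    }

    public static void main(String[] args) {
        Queue<byte[]> recycled = new ArrayDeque<>();
        for (int i = 0; i < 3; i++) {
            recycled.add(new byte[32]);
        }
        List<Queue<byte[]>> channels = List.of(new ArrayDeque<byte[]>(), new ArrayDeque<byte[]>(), new ArrayDeque<byte[]>());
        distribute(recycled, channels);
        // Each of the three channels ends up with exactly one of the three recycled buffers.
        channels.forEach(q -> System.out.println(q.size()));
    }
}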
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class AsynchronousFileIOChannelTest, method testClosedButAddRequestAndRegisterListenerRace.
@Test
public void testClosedButAddRequestAndRegisterListenerRace() throws Exception {
    // -- Config ----------------------------------------------------------
    final int numberOfRuns = 1024;
    // -- Setup -----------------------------------------------------------
    final ExecutorService executor = Executors.newFixedThreadPool(2);
    final RequestQueue<WriteRequest> requestQueue = new RequestQueue<WriteRequest>();
    @SuppressWarnings("unchecked")
    final RequestDoneCallback<Buffer> ioChannelCallback = mock(RequestDoneCallback.class);
    final TestNotificationListener listener = new TestNotificationListener();
    // -- The Test --------------------------------------------------------
    try (final IOManagerAsync ioManager = new IOManagerAsync()) {
        // Repeatedly close the channel and add a request.
        for (int i = 0; i < numberOfRuns; i++) {
            final TestAsyncFileIOChannel ioChannel = new TestAsyncFileIOChannel(ioManager.createChannel(), requestQueue, ioChannelCallback, true);
            final CountDownLatch sync = new CountDownLatch(2);
            final WriteRequest request = mock(WriteRequest.class);
            ioChannel.close();
            // Add request task
            Callable<Void> addRequestTask = new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    try {
                        ioChannel.addRequest(request);
                    } catch (Throwable expected) {
                        // The channel is already closed, so the request may be rejected.
                    } finally {
                        sync.countDown();
                    }
                    return null;
                }
            };
            // Listener
            Callable<Void> registerListenerTask = new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    try {
                        while (true) {
                            int current = listener.getNumberOfNotifications();
                            if (ioChannel.registerAllRequestsProcessedListener(listener)) {
                                listener.waitForNotification(current);
                            } else if (ioChannel.isClosed()) {
                                break;
                            }
                        }
                    } finally {
                        sync.countDown();
                    }
                    return null;
                }
            };
            executor.submit(addRequestTask);
            executor.submit(registerListenerTask);
            if (!sync.await(2, TimeUnit.MINUTES)) {
                fail("Test failed due to a timeout. This indicates a deadlock due to the way "
                        + "that listeners are registered/notified in the asynchronous file I/O "
                        + "channel.");
            }
        }
    } finally {
        executor.shutdown();
    }
}
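The harness above follows a common pattern for shaking out races: run the two conflicting operations on a small thread pool many times, join on a CountDownLatch, and treat a missed timeout as a deadlock. The sketch below is a stripped-down skeleton of that pattern with the Flink-specific channel operations replaced by placeholder comments; it is an illustration, not the test's actual helper code.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Hypothetical skeleton of a close-vs-use race test; the racing operations are placeholders.
class RaceHarnessSketch {

    public static void main(String[] args) throws Exception {
        final int numberOfRuns = 100;
        final ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            for (int run = 0; run < numberOfRuns; run++) {
                final CountDownLatch sync = new CountDownLatch(2);
                // Both tasks race against an already-closed resource and must always terminate.
                executor.submit(() -> {
                    try {
                        // e.g. add a request to the closed channel; a rejection is expected here
                    } finally {
                        sync.countDown();
                    }
                });
                executor.submit(() -> {
                    try {
                        // e.g. register a listener, or observe that the channel is closed
                    } finally {
                        sync.countDown();
                    }
                });
                if (!sync.await(2, TimeUnit.MINUTES)) {
                    throw new AssertionError("Timeout: likely a deadlock between close and listener registration.");
                }
            }
        } finally {
            executor.shutdown();
        }
    }
}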
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class BufferFileWriterReaderTest, method testWriteSkipRead.
@Test
public void testWriteSkipRead() throws IOException {
    int numBuffers = 1024;
    int currentNumber = 0;
    // Write buffers filled with ascending numbers...
    for (int i = 0; i < numBuffers; i++) {
        final Buffer buffer = createBuffer();
        currentNumber = fillBufferWithAscendingNumbers(buffer, currentNumber, buffer.getMaxCapacity());
        writer.writeBlock(buffer);
    }
    // Make sure that the writes are finished
    writer.close();
    final int toSkip = 32;
    // Skip first buffers...
    reader.seekToPosition((8 + BUFFER_SIZE) * toSkip);
    numBuffers -= toSkip;
    // Read buffers back in...
    for (int i = 0; i < numBuffers; i++) {
        assertFalse(reader.hasReachedEndOfFile());
        reader.readInto(createBuffer());
    }
    reader.close();
    assertTrue(reader.hasReachedEndOfFile());
    // Verify that the content is the same
    assertEquals("Read less buffers than written.", numBuffers, returnedBuffers.size());
    // Start number after skipped buffers...
    currentNumber = (BUFFER_SIZE / 4) * toSkip;
    Buffer buffer;
    while ((buffer = returnedBuffers.poll()) != null) {
        currentNumber = verifyBufferFilledWithAscendingNumbers(buffer, currentNumber);
    }
}
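The two magic expressions in this test follow from the on-disk layout implied by the seek: each written block appears to occupy an 8-byte header plus BUFFER_SIZE payload bytes, and each buffer carries BUFFER_SIZE / 4 four-byte ints of the ascending sequence. Below is a small, self-contained check of that arithmetic; the BUFFER_SIZE value is assumed for illustration, as the real constant lives in the test class.

// Hypothetical check of the skip arithmetic used in testWriteSkipRead.
class SkipOffsetSketch {

    public static void main(String[] args) {
        int bufferSize = 32 * 1024; // assumed value of BUFFER_SIZE
        int toSkip = 32;
        // Each block on disk is an 8-byte header followed by the buffer payload,
        // so skipping 32 blocks means seeking past 32 * (8 + bufferSize) bytes.
        long seekPosition = (long) (8 + bufferSize) * toSkip;
        // Each buffer holds bufferSize / 4 ints, so verification restarts at the
        // (bufferSize / 4) * toSkip-th ascending number.
        int firstExpectedNumber = (bufferSize / 4) * toSkip;
        System.out.println("seek to " + seekPosition + ", expect numbers from " + firstExpectedNumber);
    }
}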
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class RecordWriterDelegateTest, method verifyAvailability.
private void verifyAvailability(RecordWriterDelegate writerDelegate) throws Exception {
    // writer is available at the beginning
    assertTrue(writerDelegate.isAvailable());
    assertTrue(writerDelegate.getAvailableFuture().isDone());
    // request one buffer from the local pool to make it unavailable
    RecordWriter recordWriter = writerDelegate.getRecordWriter(0);
    for (int i = 0; i < memorySegmentSize / recordSize; ++i) {
        recordWriter.emit(new IntValue(i));
    }
    assertFalse(writerDelegate.isAvailable());
    CompletableFuture future = writerDelegate.getAvailableFuture();
    assertFalse(future.isDone());
    // recycle the buffer to make the local pool available again
    ResultSubpartitionView readView = recordWriter.getTargetPartition().createSubpartitionView(0, new NoOpBufferAvailablityListener());
    Buffer buffer = readView.getNextBuffer().buffer();
    buffer.recycleBuffer();
    assertTrue(future.isDone());
    assertTrue(writerDelegate.isAvailable());
    assertTrue(writerDelegate.getAvailableFuture().isDone());
}
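The contract verified here is the usual availability-future pattern: exhausting the writer's local buffer pool hands out an incomplete future, and recycling a buffer completes it. Below is a minimal sketch of that pattern, assuming a single-buffer pool in place of Flink's RecordWriter and LocalBufferPool; the class and method names are hypothetical.

import java.util.concurrent.CompletableFuture;

// Hypothetical single-buffer pool illustrating the availability-future contract.
class AvailabilitySketch {

    private CompletableFuture<Void> availableFuture = CompletableFuture.completedFuture(null);
    private int freeBuffers = 1;

    // Taking the last free buffer makes the pool unavailable: callers now see an incomplete future.
    void requestBuffer() {
        if (--freeBuffers == 0) {
            availableFuture = new CompletableFuture<>();
        }
    }

    // Recycling a buffer restores availability by completing the pending future.
    void recycleBuffer() {
        freeBuffers++;
        availableFuture.complete(null);
    }

    boolean isAvailable() {
        return availableFuture.isDone();
    }

    public static void main(String[] args) {
        AvailabilitySketch pool = new AvailabilitySketch();
        pool.requestBuffer();
        CompletableFuture<Void> pending = pool.availableFuture;
        System.out.println(pool.isAvailable()); // false: the only buffer is in use
        pool.recycleBuffer();
        System.out.println(pending.isDone());   // true: the recycled buffer completed the future
    }
}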