use of org.apache.flink.runtime.io.network.buffer.NetworkBufferPool in project flink by apache.
the class BackPressureStatsTrackerITCase method setup.
@BeforeClass
public static void setup() {
    testActorSystem = AkkaUtils.createLocalActorSystem(new Configuration());
    networkBufferPool = new NetworkBufferPool(100, 8192, MemoryType.HEAP);
}
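The setup above allocates 100 network buffers of 8 KB each on the heap. A matching teardown is not shown on this page; a hypothetical sketch (the field names mirror the setup, while @AfterClass and the shutdown calls are assumptions) would release both resources:

// Hypothetical teardown companion to the setup above; not taken from the test itself.
@AfterClass
public static void teardown() {
    if (testActorSystem != null) {
        testActorSystem.shutdown();   // assumed: Akka's ActorSystem#shutdown()
    }
    if (networkBufferPool != null) {
        networkBufferPool.destroy();  // releases all memory segments held by the pool
    }
}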
use of org.apache.flink.runtime.io.network.buffer.NetworkBufferPool in project flink by apache.
the class TaskManagerServices method createNetworkEnvironment.
/**
* Creates the {@link NetworkEnvironment} from the given {@link TaskManagerServicesConfiguration}.
*
* @param taskManagerServicesConfiguration to construct the network environment from
* @return Network environment
* @throws IOException
*/
private static NetworkEnvironment createNetworkEnvironment(
        TaskManagerServicesConfiguration taskManagerServicesConfiguration) throws IOException {

    NetworkEnvironmentConfiguration networkEnvironmentConfiguration =
        taskManagerServicesConfiguration.getNetworkConfig();

    NetworkBufferPool networkBufferPool = new NetworkBufferPool(
        networkEnvironmentConfiguration.numNetworkBuffers(),
        networkEnvironmentConfiguration.networkBufferSize(),
        networkEnvironmentConfiguration.memoryType());

    ConnectionManager connectionManager;
    if (networkEnvironmentConfiguration.nettyConfig() != null) {
        connectionManager = new NettyConnectionManager(networkEnvironmentConfiguration.nettyConfig());
    } else {
        connectionManager = new LocalConnectionManager();
    }

    ResultPartitionManager resultPartitionManager = new ResultPartitionManager();
    TaskEventDispatcher taskEventDispatcher = new TaskEventDispatcher();
    KvStateRegistry kvStateRegistry = new KvStateRegistry();

    KvStateServer kvStateServer;
    if (taskManagerServicesConfiguration.getQueryableStateConfig().enabled()) {
        QueryableStateConfiguration qsConfig = taskManagerServicesConfiguration.getQueryableStateConfig();
        int numNetworkThreads = qsConfig.numServerThreads() == 0
            ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numServerThreads();
        int numQueryThreads = qsConfig.numQueryThreads() == 0
            ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numQueryThreads();
        kvStateServer = new KvStateServer(
            taskManagerServicesConfiguration.getTaskManagerAddress(), qsConfig.port(),
            numNetworkThreads, numQueryThreads, kvStateRegistry, new DisabledKvStateRequestStats());
    } else {
        kvStateServer = null;
    }

    // we start the network first, to make sure it can allocate its buffers first
    return new NetworkEnvironment(
        networkBufferPool, connectionManager, resultPartitionManager, taskEventDispatcher,
        kvStateRegistry, kvStateServer,
        networkEnvironmentConfiguration.ioMode(),
        networkEnvironmentConfiguration.partitionRequestInitialBackoff(),
        networkEnvironmentConfiguration.partitionRequestMaxBackoff(),
        networkEnvironmentConfiguration.networkBuffersPerChannel(),
        networkEnvironmentConfiguration.extraNetworkBuffersPerGate());
}
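Stripped of the configuration plumbing, the buffer-pool lifecycle this method sets in motion can be sketched on its own. The sizes below are illustrative rather than Flink defaults, and only calls that appear elsewhere on this page are used:

// Allocate the TaskManager-wide pool up front, then carve a per-task local pool out of it.
NetworkBufferPool globalPool = new NetworkBufferPool(
    2048,              // total number of network memory segments (illustrative)
    32 * 1024,         // segment size in bytes (illustrative)
    MemoryType.HEAP);  // back the segments with on-heap memory

// A local pool that needs at least 4 buffers and may grow to at most 16.
BufferPool localPool = globalPool.createBufferPool(4, 16);

// ... hand the local pool to a result partition or input gate ...

localPool.lazyDestroy();  // return the local pool's segments to the global pool
globalPool.destroy();     // release everything on TaskManager shutdown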
use of org.apache.flink.runtime.io.network.buffer.NetworkBufferPool in project flink by apache.
the class RecordWriterTest method testClearBuffersAfterExceptionInPartitionWriter.
@Test
public void testClearBuffersAfterExceptionInPartitionWriter() throws Exception {
    NetworkBufferPool buffers = null;
    BufferPool bufferPool = null;
    try {
        buffers = new NetworkBufferPool(1, 1024, MemoryType.HEAP);
        bufferPool = spy(buffers.createBufferPool(1, Integer.MAX_VALUE));

        ResultPartitionWriter partitionWriter = mock(ResultPartitionWriter.class);
        when(partitionWriter.getBufferProvider()).thenReturn(checkNotNull(bufferPool));
        when(partitionWriter.getNumberOfOutputChannels()).thenReturn(1);

        // Recycle the buffer and throw an Exception
        doAnswer(new Answer<Void>() {
            @Override
            public Void answer(InvocationOnMock invocation) throws Throwable {
                Buffer buffer = (Buffer) invocation.getArguments()[0];
                buffer.recycle();
                throw new RuntimeException("Expected test Exception");
            }
        }).when(partitionWriter).writeBuffer(any(Buffer.class), anyInt());

        RecordWriter<IntValue> recordWriter = new RecordWriter<>(partitionWriter);

        try {
            // A manual flush here doesn't test this case (see next).
            for (;;) {
                recordWriter.emit(new IntValue(0));
            }
        } catch (Exception e) {
            // Verify that the buffer is not part of the record writer state after a failure
            // to flush it out. If the buffer is still part of the record writer state, this
            // will fail, because the buffer has already been recycled. NOTE: The mock
            // partition writer needs to recycle the buffer to correctly test this.
            recordWriter.clearBuffers();
        }

        // Verify expected methods have been called
        verify(partitionWriter, times(1)).writeBuffer(any(Buffer.class), anyInt());
        verify(bufferPool, times(1)).requestBufferBlocking();

        try {
            // Verify that manual flushing correctly clears the buffer.
            recordWriter.emit(new IntValue(0));
            recordWriter.flush();
            Assert.fail("Did not throw expected test Exception");
        } catch (Exception e) {
            recordWriter.clearBuffers();
        }

        // Verify expected methods have been called
        verify(partitionWriter, times(2)).writeBuffer(any(Buffer.class), anyInt());
        verify(bufferPool, times(2)).requestBufferBlocking();

        try {
            // Verify that broadcast emit correctly clears the buffer.
            for (;;) {
                recordWriter.broadcastEmit(new IntValue(0));
            }
        } catch (Exception e) {
            recordWriter.clearBuffers();
        }

        // Verify expected methods have been called
        verify(partitionWriter, times(3)).writeBuffer(any(Buffer.class), anyInt());
        verify(bufferPool, times(3)).requestBufferBlocking();

        try {
            // Verify that the end-of-superstep event correctly clears the buffer.
            recordWriter.emit(new IntValue(0));
            recordWriter.broadcastEvent(EndOfSuperstepEvent.INSTANCE);
            Assert.fail("Did not throw expected test Exception");
        } catch (Exception e) {
            recordWriter.clearBuffers();
        }

        // Verify expected methods have been called
        verify(partitionWriter, times(4)).writeBuffer(any(Buffer.class), anyInt());
        verify(bufferPool, times(4)).requestBufferBlocking();

        try {
            // Verify that broadcasting an event correctly clears the buffer.
            recordWriter.emit(new IntValue(0));
            recordWriter.broadcastEvent(new TestTaskEvent());
            Assert.fail("Did not throw expected test Exception");
        } catch (Exception e) {
            recordWriter.clearBuffers();
        }

        // Verify expected methods have been called
        verify(partitionWriter, times(5)).writeBuffer(any(Buffer.class), anyInt());
        verify(bufferPool, times(5)).requestBufferBlocking();
    } finally {
        if (bufferPool != null) {
            assertEquals(1, bufferPool.getNumberOfAvailableMemorySegments());
            bufferPool.lazyDestroy();
        }
        if (buffers != null) {
            assertEquals(1, buffers.getNumberOfAvailableMemorySegments());
            buffers.destroy();
        }
    }
}
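The contract the mock exploits, namely that every buffer handed to writeBuffer must be recycled exactly once, can be sketched in isolation. This is an illustration only, using just the calls visible in the test above; checked exceptions are collapsed into a plain throws Exception:

// Minimal sketch of the request/recycle contract behind the test above.
static void requestAndRecycleOnce() throws Exception {
    NetworkBufferPool global = new NetworkBufferPool(1, 1024, MemoryType.HEAP);
    BufferPool pool = global.createBufferPool(1, Integer.MAX_VALUE);

    Buffer buffer = pool.requestBufferBlocking();  // blocks until a segment is free
    try {
        // ... serialize a record into the buffer ...
    } finally {
        buffer.recycle();  // must happen exactly once, or the segment leaks
    }

    // After recycling, the single segment is available to the pool again.
    assertEquals(1, pool.getNumberOfAvailableMemorySegments());
    pool.lazyDestroy();
    global.destroy();
}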
use of org.apache.flink.runtime.io.network.buffer.NetworkBufferPool in project flink by apache.
the class NetworkEnvironmentTest method testRegisterTaskUsesBoundedBuffers.
/**
* Verifies that {@link NetworkEnvironment#registerTask(Task)} sets up (un)bounded buffer pool
* instances for various types of input and output channels.
*/
@Test
public void testRegisterTaskUsesBoundedBuffers() throws Exception {
    final NetworkEnvironment network = new NetworkEnvironment(
        new NetworkBufferPool(numBuffers, memorySegmentSize, MemoryType.HEAP),
        new LocalConnectionManager(),
        new ResultPartitionManager(),
        new TaskEventDispatcher(),
        new KvStateRegistry(),
        null,
        IOManager.IOMode.SYNC,
        0, 0, 2, 8);

    // result partitions
    ResultPartition rp1 = createResultPartition(ResultPartitionType.PIPELINED, 2);
    ResultPartition rp2 = createResultPartition(ResultPartitionType.BLOCKING, 2);
    ResultPartition rp3 = createResultPartition(ResultPartitionType.PIPELINED_BOUNDED, 2);
    ResultPartition rp4 = createResultPartition(ResultPartitionType.PIPELINED_BOUNDED, 8);
    final ResultPartition[] resultPartitions = new ResultPartition[] { rp1, rp2, rp3, rp4 };
    final ResultPartitionWriter[] resultPartitionWriters = new ResultPartitionWriter[] {
        new ResultPartitionWriter(rp1),
        new ResultPartitionWriter(rp2),
        new ResultPartitionWriter(rp3),
        new ResultPartitionWriter(rp4) };

    // input gates
    final SingleInputGate[] inputGates = new SingleInputGate[] {
        createSingleInputGateMock(ResultPartitionType.PIPELINED, 2),
        createSingleInputGateMock(ResultPartitionType.BLOCKING, 2),
        createSingleInputGateMock(ResultPartitionType.PIPELINED_BOUNDED, 2),
        createSingleInputGateMock(ResultPartitionType.PIPELINED_BOUNDED, 8) };

    // overall task to register
    Task task = mock(Task.class);
    when(task.getProducedPartitions()).thenReturn(resultPartitions);
    when(task.getAllWriters()).thenReturn(resultPartitionWriters);
    when(task.getAllInputGates()).thenReturn(inputGates);

    network.registerTask(task);

    assertEquals(Integer.MAX_VALUE, rp1.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(Integer.MAX_VALUE, rp2.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(2 * 2 + 8, rp3.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(8 * 2 + 8, rp4.getBufferPool().getMaxNumberOfMemorySegments());

    network.shutdown();
}
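The caps asserted for rp3 and rp4 follow from the last two NetworkEnvironment constructor arguments above (2 buffers per channel, 8 extra buffers per gate): PIPELINED and BLOCKING partitions get effectively unbounded pools, while PIPELINED_BOUNDED partitions are capped per partition. A hypothetical helper, not part of Flink's API, makes the arithmetic explicit:

// Hypothetical helper illustrating the cap asserted above; not part of Flink.
static int maxSegmentsForBoundedPartition(int numberOfChannels, int buffersPerChannel, int extraBuffersPerGate) {
    return numberOfChannels * buffersPerChannel + extraBuffersPerGate;
}

// maxSegmentsForBoundedPartition(2, 2, 8) == 12  -> matches the rp3 assertion
// maxSegmentsForBoundedPartition(8, 2, 8) == 24  -> matches the rp4 assertion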
use of org.apache.flink.runtime.io.network.buffer.NetworkBufferPool in project flink by apache.
the class BarrierBufferMassiveRandomTest method testWithTwoChannelsAndRandomBarriers.
@Test
public void testWithTwoChannelsAndRandomBarriers() {
    IOManager ioMan = null;
    try {
        ioMan = new IOManagerAsync();

        BufferPool pool1 = new NetworkBufferPool(100, PAGE_SIZE, MemoryType.HEAP).createBufferPool(100, 100);
        BufferPool pool2 = new NetworkBufferPool(100, PAGE_SIZE, MemoryType.HEAP).createBufferPool(100, 100);

        RandomGeneratingInputGate myIG = new RandomGeneratingInputGate(
            new BufferPool[] { pool1, pool2 },
            new BarrierGenerator[] { new CountBarrier(100000), new RandomBarrier(100000) });

        BarrierBuffer barrierBuffer = new BarrierBuffer(myIG, ioMan);

        for (int i = 0; i < 2000000; i++) {
            BufferOrEvent boe = barrierBuffer.getNextNonBlocked();
            if (boe.isBuffer()) {
                boe.getBuffer().recycle();
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (ioMan != null) {
            ioMan.shutdown();
        }
    }
}
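Because createBufferPool(100, 100) makes the required and maximum sizes equal, each channel here draws from a fixed-size pool, which is why the loop must recycle every buffer it receives or the test would eventually stall. A minimal sketch of that fixed-size behavior, with an illustrative segment size and only calls shown elsewhere on this page:

// Illustrative sketch of a fixed-size local pool; 32 KB is an example segment size.
static void fixedSizePoolSketch() throws Exception {
    NetworkBufferPool global = new NetworkBufferPool(100, 32 * 1024, MemoryType.HEAP);
    BufferPool fixedPool = global.createBufferPool(100, 100);  // required == maximum: fixed size

    // The pool can never grow beyond its 100 segments, so a consumer that fails to
    // recycle buffers (as the loop above does for every buffer) would eventually block.
    assertEquals(100, fixedPool.getMaxNumberOfMemorySegments());

    fixedPool.lazyDestroy();
    global.destroy();
}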