Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
The class NetworkEnvironment, method unregisterTask.
public void unregisterTask(Task task) {
    LOG.debug("Unregister task {} from network environment (state: {}).",
        task.getTaskInfo().getTaskNameWithSubtasks(), task.getExecutionState());

    final ExecutionAttemptID executionId = task.getExecutionId();

    synchronized (lock) {
        if (isShutdown) {
            // no need to do anything when we are not operational
            return;
        }

        if (task.isCanceledOrFailed()) {
            resultPartitionManager.releasePartitionsProducedBy(executionId, task.getFailureCause());
        }

        ResultPartitionWriter[] writers = task.getAllWriters();
        if (writers != null) {
            for (ResultPartitionWriter writer : writers) {
                taskEventDispatcher.unregisterWriter(writer);
            }
        }

        ResultPartition[] partitions = task.getProducedPartitions();
        if (partitions != null) {
            for (ResultPartition partition : partitions) {
                partition.destroyBufferPool();
            }
        }

        final SingleInputGate[] inputGates = task.getAllInputGates();
        if (inputGates != null) {
            for (SingleInputGate gate : inputGates) {
                try {
                    if (gate != null) {
                        gate.releaseAllResources();
                    }
                } catch (IOException e) {
                    LOG.error("Error during release of reader resources: " + e.getMessage(), e);
                }
            }
        }
    }
}
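For orientation, here is a minimal caller-side sketch of the lifecycle this method closes out. It is an illustration only, assuming a NetworkEnvironment named network and a Task named task constructed elsewhere (for example as in the testRegisterTaskUsesBoundedBuffers example below), and only uses the registerTask, unregisterTask, and shutdown calls already shown on this page:

    // Sketch only: register/unregister bracket the task's execution.
    network.registerTask(task);      // sets up buffer pools for partitions and input gates
    try {
        // ... run the task ...
    } finally {
        network.unregisterTask(task); // releases writers, partition pools, and gate resources
        network.shutdown();
    }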
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
The class NetworkEnvironmentTest, method testRegisterTaskUsesBoundedBuffers.
/**
 * Verifies that {@link NetworkEnvironment#registerTask(Task)} sets up (un)bounded buffer pool
 * instances for various types of input and output channels.
 */
@Test
public void testRegisterTaskUsesBoundedBuffers() throws Exception {
    final NetworkEnvironment network = new NetworkEnvironment(
        new NetworkBufferPool(numBuffers, memorySegmentSize, MemoryType.HEAP),
        new LocalConnectionManager(),
        new ResultPartitionManager(),
        new TaskEventDispatcher(),
        new KvStateRegistry(),
        null,
        IOManager.IOMode.SYNC,
        0, 0, 2, 8);

    // result partitions
    ResultPartition rp1 = createResultPartition(ResultPartitionType.PIPELINED, 2);
    ResultPartition rp2 = createResultPartition(ResultPartitionType.BLOCKING, 2);
    ResultPartition rp3 = createResultPartition(ResultPartitionType.PIPELINED_BOUNDED, 2);
    ResultPartition rp4 = createResultPartition(ResultPartitionType.PIPELINED_BOUNDED, 8);
    final ResultPartition[] resultPartitions = new ResultPartition[] { rp1, rp2, rp3, rp4 };
    final ResultPartitionWriter[] resultPartitionWriters = new ResultPartitionWriter[] {
        new ResultPartitionWriter(rp1), new ResultPartitionWriter(rp2),
        new ResultPartitionWriter(rp3), new ResultPartitionWriter(rp4) };

    // input gates
    final SingleInputGate[] inputGates = new SingleInputGate[] {
        createSingleInputGateMock(ResultPartitionType.PIPELINED, 2),
        createSingleInputGateMock(ResultPartitionType.BLOCKING, 2),
        createSingleInputGateMock(ResultPartitionType.PIPELINED_BOUNDED, 2),
        createSingleInputGateMock(ResultPartitionType.PIPELINED_BOUNDED, 8) };

    // overall task to register
    Task task = mock(Task.class);
    when(task.getProducedPartitions()).thenReturn(resultPartitions);
    when(task.getAllWriters()).thenReturn(resultPartitionWriters);
    when(task.getAllInputGates()).thenReturn(inputGates);

    network.registerTask(task);

    assertEquals(Integer.MAX_VALUE, rp1.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(Integer.MAX_VALUE, rp2.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(2 * 2 + 8, rp3.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(8 * 2 + 8, rp4.getBufferPool().getMaxNumberOfMemorySegments());

    network.shutdown();
}
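The last two constructor arguments (2 and 8) are the per-channel and per-gate extra buffer counts, which is why the PIPELINED_BOUNDED assertions above expect a cap of subpartitions * 2 + 8 while PIPELINED and BLOCKING pools stay unbounded. A small hedged sketch of that arithmetic; the helper name and parameter names are hypothetical, not Flink API:

    // Hypothetical helper illustrating the cap checked by the assertions above.
    static int maxSegmentsForBoundedPartition(int subpartitions, int buffersPerChannel, int extraBuffersPerGate) {
        return subpartitions * buffersPerChannel + extraBuffersPerGate;
    }

    // maxSegmentsForBoundedPartition(2, 2, 8) == 12  (rp3)
    // maxSegmentsForBoundedPartition(8, 2, 8) == 24  (rp4)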
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
The class SequentialChannelStateReaderImplTest, method collectBuffers.
private Map<ResultSubpartitionInfo, List<Buffer>> collectBuffers(BufferWritingResultPartition[] resultPartitions) throws IOException {
    Map<ResultSubpartitionInfo, List<Buffer>> actual = new HashMap<>();
    for (BufferWritingResultPartition resultPartition : resultPartitions) {
        for (int i = 0; i < resultPartition.getNumberOfSubpartitions(); i++) {
            ResultSubpartitionInfo info = resultPartition.getAllPartitions()[i].getSubpartitionInfo();
            ResultSubpartitionView view = resultPartition.createSubpartitionView(
                info.getSubPartitionIdx(), new NoOpBufferAvailablityListener());
            for (BufferAndBacklog buffer = view.getNextBuffer(); buffer != null; buffer = view.getNextBuffer()) {
                if (buffer.buffer().isBuffer()) {
                    actual.computeIfAbsent(info, unused -> new ArrayList<>()).add(buffer.buffer());
                }
            }
        }
    }
    return actual;
}
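The drained buffers still hold network memory segments, so a caller is expected to recycle them after inspection; otherwise the withResultPartitions helper below would fail its final check that all segments returned to the pool. A minimal hedged sketch of that cleanup, assuming the test has finished asserting on the collected buffers:

    // Sketch only: release every collected buffer so its memory segment
    // goes back to the NetworkBufferPool.
    for (List<Buffer> buffers : collectBuffers(resultPartitions).values()) {
        buffers.forEach(Buffer::recycleBuffer);
    }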
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
The class SequentialChannelStateReaderImplTest, method withResultPartitions.
private void withResultPartitions(ThrowingConsumer<BufferWritingResultPartition[], Exception> action) throws Exception {
    int segmentsToAllocate = parLevel * parLevel * buffersPerChannel;
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(segmentsToAllocate, bufferSize);
    BufferWritingResultPartition[] resultPartitions = range(0, parLevel)
        .mapToObj(i -> new ResultPartitionBuilder()
            .setResultPartitionIndex(i)
            .setNumberOfSubpartitions(parLevel)
            .setNetworkBufferPool(networkBufferPool)
            .build())
        .toArray(BufferWritingResultPartition[]::new);
    try {
        for (ResultPartition resultPartition : resultPartitions) {
            resultPartition.setup();
        }
        action.accept(resultPartitions);
    } finally {
        for (ResultPartition resultPartition : resultPartitions) {
            resultPartition.close();
        }
        try {
            assertEquals(segmentsToAllocate, networkBufferPool.getNumberOfAvailableMemorySegments());
        } finally {
            networkBufferPool.destroyAllBufferPools();
            networkBufferPool.destroy();
        }
    }
}
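A hedged sketch of how a test body might combine the two helpers above: withResultPartitions handles setup and teardown, and collectBuffers drains whatever the code under test wrote into the partitions. The writing step is elided because it depends on the channel-state reader being exercised:

    // Sketch only: test-specific writes go where the comment indicates.
    withResultPartitions(resultPartitions -> {
        // ... have the code under test write/recover buffers into the partitions ...
        Map<ResultSubpartitionInfo, List<Buffer>> buffersBySubpartition = collectBuffers(resultPartitions);
        // ... assert on buffersBySubpartition, then recycle the collected buffers ...
    });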
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
The class OutputBufferPoolUsageGauge, method getValue.
@Override
public Float getValue() {
    int usedBuffers = 0;
    int bufferPoolSize = 0;

    for (ResultPartition resultPartition : resultPartitions) {
        BufferPool bufferPool = resultPartition.getBufferPool();
        if (bufferPool != null) {
            usedBuffers += bufferPool.bestEffortGetNumOfUsedBuffers();
            bufferPoolSize += bufferPool.getNumBuffers();
        }
    }

    if (bufferPoolSize != 0) {
        return ((float) usedBuffers) / bufferPoolSize;
    } else {
        return 0.0f;
    }
}
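The gauge aggregates usage across all buffer pools of a task's produced partitions and reports the used/total ratio (0.0 when no pool is available). For context, a hedged registration sketch; the constructor argument shape (the task's ResultPartition[]) is inferred from the field the loop iterates over, and the metric group and name are assumptions, not confirmed by this page:

    // Sketch only: register the gauge for a task's produced partitions.
    ResultPartition[] producedPartitions = task.getProducedPartitions();
    metricGroup.gauge("outPoolUsage", new OutputBufferPoolUsageGauge(producedPartitions));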