Use of org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate in project flink by apache.
From the class TaskTest, method testOnPartitionStateUpdate.
@Test
public void testOnPartitionStateUpdate() throws Exception {
    IntermediateDataSetID resultId = new IntermediateDataSetID();
    ResultPartitionID partitionId = new ResultPartitionID();

    SingleInputGate inputGate = mock(SingleInputGate.class);
    when(inputGate.getConsumedResultId()).thenReturn(resultId);

    final Task task = createTask(InvokableBlockingInInvoke.class);

    // Set the mock input gate
    setInputGate(task, inputGate);

    // Expected task state for each producer state
    final Map<ExecutionState, ExecutionState> expected = new HashMap<>(ExecutionState.values().length);

    // Fail the task for unexpected states
    for (ExecutionState state : ExecutionState.values()) {
        expected.put(state, ExecutionState.FAILED);
    }
    expected.put(ExecutionState.RUNNING, ExecutionState.RUNNING);
    expected.put(ExecutionState.SCHEDULED, ExecutionState.RUNNING);
    expected.put(ExecutionState.DEPLOYING, ExecutionState.RUNNING);
    expected.put(ExecutionState.FINISHED, ExecutionState.RUNNING);
    expected.put(ExecutionState.CANCELED, ExecutionState.CANCELING);
    expected.put(ExecutionState.CANCELING, ExecutionState.CANCELING);
    expected.put(ExecutionState.FAILED, ExecutionState.CANCELING);

    for (ExecutionState state : ExecutionState.values()) {
        setState(task, ExecutionState.RUNNING);
        task.onPartitionStateUpdate(resultId, partitionId, state);

        ExecutionState newTaskState = task.getExecutionState();
        assertEquals(expected.get(state), newTaskState);
    }

    verify(inputGate, times(4)).retriggerPartitionRequest(eq(partitionId.getPartitionId()));
}
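The expected-transition table above follows a common test pattern: default every producer state to a failure outcome, then override the handful of states that have a more specific expectation. A minimal, self-contained sketch of that pattern is below; the ProducerState and TaskState enums are hypothetical stand-ins for Flink's ExecutionState, not Flink types.

import java.util.EnumMap;
import java.util.Map;

public class TransitionTableSketch {

    // Hypothetical enums standing in for ExecutionState on the producer and task side.
    enum ProducerState { CREATED, SCHEDULED, DEPLOYING, RUNNING, FINISHED, CANCELING, CANCELED, FAILED }
    enum TaskState { RUNNING, CANCELING, FAILED }

    public static void main(String[] args) {
        Map<ProducerState, TaskState> expected = new EnumMap<>(ProducerState.class);

        // Default: any producer state not explicitly listed below should fail the task.
        for (ProducerState state : ProducerState.values()) {
            expected.put(state, TaskState.FAILED);
        }

        // Overrides for states with a more specific expectation.
        expected.put(ProducerState.RUNNING, TaskState.RUNNING);
        expected.put(ProducerState.CANCELED, TaskState.CANCELING);

        System.out.println(expected.get(ProducerState.CREATED)); // FAILED (default)
        System.out.println(expected.get(ProducerState.RUNNING)); // RUNNING (override)
    }
}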
Use of org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate in project flink by apache.
From the class SequentialChannelStateReaderImplTest, method withInputGates.
private void withInputGates(ThrowingConsumer<InputGate[], Exception> action) throws Exception {
    SingleInputGate[] gates = new SingleInputGate[parLevel];
    final int segmentsToAllocate = parLevel + parLevel * parLevel * buffersPerChannel;
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(segmentsToAllocate, bufferSize);
    try (Closer poolCloser = Closer.create()) {
        poolCloser.register(networkBufferPool::destroy);
        poolCloser.register(networkBufferPool::destroyAllBufferPools);
        try (Closer gateCloser = Closer.create()) {
            for (int i = 0; i < parLevel; i++) {
                gates[i] =
                        new SingleInputGateBuilder()
                                .setNumberOfChannels(parLevel)
                                .setSingleInputGateIndex(i)
                                .setBufferPoolFactory(networkBufferPool.createBufferPool(1, buffersPerChannel))
                                .setSegmentProvider(networkBufferPool)
                                .setChannelFactory(
                                        (builder, gate) ->
                                                builder.setNetworkBuffersPerChannel(buffersPerChannel)
                                                        .buildRemoteRecoveredChannel(gate))
                                .build();
                gates[i].setup();
                gateCloser.register(gates[i]::close);
            }
            action.accept(gates);
        }
        assertEquals(segmentsToAllocate, networkBufferPool.getNumberOfAvailableMemorySegments());
    }
}
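The cleanup in withInputGates relies on a Closer: resources are registered as they are created and closed in reverse order when the try-with-resources block exits, even if the action throws. Below is a minimal sketch of that pattern, assuming Guava's com.google.common.io.Closer; whether the Flink test uses Guava's class directly, a shaded copy, or an equivalent utility is an assumption here.

import com.google.common.io.Closer;
import java.io.Closeable;

public class CloserSketch {

    public static void main(String[] args) throws Exception {
        try (Closer closer = Closer.create()) {
            // Register cleanup actions as the corresponding resources are created.
            Closeable destroyPool = () -> System.out.println("destroy buffer pool");
            Closeable closeGate = () -> System.out.println("close gate");
            closer.register(destroyPool);
            closer.register(closeGate);

            System.out.println("run test action");
            // On exit, registered closeables run in reverse registration order:
            // "close gate" first, then "destroy buffer pool".
        }
    }
}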
Use of org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate in project flink by apache.
From the class NettyShuffleEnvironment, method createInputGates.
@Override
public List<SingleInputGate> createInputGates(
        ShuffleIOOwnerContext ownerContext,
        PartitionProducerStateProvider partitionProducerStateProvider,
        List<InputGateDeploymentDescriptor> inputGateDeploymentDescriptors) {
    synchronized (lock) {
        Preconditions.checkState(!isClosed, "The NettyShuffleEnvironment has already been shut down.");

        MetricGroup networkInputGroup = ownerContext.getInputGroup();

        SingleInputGate[] inputGates = new SingleInputGate[inputGateDeploymentDescriptors.size()];
        for (int gateIndex = 0; gateIndex < inputGates.length; gateIndex++) {
            final InputGateDeploymentDescriptor igdd = inputGateDeploymentDescriptors.get(gateIndex);
            SingleInputGate inputGate =
                    singleInputGateFactory.create(
                            ownerContext, gateIndex, igdd, partitionProducerStateProvider);
            InputGateID id =
                    new InputGateID(igdd.getConsumedResultId(), ownerContext.getExecutionAttemptID());
            inputGatesById.put(id, inputGate);
            inputGate.getCloseFuture().thenRun(() -> inputGatesById.remove(id));
            inputGates[gateIndex] = inputGate;
        }

        if (config.getDebloatConfiguration().isEnabled()) {
            registerDebloatingTaskMetrics(inputGates, ownerContext.getParentGroup());
        }

        registerInputMetrics(config.isNetworkDetailedMetrics(), networkInputGroup, inputGates);
        return Arrays.asList(inputGates);
    }
}
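Each created gate is registered in inputGatesById and removed again when its close future completes, so the map only ever holds live gates. A self-contained sketch of that register-now, deregister-on-close pattern is below, using a plain ConcurrentHashMap and CompletableFuture in place of Flink's InputGateID and SingleInputGate.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class GateRegistrySketch {

    public static void main(String[] args) {
        ConcurrentHashMap<String, Object> registry = new ConcurrentHashMap<>();
        CompletableFuture<Void> closeFuture = new CompletableFuture<>();

        // Register the gate and schedule its removal for when it closes.
        Object gate = new Object();
        registry.put("gate-0", gate);
        closeFuture.thenRun(() -> registry.remove("gate-0"));

        closeFuture.complete(null); // simulates the gate being closed
        System.out.println(registry.containsKey("gate-0")); // false
    }
}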
Use of org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate in project flink by apache.
From the class NettyShuffleEnvironment, method updatePartitionInfo.
@Override
public boolean updatePartitionInfo(ExecutionAttemptID consumerID, PartitionInfo partitionInfo)
        throws IOException, InterruptedException {
    IntermediateDataSetID intermediateResultPartitionID = partitionInfo.getIntermediateDataSetID();
    InputGateID id = new InputGateID(intermediateResultPartitionID, consumerID);
    SingleInputGate inputGate = inputGatesById.get(id);
    if (inputGate == null) {
        return false;
    }
    ShuffleDescriptor shuffleDescriptor = partitionInfo.getShuffleDescriptor();
    checkArgument(
            shuffleDescriptor instanceof NettyShuffleDescriptor,
            "Tried to update unknown channel with unknown ShuffleDescriptor %s.",
            shuffleDescriptor.getClass().getName());
    inputGate.updateInputChannel(taskExecutorResourceId, (NettyShuffleDescriptor) shuffleDescriptor);
    return true;
}
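The method follows a lookup-then-type-check flow: a missing gate means the consumer is not running on this executor (return false), while an unexpected descriptor type is a caller error (checkArgument). A minimal sketch of that flow is below; the Descriptor, NettyDescriptor, and Gate types are hypothetical stand-ins for the Flink classes.

import java.util.HashMap;
import java.util.Map;

public class UpdatePartitionSketch {

    interface Descriptor {}

    static class NettyDescriptor implements Descriptor {}

    static class Gate {
        void update(NettyDescriptor descriptor) {
            // re-wire the input channel for the new producer location
        }
    }

    static boolean update(Map<String, Gate> gatesById, String gateId, Descriptor descriptor) {
        Gate gate = gatesById.get(gateId);
        if (gate == null) {
            // The consumer is not (or no longer) running here.
            return false;
        }
        if (!(descriptor instanceof NettyDescriptor)) {
            throw new IllegalArgumentException(
                    "Tried to update channel with unknown descriptor " + descriptor.getClass().getName());
        }
        gate.update((NettyDescriptor) descriptor);
        return true;
    }

    public static void main(String[] args) {
        Map<String, Gate> gatesById = new HashMap<>();
        System.out.println(update(gatesById, "missing", new NettyDescriptor())); // false

        gatesById.put("gate-1", new Gate());
        System.out.println(update(gatesById, "gate-1", new NettyDescriptor())); // true
    }
}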
Use of org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate in project flink by apache.
From the class InputGateFairnessTest, method testFairConsumptionLocalChannelsPreFilled.
@Test
public void testFairConsumptionLocalChannelsPreFilled() throws Exception {
    final int numberOfChannels = 37;
    final int buffersPerChannel = 27;

    PipelinedResultPartition[] resultPartitions =
            IntStream.range(0, numberOfChannels)
                    .mapToObj(i -> (PipelinedResultPartition) new ResultPartitionBuilder().build())
                    .toArray(PipelinedResultPartition[]::new);
    final BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(42);

    // ----- create some source channels and fill them with buffers -----
    final PipelinedSubpartition[] sources =
            Arrays.stream(resultPartitions)
                    .map(resultPartition -> resultPartition.getAllPartitions()[0])
                    .toArray(PipelinedSubpartition[]::new);

    for (final PipelinedSubpartition subpartition : sources) {
        for (int p = 0; p < buffersPerChannel; p++) {
            subpartition.add(bufferConsumer.copy());
        }
        subpartition.finish();
    }

    for (ResultPartition rp : resultPartitions) {
        rp.setup();
    }

    // ----- create reading side -----
    final SingleInputGate gate = createFairnessVerifyingInputGate(numberOfChannels);
    final InputChannel[] inputChannels =
            IntStream.range(0, numberOfChannels)
                    .mapToObj(
                            i ->
                                    InputChannelBuilder.newBuilder()
                                            .setChannelIndex(i)
                                            .setPartitionManager(resultPartitions[i].partitionManager)
                                            .setPartitionId(resultPartitions[i].getPartitionId())
                                            .buildLocalChannel(gate))
                    .toArray(InputChannel[]::new);
    setupInputGate(gate, inputChannels);

    // read all the buffers and the EOF event
    for (int i = numberOfChannels * (buffersPerChannel + 1); i > 0; --i) {
        assertNotNull(gate.getNext());

        int min = Integer.MAX_VALUE;
        int max = 0;
        for (PipelinedSubpartition source : sources) {
            int size = source.getNumberOfQueuedBuffers();
            min = Math.min(min, size);
            max = Math.max(max, size);
        }

        assertTrue(max == min || max == (min + 1));
    }

    assertFalse(gate.getNext().isPresent());
}
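The assertion inside the read loop encodes the fairness invariant: after each poll, the per-source queue lengths may differ by at most one buffer. A tiny standalone sketch of that check is below; it operates on plain queue-size arrays rather than PipelinedSubpartition instances.

import java.util.stream.IntStream;

public class FairnessInvariantSketch {

    // True if the largest and smallest queue differ by at most one element.
    static boolean isBalanced(int[] queueSizes) {
        int min = IntStream.of(queueSizes).min().orElse(0);
        int max = IntStream.of(queueSizes).max().orElse(0);
        return max == min || max == min + 1;
    }

    public static void main(String[] args) {
        System.out.println(isBalanced(new int[] {27, 27, 26})); // true
        System.out.println(isBalanced(new int[] {27, 25, 27})); // false
    }
}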