Usage of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.
Example from class OutputBlockedInvokable, method invoke():
@Override
public void invoke() throws Exception {
    // Fixed payload record that is emitted over and over.
    final IntValue payload = new IntValue(1234);
    final ResultPartitionWriter partition = getEnvironment().getWriter(0);
    final RecordWriter<IntValue> recordWriter =
            new RecordWriterBuilder<IntValue>().build(partition);
    // Emit indefinitely; the task is expected to block once the
    // output buffers fill up (hence the class name).
    for (;;) {
        recordWriter.emit(payload);
    }
}
Usage of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.
Example from class StreamTask, method injectChannelStateWriterIntoChannels():
/**
 * Propagates the coordinator's {@code ChannelStateWriter} to every input gate
 * and to every output writer that is channel-state aware.
 */
private void injectChannelStateWriterIntoChannels() {
    final Environment environment = getEnvironment();
    final ChannelStateWriter stateWriter =
            subtaskCheckpointCoordinator.getChannelStateWriter();
    // Input side: every gate receives the writer unconditionally.
    for (final InputGate inputGate : environment.getAllInputGates()) {
        inputGate.setChannelStateWriter(stateWriter);
    }
    // Output side: only writers that implement ChannelStateHolder participate.
    for (final ResultPartitionWriter partitionWriter : environment.getAllWriters()) {
        if (partitionWriter instanceof ChannelStateHolder) {
            ((ChannelStateHolder) partitionWriter).setChannelStateWriter(stateWriter);
        }
    }
}
Usage of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.
Example from class StreamTask, method endData():
/**
 * Signals end of data to the operator chain and to all result partitions.
 *
 * @param mode whether the task is draining (process remaining timers/data) or stopping
 * @throws Exception if finishing the operators fails
 */
protected void endData(StopMode mode) throws Exception {
    // Draining advances event time to the end so pending timers can fire first.
    if (mode == StopMode.DRAIN) {
        advanceToEndOfEventTime();
    }
    // finish all operators in a chain effect way
    operatorChain.finishOperators(actionExecutor, mode);
    this.finishedOperators = true;
    // Tell every downstream partition that no more user records will arrive.
    for (final ResultPartitionWriter partitionWriter : getEnvironment().getAllWriters()) {
        partitionWriter.notifyEndOfData(mode);
    }
    this.endOfDataReceived = true;
}
Usage of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.
Example from class SubtaskCheckpointCoordinatorTest, method testBroadcastCancelCheckpointMarkerOnAbortingFromCoordinator():
// Verifies that triggering a checkpoint that the coordinator already aborted
// broadcasts a CancelCheckpointMarker downstream instead of checkpoint barriers.
@Test
public void testBroadcastCancelCheckpointMarkerOnAbortingFromCoordinator() throws Exception {
// Single-input, parallelism-1 harness with String in/out and a map operator.
OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>(OneInputStreamTask::new, 1, 1, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
testHarness.setupOutputForSingletonOperatorChain();
StreamConfig streamConfig = testHarness.getStreamConfig();
streamConfig.setStreamOperator(new MapOperator());
testHarness.invoke();
testHarness.waitForTaskRunning();
// NOTE(review): mockEnvironment looks AutoCloseable but is never closed here —
// consider adding it to the try-with-resources below. TODO confirm intended lifetime.
MockEnvironment mockEnvironment = MockEnvironment.builder().build();
try (SubtaskCheckpointCoordinator subtaskCheckpointCoordinator = new MockSubtaskCheckpointCoordinatorBuilder().setEnvironment(mockEnvironment).build()) {
// Captures every record/event the partition writer emits, for inspection below.
ArrayList<Object> recordOrEvents = new ArrayList<>();
StreamElementSerializer<String> stringStreamElementSerializer = new StreamElementSerializer<>(StringSerializer.INSTANCE);
ResultPartitionWriter resultPartitionWriter = new RecordOrEventCollectingResultPartitionWriter<>(recordOrEvents, stringStreamElementSerializer);
mockEnvironment.addOutputs(Collections.singletonList(resultPartitionWriter));
OneInputStreamTask<String, String> task = testHarness.getTask();
OperatorChain<String, OneInputStreamOperator<String, String>> operatorChain = new RegularOperatorChain<>(task, StreamTask.createRecordWriterDelegate(streamConfig, mockEnvironment));
long checkpointId = 42L;
// notify checkpoint aborted before execution.
subtaskCheckpointCoordinator.notifyCheckpointAborted(checkpointId, operatorChain, () -> true);
// Trigger the same (already aborted) checkpoint id.
subtaskCheckpointCoordinator.checkpointState(new CheckpointMetaData(checkpointId, System.currentTimeMillis()), CheckpointOptions.forCheckpointWithDefaultLocation(), new CheckpointMetricsBuilder(), operatorChain, false, () -> false);
// Exactly one element must have been emitted: the cancel marker.
assertEquals(1, recordOrEvents.size());
Object recordOrEvent = recordOrEvents.get(0);
// ensure CancelCheckpointMarker is broadcast downstream.
assertTrue(recordOrEvent instanceof CancelCheckpointMarker);
assertEquals(checkpointId, ((CancelCheckpointMarker) recordOrEvent).getCheckpointId());
testHarness.endInput();
testHarness.waitForTaskCompletion();
}
}
Usage of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.
Example from class StreamTaskFinalCheckpointsTest, method testWaitingForFinalCheckpoint():
// Verifies checkpointing across the three input phases of a task — all inputs
// alive, some inputs finished, all inputs finished — and that the final
// checkpoint (id 6) completes only after all subpartition data is processed.
@Test
public void testWaitingForFinalCheckpoint() throws Exception {
ResultPartition[] partitionWriters = new ResultPartition[2];
try {
for (int i = 0; i < partitionWriters.length; ++i) {
partitionWriters[i] = PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
partitionWriters[i].setup();
}
int lastCheckpointId = 6;
CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder();
try (StreamTaskMailboxTestHarness<String> testHarness = createTestHarness(partitionWriters, checkpointResponder, false)) {
// Tests triggering checkpoint when all the inputs are alive.
CompletableFuture<Boolean> checkpointFuture = triggerCheckpoint(testHarness, 2);
processMailTillCheckpointSucceeds(testHarness, checkpointFuture);
assertEquals(2, testHarness.getTaskStateManager().getReportedCheckpointId());
// Tests triggering checkpoint after some inputs have received EndOfPartition.
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 0);
testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 0);
checkpointFuture = triggerCheckpoint(testHarness, 4);
processMailTillCheckpointSucceeds(testHarness, checkpointFuture);
assertEquals(4, testHarness.getTaskStateManager().getReportedCheckpointId());
// Tests triggering checkpoint after received all the inputs have received
// EndOfPartition.
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 1);
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 2);
testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 1);
testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 2);
checkpointFuture = triggerCheckpoint(testHarness, lastCheckpointId);
// Notifies the result partition that all records are processed after the
// last checkpoint is triggered.
checkpointFuture.thenAccept((ignored) -> {
for (ResultPartition resultPartition : partitionWriters) {
resultPartition.onSubpartitionAllDataProcessed(0);
}
});
// The checkpoint 6 would be triggered successfully.
testHarness.finishProcessing();
assertTrue(checkpointFuture.isDone());
testHarness.getTaskStateManager().getWaitForReportLatch().await();
assertEquals(6, testHarness.getTaskStateManager().getReportedCheckpointId());
assertEquals(6, testHarness.getTaskStateManager().getNotifiedCompletedCheckpointId());
// Each result partition should have emitted 3 barriers and 1 EndOfUserRecordsEvent.
for (ResultPartition resultPartition : partitionWriters) {
assertEquals(4, resultPartition.getNumberOfQueuedBuffers());
}
}
} finally {
// Close partitions even when an assertion above failed; null-check guards
// against a setup failure partway through the creation loop.
for (ResultPartitionWriter writer : partitionWriters) {
if (writer != null) {
writer.close();
}
}
}
}
Aggregations