Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class CollectResultFetcher, method sendRequest.
private CollectCoordinationResponse sendRequest(String version, long offset)
        throws InterruptedException, ExecutionException {
    checkJobClientConfigured();
    OperatorID operatorId = operatorIdFuture.getNow(null);
    Preconditions.checkNotNull(operatorId, "Unknown operator ID. This is a bug.");
    CollectCoordinationRequest request = new CollectCoordinationRequest(version, offset);
    return (CollectCoordinationResponse) gateway.sendCoordinationRequest(operatorId, request).get();
}
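For orientation, a minimal standalone sketch of how such an operatorIdFuture could be populated. It only uses OperatorID constructors and the fromJobVertexID factory from the Flink API; the class name and the eager completion are illustrative assumptions, not taken from the Flink sources.
import java.util.concurrent.CompletableFuture;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobgraph.OperatorID;

public class OperatorIdFutureSketch {
    public static void main(String[] args) {
        // A fresh OperatorID is a random 128-bit identifier; it can also be derived
        // deterministically from a JobVertexID.
        OperatorID randomId = new OperatorID();
        OperatorID derivedId = OperatorID.fromJobVertexID(new JobVertexID());

        // The fetcher above blocks on a future like this one; here it is completed eagerly.
        CompletableFuture<OperatorID> operatorIdFuture = new CompletableFuture<>();
        operatorIdFuture.complete(derivedId);

        System.out.println("random:  " + randomId);
        System.out.println("derived: " + operatorIdFuture.getNow(null));
    }
}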
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class SubtaskCheckpointCoordinatorImpl, method checkpointState.
@Override
public void checkpointState(
        CheckpointMetaData metadata,
        CheckpointOptions options,
        CheckpointMetricsBuilder metrics,
        OperatorChain<?, ?> operatorChain,
        boolean isTaskFinished,
        Supplier<Boolean> isRunning) throws Exception {
    checkNotNull(options);
    checkNotNull(metrics);
    if (lastCheckpointId >= metadata.getCheckpointId()) {
        LOG.info("Out of order checkpoint barrier (aborted previously?): {} >= {}", lastCheckpointId, metadata.getCheckpointId());
        channelStateWriter.abort(metadata.getCheckpointId(), new CancellationException(), true);
        checkAndClearAbortedStatus(metadata.getCheckpointId());
        return;
    }
    logCheckpointProcessingDelay(metadata);
    // Step (0): Record the last triggered checkpointId and abort the sync phase of checkpoint
    // if necessary.
    lastCheckpointId = metadata.getCheckpointId();
    if (checkAndClearAbortedStatus(metadata.getCheckpointId())) {
        // broadcast cancel checkpoint marker to avoid downstream back-pressure due to
        // checkpoint barrier align.
        operatorChain.broadcastEvent(new CancelCheckpointMarker(metadata.getCheckpointId()));
        LOG.info("Checkpoint {} has been notified as aborted, would not trigger any checkpoint.", metadata.getCheckpointId());
        return;
    }
    // if checkpoint has been previously unaligned, but was forced to be aligned (pointwise
    // connection), revert it here so that it can jump over output data
    if (options.getAlignment() == CheckpointOptions.AlignmentType.FORCED_ALIGNED) {
        options = options.withUnalignedSupported();
        initInputsCheckpoint(metadata.getCheckpointId(), options);
    }
    // Step (1): Prepare the checkpoint, allow operators to do some pre-barrier work.
    // The pre-barrier work should be nothing or minimal in the common case.
    operatorChain.prepareSnapshotPreBarrier(metadata.getCheckpointId());
    // Step (2): Send the checkpoint barrier downstream.
    operatorChain.broadcastEvent(
            new CheckpointBarrier(metadata.getCheckpointId(), metadata.getTimestamp(), options),
            options.isUnalignedCheckpoint());
    // Step (3): Prepare to spill the in-flight buffers for input and output.
    if (options.isUnalignedCheckpoint()) {
        // output data already written while broadcasting event
        channelStateWriter.finishOutput(metadata.getCheckpointId());
    }
    // Step (4): Take the state snapshot. This should be largely asynchronous,
    // to not impact progress of the streaming topology.
    Map<OperatorID, OperatorSnapshotFutures> snapshotFutures = new HashMap<>(operatorChain.getNumberOfOperators());
    try {
        if (takeSnapshotSync(snapshotFutures, metadata, metrics, options, operatorChain, isRunning)) {
            finishAndReportAsync(
                    snapshotFutures,
                    metadata,
                    metrics,
                    operatorChain.isTaskDeployedAsFinished(),
                    isTaskFinished,
                    isRunning);
        } else {
            cleanup(snapshotFutures, metadata, metrics, new Exception("Checkpoint declined"));
        }
    } catch (Exception ex) {
        cleanup(snapshotFutures, metadata, metrics, ex);
        throw ex;
    }
}
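The snapshot futures above are collected in a map keyed by OperatorID. Since OperatorID is a 128-bit value type (equals and hashCode are inherited from AbstractID), any equal instance can be used to look an entry up again. A minimal sketch of that property, with illustrative names not taken from the Flink sources:
import java.util.HashMap;
import java.util.Map;
import org.apache.flink.runtime.jobgraph.OperatorID;

public class OperatorIdAsMapKeySketch {
    public static void main(String[] args) {
        OperatorID id = new OperatorID();
        // Rebuilding the ID from its two 64-bit halves yields an equal key.
        OperatorID sameId = new OperatorID(id.getLowerPart(), id.getUpperPart());

        Map<OperatorID, String> snapshotStateByOperator = new HashMap<>();
        snapshotStateByOperator.put(id, "snapshot in progress");

        // Prints "snapshot in progress": lookups work with any equal OperatorID instance.
        System.out.println(snapshotStateByOperator.get(sameId));
    }
}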
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class AsyncCheckpointRunnable, method finalizedFinishedSnapshots.
private SnapshotsFinalizeResult finalizedFinishedSnapshots() throws Exception {
    for (Map.Entry<OperatorID, OperatorSnapshotFutures> entry : operatorSnapshotsInProgress.entrySet()) {
        OperatorSnapshotFutures snapshotInProgress = entry.getValue();
        // We should wait for the channel states to complete before continuing,
        // otherwise the alignment of barriers might not have finished yet.
        snapshotInProgress.getInputChannelStateFuture().get();
        snapshotInProgress.getResultSubpartitionStateFuture().get();
    }
    return new SnapshotsFinalizeResult(TaskStateSnapshot.FINISHED_ON_RESTORE, TaskStateSnapshot.FINISHED_ON_RESTORE, 0L);
}
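A hedged sketch of the same wait-for-every-operator pattern in isolation, using plain JDK futures as stand-ins for the channel-state futures (the map and class names are illustrative, not from the Flink sources):
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import org.apache.flink.runtime.jobgraph.OperatorID;

public class PerOperatorFutureWaitSketch {
    public static void main(String[] args) throws Exception {
        // Stand-in for operatorSnapshotsInProgress: one (already completed) future per operator.
        Map<OperatorID, Future<Void>> channelStateFutures = new HashMap<>();
        channelStateFutures.put(new OperatorID(), CompletableFuture.completedFuture(null));
        channelStateFutures.put(new OperatorID(), CompletableFuture.completedFuture(null));

        // Same shape as the loop above: block on every operator's future before moving on.
        for (Map.Entry<OperatorID, Future<Void>> entry : channelStateFutures.entrySet()) {
            entry.getValue().get();
        }
        System.out.println("all " + channelStateFutures.size() + " channel-state futures completed");
    }
}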
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class StreamElementSerializer, method deserialize.
@Override
public StreamElement deserialize(StreamElement reuse, DataInputView source) throws IOException {
    int tag = source.readByte();
    if (tag == TAG_REC_WITH_TIMESTAMP) {
        long timestamp = source.readLong();
        T value = typeSerializer.deserialize(source);
        StreamRecord<T> reuseRecord = reuse.asRecord();
        reuseRecord.replace(value, timestamp);
        return reuseRecord;
    } else if (tag == TAG_REC_WITHOUT_TIMESTAMP) {
        T value = typeSerializer.deserialize(source);
        StreamRecord<T> reuseRecord = reuse.asRecord();
        reuseRecord.replace(value);
        return reuseRecord;
    } else if (tag == TAG_WATERMARK) {
        return new Watermark(source.readLong());
    } else if (tag == TAG_LATENCY_MARKER) {
        return new LatencyMarker(
                source.readLong(),
                new OperatorID(source.readLong(), source.readLong()),
                source.readInt());
    } else {
        throw new IOException("Corrupt stream, found tag: " + tag);
    }
}
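Only the latency-marker branch involves an OperatorID: the ID travels on the wire as its lower and upper 64-bit halves. For orientation, a hedged sketch of what the matching write side has to do, written as a fragment inside the same serializer class so TAG_LATENCY_MARKER resolves. The helper name serializeLatencyMarker is made up here (Flink's actual serialize method handles all tags in one place), but the field order must mirror the read order above.
// Sketch only: writes the tag byte, then marked time, OperatorID lower part,
// OperatorID upper part, and subtask index, matching the deserialize branch above.
private void serializeLatencyMarker(LatencyMarker marker, DataOutputView target) throws IOException {
    target.write(TAG_LATENCY_MARKER);
    target.writeLong(marker.getMarkedTime());
    target.writeLong(marker.getOperatorId().getLowerPart());
    target.writeLong(marker.getOperatorId().getUpperPart());
    target.writeInt(marker.getSubtaskIndex());
}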
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
From the class MultipleInputStreamTaskTest, method testWatermarkMetrics.
@Test
@SuppressWarnings("unchecked")
public void testWatermarkMetrics() throws Exception {
    OperatorID mainOperatorId = new OperatorID();
    OperatorID chainedOperatorId = new OperatorID();
    InterceptingOperatorMetricGroup mainOperatorMetricGroup = new InterceptingOperatorMetricGroup();
    InterceptingOperatorMetricGroup chainedOperatorMetricGroup = new InterceptingOperatorMetricGroup();
    InterceptingTaskMetricGroup taskMetricGroup = new InterceptingTaskMetricGroup() {
        @Override
        public InternalOperatorMetricGroup getOrAddOperator(OperatorID id, String name) {
            if (id.equals(mainOperatorId)) {
                return mainOperatorMetricGroup;
            } else if (id.equals(chainedOperatorId)) {
                return chainedOperatorMetricGroup;
            } else {
                return super.getOrAddOperator(id, name);
            }
        }
    };
    try (StreamTaskMailboxTestHarness<String> testHarness =
            new StreamTaskMailboxTestHarnessBuilder<>(MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                    .modifyExecutionConfig(applyObjectReuse(objectReuse))
                    .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                    .addSourceInput(
                            new SourceOperatorFactory<>(
                                    new MockSource(Boundedness.CONTINUOUS_UNBOUNDED, 2, true, false),
                                    WatermarkStrategy.forGenerator(ctx -> new RecordToWatermarkGenerator())),
                            BasicTypeInfo.INT_TYPE_INFO)
                    .addInput(BasicTypeInfo.DOUBLE_TYPE_INFO)
                    .setupOperatorChain(mainOperatorId, new MapToStringMultipleInputOperatorFactory(3))
                    .chain(
                            chainedOperatorId,
                            new WatermarkMetricOperator(),
                            BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                    .finish()
                    .setTaskMetricGroup(taskMetricGroup)
                    .build()) {
        Gauge<Long> taskInputWatermarkGauge = (Gauge<Long>) taskMetricGroup.get(MetricNames.IO_CURRENT_INPUT_WATERMARK);
        Gauge<Long> mainInput1WatermarkGauge = (Gauge<Long>) mainOperatorMetricGroup.get(MetricNames.currentInputWatermarkName(1));
        Gauge<Long> mainInput2WatermarkGauge = (Gauge<Long>) mainOperatorMetricGroup.get(MetricNames.currentInputWatermarkName(2));
        Gauge<Long> mainInput3WatermarkGauge = (Gauge<Long>) mainOperatorMetricGroup.get(MetricNames.currentInputWatermarkName(3));
        Gauge<Long> mainInputWatermarkGauge = (Gauge<Long>) mainOperatorMetricGroup.get(MetricNames.IO_CURRENT_INPUT_WATERMARK);
        Gauge<Long> mainOutputWatermarkGauge = (Gauge<Long>) mainOperatorMetricGroup.get(MetricNames.IO_CURRENT_OUTPUT_WATERMARK);
        Gauge<Long> chainedInputWatermarkGauge = (Gauge<Long>) chainedOperatorMetricGroup.get(MetricNames.IO_CURRENT_INPUT_WATERMARK);
        Gauge<Long> chainedOutputWatermarkGauge = (Gauge<Long>) chainedOperatorMetricGroup.get(MetricNames.IO_CURRENT_OUTPUT_WATERMARK);
        assertEquals(Long.MIN_VALUE, taskInputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainInputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainInput1WatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainInput2WatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainInput3WatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainOutputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, chainedInputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, chainedOutputWatermarkGauge.getValue().longValue());
        testHarness.processElement(new Watermark(1L), 0);
        assertEquals(Long.MIN_VALUE, taskInputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainInputWatermarkGauge.getValue().longValue());
        assertEquals(1L, mainInput1WatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainInput2WatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainInput3WatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainOutputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, chainedInputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, chainedOutputWatermarkGauge.getValue().longValue());
        addSourceRecords(testHarness, 1, 2);
        testHarness.processAll();
        assertEquals(Long.MIN_VALUE, taskInputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainInputWatermarkGauge.getValue().longValue());
        assertEquals(1L, mainInput1WatermarkGauge.getValue().longValue());
        assertEquals(2L, mainInput2WatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainInput3WatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, mainOutputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, chainedInputWatermarkGauge.getValue().longValue());
        assertEquals(Long.MIN_VALUE, chainedOutputWatermarkGauge.getValue().longValue());
        testHarness.processElement(new Watermark(2L), 1);
        assertEquals(1L, taskInputWatermarkGauge.getValue().longValue());
        assertEquals(1L, mainInputWatermarkGauge.getValue().longValue());
        assertEquals(1L, mainInput1WatermarkGauge.getValue().longValue());
        assertEquals(2L, mainInput2WatermarkGauge.getValue().longValue());
        assertEquals(2L, mainInput3WatermarkGauge.getValue().longValue());
        assertEquals(1L, mainOutputWatermarkGauge.getValue().longValue());
        assertEquals(1L, chainedInputWatermarkGauge.getValue().longValue());
        assertEquals(2L, chainedOutputWatermarkGauge.getValue().longValue());
        testHarness.processElement(new Watermark(4L), 0);
        addSourceRecords(testHarness, 1, 3);
        testHarness.processAll();
        assertEquals(2L, taskInputWatermarkGauge.getValue().longValue());
        assertEquals(2L, mainInputWatermarkGauge.getValue().longValue());
        assertEquals(4L, mainInput1WatermarkGauge.getValue().longValue());
        assertEquals(3L, mainInput2WatermarkGauge.getValue().longValue());
        assertEquals(2L, mainInput3WatermarkGauge.getValue().longValue());
        assertEquals(2L, mainOutputWatermarkGauge.getValue().longValue());
        assertEquals(2L, chainedInputWatermarkGauge.getValue().longValue());
        assertEquals(4L, chainedOutputWatermarkGauge.getValue().longValue());
        finishAddingRecords(testHarness, 1);
        testHarness.endInput();
        testHarness.waitForTaskCompletion();
        testHarness.finishProcessing();
    }
}
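The overridden getOrAddOperator in this test routes metric groups purely by OperatorID equality. A minimal standalone sketch of that dispatch, with illustrative names and a plain String standing in for the metric group (not taken from the Flink sources):
import org.apache.flink.runtime.jobgraph.OperatorID;

public class OperatorIdRoutingSketch {

    // Mirrors the dispatch in the test's getOrAddOperator override: pick a label
    // (standing in for a metric group) based on which known ID matches.
    static String route(OperatorID id, OperatorID mainId, OperatorID chainedId) {
        if (id.equals(mainId)) {
            return "main operator metrics";
        } else if (id.equals(chainedId)) {
            return "chained operator metrics";
        }
        return "default metrics";
    }

    public static void main(String[] args) {
        OperatorID mainId = new OperatorID();
        OperatorID chainedId = new OperatorID();

        System.out.println(route(mainId, mainId, chainedId));           // main operator metrics
        System.out.println(route(chainedId, mainId, chainedId));        // chained operator metrics
        System.out.println(route(new OperatorID(), mainId, chainedId)); // default metrics
    }
}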