Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
The class MetadataV2Serializer, method deserializeOperatorState.
@Override
protected OperatorState deserializeOperatorState(
        DataInputStream dis, @Nullable DeserializationContext context) throws IOException {
    final OperatorID jobVertexId = new OperatorID(dis.readLong(), dis.readLong());
    final int parallelism = dis.readInt();
    final int maxParallelism = dis.readInt();

    // this field was "chain length" before Flink 1.3, and it is still part
    // of the format, despite being unused
    dis.readInt();

    // Add task state
    final OperatorState taskState = new OperatorState(jobVertexId, parallelism, maxParallelism);

    // Sub task states
    final int numSubTaskStates = dis.readInt();
    for (int j = 0; j < numSubTaskStates; j++) {
        final int subtaskIndex = dis.readInt();
        final OperatorSubtaskState subtaskState = deserializeSubtaskState(dis, context);
        taskState.putState(subtaskIndex, subtaskState);
    }
    return taskState;
}
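The reads above pin down the V2 wire layout for one operator's state: the OperatorID as two longs, three ints (parallelism, max parallelism, and the unused legacy "chain length"), then the subtask states keyed by index. Below is a minimal sketch of the write side that this layout implies, reconstructed from the reads; it is not the actual Flink serializer code, and the accessor and helper names (getLowerPart/getUpperPart from AbstractID, getSubtaskStates, serializeSubtaskState) are assumptions.

// Write side implied by the reads above (reconstruction, not Flink source).
protected void serializeOperatorState(OperatorState operatorState, DataOutputStream dos)
        throws IOException {
    // OperatorID is an AbstractID: lower part first, to match the read order above
    dos.writeLong(operatorState.getOperatorID().getLowerPart());
    dos.writeLong(operatorState.getOperatorID().getUpperPart());
    dos.writeInt(operatorState.getParallelism());
    dos.writeInt(operatorState.getMaxParallelism());
    // legacy "chain length" field, still written for format compatibility
    dos.writeInt(1);
    // subtask states, keyed by subtask index
    Map<Integer, OperatorSubtaskState> subtaskStates = operatorState.getSubtaskStates();
    dos.writeInt(subtaskStates.size());
    for (Map.Entry<Integer, OperatorSubtaskState> entry : subtaskStates.entrySet()) {
        dos.writeInt(entry.getKey());
        serializeSubtaskState(entry.getValue(), dos);
    }
}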
Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
The class MetadataV2V3SerializerBase, method deserializeSubtaskState.
protected OperatorSubtaskState deserializeSubtaskState(
        DataInputStream dis, @Nullable DeserializationContext context) throws IOException {
    final OperatorSubtaskState.Builder state = OperatorSubtaskState.builder();

    final boolean hasManagedOperatorState = dis.readInt() != 0;
    if (hasManagedOperatorState) {
        state.setManagedOperatorState(deserializeOperatorStateHandle(dis, context));
    }

    final boolean hasRawOperatorState = dis.readInt() != 0;
    if (hasRawOperatorState) {
        state.setRawOperatorState(deserializeOperatorStateHandle(dis, context));
    }

    final KeyedStateHandle managedKeyedState = deserializeKeyedStateHandle(dis, context);
    if (managedKeyedState != null) {
        state.setManagedKeyedState(managedKeyedState);
    }

    final KeyedStateHandle rawKeyedState = deserializeKeyedStateHandle(dis, context);
    if (rawKeyedState != null) {
        state.setRawKeyedState(rawKeyedState);
    }

    state.setInputChannelState(deserializeInputChannelStateHandle(dis, context));
    state.setResultSubpartitionState(deserializeResultSubpartitionStateHandle(dis, context));
    return state.build();
}
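Note the asymmetry in the format: operator state handles are guarded by an explicit int presence flag, while the keyed state serializer encodes absence itself, so the reader checks for null instead. As a usage note, the same builder can assemble a subtask state directly from handles, for example in tests. A minimal sketch; the handle variables are illustrative placeholders, not names from the source above:

// Assembling an OperatorSubtaskState by hand (handle variables are placeholders).
OperatorSubtaskState subtaskState =
        OperatorSubtaskState.builder()
                .setManagedOperatorState(managedOperatorHandle)
                .setRawOperatorState(rawOperatorHandle)
                .setManagedKeyedState(managedKeyedHandle)
                .setRawKeyedState(rawKeyedHandle)
                .build();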
Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
The class RMQSourceTest, method testCheckpointing.
@Test
public void testCheckpointing() throws Exception {
    source.autoAck = false;

    StreamSource<String, RMQSource<String>> src = new StreamSource<>(source);
    AbstractStreamOperatorTestHarness<String> testHarness =
            new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
    testHarness.open();

    sourceThread.start();
    Thread.sleep(5);

    final Random random = new Random(System.currentTimeMillis());
    int numSnapshots = 50;
    long previousSnapshotId;
    long lastSnapshotId = 0;
    long totalNumberOfAcks = 0;

    for (int i = 0; i < numSnapshots; i++) {
        long snapshotId = random.nextLong();
        OperatorSubtaskState data;
        synchronized (DummySourceContext.lock) {
            data = testHarness.snapshot(snapshotId, System.currentTimeMillis());
            previousSnapshotId = lastSnapshotId;
            lastSnapshotId = messageId;
        }
        // let some time pass
        Thread.sleep(5);

        // check that the correct number of messages has been snapshotted
        final long numIds = lastSnapshotId - previousSnapshotId;

        RMQTestSource sourceCopy = new RMQTestSource();
        StreamSource<String, RMQTestSource> srcCopy = new StreamSource<>(sourceCopy);
        AbstractStreamOperatorTestHarness<String> testHarnessCopy =
                new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);
        testHarnessCopy.setup();
        testHarnessCopy.initializeState(data);
        testHarnessCopy.open();

        ArrayDeque<Tuple2<Long, Set<String>>> deque = sourceCopy.getRestoredState();
        Set<String> messageIds = deque.getLast().f1;

        assertEquals(numIds, messageIds.size());
        if (messageIds.size() > 0) {
            assertTrue(messageIds.contains(Long.toString(lastSnapshotId - 1)));
        }

        // check that the messages are being acknowledged and the transaction committed
        synchronized (DummySourceContext.lock) {
            source.notifyCheckpointComplete(snapshotId);
        }
        totalNumberOfAcks += numIds;
    }

    Mockito.verify(source.channel, Mockito.times((int) totalNumberOfAcks))
            .basicAck(Mockito.anyLong(), Mockito.eq(false));
    Mockito.verify(source.channel, Mockito.times(numSnapshots)).txCommit();
}
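The checkpoint round trip at the heart of this test is worth isolating: snapshot a running harness, then feed the resulting OperatorSubtaskState into a fresh harness before opening it. A distilled sketch of that pattern, using the same classes as the test above (checkpointId is a placeholder):

// Snapshot the running source...
OperatorSubtaskState snapshot = testHarness.snapshot(checkpointId, System.currentTimeMillis());

// ...and restore it into a brand-new operator instance.
RMQTestSource restoredSource = new RMQTestSource();
AbstractStreamOperatorTestHarness<String> restoredHarness =
        new AbstractStreamOperatorTestHarness<>(new StreamSource<>(restoredSource), 1, 1, 0);
restoredHarness.setup();
restoredHarness.initializeState(snapshot); // must happen before open()
restoredHarness.open();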
Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
The class StreamingFileWriterTest, method testCommitFileWhenPartitionIsCommittableByPartitionTime.
@Test
public void testCommitFileWhenPartitionIsCommittableByPartitionTime() throws Exception {
    // The rolling policy neither rolls files by size nor before one day has
    // passed; this ensures that in this test a file can be closed only once
    // its partition is committable.
    FileSystemTableSink.TableRollingPolicy tableRollingPolicy =
            new FileSystemTableSink.TableRollingPolicy(
                    false,
                    Long.MAX_VALUE,
                    Duration.ofDays(1).toMillis(),
                    Duration.ofDays(1).toMillis());
    List<String> partitionKeys = Collections.singletonList("d");

    // the commit delay is 1 day with the partition-time trigger
    Configuration conf = getPartitionCommitTriggerConf(Duration.ofDays(1).toMillis());

    long currentTimeMillis = System.currentTimeMillis();
    Date nextYear = new Date(currentTimeMillis + Duration.ofDays(365).toMillis());
    String nextYearPartition = "d=" + dateFormat.format(nextYear);
    Date yesterday = new Date(currentTimeMillis - Duration.ofDays(1).toMillis());
    String yesterdayPartition = "d=" + dateFormat.format(yesterday);
    Date today = new Date(currentTimeMillis);
    String todayPartition = "d=" + dateFormat.format(today);
    Date tomorrow = new Date(currentTimeMillis + Duration.ofDays(1).toMillis());
    String tomorrowPartition = "d=" + dateFormat.format(tomorrow);

    OperatorSubtaskState state;
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness =
            create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeEmptyState();
        harness.open();
        harness.processElement(row(yesterdayPartition), 0);
        harness.processWatermark(currentTimeMillis);
        state = harness.snapshot(1, 1);
        harness.notifyOfCompletedCheckpoint(1);
        // assert that the yesterday partition file is committed
        Assert.assertTrue(isPartitionFileCommitted(yesterdayPartition, 0, 0));
    }

    // first retry
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness =
            create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeState(state);
        harness.open();
        harness.processElement(row(tomorrowPartition), 0);
        harness.processElement(row(todayPartition), 0);
        // simulate waiting for 1 day
        currentTimeMillis += Duration.ofDays(1).toMillis();
        harness.processWatermark(currentTimeMillis);
        harness.snapshot(2, 2);
        harness.notifyOfCompletedCheckpoint(2);
        // assert that the today partition file is committed
        Assert.assertTrue(isPartitionFileCommitted(todayPartition, 0, 2));
        // assert that the tomorrow partition file isn't committed yet
        Assert.assertFalse(isPartitionFileCommitted(tomorrowPartition, 0, 1));
        // simulate waiting for 1 more day; now the tomorrow partition is committable
        currentTimeMillis += Duration.ofDays(1).toMillis();
        harness.processWatermark(currentTimeMillis);
        state = harness.snapshot(3, 3);
        harness.notifyOfCompletedCheckpoint(3);
        Assert.assertTrue(isPartitionFileCommitted(tomorrowPartition, 0, 1));
        harness.processElement(row(nextYearPartition), 0);
    }

    // second retry
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness =
            create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeState(state);
        harness.open();
        harness.processElement(row(nextYearPartition), 0);
        harness.processElement(row(tomorrowPartition), 0);
        harness.endInput();
        // assert that the files in all partitions have been committed
        Assert.assertTrue(isPartitionFileCommitted(tomorrowPartition, 0, 4));
        Assert.assertTrue(isPartitionFileCommitted(nextYearPartition, 0, 3));
    }
}
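getPartitionCommitTriggerConf is a private helper of the test class and is not shown here. A plausible reconstruction of what it configures, assuming the standard filesystem connector option keys (this is a guess at the helper, not its actual body):

// Hypothetical reconstruction of the test helper (assumption, not the real code).
private Configuration getPartitionCommitTriggerConf(long commitDelayMillis) {
    Configuration conf = new Configuration();
    // commit a partition once the watermark passes partition time + delay
    conf.setString("sink.partition-commit.trigger", "partition-time");
    conf.setString("sink.partition-commit.delay", commitDelayMillis + " ms");
    return conf;
}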
Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
The class CompactCoordinatorTest, method testStateHandler.
@Test
public void testStateHandler() throws Exception {
    FileCompactStrategy strategy = Builder.newBuilder().setSizeThreshold(10).build();
    CompactCoordinator coordinator =
            new CompactCoordinator(strategy, getTestCommittableSerializer());

    // with . prefix
    FileSinkCommittable committable0 = committable("0", ".0", 5);
    FileSinkCommittable committable1 = committable("0", ".1", 6);
    // without . prefix
    FileSinkCommittable committable2 = committable("0", "2", 6);

    OperatorSubtaskState state;
    try (OneInputStreamOperatorTestHarness<CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(coordinator)) {
        harness.setup();
        harness.open();
        harness.processElement(message(committable0));
        Assert.assertEquals(0, harness.extractOutputValues().size());
        harness.prepareSnapshotPreBarrier(1);
        state = harness.snapshot(1, 1);
    }

    CompactCoordinatorStateHandler handler =
            new CompactCoordinatorStateHandler(getTestCommittableSerializer());
    try (OneInputStreamOperatorTestHarness<
                    CommittableMessage<FileSinkCommittable>,
                    Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>>
            harness = new OneInputStreamOperatorTestHarness<>(handler)) {
        harness.setup(
                new EitherSerializer<>(
                        new SimpleVersionedSerializerTypeSerializerProxy<>(
                                () -> new CommittableMessageSerializer<>(getTestCommittableSerializer())),
                        new SimpleVersionedSerializerTypeSerializerProxy<>(
                                () -> new CompactorRequestSerializer(getTestCommittableSerializer()))));
        harness.initializeState(state);
        harness.open();
        Assert.assertEquals(1, harness.extractOutputValues().size());

        harness.processElement(message(committable1));
        harness.processElement(message(committable2));
        List<Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>> results =
                harness.extractOutputValues();
        Assert.assertEquals(3, results.size());

        // restored request
        Assert.assertTrue(results.get(0).isRight());
        assertToCompact(results.get(0).right(), committable0);

        // committable with . prefix should also be passed through
        Assert.assertTrue(
                results.get(1).isLeft() && results.get(1).left() instanceof CommittableWithLineage);
        Assert.assertEquals(
                ((CommittableWithLineage<FileSinkCommittable>) results.get(1).left()).getCommittable(),
                committable1);

        // committable without . prefix should be passed through normally
        Assert.assertTrue(
                results.get(2).isLeft() && results.get(2).left() instanceof CommittableWithLineage);
        Assert.assertEquals(
                ((CommittableWithLineage<FileSinkCommittable>) results.get(2).left()).getCommittable(),
                committable2);
    }
}
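Any consumer of the state handler's output has to branch on the Either, just as the assertions above do: a Right carries a compaction request rebuilt from the coordinator's checkpointed state, while a Left carries a committable that was passed through. A small sketch of that consumption pattern (illustrative only, using the harness from the test above):

// Branching on the handler's mixed output (illustrative).
for (Either<CommittableMessage<FileSinkCommittable>, CompactorRequest> result :
        harness.extractOutputValues()) {
    if (result.isRight()) {
        // a compaction request rebuilt from the coordinator's checkpointed state
        CompactorRequest request = result.right();
    } else if (result.left() instanceof CommittableWithLineage) {
        // a committable passed through unchanged
        FileSinkCommittable committable =
                ((CommittableWithLineage<FileSinkCommittable>) result.left()).getCommittable();
    }
}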