Use of org.apache.flink.runtime.state.changelog.SequenceNumber in project flink by apache.
Example from class FsStateChangelogWriterTest, method testTruncate.
@Test(expected = IllegalArgumentException.class)
public void testTruncate() throws Exception {
    withWriter((writer, uploader) -> {
        SequenceNumber sqn = append(writer, getBytes());
        writer.truncate(sqn.next());
        writer.persist(sqn);
    });
}
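Note the contract this test relies on: truncate(sqn.next()) discards every change up to and including sqn, so the subsequent persist(sqn) requests a range that no longer exists and fails with IllegalArgumentException. Below is a minimal, self-contained sketch of that ordering contract; the SeqNr class and the bookkeeping are illustrative stand-ins, not Flink's implementation:

final class SequenceNumberContractSketch {
    static final class SeqNr implements Comparable<SeqNr> {
        final long value;
        SeqNr(long value) { this.value = value; }
        SeqNr next() { return new SeqNr(value + 1); }
        @Override public int compareTo(SeqNr o) { return Long.compare(value, o.value); }
    }

    private SeqNr truncatedTo = new SeqNr(0);

    void truncate(SeqNr to) { truncatedTo = to; }

    void persist(SeqNr from) {
        // Changes below `truncatedTo` were dropped, so they can no longer be persisted.
        if (from.compareTo(truncatedTo) < 0) {
            throw new IllegalArgumentException(
                    "Cannot persist from " + from.value + ": truncated up to " + truncatedTo.value);
        }
    }

    public static void main(String[] args) {
        SequenceNumberContractSketch writer = new SequenceNumberContractSketch();
        SeqNr sqn = new SeqNr(1);     // pretend one change was appended at 1
        writer.truncate(sqn.next());  // drop everything below 2
        writer.persist(sqn);          // throws IllegalArgumentException, as in the test
    }
}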
Use of org.apache.flink.runtime.state.changelog.SequenceNumber in project flink by apache.
Example from class ChangelogStorageMetricsTest, method testAttemptsPerUpload.
@Test
public void testAttemptsPerUpload() throws Exception {
    int numUploads = 7, maxAttempts = 3;
    ChangelogStorageMetricGroup metrics =
            new ChangelogStorageMetricGroup(createUnregisteredTaskManagerJobMetricGroup());
    BatchingStateChangeUploader batcher =
            new BatchingStateChangeUploader(
                    Long.MAX_VALUE,
                    1,
                    Long.MAX_VALUE,
                    RetryPolicy.fixed(maxAttempts, Long.MAX_VALUE, 0),
                    new MaxAttemptUploader(maxAttempts),
                    newSingleThreadScheduledExecutor(),
                    new RetryingExecutor(1, metrics.getAttemptsPerUpload()),
                    metrics);
    FsStateChangelogStorage storage = new FsStateChangelogStorage(batcher, Integer.MAX_VALUE);
    FsStateChangelogWriter writer = storage.createWriter("writer", EMPTY_KEY_GROUP_RANGE);
    try {
        for (int upload = 0; upload < numUploads; upload++) {
            SequenceNumber from = writer.nextSequenceNumber();
            writer.append(0, new byte[] {0, 1, 2, 3});
            writer.persist(from).get();
        }
        HistogramStatistics histogram = metrics.getAttemptsPerUpload().getStatistics();
        assertEquals(maxAttempts, histogram.getMin());
        assertEquals(maxAttempts, histogram.getMax());
    } finally {
        storage.close();
    }
}
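The MaxAttemptUploader wired in above fails every attempt except the last permitted one, so each of the seven uploads records exactly maxAttempts in the histogram; that is why both the minimum and the maximum equal 3. A minimal, self-contained sketch of that retry pattern follows; all names here are illustrative stand-ins, not Flink's classes:

import java.util.ArrayList;
import java.util.List;

final class RetryAttemptsSketch {
    /** Fails every attempt except the last permitted one, like the test's MaxAttemptUploader. */
    static int uploadSucceedingOnAttempt(int attempt, int maxAttempts) {
        if (attempt < maxAttempts) {
            throw new RuntimeException("simulated transient upload failure");
        }
        return attempt;
    }

    public static void main(String[] args) {
        int numUploads = 7, maxAttempts = 3;
        List<Integer> attemptsPerUpload = new ArrayList<>(); // stands in for the histogram

        for (int upload = 0; upload < numUploads; upload++) {
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    attemptsPerUpload.add(uploadSucceedingOnAttempt(attempt, maxAttempts));
                    break; // success: stop retrying
                } catch (RuntimeException e) {
                    if (attempt == maxAttempts) throw e; // retries exhausted
                }
            }
        }
        // Every upload needed exactly maxAttempts, so min == max == 3 in the histogram.
        System.out.println(attemptsPerUpload); // [3, 3, 3, 3, 3, 3, 3]
    }
}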
Use of org.apache.flink.runtime.state.changelog.SequenceNumber in project flink by apache.
Example from class ChangelogStorageMetricsTest, method testQueueSize.
@Test
public void testQueueSize() throws Exception {
    AtomicReference<Gauge<Integer>> queueSizeGauge = new AtomicReference<>();
    ChangelogStorageMetricGroup metrics =
            new ChangelogStorageMetricGroup(
                    new TaskManagerJobMetricGroup(
                            TestingMetricRegistry.builder()
                                    .setRegisterConsumer(
                                            (metric, name, unused) -> {
                                                if (name.equals(CHANGELOG_STORAGE_UPLOAD_QUEUE_SIZE)) {
                                                    queueSizeGauge.set((Gauge<Integer>) metric);
                                                }
                                            })
                                    .build(),
                            createUnregisteredTaskManagerMetricGroup(),
                            new JobID(),
                            "test"));
    Path path = Path.fromLocalFile(temporaryFolder.newFolder());
    StateChangeFsUploader delegate =
            new StateChangeFsUploader(path, path.getFileSystem(), false, 100, metrics);
    ManuallyTriggeredScheduledExecutorService scheduler =
            new ManuallyTriggeredScheduledExecutorService();
    BatchingStateChangeUploader batcher =
            new BatchingStateChangeUploader(
                    Long.MAX_VALUE,
                    Long.MAX_VALUE,
                    Long.MAX_VALUE,
                    RetryPolicy.NONE,
                    delegate,
                    scheduler,
                    new RetryingExecutor(1, metrics.getAttemptsPerUpload()),
                    metrics);
    try (FsStateChangelogStorage storage = new FsStateChangelogStorage(batcher, Long.MAX_VALUE)) {
        FsStateChangelogWriter writer = storage.createWriter("writer", EMPTY_KEY_GROUP_RANGE);
        int numUploads = 11;
        for (int i = 0; i < numUploads; i++) {
            SequenceNumber from = writer.nextSequenceNumber();
            writer.append(0, new byte[] {0});
            writer.persist(from);
        }
        assertEquals(numUploads, (int) queueSizeGauge.get().getValue());
        scheduler.triggerScheduledTasks();
        assertEquals(0, (int) queueSizeGauge.get().getValue());
    }
}
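The test works because the ManuallyTriggeredScheduledExecutorService defers the scheduled uploads: the gauge first reports all eleven queued uploads, then zero once the tasks are triggered and the queue drains. A minimal sketch of that gauge-over-a-queue pattern, with plain JDK types standing in for Flink's metric and executor classes:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.function.IntSupplier;

final class QueueSizeGaugeSketch {
    public static void main(String[] args) {
        Deque<Runnable> uploadQueue = new ArrayDeque<>();
        IntSupplier queueSizeGauge = uploadQueue::size; // what the gauge reports

        int numUploads = 11;
        for (int i = 0; i < numUploads; i++) {
            uploadQueue.add(() -> { /* pretend to upload one change set */ });
        }
        System.out.println(queueSizeGauge.getAsInt()); // 11: nothing triggered yet

        // Equivalent of scheduler.triggerScheduledTasks(): drain and run everything.
        while (!uploadQueue.isEmpty()) {
            uploadQueue.poll().run();
        }
        System.out.println(queueSizeGauge.getAsInt()); // 0: queue drained
    }
}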
Use of org.apache.flink.runtime.state.changelog.SequenceNumber in project flink by apache.
Example from class ChangelogKeyedStateBackend, method initMaterialization.
/**
 * Initializes state materialization so that materialized data can be persisted durably and
 * included in the checkpoint.
 *
 * <p>This method is not thread-safe. It should be called either under a lock or through the
 * task mailbox executor.
 *
 * @return a tuple of the future snapshot result from the underlying state backend and a
 *     {@link SequenceNumber} identifying the latest change in the changelog
 */
public Optional<MaterializationRunnable> initMaterialization() throws Exception {
    SequenceNumber upTo = stateChangelogWriter.nextSequenceNumber();
    SequenceNumber lastMaterializedTo = changelogSnapshotState.lastMaterializedTo();
    LOG.info(
            "Initialize materialization. Current changelog writer's next sequence number: {}",
            upTo);
    if (upTo.compareTo(lastMaterializedTo) > 0) {
        LOG.info("Starting materialization from {} to {}", lastMaterializedTo, upTo);
        // This ID is not needed for materialization itself, but the reused streamFactory
        // (designed for state backend snapshots) requires a unique checkpoint ID, so a fake
        // materialization ID is provided here.
        long materializationID = materializedId++;
        MaterializationRunnable materializationRunnable =
                new MaterializationRunnable(
                        keyedStateBackend.snapshot(
                                materializationID,
                                System.currentTimeMillis(),
                                // TODO: implement its own streamFactory.
                                streamFactory,
                                CHECKPOINT_OPTIONS),
                        materializationID,
                        upTo);
        // Log metadata after materialization is triggered.
        for (ChangelogState changelogState : changelogStates.values()) {
            changelogState.resetWritingMetaFlag();
        }
        for (ChangelogKeyGroupedPriorityQueue<?> priorityQueueState :
                priorityQueueStatesByName.values()) {
            priorityQueueState.resetWritingMetaFlag();
        }
        return Optional.of(materializationRunnable);
    } else {
        LOG.debug(
                "Skip materialization: last materialized to {}, last log to {}",
                lastMaterializedTo,
                upTo);
        return Optional.empty();
    }
}
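The heart of the method is the guard comparing the writer's next sequence number with the last materialized point: materialization is started only when the changelog has actually advanced, otherwise it is skipped. A minimal sketch of that guard, with plain longs standing in for SequenceNumber and the snapshot call elided:

import java.util.Optional;

final class MaterializationGuardSketch {
    static Optional<Long> initMaterialization(long upTo, long lastMaterializedTo) {
        if (upTo > lastMaterializedTo) {
            // New changes exist in (lastMaterializedTo, upTo]: trigger a snapshot of the
            // delegated backend and remember upTo as the new materialization boundary.
            return Optional.of(upTo);
        }
        // Nothing was appended since the last materialization: skip.
        return Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(initMaterialization(10, 7)); // Optional[10] -> materialize
        System.out.println(initMaterialization(7, 7));  // Optional.empty -> skip
    }
}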
Use of org.apache.flink.runtime.state.changelog.SequenceNumber in project flink by apache.
Example from class StateChangelogStorageTest, method testWriteAndRead.
@Test
public void testWriteAndRead() throws Exception {
    KeyGroupRange kgRange = KeyGroupRange.of(0, 5);
    Map<Integer, List<byte[]>> appendsByKeyGroup = generateAppends(kgRange, 10, 20);
    try (StateChangelogStorage<T> client = getFactory();
            StateChangelogWriter<T> writer =
                    client.createWriter(new OperatorID().toString(), kgRange)) {
        SequenceNumber prev = writer.initialSequenceNumber();
        for (Map.Entry<Integer, List<byte[]>> entry : appendsByKeyGroup.entrySet()) {
            Integer group = entry.getKey();
            List<byte[]> appends = entry.getValue();
            for (byte[] bytes : appends) {
                writer.append(group, bytes);
            }
        }
        T handle = writer.persist(prev).get();
        StateChangelogHandleReader<T> reader = client.createReader();
        assertByteMapsEqual(appendsByKeyGroup, extract(handle, reader));
    }
}
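The generateAppends helper is not shown in this excerpt; a plausible sketch is below, producing a few random payloads per key group in the range. Its name, signature, and behavior are assumptions for illustration only, not Flink's actual test utility:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

final class GenerateAppendsSketch {
    static Map<Integer, List<byte[]>> generateAppends(
            int firstKeyGroup, int lastKeyGroup, int appendsPerGroup, int payloadSize) {
        Random random = new Random(42); // fixed seed keeps the data reproducible
        Map<Integer, List<byte[]>> result = new HashMap<>();
        for (int group = firstKeyGroup; group <= lastKeyGroup; group++) {
            List<byte[]> appends = new ArrayList<>();
            for (int i = 0; i < appendsPerGroup; i++) {
                byte[] payload = new byte[payloadSize];
                random.nextBytes(payload);
                appends.add(payload);
            }
            result.put(group, appends);
        }
        return result;
    }

    public static void main(String[] args) {
        Map<Integer, List<byte[]>> appends = generateAppends(0, 5, 10, 20);
        System.out.println(appends.size()); // 6 key groups: 0..5 inclusive
    }
}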