Use of org.apache.flink.changelog.fs.ChangelogStorageMetricGroup.CHANGELOG_STORAGE_UPLOAD_QUEUE_SIZE in the flink project by apache.

From the class ChangelogStorageMetricsTest, method testQueueSize:
@Test
public void testQueueSize() throws Exception {
    AtomicReference<Gauge<Integer>> queueSizeGauge = new AtomicReference<>();
    // Capture the gauge registered under CHANGELOG_STORAGE_UPLOAD_QUEUE_SIZE.
    ChangelogStorageMetricGroup metrics =
            new ChangelogStorageMetricGroup(
                    new TaskManagerJobMetricGroup(
                            TestingMetricRegistry.builder()
                                    .setRegisterConsumer(
                                            (metric, name, unused) -> {
                                                if (name.equals(CHANGELOG_STORAGE_UPLOAD_QUEUE_SIZE)) {
                                                    queueSizeGauge.set((Gauge<Integer>) metric);
                                                }
                                            })
                                    .build(),
                            createUnregisteredTaskManagerMetricGroup(),
                            new JobID(),
                            "test"));
    Path path = Path.fromLocalFile(temporaryFolder.newFolder());
    StateChangeFsUploader delegate =
            new StateChangeFsUploader(path, path.getFileSystem(), false, 100, metrics);
    // Uploads are only executed when the manual scheduler is triggered explicitly.
    ManuallyTriggeredScheduledExecutorService scheduler =
            new ManuallyTriggeredScheduledExecutorService();
    BatchingStateChangeUploader batcher =
            new BatchingStateChangeUploader(
                    Long.MAX_VALUE, Long.MAX_VALUE, Long.MAX_VALUE, RetryPolicy.NONE,
                    delegate, scheduler,
                    new RetryingExecutor(1, metrics.getAttemptsPerUpload()), metrics);
    try (FsStateChangelogStorage storage = new FsStateChangelogStorage(batcher, Long.MAX_VALUE)) {
        FsStateChangelogWriter writer = storage.createWriter("writer", EMPTY_KEY_GROUP_RANGE);
        // Enqueue numUploads uploads; none run yet, so the gauge reports the full queue.
        int numUploads = 11;
        for (int i = 0; i < numUploads; i++) {
            SequenceNumber from = writer.nextSequenceNumber();
            writer.append(0, new byte[] {0});
            writer.persist(from);
        }
        assertEquals(numUploads, (int) queueSizeGauge.get().getValue());
        // Triggering the scheduled tasks drains the queue, so the gauge drops back to zero.
        scheduler.triggerScheduledTasks();
        assertEquals(0, (int) queueSizeGauge.get().getValue());
    }
}
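
For context, CHANGELOG_STORAGE_UPLOAD_QUEUE_SIZE is the metric name under which the changelog storage exposes the number of pending uploads as a Gauge<Integer>. Below is a minimal sketch of that registration pattern using Flink's generic MetricGroup.gauge API; the UploadQueueMetrics class, its field, and its method name are illustrative stand-ins, not Flink's production implementation.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.MetricGroup;

// Illustrative sketch (not Flink's internal code): expose the current size of an
// upload queue as a Gauge<Integer> registered under a given metric name.
class UploadQueueMetrics {
    private final BlockingQueue<Runnable> uploadQueue = new LinkedBlockingQueue<>();

    void registerQueueSizeGauge(MetricGroup group, String metricName) {
        // MetricGroup.gauge registers the gauge under metricName; reporters call
        // getValue() on each report, so the queue size is read lazily.
        group.gauge(metricName, (Gauge<Integer>) uploadQueue::size);
    }
}

A consumer such as the TestingMetricRegistry in the test above can then intercept the registration by metric name and read the current value with getValue().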