Use of org.apache.flink.runtime.state.PriorityQueueSetFactory in project flink by apache.
The class RocksDBKeyedStateBackendBuilder, method build().
@Override
public RocksDBKeyedStateBackend<K> build() throws BackendBuildingException {
    RocksDBWriteBatchWrapper writeBatchWrapper = null;
    ColumnFamilyHandle defaultColumnFamilyHandle = null;
    RocksDBNativeMetricMonitor nativeMetricMonitor = null;
    CloseableRegistry cancelStreamRegistryForBackend = new CloseableRegistry();
    LinkedHashMap<String, RocksDBKeyedStateBackend.RocksDbKvStateInfo> kvStateInformation = new LinkedHashMap<>();
    LinkedHashMap<String, HeapPriorityQueueSnapshotRestoreWrapper<?>> registeredPQStates = new LinkedHashMap<>();
    RocksDB db = null;
    RocksDBRestoreOperation restoreOperation = null;
    RocksDbTtlCompactFiltersManager ttlCompactFiltersManager =
            new RocksDbTtlCompactFiltersManager(ttlTimeProvider);
    ResourceGuard rocksDBResourceGuard = new ResourceGuard();
    RocksDBSnapshotStrategyBase<K, ?> checkpointStrategy = null;
    PriorityQueueSetFactory priorityQueueFactory;
    SerializedCompositeKeyBuilder<K> sharedRocksKeyBuilder;
    // Number of bytes required to prefix the key groups.
    int keyGroupPrefixBytes =
            CompositeKeySerializationUtils.computeRequiredBytesInKeyGroupPrefix(numberOfKeyGroups);
    try {
        // Variables for the snapshot strategy when incremental checkpointing is enabled
        UUID backendUID = UUID.randomUUID();
        SortedMap<Long, Map<StateHandleID, StreamStateHandle>> materializedSstFiles = new TreeMap<>();
        long lastCompletedCheckpointId = -1L;
        if (injectedTestDB != null) {
            db = injectedTestDB;
            defaultColumnFamilyHandle = injectedDefaultColumnFamilyHandle;
            nativeMetricMonitor =
                    nativeMetricOptions.isEnabled()
                            ? new RocksDBNativeMetricMonitor(nativeMetricOptions, metricGroup, db)
                            : null;
        } else {
            prepareDirectories();
            restoreOperation =
                    getRocksDBRestoreOperation(
                            keyGroupPrefixBytes,
                            cancelStreamRegistry,
                            kvStateInformation,
                            registeredPQStates,
                            ttlCompactFiltersManager);
            RocksDBRestoreResult restoreResult = restoreOperation.restore();
            db = restoreResult.getDb();
            defaultColumnFamilyHandle = restoreResult.getDefaultColumnFamilyHandle();
            nativeMetricMonitor = restoreResult.getNativeMetricMonitor();
            if (restoreOperation instanceof RocksDBIncrementalRestoreOperation) {
                backendUID = restoreResult.getBackendUID();
                materializedSstFiles = restoreResult.getRestoredSstFiles();
                lastCompletedCheckpointId = restoreResult.getLastCompletedCheckpointId();
            }
        }
        writeBatchWrapper =
                new RocksDBWriteBatchWrapper(db, optionsContainer.getWriteOptions(), writeBatchSize);
        // It is important that the key builder is created only after the restore, and not before:
        // restore operations may reconfigure the key serializer, so only now can we be certain
        // that the key serializer used in the builder is final.
        sharedRocksKeyBuilder =
                new SerializedCompositeKeyBuilder<>(
                        keySerializerProvider.currentSchemaSerializer(), keyGroupPrefixBytes, 32);
        // init snapshot strategy after db is assured to be initialized
        checkpointStrategy =
                initializeSavepointAndCheckpointStrategies(
                        cancelStreamRegistryForBackend,
                        rocksDBResourceGuard,
                        kvStateInformation,
                        registeredPQStates,
                        keyGroupPrefixBytes,
                        db,
                        backendUID,
                        materializedSstFiles,
                        lastCompletedCheckpointId);
        // init priority queue factory
        priorityQueueFactory =
                initPriorityQueueFactory(
                        keyGroupPrefixBytes, kvStateInformation, db, writeBatchWrapper, nativeMetricMonitor);
    } catch (Throwable e) {
        // Do clean up
        List<ColumnFamilyOptions> columnFamilyOptions =
                new ArrayList<>(kvStateInformation.values().size());
        IOUtils.closeQuietly(cancelStreamRegistryForBackend);
        IOUtils.closeQuietly(writeBatchWrapper);
        RocksDBOperationUtils.addColumnFamilyOptionsToCloseLater(
                columnFamilyOptions, defaultColumnFamilyHandle);
        IOUtils.closeQuietly(defaultColumnFamilyHandle);
        IOUtils.closeQuietly(nativeMetricMonitor);
        for (RocksDBKeyedStateBackend.RocksDbKvStateInfo kvStateInfo : kvStateInformation.values()) {
            RocksDBOperationUtils.addColumnFamilyOptionsToCloseLater(
                    columnFamilyOptions, kvStateInfo.columnFamilyHandle);
            IOUtils.closeQuietly(kvStateInfo.columnFamilyHandle);
        }
        IOUtils.closeQuietly(db);
        // it's possible that the db has been initialized but later restore steps failed
        IOUtils.closeQuietly(restoreOperation);
        IOUtils.closeAllQuietly(columnFamilyOptions);
        IOUtils.closeQuietly(optionsContainer);
        ttlCompactFiltersManager.disposeAndClearRegisteredCompactionFactories();
        kvStateInformation.clear();
        IOUtils.closeQuietly(checkpointStrategy);
        try {
            FileUtils.deleteDirectory(instanceBasePath);
        } catch (Exception ex) {
            logger.warn("Failed to delete base path for RocksDB: " + instanceBasePath, ex);
        }
        // Log and rethrow
        if (e instanceof BackendBuildingException) {
            throw (BackendBuildingException) e;
        } else {
            String errMsg = "Caught unexpected exception.";
            logger.error(errMsg, e);
            throw new BackendBuildingException(errMsg, e);
        }
    }
    InternalKeyContext<K> keyContext = new InternalKeyContextImpl<>(keyGroupRange, numberOfKeyGroups);
    logger.info("Finished building RocksDB keyed state-backend at {}.", instanceBasePath);
    return new RocksDBKeyedStateBackend<>(
            this.userCodeClassLoader,
            this.instanceBasePath,
            this.optionsContainer,
            columnFamilyOptionsFactory,
            this.kvStateRegistry,
            this.keySerializerProvider.currentSchemaSerializer(),
            this.executionConfig,
            this.ttlTimeProvider,
            latencyTrackingStateConfig,
            db,
            kvStateInformation,
            registeredPQStates,
            keyGroupPrefixBytes,
            cancelStreamRegistryForBackend,
            this.keyGroupCompressionDecorator,
            rocksDBResourceGuard,
            checkpointStrategy,
            writeBatchWrapper,
            defaultColumnFamilyHandle,
            nativeMetricMonitor,
            sharedRocksKeyBuilder,
            priorityQueueFactory,
            ttlCompactFiltersManager,
            keyContext,
            writeBatchSize);
}
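For reference, the priorityQueueFactory assigned above implements the PriorityQueueSetFactory contract, which the timer service later uses to obtain key-grouped priority queues for its timers. The sketch below shows the approximate shape of that interface as it appears in the Flink runtime (imports omitted like the rest of this page; generic bounds and available overloads may differ between Flink versions). Depending on configuration, the RocksDB backend returns either a heap-based or a RocksDB-backed implementation of it.

// Approximate shape of org.apache.flink.runtime.state.PriorityQueueSetFactory (version-dependent).
public interface PriorityQueueSetFactory {

    // Creates a key-grouped priority queue for elements such as timers. The element type must
    // expose a priority and its key, so the queue contents can be partitioned by key group
    // for snapshots and rescaling.
    <T extends HeapPriorityQueueElement & PriorityComparable<? super T> & Keyed<?>>
    KeyGroupedInternalPriorityQueue<T> create(
            String stateName, TypeSerializer<T> byteOrderedElementSerializer);
}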
Use of org.apache.flink.runtime.state.PriorityQueueSetFactory in project flink by apache.
The class InternalTimerServiceImplTest, method testTimerAssignmentToKeyGroups().
@Test
public void testTimerAssignmentToKeyGroups() {
    int totalNoOfTimers = 100;
    int totalNoOfKeyGroups = 100;
    int startKeyGroupIdx = 0;
    // we have key groups 0 to 99
    int endKeyGroupIdx = totalNoOfKeyGroups - 1;
    @SuppressWarnings("unchecked")
    Set<TimerHeapInternalTimer<Integer, String>>[] expectedNonEmptyTimerSets =
            new HashSet[totalNoOfKeyGroups];
    TestKeyContext keyContext = new TestKeyContext();
    final KeyGroupRange keyGroupRange = new KeyGroupRange(startKeyGroupIdx, endKeyGroupIdx);
    final PriorityQueueSetFactory priorityQueueSetFactory =
            createQueueFactory(keyGroupRange, totalNoOfKeyGroups);
    InternalTimerServiceImpl<Integer, String> timerService =
            createInternalTimerService(
                    keyGroupRange,
                    keyContext,
                    new TestProcessingTimeService(),
                    IntSerializer.INSTANCE,
                    StringSerializer.INSTANCE,
                    priorityQueueSetFactory);
    timerService.startTimerService(
            IntSerializer.INSTANCE, StringSerializer.INSTANCE, mock(Triggerable.class));
    for (int i = 0; i < totalNoOfTimers; i++) {
        // create the timer to be registered
        TimerHeapInternalTimer<Integer, String> timer =
                new TimerHeapInternalTimer<>(10 + i, i, "hello_world_" + i);
        int keyGroupIdx = KeyGroupRangeAssignment.assignToKeyGroup(timer.getKey(), totalNoOfKeyGroups);
        // add it to the expected set of timers for that key group
        Set<TimerHeapInternalTimer<Integer, String>> timerSet = expectedNonEmptyTimerSets[keyGroupIdx];
        if (timerSet == null) {
            timerSet = new HashSet<>();
            expectedNonEmptyTimerSets[keyGroupIdx] = timerSet;
        }
        timerSet.add(timer);
        // register the timer as both a processing-time and an event-time timer
        keyContext.setCurrentKey(timer.getKey());
        timerService.registerEventTimeTimer(timer.getNamespace(), timer.getTimestamp());
        timerService.registerProcessingTimeTimer(timer.getNamespace(), timer.getTimestamp());
    }
    List<Set<TimerHeapInternalTimer<Integer, String>>> eventTimeTimers =
            timerService.getEventTimeTimersPerKeyGroup();
    List<Set<TimerHeapInternalTimer<Integer, String>>> processingTimeTimers =
            timerService.getProcessingTimeTimersPerKeyGroup();
    // finally, verify that the actual per-key-group timer sets are the expected ones
    for (int i = 0; i < expectedNonEmptyTimerSets.length; i++) {
        Set<TimerHeapInternalTimer<Integer, String>> expected = expectedNonEmptyTimerSets[i];
        Set<TimerHeapInternalTimer<Integer, String>> actualEvent = eventTimeTimers.get(i);
        Set<TimerHeapInternalTimer<Integer, String>> actualProcessing = processingTimeTimers.get(i);
        if (expected == null) {
            Assert.assertTrue(actualEvent.isEmpty());
            Assert.assertTrue(actualProcessing.isEmpty());
        } else {
            Assert.assertEquals(expected, actualEvent);
            Assert.assertEquals(expected, actualProcessing);
        }
    }
}
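The createQueueFactory helper used above is not shown on this page. A minimal version that is consistent with the factory constructed directly in testOnlySetsOnePhysicalProcessingTimeTimer further below might look like this (a sketch under that assumption, not necessarily the test class's actual helper):

// Hypothetical helper: build a heap-based PriorityQueueSetFactory for the given key-group range.
private static PriorityQueueSetFactory createQueueFactory(KeyGroupRange keyGroupRange, int numKeyGroups) {
    // 128 is an arbitrary initial per-key-group capacity, mirroring the value used elsewhere in these tests.
    return new HeapPriorityQueueSetFactory(keyGroupRange, numKeyGroups, 128);
}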
Use of org.apache.flink.runtime.state.PriorityQueueSetFactory in project flink by apache.
The class InternalTimerServiceImplTest, method testSnapshotAndRebalancingRestore().
private void testSnapshotAndRebalancingRestore(int snapshotVersion) throws Exception {
    @SuppressWarnings("unchecked")
    Triggerable<Integer, String> mockTriggerable = mock(Triggerable.class);
    TestKeyContext keyContext = new TestKeyContext();
    TestProcessingTimeService processingTimeService = new TestProcessingTimeService();
    final PriorityQueueSetFactory queueFactory = createQueueFactory();
    InternalTimerServiceImpl<Integer, String> timerService =
            createAndStartInternalTimerService(
                    mockTriggerable, keyContext, processingTimeService, testKeyGroupRange, queueFactory);
    int midpoint =
            testKeyGroupRange.getStartKeyGroup()
                    + (testKeyGroupRange.getEndKeyGroup() - testKeyGroupRange.getStartKeyGroup()) / 2;
    // split into two sub key-group ranges so that we can restore the two ranges separately
    KeyGroupRange subKeyGroupRange1 = new KeyGroupRange(testKeyGroupRange.getStartKeyGroup(), midpoint);
    KeyGroupRange subKeyGroupRange2 = new KeyGroupRange(midpoint + 1, testKeyGroupRange.getEndKeyGroup());
    // get two different keys, one per sub range
    int key1 = getKeyInKeyGroupRange(subKeyGroupRange1, maxParallelism);
    int key2 = getKeyInKeyGroupRange(subKeyGroupRange2, maxParallelism);
    keyContext.setCurrentKey(key1);
    timerService.registerProcessingTimeTimer("ciao", 10);
    timerService.registerEventTimeTimer("hello", 10);
    keyContext.setCurrentKey(key2);
    timerService.registerEventTimeTimer("ciao", 10);
    timerService.registerProcessingTimeTimer("hello", 10);
    assertEquals(2, timerService.numProcessingTimeTimers());
    assertEquals(1, timerService.numProcessingTimeTimers("hello"));
    assertEquals(1, timerService.numProcessingTimeTimers("ciao"));
    assertEquals(2, timerService.numEventTimeTimers());
    assertEquals(1, timerService.numEventTimeTimers("hello"));
    assertEquals(1, timerService.numEventTimeTimers("ciao"));
    // one snapshot map per sub key-group range
    Map<Integer, byte[]> snapshot1 = new HashMap<>();
    Map<Integer, byte[]> snapshot2 = new HashMap<>();
    for (Integer keyGroupIndex : testKeyGroupRange) {
        try (ByteArrayOutputStream outStream = new ByteArrayOutputStream()) {
            InternalTimersSnapshot<Integer, String> timersSnapshot =
                    timerService.snapshotTimersForKeyGroup(keyGroupIndex);
            InternalTimersSnapshotReaderWriters.getWriterForVersion(
                            snapshotVersion,
                            timersSnapshot,
                            timerService.getKeySerializer(),
                            timerService.getNamespaceSerializer())
                    .writeTimersSnapshot(new DataOutputViewStreamWrapper(outStream));
            if (subKeyGroupRange1.contains(keyGroupIndex)) {
                snapshot1.put(keyGroupIndex, outStream.toByteArray());
            } else if (subKeyGroupRange2.contains(keyGroupIndex)) {
                snapshot2.put(keyGroupIndex, outStream.toByteArray());
            } else {
                throw new IllegalStateException("Key-Group index doesn't belong to any sub range.");
            }
        }
    }
    // from now on we need everything twice, once per sub key-group range
    @SuppressWarnings("unchecked")
    Triggerable<Integer, String> mockTriggerable1 = mock(Triggerable.class);
    @SuppressWarnings("unchecked")
    Triggerable<Integer, String> mockTriggerable2 = mock(Triggerable.class);
    TestKeyContext keyContext1 = new TestKeyContext();
    TestKeyContext keyContext2 = new TestKeyContext();
    TestProcessingTimeService processingTimeService1 = new TestProcessingTimeService();
    TestProcessingTimeService processingTimeService2 = new TestProcessingTimeService();
    InternalTimerServiceImpl<Integer, String> timerService1 =
            restoreTimerService(
                    snapshot1,
                    snapshotVersion,
                    mockTriggerable1,
                    keyContext1,
                    processingTimeService1,
                    subKeyGroupRange1,
                    queueFactory);
    InternalTimerServiceImpl<Integer, String> timerService2 =
            restoreTimerService(
                    snapshot2,
                    snapshotVersion,
                    mockTriggerable2,
                    keyContext2,
                    processingTimeService2,
                    subKeyGroupRange2,
                    queueFactory);
    processingTimeService1.setCurrentTime(10);
    timerService1.advanceWatermark(10);
    verify(mockTriggerable1, times(1)).onProcessingTime(anyInternalTimer());
    verify(mockTriggerable1, times(1)).onProcessingTime(eq(new TimerHeapInternalTimer<>(10, key1, "ciao")));
    verify(mockTriggerable1, never()).onProcessingTime(eq(new TimerHeapInternalTimer<>(10, key2, "hello")));
    verify(mockTriggerable1, times(1)).onEventTime(anyInternalTimer());
    verify(mockTriggerable1, times(1)).onEventTime(eq(new TimerHeapInternalTimer<>(10, key1, "hello")));
    verify(mockTriggerable1, never()).onEventTime(eq(new TimerHeapInternalTimer<>(10, key2, "ciao")));
    assertEquals(0, timerService1.numEventTimeTimers());
    processingTimeService2.setCurrentTime(10);
    timerService2.advanceWatermark(10);
    verify(mockTriggerable2, times(1)).onProcessingTime(anyInternalTimer());
    verify(mockTriggerable2, never()).onProcessingTime(eq(new TimerHeapInternalTimer<>(10, key1, "ciao")));
    verify(mockTriggerable2, times(1)).onProcessingTime(eq(new TimerHeapInternalTimer<>(10, key2, "hello")));
    verify(mockTriggerable2, times(1)).onEventTime(anyInternalTimer());
    verify(mockTriggerable2, never()).onEventTime(eq(new TimerHeapInternalTimer<>(10, key1, "hello")));
    verify(mockTriggerable2, times(1)).onEventTime(eq(new TimerHeapInternalTimer<>(10, key2, "ciao")));
    assertEquals(0, timerService2.numEventTimeTimers());
}
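getKeyInKeyGroupRange is another helper that is not reproduced here; the test only relies on it returning some key whose key group falls inside the given range. A simple sketch under that assumption (the real helper may pick keys differently, e.g. at random):

// Hypothetical helper: find any key that KeyGroupRangeAssignment maps into the given range.
private static int getKeyInKeyGroupRange(KeyGroupRange range, int maxParallelism) {
    int key = 0;
    while (!range.contains(KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism))) {
        key++;
    }
    return key;
}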
Use of org.apache.flink.runtime.state.PriorityQueueSetFactory in project flink by apache.
The class InternalTimerServiceImplTest, method testOnlySetsOnePhysicalProcessingTimeTimer().
/**
 * Verify that we only ever have one processing-time task registered at the {@link
 * ProcessingTimeService}.
 */
@Test
public void testOnlySetsOnePhysicalProcessingTimeTimer() throws Exception {
    @SuppressWarnings("unchecked")
    Triggerable<Integer, String> mockTriggerable = mock(Triggerable.class);
    TestKeyContext keyContext = new TestKeyContext();
    TestProcessingTimeService processingTimeService = new TestProcessingTimeService();
    PriorityQueueSetFactory priorityQueueSetFactory =
            new HeapPriorityQueueSetFactory(testKeyGroupRange, maxParallelism, 128);
    InternalTimerServiceImpl<Integer, String> timerService =
            createAndStartInternalTimerService(
                    mockTriggerable, keyContext, processingTimeService, testKeyGroupRange, priorityQueueSetFactory);
    int key = getKeyInKeyGroupRange(testKeyGroupRange, maxParallelism);
    keyContext.setCurrentKey(key);
    timerService.registerProcessingTimeTimer("ciao", 10);
    timerService.registerProcessingTimeTimer("ciao", 20);
    timerService.registerProcessingTimeTimer("ciao", 30);
    timerService.registerProcessingTimeTimer("hello", 10);
    timerService.registerProcessingTimeTimer("hello", 20);
    assertEquals(5, timerService.numProcessingTimeTimers());
    assertEquals(2, timerService.numProcessingTimeTimers("hello"));
    assertEquals(3, timerService.numProcessingTimeTimers("ciao"));
    assertEquals(1, processingTimeService.getNumActiveTimers());
    assertThat(processingTimeService.getActiveTimerTimestamps(), containsInAnyOrder(10L));
    processingTimeService.setCurrentTime(10);
    assertEquals(3, timerService.numProcessingTimeTimers());
    assertEquals(1, timerService.numProcessingTimeTimers("hello"));
    assertEquals(2, timerService.numProcessingTimeTimers("ciao"));
    assertEquals(1, processingTimeService.getNumActiveTimers());
    assertThat(processingTimeService.getActiveTimerTimestamps(), containsInAnyOrder(20L));
    processingTimeService.setCurrentTime(20);
    assertEquals(1, timerService.numProcessingTimeTimers());
    assertEquals(0, timerService.numProcessingTimeTimers("hello"));
    assertEquals(1, timerService.numProcessingTimeTimers("ciao"));
    assertEquals(1, processingTimeService.getNumActiveTimers());
    assertThat(processingTimeService.getActiveTimerTimestamps(), containsInAnyOrder(30L));
    processingTimeService.setCurrentTime(30);
    assertEquals(0, timerService.numProcessingTimeTimers());
    assertEquals(0, processingTimeService.getNumActiveTimers());
    timerService.registerProcessingTimeTimer("ciao", 40);
    assertEquals(1, processingTimeService.getNumActiveTimers());
}