Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.
The class KvStateLocationRegistryTest, method testRegisterDuplicateName.
/**
 * Tests that registrations with duplicate names throw an Exception.
 */
@Test
public void testRegisterDuplicateName() throws Exception {
    ExecutionJobVertex[] vertices = new ExecutionJobVertex[] {createJobVertex(32), createJobVertex(13)};
    Map<JobVertexID, ExecutionJobVertex> vertexMap = createVertexMap(vertices);
    String registrationName = "duplicated-name";
    KvStateLocationRegistry registry = new KvStateLocationRegistry(new JobID(), vertexMap);

    // First operator registers under the name.
    registry.notifyKvStateRegistered(
        vertices[0].getJobVertexId(), new KeyGroupRange(0, 0), registrationName,
        new KvStateID(), new InetSocketAddress(InetAddress.getLocalHost(), 12328));

    try {
        // Second operator registers under the same name.
        registry.notifyKvStateRegistered(
            vertices[1].getJobVertexId(), new KeyGroupRange(0, 0), registrationName,
            new KvStateID(), new InetSocketAddress(InetAddress.getLocalHost(), 12032));
        fail("Did not throw expected Exception for the duplicate name");
    } catch (IllegalStateException ignored) {
        // Expected
    }
}
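The KeyGroupRange(0, 0) instances above each describe a range containing a single key group. For context, here is a minimal standalone sketch of the class's basic accessors; the wrapper class KeyGroupRangeBasics is illustrative, while the KeyGroupRange calls themselves are the real API:

import org.apache.flink.runtime.state.KeyGroupRange;

public class KeyGroupRangeBasics {
    public static void main(String[] args) {
        // A closed range of key groups [0, 127], e.g. for a max parallelism of 128.
        KeyGroupRange range = KeyGroupRange.of(0, 127);
        System.out.println(range.getNumberOfKeyGroups()); // 128
        System.out.println(range.contains(42));           // true

        // Intersection with an overlapping range yields the shared key groups.
        KeyGroupRange overlap = range.getIntersection(KeyGroupRange.of(64, 191));
        System.out.println(overlap.getStartKeyGroup() + ".." + overlap.getEndKeyGroup()); // 64..127
    }
}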
Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.
The class LatencyTrackingStateTestBase, method createKeyedBackend.
protected AbstractKeyedStateBackend<K> createKeyedBackend(TypeSerializer<K> keySerializer) throws Exception {
    Environment env = new DummyEnvironment();
    KeyGroupRange keyGroupRange = new KeyGroupRange(0, 127);
    int numberOfKeyGroups = keyGroupRange.getNumberOfKeyGroups();

    Configuration configuration = new Configuration();
    configuration.setBoolean(StateBackendOptions.LATENCY_TRACK_ENABLED, true);
    configuration.setInteger(StateBackendOptions.LATENCY_TRACK_SAMPLE_INTERVAL, SAMPLE_INTERVAL);
    // Use a very large history size so that metrics data is not overwritten.
    int historySize = 1_000_000;
    configuration.setInteger(StateBackendOptions.LATENCY_TRACK_HISTORY_SIZE, historySize);

    HashMapStateBackend stateBackend =
        new HashMapStateBackend().configure(configuration, Thread.currentThread().getContextClassLoader());
    return stateBackend.createKeyedStateBackend(
        env, new JobID(), "test_op", keySerializer, numberOfKeyGroups, keyGroupRange,
        env.getTaskKvStateRegistry(), TtlTimeProvider.DEFAULT, new UnregisteredMetricsGroup(),
        Collections.emptyList(), new CloseableRegistry());
}
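The range (0, 127) above corresponds to a maximum parallelism of 128, with a single subtask owning every key group. As a sketch of how per-subtask ranges are derived at higher parallelism, using the real KeyGroupRangeAssignment utility (the wrapper class SubtaskRanges is illustrative):

import org.apache.flink.runtime.state.KeyGroupRange;
import org.apache.flink.runtime.state.KeyGroupRangeAssignment;

public class SubtaskRanges {
    public static void main(String[] args) {
        int maxParallelism = 128; // total number of key groups
        int parallelism = 4;      // running subtasks

        // Each subtask owns a contiguous slice of the 128 key groups.
        for (int subtask = 0; subtask < parallelism; subtask++) {
            KeyGroupRange range = KeyGroupRangeAssignment.computeKeyGroupRangeForOperatorIndex(
                maxParallelism, parallelism, subtask);
            System.out.println("subtask " + subtask + " -> ["
                + range.getStartKeyGroup() + ", " + range.getEndKeyGroup() + "]");
        }
        // With parallelism 1, the single subtask gets [0, 127], matching the test above.
    }
}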
Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.
The class RocksDBKeyedStateBackendTestFactory, method create.
public <K> RocksDBKeyedStateBackend<K> create(
        TemporaryFolder tmp, TypeSerializer<K> keySerializer, int maxKeyGroupNumber) throws Exception {
    RocksDBStateBackend backend = getRocksDBStateBackend(tmp);
    env = MockEnvironment.builder().build();
    keyedStateBackend = (RocksDBKeyedStateBackend<K>) backend.createKeyedStateBackend(
        env, new JobID(), "Test", keySerializer, maxKeyGroupNumber,
        new KeyGroupRange(0, maxKeyGroupNumber - 1), mock(TaskKvStateRegistry.class),
        TtlTimeProvider.DEFAULT, new UnregisteredMetricsGroup(),
        Collections.emptyList(), new CloseableRegistry());
    return (RocksDBKeyedStateBackend<K>) keyedStateBackend;
}
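Once created, such a backend is driven by setting the current key and requesting partitioned state. A minimal sketch, assuming a backend keyed by Integer like the factory above could produce; the method name readAndWrite and the state name "demo-state" are illustrative:

import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.runtime.state.AbstractKeyedStateBackend;
import org.apache.flink.runtime.state.VoidNamespace;
import org.apache.flink.runtime.state.VoidNamespaceSerializer;

class KeyedBackendUsage {
    // Sketch: exercise a keyed backend produced by a factory like the one above.
    static void readAndWrite(AbstractKeyedStateBackend<Integer> backend) throws Exception {
        ValueState<String> state = backend.getPartitionedState(
            VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE,
            new ValueStateDescriptor<>("demo-state", String.class));

        backend.setCurrentKey(1);              // subsequent state calls address key 1
        state.update("hello");

        backend.setCurrentKey(2);              // a different key sees no value yet
        System.out.println(state.value());     // null

        backend.setCurrentKey(1);              // back on key 1, the value is visible again
        System.out.println(state.value());     // hello
    }
}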
Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.
The class InternalTimerServiceImplTest, method testTimerAssignmentToKeyGroups.
@Test
public void testTimerAssignmentToKeyGroups() {
    int totalNoOfTimers = 100;
    int totalNoOfKeyGroups = 100;
    int startKeyGroupIdx = 0;
    // key groups 0 to 99
    int endKeyGroupIdx = totalNoOfKeyGroups - 1;
    @SuppressWarnings("unchecked")
    Set<TimerHeapInternalTimer<Integer, String>>[] expectedNonEmptyTimerSets = new HashSet[totalNoOfKeyGroups];
    TestKeyContext keyContext = new TestKeyContext();
    final KeyGroupRange keyGroupRange = new KeyGroupRange(startKeyGroupIdx, endKeyGroupIdx);
    final PriorityQueueSetFactory priorityQueueSetFactory = createQueueFactory(keyGroupRange, totalNoOfKeyGroups);
    InternalTimerServiceImpl<Integer, String> timerService = createInternalTimerService(
        keyGroupRange, keyContext, new TestProcessingTimeService(),
        IntSerializer.INSTANCE, StringSerializer.INSTANCE, priorityQueueSetFactory);
    timerService.startTimerService(IntSerializer.INSTANCE, StringSerializer.INSTANCE, mock(Triggerable.class));

    for (int i = 0; i < totalNoOfTimers; i++) {
        // Create the timer to be registered.
        TimerHeapInternalTimer<Integer, String> timer = new TimerHeapInternalTimer<>(10 + i, i, "hello_world_" + i);
        int keyGroupIdx = KeyGroupRangeAssignment.assignToKeyGroup(timer.getKey(), totalNoOfKeyGroups);

        // Add it to the appropriate expected set of timers for that key group.
        Set<TimerHeapInternalTimer<Integer, String>> timerSet = expectedNonEmptyTimerSets[keyGroupIdx];
        if (timerSet == null) {
            timerSet = new HashSet<>();
            expectedNonEmptyTimerSets[keyGroupIdx] = timerSet;
        }
        timerSet.add(timer);

        // Register the timer as both a processing-time and an event-time timer.
        keyContext.setCurrentKey(timer.getKey());
        timerService.registerEventTimeTimer(timer.getNamespace(), timer.getTimestamp());
        timerService.registerProcessingTimeTimer(timer.getNamespace(), timer.getTimestamp());
    }

    List<Set<TimerHeapInternalTimer<Integer, String>>> eventTimeTimers = timerService.getEventTimeTimersPerKeyGroup();
    List<Set<TimerHeapInternalTimer<Integer, String>>> processingTimeTimers = timerService.getProcessingTimeTimersPerKeyGroup();

    // Finally, verify that the actual per-key-group timer sets match the expected ones.
    for (int i = 0; i < expectedNonEmptyTimerSets.length; i++) {
        Set<TimerHeapInternalTimer<Integer, String>> expected = expectedNonEmptyTimerSets[i];
        Set<TimerHeapInternalTimer<Integer, String>> actualEvent = eventTimeTimers.get(i);
        Set<TimerHeapInternalTimer<Integer, String>> actualProcessing = processingTimeTimers.get(i);
        if (expected == null) {
            Assert.assertTrue(actualEvent.isEmpty());
            Assert.assertTrue(actualProcessing.isEmpty());
        } else {
            Assert.assertEquals(expected, actualEvent);
            Assert.assertEquals(expected, actualProcessing);
        }
    }
}
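The assignToKeyGroup call in the loop above is deterministic: it hashes the key and maps it into [0, maxParallelism). A small standalone sketch of that mapping, plus the key-group-to-subtask step; the wrapper class KeyGroupAssignmentDemo and the parallelism of 4 are illustrative:

import org.apache.flink.runtime.state.KeyGroupRangeAssignment;

public class KeyGroupAssignmentDemo {
    public static void main(String[] args) {
        int maxParallelism = 100; // same as totalNoOfKeyGroups in the test

        for (int key = 0; key < 5; key++) {
            // Deterministic: the same key always lands in the same key group.
            int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism);
            // A key group maps to exactly one subtask at a given parallelism.
            int subtask = KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(maxParallelism, 4, keyGroup);
            System.out.println("key " + key + " -> key group " + keyGroup + " -> subtask " + subtask);
        }
    }
}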
Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.
The class OperatorSnapshotFinalizerTest, method testRunAndExtract.
/**
 * Test that the runnable futures are executed and the result is correctly extracted.
 */
@Test
public void testRunAndExtract() throws Exception {
    Random random = new Random(0x42);
    KeyedStateHandle keyedTemplate = StateHandleDummyUtil.createNewKeyedStateHandle(new KeyGroupRange(0, 0));
    OperatorStateHandle operatorTemplate = StateHandleDummyUtil.createNewOperatorStateHandle(2, random);
    InputChannelStateHandle inputChannelTemplate = StateHandleDummyUtil.createNewInputChannelStateHandle(2, random);
    ResultSubpartitionStateHandle resultSubpartitionTemplate = StateHandleDummyUtil.createNewResultSubpartitionStateHandle(2, random);

    SnapshotResult<KeyedStateHandle> manKeyed = withLocalState(deepDummyCopy(keyedTemplate), deepDummyCopy(keyedTemplate));
    SnapshotResult<KeyedStateHandle> rawKeyed = withLocalState(deepDummyCopy(keyedTemplate), deepDummyCopy(keyedTemplate));
    SnapshotResult<OperatorStateHandle> manOper = withLocalState(deepDummyCopy(operatorTemplate), deepDummyCopy(operatorTemplate));
    SnapshotResult<OperatorStateHandle> rawOper = withLocalState(deepDummyCopy(operatorTemplate), deepDummyCopy(operatorTemplate));
    SnapshotResult<StateObjectCollection<InputChannelStateHandle>> inputChannel =
        withLocalState(singleton(deepDummyCopy(inputChannelTemplate)), singleton(deepDummyCopy(inputChannelTemplate)));
    SnapshotResult<StateObjectCollection<ResultSubpartitionStateHandle>> resultSubpartition =
        withLocalState(singleton(deepDummyCopy(resultSubpartitionTemplate)), singleton(deepDummyCopy(resultSubpartitionTemplate)));

    OperatorSnapshotFutures snapshotFutures = new OperatorSnapshotFutures(
        new PseudoNotDoneFuture<>(manKeyed), new PseudoNotDoneFuture<>(rawKeyed),
        new PseudoNotDoneFuture<>(manOper), new PseudoNotDoneFuture<>(rawOper),
        new PseudoNotDoneFuture<>(inputChannel), new PseudoNotDoneFuture<>(resultSubpartition));
    for (Future<?> f : snapshotFutures.getAllFutures()) {
        assertFalse(f.isDone());
    }

    OperatorSnapshotFinalizer finalizer = new OperatorSnapshotFinalizer(snapshotFutures);
    for (Future<?> f : snapshotFutures.getAllFutures()) {
        assertTrue(f.isDone());
    }

    Map<SnapshotResult<?>, Function<OperatorSubtaskState, ? extends StateObject>> map = new HashMap<>();
    map.put(manKeyed, headExtractor(OperatorSubtaskState::getManagedKeyedState));
    map.put(rawKeyed, headExtractor(OperatorSubtaskState::getRawKeyedState));
    map.put(manOper, headExtractor(OperatorSubtaskState::getManagedOperatorState));
    map.put(rawOper, headExtractor(OperatorSubtaskState::getRawOperatorState));
    map.put(inputChannel, OperatorSubtaskState::getInputChannelState);
    map.put(resultSubpartition, OperatorSubtaskState::getResultSubpartitionState);

    for (Map.Entry<SnapshotResult<?>, Function<OperatorSubtaskState, ? extends StateObject>> e : map.entrySet()) {
        assertEquals(e.getKey().getJobManagerOwnedSnapshot(), e.getValue().apply(finalizer.getJobManagerOwnedState()));
    }
    for (Map.Entry<SnapshotResult<?>, Function<OperatorSubtaskState, ? extends StateObject>> e : map.entrySet()) {
        assertEquals(e.getKey().getTaskLocalSnapshot(), e.getValue().apply(finalizer.getTaskLocalState()));
    }
}
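The withLocalState helper used above pairs a JobManager-owned handle with a task-local duplicate kept for fast local recovery. A minimal sketch of SnapshotResult's two factory shapes, using ByteStreamStateHandle instances as stand-ins for real snapshot handles (the class name SnapshotResultShapes and the handle contents are illustrative):

import org.apache.flink.runtime.state.SnapshotResult;
import org.apache.flink.runtime.state.StreamStateHandle;
import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;

public class SnapshotResultShapes {
    public static void main(String[] args) {
        StreamStateHandle primary = new ByteStreamStateHandle("primary", new byte[] {1, 2, 3});
        StreamStateHandle local = new ByteStreamStateHandle("local", new byte[] {1, 2, 3});

        // Only a JobManager-owned handle; there is no task-local copy.
        SnapshotResult<StreamStateHandle> remoteOnly = SnapshotResult.of(primary);
        System.out.println(remoteOnly.getTaskLocalSnapshot()); // null

        // Both: the JobManager handle plus a task-local duplicate for fast recovery.
        SnapshotResult<StreamStateHandle> withLocal = SnapshotResult.withLocalState(primary, local);
        System.out.println(withLocal.getJobManagerOwnedSnapshot() == primary); // true
        System.out.println(withLocal.getTaskLocalSnapshot() == local);         // true
    }
}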