Use of org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage in project flink by apache.
The class StateInitializationContextImplTest, method setUp().
@Before
public void setUp() throws Exception {
this.writtenKeyGroups = 0;
this.writtenOperatorStates = new HashSet<>();
this.closableRegistry = new CloseableRegistry();
ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos(64);
List<KeyedStateHandle> keyedStateHandles = new ArrayList<>(NUM_HANDLES);
int prev = 0;
for (int i = 0; i < NUM_HANDLES; ++i) {
out.reset();
int size = i % 4;
int end = prev + size;
DataOutputView dov = new DataOutputViewStreamWrapper(out);
KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(i == 9 ? KeyGroupRange.EMPTY_KEY_GROUP_RANGE : new KeyGroupRange(prev, end));
prev = end + 1;
for (int kg : offsets.getKeyGroupRange()) {
offsets.setKeyGroupOffset(kg, out.getPosition());
dov.writeInt(kg);
++writtenKeyGroups;
}
KeyedStateHandle handle = new KeyGroupsStateHandle(offsets, new ByteStateHandleCloseChecking("kg-" + i, out.toByteArray()));
keyedStateHandles.add(handle);
}
List<OperatorStateHandle> operatorStateHandles = new ArrayList<>(NUM_HANDLES);
for (int i = 0; i < NUM_HANDLES; ++i) {
int size = i % 4;
out.reset();
DataOutputView dov = new DataOutputViewStreamWrapper(out);
LongArrayList offsets = new LongArrayList(size);
for (int s = 0; s < size; ++s) {
offsets.add(out.getPosition());
int val = i * NUM_HANDLES + s;
dov.writeInt(val);
writtenOperatorStates.add(val);
}
Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>();
offsetsMap.put(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME, new OperatorStateHandle.StateMetaInfo(offsets.toArray(), OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
OperatorStateHandle operatorStateHandle = new OperatorStreamStateHandle(offsetsMap, new ByteStateHandleCloseChecking("os-" + i, out.toByteArray()));
operatorStateHandles.add(operatorStateHandle);
}
OperatorSubtaskState operatorSubtaskState = OperatorSubtaskState.builder().setRawOperatorState(new StateObjectCollection<>(operatorStateHandles)).setRawKeyedState(new StateObjectCollection<>(keyedStateHandles)).build();
OperatorID operatorID = new OperatorID();
TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot();
taskStateSnapshot.putSubtaskStateByOperatorID(operatorID, operatorSubtaskState);
JobManagerTaskRestore jobManagerTaskRestore = new JobManagerTaskRestore(0L, taskStateSnapshot);
TaskStateManager manager = new TaskStateManagerImpl(new JobID(), new ExecutionAttemptID(), new TestTaskLocalStateStore(), new InMemoryStateChangelogStorage(), jobManagerTaskRestore, mock(CheckpointResponder.class));
DummyEnvironment environment = new DummyEnvironment("test", 1, 0, prev);
environment.setTaskStateManager(manager);
StateBackend stateBackend = new MemoryStateBackend(1024);
StreamTaskStateInitializer streamTaskStateManager = new StreamTaskStateInitializerImpl(environment, stateBackend, TtlTimeProvider.DEFAULT, new InternalTimeServiceManager.Provider() {
@Override
public <K> InternalTimeServiceManager<K> create(CheckpointableKeyedStateBackend<K> keyedStatedBackend, ClassLoader userClassloader, KeyContext keyContext, ProcessingTimeService processingTimeService, Iterable<KeyGroupStatePartitionStreamProvider> rawKeyedStates) throws Exception {
// return null: this test does not need a timer service manager and wants the raw keyed state stream left unconsumed.
return null;
}
});
AbstractStreamOperator<?> mockOperator = mock(AbstractStreamOperator.class);
when(mockOperator.getOperatorID()).thenReturn(operatorID);
StreamOperatorStateContext stateContext = streamTaskStateManager.streamOperatorStateContext(operatorID, "TestOperatorClass", mock(ProcessingTimeService.class), mockOperator,
IntSerializer.INSTANCE, // key serializer, consumed by the timer service
closableRegistry, new UnregisteredMetricsGroup(), 1.0, false);
OptionalLong restoredCheckpointId = stateContext.getRestoredCheckpointId();
this.initializationContext = new StateInitializationContextImpl(restoredCheckpointId.isPresent() ? restoredCheckpointId.getAsLong() : null, stateContext.operatorStateBackend(), mock(KeyedStateStore.class), stateContext.rawKeyedStateInputs(), stateContext.rawOperatorStateInputs());
}
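The setUp() above writes exactly one int, the key-group id, at each key-group offset of the raw keyed state handles. A minimal sketch of how a test method could read that data back through the context built above; the helper name readKeyGroups and the assertion are illustrative and not part of the original listing:

    private int readKeyGroups(StateInitializationContext context) throws IOException {
        int count = 0;
        for (KeyGroupStatePartitionStreamProvider provider : context.getRawKeyedStateInputs()) {
            DataInputView in = new DataInputViewStreamWrapper(provider.getStream());
            // setUp() wrote the key-group id itself at each key-group offset
            Assert.assertEquals(provider.getKeyGroupId(), in.readInt());
            ++count;
        }
        return count;
    }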
Use of org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage in project flink by apache.
The class StreamTaskStateInitializerImplTest, method streamTaskStateManager().
private StreamTaskStateInitializer streamTaskStateManager(StateBackend stateBackend, JobManagerTaskRestore jobManagerTaskRestore, boolean createTimerServiceManager) {
JobID jobID = new JobID(42L, 43L);
ExecutionAttemptID executionAttemptID = new ExecutionAttemptID();
TestCheckpointResponder checkpointResponderMock = new TestCheckpointResponder();
TaskLocalStateStore taskLocalStateStore = new TestTaskLocalStateStore();
InMemoryStateChangelogStorage changelogStorage = new InMemoryStateChangelogStorage();
TaskStateManager taskStateManager = TaskStateManagerImplTest.taskStateManager(jobID, executionAttemptID, checkpointResponderMock, jobManagerTaskRestore, taskLocalStateStore, changelogStorage);
DummyEnvironment dummyEnvironment = new DummyEnvironment("test-task", 1, 0);
dummyEnvironment.setTaskStateManager(taskStateManager);
if (createTimerServiceManager) {
return new StreamTaskStateInitializerImpl(dummyEnvironment, stateBackend);
} else {
return new StreamTaskStateInitializerImpl(dummyEnvironment, stateBackend, TtlTimeProvider.DEFAULT, new InternalTimeServiceManager.Provider() {
@Override
public <K> InternalTimeServiceManager<K> create(CheckpointableKeyedStateBackend<K> keyedStatedBackend, ClassLoader userClassloader, KeyContext keyContext, ProcessingTimeService processingTimeService, Iterable<KeyGroupStatePartitionStreamProvider> rawKeyedStates) throws Exception {
return null;
}
});
}
}
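A hedged usage sketch, not part of the original listing, of how this factory could be invoked: build an initializer without a restore and ask it for an operator state context, mirroring the call pattern from the first example above. The operator mock and the operator class name are illustrative.

    StreamTaskStateInitializer initializer =
            streamTaskStateManager(new MemoryStateBackend(1024), null, true);

    OperatorID operatorID = new OperatorID();
    AbstractStreamOperator<?> operator = mock(AbstractStreamOperator.class);
    when(operator.getOperatorID()).thenReturn(operatorID);

    StreamOperatorStateContext context =
            initializer.streamOperatorStateContext(
                    operatorID,
                    "SomeOperatorClass", // illustrative operator class name
                    mock(ProcessingTimeService.class),
                    operator,
                    IntSerializer.INSTANCE,
                    new CloseableRegistry(),
                    new UnregisteredMetricsGroup(),
                    1.0,
                    false);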
Use of org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage in project flink by apache.
The class TaskStateManagerImplTest, method testForwardingSubtaskLocalStateBaseDirFromLocalStateStore().
/**
* This tests if the {@link TaskStateManager} properly returns the subtask local state dir from
* the corresponding {@link TaskLocalStateStoreImpl}.
*/
@Test
public void testForwardingSubtaskLocalStateBaseDirFromLocalStateStore() throws IOException {
JobID jobID = new JobID(42L, 43L);
AllocationID allocationID = new AllocationID(4711L, 23L);
JobVertexID jobVertexID = new JobVertexID(12L, 34L);
ExecutionAttemptID executionAttemptID = new ExecutionAttemptID();
TestCheckpointResponder checkpointResponderMock = new TestCheckpointResponder();
Executor directExecutor = Executors.directExecutor();
TemporaryFolder tmpFolder = new TemporaryFolder();
try {
tmpFolder.create();
File[] allocBaseDirs = new File[] { tmpFolder.newFolder(), tmpFolder.newFolder(), tmpFolder.newFolder() };
LocalRecoveryDirectoryProviderImpl directoryProvider = new LocalRecoveryDirectoryProviderImpl(allocBaseDirs, jobID, jobVertexID, 0);
LocalRecoveryConfig localRecoveryConfig = new LocalRecoveryConfig(directoryProvider);
TaskLocalStateStore taskLocalStateStore = new TaskLocalStateStoreImpl(jobID, allocationID, jobVertexID, 13, localRecoveryConfig, directExecutor);
InMemoryStateChangelogStorage changelogStorage = new InMemoryStateChangelogStorage();
TaskStateManager taskStateManager = taskStateManager(jobID, executionAttemptID, checkpointResponderMock, null, taskLocalStateStore, changelogStorage);
LocalRecoveryConfig localRecoveryConfFromTaskLocalStateStore = taskLocalStateStore.getLocalRecoveryConfig();
LocalRecoveryConfig localRecoveryConfFromTaskStateManager = taskStateManager.createLocalRecoveryConfig();
for (int i = 0; i < 10; ++i) {
Assert.assertEquals(allocBaseDirs[i % allocBaseDirs.length], localRecoveryConfFromTaskLocalStateStore.getLocalStateDirectoryProvider().get().allocationBaseDirectory(i));
Assert.assertEquals(allocBaseDirs[i % allocBaseDirs.length], localRecoveryConfFromTaskStateManager.getLocalStateDirectoryProvider().get().allocationBaseDirectory(i));
}
Assert.assertEquals(localRecoveryConfFromTaskLocalStateStore.isLocalRecoveryEnabled(), localRecoveryConfFromTaskStateManager.isLocalRecoveryEnabled());
} finally {
tmpFolder.delete();
}
}
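The taskStateManager(...) helper used above belongs to this test class. A hedged sketch of the direct construction it is assumed to wrap, matching the TaskStateManagerImpl constructor used elsewhere in these examples (job id, execution attempt, local state store, changelog storage, restore, responder):

    TaskStateManager taskStateManager =
            new TaskStateManagerImpl(
                    jobID,
                    executionAttemptID,
                    taskLocalStateStore,
                    changelogStorage,
                    null, // no JobManagerTaskRestore: this test starts without restored state
                    checkpointResponderMock);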
Use of org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage in project flink by apache.
The class LocalStateForwardingTest, method testReportingFromTaskStateManagerToResponderAndTaskLocalStateStore().
/**
* This tests that state that was reported to the {@link
* org.apache.flink.runtime.state.TaskStateManager} is also reported to {@link
* org.apache.flink.runtime.taskmanager.CheckpointResponder} and {@link
* TaskLocalStateStoreImpl}.
*/
@Test
public void testReportingFromTaskStateManagerToResponderAndTaskLocalStateStore() throws Exception {
final JobID jobID = new JobID();
final AllocationID allocationID = new AllocationID();
final ExecutionAttemptID executionAttemptID = new ExecutionAttemptID();
final CheckpointMetaData checkpointMetaData = new CheckpointMetaData(42L, 4711L);
final CheckpointMetrics checkpointMetrics = new CheckpointMetrics();
final int subtaskIdx = 42;
JobVertexID jobVertexID = new JobVertexID();
TaskStateSnapshot jmSnapshot = new TaskStateSnapshot();
TaskStateSnapshot tmSnapshot = new TaskStateSnapshot();
final AtomicBoolean jmReported = new AtomicBoolean(false);
final AtomicBoolean tmReported = new AtomicBoolean(false);
TestCheckpointResponder checkpointResponder = new TestCheckpointResponder() {
@Override
public void acknowledgeCheckpoint(JobID lJobID, ExecutionAttemptID lExecutionAttemptID, long lCheckpointId, CheckpointMetrics lCheckpointMetrics, TaskStateSnapshot lSubtaskState) {
Assert.assertEquals(jobID, lJobID);
Assert.assertEquals(executionAttemptID, lExecutionAttemptID);
Assert.assertEquals(checkpointMetaData.getCheckpointId(), lCheckpointId);
Assert.assertEquals(checkpointMetrics, lCheckpointMetrics);
jmReported.set(true);
}
};
Executor executor = Executors.directExecutor();
LocalRecoveryDirectoryProviderImpl directoryProvider = new LocalRecoveryDirectoryProviderImpl(temporaryFolder.newFolder(), jobID, jobVertexID, subtaskIdx);
LocalRecoveryConfig localRecoveryConfig = new LocalRecoveryConfig(directoryProvider);
TaskLocalStateStore taskLocalStateStore = new TaskLocalStateStoreImpl(jobID, allocationID, jobVertexID, subtaskIdx, localRecoveryConfig, executor) {
@Override
public void storeLocalState(@Nonnegative long checkpointId, @Nullable TaskStateSnapshot localState) {
Assert.assertEquals(tmSnapshot, localState);
tmReported.set(true);
}
};
StateChangelogStorage<?> stateChangelogStorage = new InMemoryStateChangelogStorage();
TaskStateManagerImpl taskStateManager = new TaskStateManagerImpl(jobID, executionAttemptID, taskLocalStateStore, stateChangelogStorage, null, checkpointResponder);
taskStateManager.reportTaskStateSnapshots(checkpointMetaData, checkpointMetrics, jmSnapshot, tmSnapshot);
Assert.assertTrue("Reporting for JM state was not called.", jmReported.get());
Assert.assertTrue("Reporting for TM state was not called.", tmReported.get());
}
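A hedged sketch, not part of the original listing, of the forwarding that reportTaskStateSnapshots is expected to perform and that the two overrides above observe: the TM-local snapshot is assumed to go to the local state store, and the JM snapshot to the checkpoint responder.

    // rough sketch of the forwarding inside TaskStateManagerImpl.reportTaskStateSnapshots (names illustrative)
    long checkpointId = checkpointMetaData.getCheckpointId();
    localStateStore.storeLocalState(checkpointId, tmSnapshot);      // observed via tmReported in the test
    checkpointResponder.acknowledgeCheckpoint(
            jobID, executionAttemptID, checkpointId, checkpointMetrics, jmSnapshot); // observed via jmReported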
Use of org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage in project flink by apache.
The class RocksDBAsyncSnapshotTest, method testFullyAsyncSnapshot().
/**
* This ensures that asynchronous state handles are actually materialized asynchronously.
*
* <p>We use latches to block at various stages and see if the code still continues through the
* parts that are not asynchronous. If the checkpoint is not done asynchronously the test will
* simply lock forever.
*/
@Test
public void testFullyAsyncSnapshot() throws Exception {
final OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>(OneInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
testHarness.setupOutputForSingletonOperatorChain();
testHarness.configureForKeyedStream(new KeySelector<String, String>() {
@Override
public String getKey(String value) throws Exception {
return value;
}
}, BasicTypeInfo.STRING_TYPE_INFO);
StreamConfig streamConfig = testHarness.getStreamConfig();
File dbDir = temporaryFolder.newFolder();
RocksDBStateBackend backend = new RocksDBStateBackend(new MemoryStateBackend());
backend.setDbStoragePath(dbDir.getAbsolutePath());
streamConfig.setStateBackend(backend);
streamConfig.setStreamOperator(new AsyncCheckpointOperator());
streamConfig.setOperatorID(new OperatorID());
final OneShotLatch delayCheckpointLatch = new OneShotLatch();
final OneShotLatch ensureCheckpointLatch = new OneShotLatch();
CheckpointResponder checkpointResponderMock = new CheckpointResponder() {
@Override
public void acknowledgeCheckpoint(JobID jobID, ExecutionAttemptID executionAttemptID, long checkpointId, CheckpointMetrics checkpointMetrics, TaskStateSnapshot subtaskState) {
// block here, so the test can verify that triggerCheckpointAsync below returns even though the async part of the checkpoint has not finished
try {
delayCheckpointLatch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
boolean hasManagedKeyedState = false;
for (Map.Entry<OperatorID, OperatorSubtaskState> entry : subtaskState.getSubtaskStateMappings()) {
OperatorSubtaskState state = entry.getValue();
if (state != null) {
hasManagedKeyedState |= state.getManagedKeyedState() != null;
}
}
// should be one k/v state
assertTrue(hasManagedKeyedState);
// we now know that the checkpoint went through
ensureCheckpointLatch.trigger();
}
@Override
public void reportCheckpointMetrics(JobID jobID, ExecutionAttemptID executionAttemptID, long checkpointId, CheckpointMetrics checkpointMetrics) {
}
@Override
public void declineCheckpoint(JobID jobID, ExecutionAttemptID executionAttemptID, long checkpointId, CheckpointException checkpointException) {
}
};
JobID jobID = new JobID();
ExecutionAttemptID executionAttemptID = new ExecutionAttemptID();
TestTaskStateManager taskStateManagerTestMock = new TestTaskStateManager(jobID, executionAttemptID, checkpointResponderMock, TestLocalRecoveryConfig.disabled(), new InMemoryStateChangelogStorage(), new HashMap<>(), -1L, new OneShotLatch());
StreamMockEnvironment mockEnv = new StreamMockEnvironment(testHarness.jobConfig, testHarness.taskConfig, testHarness.memorySize, new MockInputSplitProvider(), testHarness.bufferSize, taskStateManagerTestMock);
AtomicReference<Throwable> errorRef = new AtomicReference<>();
mockEnv.setExternalExceptionHandler(errorRef::set);
testHarness.invoke(mockEnv);
testHarness.waitForTaskRunning();
final OneInputStreamTask<String, String> task = testHarness.getTask();
task.triggerCheckpointAsync(new CheckpointMetaData(42, 17), CheckpointOptions.forCheckpointWithDefaultLocation()).get();
testHarness.processElement(new StreamRecord<>("Wohoo", 0));
// now we allow the checkpoint
delayCheckpointLatch.trigger();
// wait for the checkpoint to go through
ensureCheckpointLatch.await();
testHarness.endInput();
ExecutorService threadPool = task.getAsyncOperationsThreadPool();
threadPool.shutdown();
Assert.assertTrue(threadPool.awaitTermination(60_000, TimeUnit.MILLISECONDS));
testHarness.waitForTaskCompletion();
if (errorRef.get() != null) {
fail("Unexpected exception during execution.");
}
}
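The coordination in this test rests on two OneShotLatch instances. A hedged, stripped-down sketch of that handshake; the thread and variable names are illustrative, not from the original test:

    OneShotLatch delayLatch = new OneShotLatch();
    OneShotLatch doneLatch = new OneShotLatch();

    Thread acknowledger = new Thread(() -> {
        try {
            delayLatch.await();  // block until the main thread has proven the trigger call returned
            doneLatch.trigger(); // signal that the acknowledgement (the "checkpoint") went through
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    });
    acknowledger.start();

    // main thread: triggerCheckpointAsync(...) has already returned at this point
    delayLatch.trigger(); // allow the acknowledgement to proceed
    doneLatch.await();    // wait until the checkpoint is confirmed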