Use of org.apache.samza.checkpoint.CheckpointId in project samza by apache.
Class TestTransactionalStateTaskRestoreManager, method testGetStoreActionsForLoggedPersistentStore_FullRestoreIfNullCheckpointedOffsetAndRetainExistingChangelogState.
/**
* We need to trim the changelog topic to handle the scenario where the container wrote some messages to the store
* and changelog, but died before the first commit (leaving the checkpointed changelog offset as null).
*
* The retain-existing-state flag exists to support cases where a user is turning on transactional support for the
* first time and does not have an existing checkpointed changelog offset. The flag allows them to carry over the
* existing changelog state after a full bootstrap, and should be turned off after the first deploy.
*/
@Test
public void testGetStoreActionsForLoggedPersistentStore_FullRestoreIfNullCheckpointedOffsetAndRetainExistingChangelogState() {
TaskModel mockTaskModel = mock(TaskModel.class);
TaskName taskName = new TaskName("Partition 0");
when(mockTaskModel.getTaskName()).thenReturn(taskName);
Partition taskChangelogPartition = new Partition(0);
when(mockTaskModel.getChangelogPartition()).thenReturn(taskChangelogPartition);
String store1Name = "store1";
StorageEngine store1Engine = mock(StorageEngine.class);
StoreProperties mockStore1Properties = mock(StoreProperties.class);
when(store1Engine.getStoreProperties()).thenReturn(mockStore1Properties);
when(mockStore1Properties.isLoggedStore()).thenReturn(true);
when(mockStore1Properties.isPersistedToDisk()).thenReturn(true);
Map<String, StorageEngine> mockStoreEngines = ImmutableMap.of(store1Name, store1Engine);
String changelog1SystemName = "system1";
String changelog1StreamName = "store1Changelog";
SystemStream changelog1SystemStream = new SystemStream(changelog1SystemName, changelog1StreamName);
SystemStreamPartition changelog1SSP = new SystemStreamPartition(changelog1SystemStream, taskChangelogPartition);
SystemStreamPartitionMetadata changelog1SSPMetadata = new SystemStreamPartitionMetadata("0", "10", "11");
Map<String, SystemStream> mockStoreChangelogs = ImmutableMap.of(store1Name, changelog1SystemStream);
String changelog1CheckpointedOffset = null;
CheckpointId checkpointId = CheckpointId.create();
KafkaStateCheckpointMarker kafkaStateCheckpointMarker = new KafkaStateCheckpointMarker(changelog1SSP, changelog1CheckpointedOffset);
Map<String, KafkaStateCheckpointMarker> mockCheckpointedChangelogOffset = new HashMap<String, KafkaStateCheckpointMarker>() {
{
put(store1Name, kafkaStateCheckpointMarker);
}
};
Map<SystemStreamPartition, SystemStreamPartitionMetadata> mockCurrentChangelogOffsets = ImmutableMap.of(changelog1SSP, changelog1SSPMetadata);
SystemAdmins mockSystemAdmins = mock(SystemAdmins.class);
SystemAdmin mockSystemAdmin = mock(SystemAdmin.class);
when(mockSystemAdmins.getSystemAdmin(changelog1SSP.getSystem())).thenReturn(mockSystemAdmin);
StorageManagerUtil mockStorageManagerUtil = mock(StorageManagerUtil.class);
File mockLoggedStoreBaseDir = mock(File.class);
File mockNonLoggedStoreBaseDir = mock(File.class);
HashMap<String, String> configMap = new HashMap<>();
configMap.put(TaskConfig.TRANSACTIONAL_STATE_RETAIN_EXISTING_STATE, "true");
Config mockConfig = new MapConfig(configMap);
Clock mockClock = mock(Clock.class);
File mockCurrentStoreDir = mock(File.class);
File mockStoreNewerCheckpointDir = mock(File.class);
File mockStoreOlderCheckpointDir = mock(File.class);
String olderCheckpointDirLocalOffset = "3";
String newerCheckpointDirLocalOffset = "5";
when(mockStorageManagerUtil.getTaskStoreDir(eq(mockLoggedStoreBaseDir), eq(store1Name), eq(taskName), any())).thenReturn(mockCurrentStoreDir);
when(mockStorageManagerUtil.getTaskStoreCheckpointDirs(eq(mockLoggedStoreBaseDir), eq(store1Name), eq(taskName), any())).thenReturn(ImmutableList.of(mockStoreNewerCheckpointDir, mockStoreOlderCheckpointDir));
when(mockStorageManagerUtil.isLoggedStoreValid(eq(store1Name), eq(mockStoreNewerCheckpointDir), any(), eq(mockStoreChangelogs), eq(mockTaskModel), any(), eq(mockStoreEngines))).thenReturn(true);
when(mockStorageManagerUtil.isLoggedStoreValid(eq(store1Name), eq(mockStoreOlderCheckpointDir), any(), eq(mockStoreChangelogs), eq(mockTaskModel), any(), eq(mockStoreEngines))).thenReturn(true);
Set<SystemStreamPartition> mockChangelogSSPs = ImmutableSet.of(changelog1SSP);
when(mockStorageManagerUtil.readOffsetFile(eq(mockStoreNewerCheckpointDir), eq(mockChangelogSSPs), eq(false))).thenReturn(ImmutableMap.of(changelog1SSP, newerCheckpointDirLocalOffset));
// older checkpoint dir offset (3) is less than the newer checkpoint dir offset (5)
when(mockStorageManagerUtil.readOffsetFile(eq(mockStoreOlderCheckpointDir), eq(mockChangelogSSPs), eq(false))).thenReturn(ImmutableMap.of(changelog1SSP, olderCheckpointDirLocalOffset));
Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString())).thenAnswer((Answer<Integer>) invocation -> {
String offset1 = (String) invocation.getArguments()[0];
String offset2 = (String) invocation.getArguments()[1];
return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
});
StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset, checkpointId, mockCurrentChangelogOffsets, mockSystemAdmins, mockStorageManagerUtil, mockLoggedStoreBaseDir, mockNonLoggedStoreBaseDir, mockConfig, mockClock);
// ensure that all the store dirs (current or checkpoint) are marked for deletion
assertEquals(3, storeActions.storeDirsToDelete.get(store1Name).size());
assertTrue(storeActions.storeDirsToDelete.get(store1Name).contains(mockCurrentStoreDir));
assertTrue(storeActions.storeDirsToDelete.get(store1Name).contains(mockStoreOlderCheckpointDir));
assertTrue(storeActions.storeDirsToDelete.get(store1Name).contains(mockStoreNewerCheckpointDir));
// ensure that no directories are retained
assertEquals(0, storeActions.storeDirsToRetain.size());
// ensure that we mark the store for full restore (from oldest to newest)
assertEquals("0", storeActions.storesToRestore.get(store1Name).startingOffset);
assertEquals("10", storeActions.storesToRestore.get(store1Name).endingOffset);
}
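This test and the following one stub SystemAdmin#offsetComparator with the same inline lambda that parses changelog offsets as longs. As a minimal sketch, assuming a shared test helper is acceptable (the class and method names below are illustrative and not part of the Samza test suite), that stub could be extracted once and reused:

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

final class NumericOffsetComparators {
  // Answer for stubbing SystemAdmin#offsetComparator(String, String): parses both offsets
  // as longs and compares them numerically, exactly like the inline stub in the test above.
  static Answer<Integer> numeric() {
    return (InvocationOnMock invocation) -> {
      String offset1 = (String) invocation.getArguments()[0];
      String offset2 = (String) invocation.getArguments()[1];
      return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
    };
  }
}

Each test would then stub the comparator with when(mockSystemAdmin.offsetComparator(anyString(), anyString())).thenAnswer(NumericOffsetComparators.numeric());.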
Use of org.apache.samza.checkpoint.CheckpointId in project samza by apache.
Class TestTransactionalStateTaskRestoreManager, method testGetStoreActionsForLoggedNonPersistentStore_FullTrimIfNullCheckpointedOffsetAndNotRetainExistingChangelogState.
/**
* We need to trim the changelog topic to handle the scenario where the container wrote some messages to the store
* and changelog, but died before the first commit (leaving the checkpointed changelog offset as null).
*
* The retain-existing-state flag exists to support cases where a user is turning on transactional support for the
* first time and does not have an existing checkpointed changelog offset. The flag allows them to carry over the
* existing changelog state after a full bootstrap, and should be turned off after the first deploy.
*/
@Test
public void testGetStoreActionsForLoggedNonPersistentStore_FullTrimIfNullCheckpointedOffsetAndNotRetainExistingChangelogState() {
TaskModel mockTaskModel = mock(TaskModel.class);
TaskName taskName = new TaskName("Partition 0");
when(mockTaskModel.getTaskName()).thenReturn(taskName);
Partition taskChangelogPartition = new Partition(0);
when(mockTaskModel.getChangelogPartition()).thenReturn(taskChangelogPartition);
String store1Name = "store1";
StorageEngine store1Engine = mock(StorageEngine.class);
StoreProperties mockStore1Properties = mock(StoreProperties.class);
when(store1Engine.getStoreProperties()).thenReturn(mockStore1Properties);
when(mockStore1Properties.isLoggedStore()).thenReturn(true);
// non-persistent store
when(mockStore1Properties.isPersistedToDisk()).thenReturn(false);
Map<String, StorageEngine> mockStoreEngines = ImmutableMap.of(store1Name, store1Engine);
String changelog1SystemName = "system1";
String changelog1StreamName = "store1Changelog";
SystemStream changelog1SystemStream = new SystemStream(changelog1SystemName, changelog1StreamName);
SystemStreamPartition changelog1SSP = new SystemStreamPartition(changelog1SystemStream, taskChangelogPartition);
SystemStreamPartitionMetadata changelog1SSPMetadata = new SystemStreamPartitionMetadata("0", "10", "11");
Map<String, SystemStream> mockStoreChangelogs = ImmutableMap.of(store1Name, changelog1SystemStream);
String changelog1CheckpointedOffset = null;
CheckpointId checkpointId = CheckpointId.create();
KafkaStateCheckpointMarker kafkaStateCheckpointMarker = new KafkaStateCheckpointMarker(changelog1SSP, changelog1CheckpointedOffset);
Map<String, KafkaStateCheckpointMarker> mockCheckpointedChangelogOffset = new HashMap<String, KafkaStateCheckpointMarker>() {
{
put(store1Name, kafkaStateCheckpointMarker);
}
};
Map<SystemStreamPartition, SystemStreamPartitionMetadata> mockCurrentChangelogOffsets = ImmutableMap.of(changelog1SSP, changelog1SSPMetadata);
SystemAdmins mockSystemAdmins = mock(SystemAdmins.class);
SystemAdmin mockSystemAdmin = mock(SystemAdmin.class);
when(mockSystemAdmins.getSystemAdmin(changelog1SSP.getSystem())).thenReturn(mockSystemAdmin);
StorageManagerUtil mockStorageManagerUtil = mock(StorageManagerUtil.class);
File mockLoggedStoreBaseDir = mock(File.class);
File mockNonLoggedStoreBaseDir = mock(File.class);
HashMap<String, String> configMap = new HashMap<>();
configMap.put(TaskConfig.TRANSACTIONAL_STATE_RETAIN_EXISTING_STATE, "false");
Config mockConfig = new MapConfig(configMap);
Clock mockClock = mock(Clock.class);
Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString())).thenAnswer((Answer<Integer>) invocation -> {
String offset1 = (String) invocation.getArguments()[0];
String offset2 = (String) invocation.getArguments()[1];
return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
});
StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset, checkpointId, mockCurrentChangelogOffsets, mockSystemAdmins, mockStorageManagerUtil, mockLoggedStoreBaseDir, mockNonLoggedStoreBaseDir, mockConfig, mockClock);
// ensure that there is nothing to delete or retain
assertEquals(0, storeActions.storeDirsToDelete.size());
assertEquals(0, storeActions.storeDirsToRetain.size());
// ensure that we mark the store for restore (full trim == restore from oldest to null)
assertEquals("0", storeActions.storesToRestore.get(store1Name).startingOffset);
assertNull(storeActions.storesToRestore.get(store1Name).endingOffset);
}
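The only substantive differences from the previous test are that the store is not persisted to disk and that TaskConfig.TRANSACTIONAL_STATE_RETAIN_EXISTING_STATE is set to "false", which is why the outcome flips from a full restore (ending offset "10") to a full trim (null ending offset). As an illustrative sketch (the helper class and method name are hypothetical), the Config construction that both tests repeat could be centralized:

import java.util.HashMap;
import org.apache.samza.config.Config;
import org.apache.samza.config.MapConfig;
import org.apache.samza.config.TaskConfig;

final class RetainStateConfigs {
  // Builds the minimal Config these tests depend on: only the retain-existing-state flag is set.
  static Config withRetainExistingState(boolean retain) {
    HashMap<String, String> configMap = new HashMap<>();
    configMap.put(TaskConfig.TRANSACTIONAL_STATE_RETAIN_EXISTING_STATE, String.valueOf(retain));
    return new MapConfig(configMap);
  }
}

With that helper, the previous test would pass withRetainExistingState(true) and this one withRetainExistingState(false).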
Use of org.apache.samza.checkpoint.CheckpointId in project samza by apache.
Class TestTaskStorageCommitManager, method testPersistToFileSystemCheckpointV2Only.
@Test
public void testPersistToFileSystemCheckpointV2Only() throws IOException {
ContainerStorageManager containerStorageManager = mock(ContainerStorageManager.class);
StorageEngine mockLPStore = mock(StorageEngine.class);
StoreProperties lpStoreProps = mock(StoreProperties.class);
when(mockLPStore.getStoreProperties()).thenReturn(lpStoreProps);
when(lpStoreProps.isPersistedToDisk()).thenReturn(true);
when(lpStoreProps.isDurableStore()).thenReturn(true);
Path mockPath = mock(Path.class);
when(mockLPStore.checkpoint(any())).thenReturn(Optional.of(mockPath));
StorageEngine mockPStore = mock(StorageEngine.class);
StoreProperties pStoreProps = mock(StoreProperties.class);
when(mockPStore.getStoreProperties()).thenReturn(pStoreProps);
when(pStoreProps.isPersistedToDisk()).thenReturn(true);
when(pStoreProps.isDurableStore()).thenReturn(false);
StorageEngine mockLIStore = mock(StorageEngine.class);
StoreProperties liStoreProps = mock(StoreProperties.class);
when(mockLIStore.getStoreProperties()).thenReturn(liStoreProps);
when(liStoreProps.isPersistedToDisk()).thenReturn(false);
when(liStoreProps.isDurableStore()).thenReturn(true);
StorageEngine mockIStore = mock(StorageEngine.class);
StoreProperties iStoreProps = mock(StoreProperties.class);
when(mockIStore.getStoreProperties()).thenReturn(iStoreProps);
when(iStoreProps.isPersistedToDisk()).thenReturn(false);
when(iStoreProps.isDurableStore()).thenReturn(false);
java.util.Map<String, StorageEngine> taskStores = ImmutableMap.of("loggedPersistentStore", mockLPStore, "persistentStore", mockPStore, "loggedInMemStore", mockLIStore, "inMemStore", mockIStore);
Partition changelogPartition = new Partition(0);
SystemStream changelogSystemStream = new SystemStream("changelogSystem", "changelogStream");
SystemStreamPartition changelogSSP = new SystemStreamPartition(changelogSystemStream, changelogPartition);
java.util.Map<String, SystemStream> storeChangelogsStreams = ImmutableMap.of("loggedPersistentStore", changelogSystemStream, "loggedInMemStore", new SystemStream("system", "stream"));
StorageManagerUtil storageManagerUtil = mock(StorageManagerUtil.class);
File durableStoreDir = new File("durableStorePath");
when(storageManagerUtil.getTaskStoreDir(eq(durableStoreDir), eq("loggedPersistentStore"), any(), any())).thenReturn(durableStoreDir);
TaskName taskName = new TaskName("task");
TaskInstanceMetrics metrics = mock(TaskInstanceMetrics.class);
Timer checkpointTimer = mock(Timer.class);
when(metrics.storeCheckpointNs()).thenReturn(checkpointTimer);
when(containerStorageManager.getAllStores(taskName)).thenReturn(taskStores);
TaskStorageCommitManager commitManager = spy(new TaskStorageCommitManager(taskName, Collections.emptyMap(), containerStorageManager, storeChangelogsStreams, changelogPartition, null, null, ForkJoinPool.commonPool(), storageManagerUtil, durableStoreDir, metrics));
doNothing().when(commitManager).writeChangelogOffsetFile(any(), any(), any(), any());
when(storageManagerUtil.getStoreCheckpointDir(any(File.class), any(CheckpointId.class))).thenAnswer((Answer<String>) invocation -> {
File file = invocation.getArgumentAt(0, File.class);
CheckpointId checkpointId = invocation.getArgumentAt(1, CheckpointId.class);
return file + "-" + checkpointId;
});
CheckpointId newCheckpointId = CheckpointId.create();
java.util.Map<String, String> storeSCM = ImmutableMap.of("loggedPersistentStore", "system;loggedPersistentStoreStream;1", "persistentStore", "system;persistentStoreStream;1", "loggedInMemStore", "system;loggedInMemStoreStream;1", "inMemStore", "system;inMemStoreStream;1");
CheckpointV2 checkpoint = new CheckpointV2(newCheckpointId, Collections.emptyMap(), Collections.singletonMap("factory", storeSCM));
commitManager.init();
// invoke persist to file system
commitManager.writeCheckpointToStoreDirectories(checkpoint);
// Validate only durable and persisted stores are persisted
verify(storageManagerUtil).getTaskStoreDir(eq(durableStoreDir), eq("loggedPersistentStore"), eq(taskName), any());
File checkpointPath = Paths.get(storageManagerUtil.getStoreCheckpointDir(durableStoreDir, newCheckpointId)).toFile();
verify(storageManagerUtil).writeCheckpointV2File(eq(checkpointPath), eq(checkpoint));
}
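The four mocked stores cover every combination of isDurableStore and isPersistedToDisk, and the verifications only touch "loggedPersistentStore", the store that is both. A minimal sketch of the eligibility rule this test exercises (the helper class is hypothetical, and the StoreProperties import path is assumed to be the Samza storage package):

import org.apache.samza.storage.StoreProperties;

final class CheckpointEligibility {
  // Mirrors what the test verifies: a CheckpointV2 file is written into a local store
  // directory only for stores that are both durable and persisted to disk.
  static boolean writesLocalCheckpoint(StoreProperties properties) {
    return properties.isDurableStore() && properties.isPersistedToDisk();
  }
}

Under this rule, mockPStore (persistent but not durable), mockLIStore (durable but in-memory), and mockIStore (neither) are all skipped, which is why no interactions are verified for them.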
Use of org.apache.samza.checkpoint.CheckpointId in project samza by apache.
Class TestBlobStoreBackupManager, method testUploadWithPreviousCheckpoints.
@Test
public void testUploadWithPreviousCheckpoints() throws IOException {
// Track directory for post cleanup
List<String> checkpointDirsToClean = new ArrayList<>();
// Setup: init backup manager with previous checkpoints
// indexBlobIdAndLocalRemoteSnapshotsPair = setupRemoteAndLocalSnapshots(true);
// map of store name to previous snapshot index blob id
Map<String, String> previousCheckpoints = indexBlobIdAndLocalRemoteSnapshotsPair.entrySet().stream().collect(Collectors.toMap(e -> e.getValue().getLeft(), e -> e.getValue().getRight().getPrevSnapshotIndexBlobId().get()));
Checkpoint checkpoint = new CheckpointV2(checkpointId, new HashMap<>(), ImmutableMap.of(BlobStoreStateBackendFactory.class.getName(), previousCheckpoints));
when(blobStoreUtil.getStoreSnapshotIndexes(anyString(), anyString(), anyString(), any(Checkpoint.class), anySetOf(String.class))).thenCallRealMethod();
blobStoreBackupManager.init(checkpoint);
// mock: set task store dir to return corresponding test local store and create checkpoint dir
ArgumentCaptor<String> stringCaptor = ArgumentCaptor.forClass(String.class);
when(storageManagerUtil.getTaskStoreDir(any(File.class), stringCaptor.capture(), any(TaskName.class), any(TaskMode.class))).then((Answer<File>) invocation -> {
String storeName = invocation.getArgumentAt(1, String.class);
String snapshotIndexBlobId = testStoreNameAndSCMMap.get(storeName);
String storeDir = indexBlobIdAndLocalRemoteSnapshotsPair.get(snapshotIndexBlobId).getLeft();
try {
BlobStoreTestUtil.createTestCheckpointDirectory(storeDir, checkpointId.serialize());
checkpointDirsToClean.add(storeDir + "-" + checkpointId.serialize());
} catch (IOException e) {
Assert.fail("Couldn't create checkpoint directory. Test failed.");
}
return new File(storeDir);
});
ArgumentCaptor<File> storeDirCaptor = ArgumentCaptor.forClass(File.class);
when(storageManagerUtil.getStoreCheckpointDir(storeDirCaptor.capture(), eq(checkpointId))).thenAnswer(new Answer<String>() {
@Override
public String answer(InvocationOnMock invocation) throws Throwable {
File storeDir = invocation.getArgumentAt(0, File.class);
return storeDir.getAbsolutePath() + "-" + checkpointId.serialize();
}
});
// mock: mock putDir and capture DirDiff
SortedSet<DirDiff> actualDirDiffs = new TreeSet<>(Comparator.comparing(DirDiff::getDirName));
ArgumentCaptor<DirDiff> dirDiffCaptor = ArgumentCaptor.forClass(DirDiff.class);
ArgumentCaptor<SnapshotMetadata> snapshotMetadataCaptor = ArgumentCaptor.forClass(SnapshotMetadata.class);
when(blobStoreUtil.putDir(dirDiffCaptor.capture(), snapshotMetadataCaptor.capture())).then((Answer<CompletableFuture<DirIndex>>) invocation -> {
DirDiff dirDiff = invocation.getArgumentAt(0, DirDiff.class);
SnapshotMetadata snapshotMetadata = invocation.getArgumentAt(1, SnapshotMetadata.class);
actualDirDiffs.add(dirDiff);
SnapshotIndex snapshotIndex = testBlobStore.get(testStoreNameAndSCMMap.get(snapshotMetadata.getStoreName()));
return CompletableFuture.completedFuture(snapshotIndex.getDirIndex());
});
// mock: mock putSnapshotIndex and capture previous snapshot index
SortedSet<SnapshotIndex> expectedSnapshotIndexesUploaded = indexBlobIdAndLocalRemoteSnapshotsPair.values().stream().map(Pair::getRight).collect(Collectors.toCollection(() -> new TreeSet<>(Comparator.comparing(SnapshotIndex::getCreationTimeMillis))));
SortedSet<SnapshotIndex> actualSnapshotIndexesUploaded = new TreeSet<>(Comparator.comparing(SnapshotIndex::getCreationTimeMillis));
SortedSet<String> actualPreviousSnapshotIndexBlobIds = new TreeSet<>();
SortedSet<String> expectedPreviousSnapshotIndexBlobIds = new TreeSet<>(previousCheckpoints.values());
ArgumentCaptor<SnapshotIndex> snapshotIndexCaptor = ArgumentCaptor.forClass(SnapshotIndex.class);
when(blobStoreUtil.putSnapshotIndex(snapshotIndexCaptor.capture())).then((Answer<CompletableFuture<String>>) invocation -> {
SnapshotIndex snapshotIndex = invocation.getArgumentAt(0, SnapshotIndex.class);
actualSnapshotIndexesUploaded.add(snapshotIndex);
if (snapshotIndex.getPrevSnapshotIndexBlobId().isPresent()) {
actualPreviousSnapshotIndexBlobIds.add(snapshotIndex.getPrevSnapshotIndexBlobId().get());
}
return CompletableFuture.completedFuture("random-blob-id");
});
// execute
blobStoreBackupManager.upload(checkpointId, ImmutableMap.of());
TreeSet<DirDiff> expectedDirDiffs = indexBlobIdAndLocalRemoteSnapshotsPair.values().stream().map(localRemoteSnapshotPair -> DirDiffUtil.getDirDiff(new File(localRemoteSnapshotPair.getLeft() + "-" + checkpointId.serialize()), localRemoteSnapshotPair.getRight().getDirIndex(), DirDiffUtil.areSameFile(false))).collect(Collectors.toCollection(() -> new TreeSet<>(Comparator.comparing(DirDiff::getDirName))));
// assert - assert all DirDiffs are put to the blob store
Assert.assertEquals(actualDirDiffs, expectedDirDiffs);
// assert - assert the previous snapshot index blob ids match those from the previous checkpoint
Assert.assertEquals(actualPreviousSnapshotIndexBlobIds, expectedPreviousSnapshotIndexBlobIds);
// assert - assert all snapshot indexes are uploaded
Assert.assertEquals(actualSnapshotIndexesUploaded, expectedSnapshotIndexesUploaded);
// cleanup
checkpointDirsToClean.forEach(path -> {
try {
FileUtils.deleteDirectory(new File(path));
} catch (IOException exception) {
Assert.fail("Failed to cleanup temporary checkpoint dirs.");
}
});
}
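In this test the previous snapshot index blob ids travel inside the CheckpointV2 passed to init(), in the state-checkpoint-marker map keyed first by the backend factory class name and then by store name. A minimal sketch of reading them back out, assuming CheckpointV2 exposes a getStateCheckpointMarkers() getter matching the constructor argument used above (the helper class itself is hypothetical):

import java.util.Collections;
import java.util.Map;
import org.apache.samza.checkpoint.CheckpointV2;

final class PreviousSnapshotIds {
  // Extracts the store-name -> previous-snapshot-index-blob-id map for one state backend,
  // the inverse of how the test assembles the checkpoint it passes to init().
  static Map<String, String> forFactory(CheckpointV2 checkpoint, String stateBackendFactoryName) {
    return checkpoint.getStateCheckpointMarkers().getOrDefault(stateBackendFactoryName, Collections.emptyMap());
  }
}

The assertion on actualPreviousSnapshotIndexBlobIds at the end of the test checks that upload() rediscovered exactly these ids via the SnapshotIndexes it put to the blob store.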
Use of org.apache.samza.checkpoint.CheckpointId in project samza by apache.
Class TestBlobStoreBackupManager, method testUploadWithNoPreviousCheckpoints.
@Test
public void testUploadWithNoPreviousCheckpoints() throws IOException {
// Track directory for post cleanup
List<String> checkpointDirsToClean = new ArrayList<>();
// Setup: init local/remote snapshots and backup manager with no previous checkpoints
indexBlobIdAndLocalRemoteSnapshotsPair = setupRemoteAndLocalSnapshots(false);
Checkpoint checkpoint = new CheckpointV2(checkpointId, new HashMap<>(), ImmutableMap.of(BlobStoreStateBackendFactory.class.getName(), new HashMap<>()));
blobStoreBackupManager.init(checkpoint);
// mock: set task store dir to return corresponding test local store and create checkpoint dir
ArgumentCaptor<String> stringCaptor = ArgumentCaptor.forClass(String.class);
when(storageManagerUtil.getTaskStoreDir(any(File.class), stringCaptor.capture(), any(TaskName.class), any(TaskMode.class))).then((Answer<File>) invocation -> {
String storeName = invocation.getArgumentAt(1, String.class);
String snapshotIndexBlobId = testStoreNameAndSCMMap.get(storeName);
String storeDir = indexBlobIdAndLocalRemoteSnapshotsPair.get(snapshotIndexBlobId).getLeft();
try {
BlobStoreTestUtil.createTestCheckpointDirectory(storeDir, checkpointId.serialize());
checkpointDirsToClean.add(storeDir + "-" + checkpointId.serialize());
} catch (IOException e) {
Assert.fail("Couldn't create checkpoint directory. Test failed.");
}
return new File(storeDir);
});
ArgumentCaptor<File> storeDirCaptor = ArgumentCaptor.forClass(File.class);
when(storageManagerUtil.getStoreCheckpointDir(storeDirCaptor.capture(), eq(checkpointId))).thenAnswer(new Answer<String>() {
@Override
public String answer(InvocationOnMock invocation) throws Throwable {
File storeDir = invocation.getArgumentAt(0, File.class);
return storeDir.getAbsolutePath() + "-" + checkpointId.serialize();
}
});
SortedSet<DirDiff> actualDirDiffs = new TreeSet<>(Comparator.comparing(DirDiff::getDirName));
// mock: mock putDir and capture DirDiff
ArgumentCaptor<DirDiff> dirDiffCaptor = ArgumentCaptor.forClass(DirDiff.class);
ArgumentCaptor<SnapshotMetadata> snapshotMetadataCaptor = ArgumentCaptor.forClass(SnapshotMetadata.class);
when(blobStoreUtil.putDir(dirDiffCaptor.capture(), snapshotMetadataCaptor.capture())).then((Answer<CompletableFuture<DirIndex>>) invocation -> {
DirDiff dirDiff = invocation.getArgumentAt(0, DirDiff.class);
SnapshotMetadata snapshotMetadata = invocation.getArgumentAt(1, SnapshotMetadata.class);
actualDirDiffs.add(dirDiff);
SnapshotIndex snapshotIndex = testBlobStore.get(testStoreNameAndSCMMap.get(snapshotMetadata.getStoreName()));
return CompletableFuture.completedFuture(snapshotIndex.getDirIndex());
});
SortedSet<SnapshotIndex> expectedSnapshotIndexesUploaded = indexBlobIdAndLocalRemoteSnapshotsPair.values().stream().map(Pair::getRight).collect(Collectors.toCollection(() -> new TreeSet<>(Comparator.comparing(SnapshotIndex::getCreationTimeMillis))));
String expectedPreviousSnapshotIndexBlobId = "empty";
// mock: mock putSnapshotIndex and capture previous snapshot index
SortedSet<SnapshotIndex> actualSnapshotIndexesUploaded = new TreeSet<>(Comparator.comparing(SnapshotIndex::getCreationTimeMillis));
final String[] actualPreviousSnapshotIndexBlobId = { "empty" };
ArgumentCaptor<SnapshotIndex> snapshotIndexCaptor = ArgumentCaptor.forClass(SnapshotIndex.class);
when(blobStoreUtil.putSnapshotIndex(snapshotIndexCaptor.capture())).then((Answer<CompletableFuture<String>>) invocation -> {
SnapshotIndex snapshotIndex = invocation.getArgumentAt(0, SnapshotIndex.class);
actualSnapshotIndexesUploaded.add(snapshotIndex);
if (!snapshotIndex.getPrevSnapshotIndexBlobId().equals(Optional.empty())) {
actualPreviousSnapshotIndexBlobId[0] = "not-empty";
}
return CompletableFuture.completedFuture("random-blob-id");
});
// execute
blobStoreBackupManager.upload(checkpointId, testStoreNameAndSCMMap);
// setup expected dir diffs after execute: needs checkpoint dirs created in upload()
TreeSet<DirDiff> expectedDirDiffs = indexBlobIdAndLocalRemoteSnapshotsPair.values().stream().map(localRemoteSnapshotPair -> {
File localCheckpointDir = new File(localRemoteSnapshotPair.getLeft() + "-" + checkpointId.serialize());
DirIndex dirIndex = new DirIndex(localCheckpointDir.getName(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList());
return DirDiffUtil.getDirDiff(localCheckpointDir, dirIndex, DirDiffUtil.areSameFile(false));
}).collect(Collectors.toCollection(() -> new TreeSet<>(Comparator.comparing(DirDiff::getDirName))));
// assert - assert all DirDiffs are put to the blob store
Assert.assertEquals(actualDirDiffs, expectedDirDiffs);
// assert - assert no previous snapshot indexes were found
Assert.assertEquals(actualPreviousSnapshotIndexBlobId[0], expectedPreviousSnapshotIndexBlobId);
// assert - assert all snapshot indexes are uploaded
Assert.assertEquals(actualSnapshotIndexesUploaded, expectedSnapshotIndexesUploaded);
// cleanup
checkpointDirsToClean.forEach(path -> {
try {
FileUtils.deleteDirectory(new File(path));
} catch (IOException exception) {
Assert.fail("Failed to cleanup temporary checkpoint dirs.");
}
});
}
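Across these backup tests, CheckpointId is used in just two ways: CheckpointId.create() mints the id for the commit, and serialize() produces the string that the stubbed getStoreCheckpointDir appends (after a "-") to the store directory path. Below is a small self-contained sketch of that convention; it mirrors the stubs in these tests, not necessarily the naming used by the real StorageManagerUtil:

import java.io.File;
import org.apache.samza.checkpoint.CheckpointId;

final class CheckpointDirNames {
  // Per-checkpoint directory name as stubbed in the tests above: store dir path + "-" + serialized id.
  static File localCheckpointDir(File storeDir, CheckpointId checkpointId) {
    return new File(storeDir.getAbsolutePath() + "-" + checkpointId.serialize());
  }

  public static void main(String[] args) {
    CheckpointId checkpointId = CheckpointId.create();
    // Prints something like /tmp/store1-<serialized checkpoint id>
    System.out.println(localCheckpointDir(new File("/tmp/store1"), checkpointId));
  }
}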