Use of org.apache.jackrabbit.oak.spi.blob.BlobStore in project jackrabbit-oak by apache.
Class DataStoreTrackerGCTest, method consistencyCheckNoActiveDeletion.
@Test
public void consistencyCheckNoActiveDeletion() throws Exception {
    File tmpFolder = folder.newFolder();
    String previousTmp = System.setProperty(JAVA_IO_TMPDIR.key(), tmpFolder.getAbsolutePath());
    try {
        Cluster cluster = new Cluster("cluster1");
        BlobStore s = cluster.blobStore;
        BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker();
        DataStoreState state = init(cluster.nodeStore, 0);
        // The datastore is in a consistent state and there are no active deletions,
        // so the missing list should be empty and no temporary files should be left behind
        assertEquals(0, cluster.gc.checkConsistency());
        assertTrue(FileUtils.listFiles(tmpFolder, null, true).size() == 0);
    } finally {
        if (previousTmp != null) {
            System.setProperty(JAVA_IO_TMPDIR.key(), previousTmp);
        } else {
            System.clearProperty(JAVA_IO_TMPDIR.key());
        }
    }
}
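The try/finally around java.io.tmpdir is what makes the temp-file assertion meaningful: the consistency check is forced to write any temporary files into a folder the test owns and can inspect afterwards. A minimal sketch of the same pattern outside JUnit, assuming the collector is a MarkSweepGarbageCollector (as the logger in the last example on this page suggests); the runConsistencyCheck helper and the literal property key are this sketch's own choices, not part of the test:

import java.io.File;
import java.nio.file.Files;
import org.apache.commons.io.FileUtils;
import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector;

class ConsistencyCheckSketch {
    // Hypothetical helper: run checkConsistency() with java.io.tmpdir redirected to a
    // throwaway folder, restore the original property afterwards, and report leftovers.
    static long runConsistencyCheck(MarkSweepGarbageCollector gc) throws Exception {
        File tmpFolder = Files.createTempDirectory("consistency-check").toFile();
        String previousTmp = System.setProperty("java.io.tmpdir", tmpFolder.getAbsolutePath());
        try {
            long missing = gc.checkConsistency();
            int leftover = FileUtils.listFiles(tmpFolder, null, true).size();
            System.out.println("missing blobs: " + missing + ", leftover temp files: " + leftover);
            return missing;
        } finally {
            if (previousTmp != null) {
                System.setProperty("java.io.tmpdir", previousTmp);
            } else {
                System.clearProperty("java.io.tmpdir");
            }
        }
    }
}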
Use of org.apache.jackrabbit.oak.spi.blob.BlobStore in project jackrabbit-oak by apache.
Class DataStoreTrackerGCTest, method gcReconcileActiveDeletionMarkCleared.
@Test
public void gcReconcileActiveDeletionMarkCleared() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore s = cluster.blobStore;
    BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker();
    // Simulate an active deletion before init to ensure that the references are also cleared
    List<String> addlAdded = doActiveDelete(cluster.nodeStore, (DataStoreBlobStore) cluster.blobStore, tracker, folder, 0, 2);
    DataStoreState state = init(cluster.nodeStore, 0);
    // Force a snapshot of the tracker to refresh
    File f = folder.newFile();
    tracker.remove(f, BlobTracker.Options.ACTIVE_DELETION);
    List<String> addlPresent = Lists.newArrayList(addlAdded.get(2), addlAdded.get(3));
    List<String> activeDeleted = Lists.newArrayList(addlAdded.get(0), addlAdded.get(1));
    state.blobsPresent.addAll(addlPresent);
    state.blobsAdded.addAll(addlPresent);
    cluster.gc.collectGarbage(false);
    Set<String> existingAfterGC = iterate(s);
    // Check the state of the blob store after gc
    assertEquals(state.blobsPresent, existingAfterGC);
    // Tracked blobs should reflect the deletions after gc
    assertEquals(state.blobsPresent, retrieveTracked(tracker));
    // Check that the active deletion tracker has been cleared
    assertEquals(Sets.newHashSet(), retrieveActiveDeleteTracked(tracker, folder));
}
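retrieveTracked(tracker) above is a helper of the test class that is not shown on this page. A plausible sketch of it, assuming it simply drains the tracker's id iterator into a set; the method body and the use of BlobIdTracker#get() are assumptions based on how the assertions use it:

import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import org.apache.jackrabbit.oak.plugins.blob.datastore.BlobIdTracker;

class TrackedIdsSketch {
    // Plausible sketch of retrieveTracked(..): collect every blob id the tracker currently records.
    static Set<String> retrieveTracked(BlobIdTracker tracker) throws IOException {
        Set<String> retrieved = new HashSet<>();
        Iterator<String> ids = tracker.get(); // assumed: the tracker exposes its ids as an iterator
        while (ids.hasNext()) {
            retrieved.add(ids.next());
        }
        return retrieved;
    }
}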
Use of org.apache.jackrabbit.oak.spi.blob.BlobStore in project jackrabbit-oak by apache.
Class DataStoreTrackerGCTest, method consistencyCheckOnlyActiveDeletion.
@Test
public void consistencyCheckOnlyActiveDeletion() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore s = cluster.blobStore;
    BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker();
    DataStoreState state = init(cluster.nodeStore, 0);
    List<String> addlAdded = doActiveDelete(cluster.nodeStore, (DataStoreBlobStore) cluster.blobStore, tracker, folder, 0, 2);
    List<String> addlPresent = Lists.newArrayList(addlAdded.get(2), addlAdded.get(3));
    List<String> activeDeleted = Lists.newArrayList(addlAdded.get(0), addlAdded.get(1));
    state.blobsPresent.addAll(addlPresent);
    state.blobsAdded.addAll(addlPresent);
    // Since the datastore is in a consistent state and there are only active deletions,
    // the missing list should be empty
    assertEquals(0, cluster.gc.checkConsistency());
}
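doActiveDelete(..) is likewise a private helper of the test class and is not reproduced here. Conceptually it deletes a slice of the created blobs straight from the data store and registers the deleted ids with the tracker as an active deletion, which is why checkConsistency() does not report them as missing. A rough sketch under those assumptions; the helper name, the file name, the deleteChunks call, and the package of BlobTracker are all assumptions rather than the test's actual code:

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.List;
import org.apache.jackrabbit.oak.plugins.blob.datastore.BlobIdTracker;
import org.apache.jackrabbit.oak.plugins.blob.datastore.BlobTracker;
import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;

class ActiveDeleteSketch {
    // Rough sketch of an "active delete": remove the chunks from the data store itself and
    // hand the tracker a file listing the deleted ids, marked as an active deletion.
    static void activeDelete(DataStoreBlobStore blobStore, BlobIdTracker tracker,
                             List<String> idsToDelete, File workDir) throws Exception {
        blobStore.deleteChunks(idsToDelete, 0);                       // assumed API for direct deletion
        File deleted = new File(workDir, "active-deletions.txt");     // hypothetical file name
        Files.write(deleted.toPath(), idsToDelete, StandardCharsets.UTF_8);
        tracker.remove(deleted, BlobTracker.Options.ACTIVE_DELETION); // same call as in the examples above
    }
}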
Use of org.apache.jackrabbit.oak.spi.blob.BlobStore in project jackrabbit-oak by apache.
Class DataStoreTrackerGCTest, method gcReconcileActiveDeletion.
@Test
public void gcReconcileActiveDeletion() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore s = cluster.blobStore;
    BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker();
    DataStoreState state = init(cluster.nodeStore, 0);
    // Simulate creation and active deletion after init, without a version gc, so that the references stay around
    List<String> addlAdded = doActiveDelete(cluster.nodeStore, (DataStoreBlobStore) cluster.blobStore, tracker, folder, 0, 2);
    List<String> addlPresent = Lists.newArrayList(addlAdded.get(2), addlAdded.get(3));
    List<String> activeDeleted = Lists.newArrayList(addlAdded.get(0), addlAdded.get(1));
    state.blobsPresent.addAll(addlPresent);
    state.blobsAdded.addAll(addlPresent);
    cluster.gc.collectGarbage(false);
    Set<String> existingAfterGC = iterate(s);
    // Check the state of the blob store after gc
    assertEquals(state.blobsPresent, existingAfterGC);
    // Tracked blobs should reflect the deletions after gc
    assertEquals(state.blobsPresent, retrieveTracked(tracker));
    // Check that the refreshed active deletion tracker still records the actively deleted blobs
    assertEquals(Sets.newHashSet(activeDeleted), retrieveActiveDeleteTracked(tracker, folder));
}
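iterate(s) is another helper of the test class. A plausible sketch, assuming it drains GarbageCollectableBlobStore#getAllChunkIds into a set so the post-gc contents of the store can be compared against the expected blobsPresent; the exact body is an assumption:

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;

class IterateSketch {
    // Plausible sketch of iterate(..): list every chunk id still present in the blob store.
    static Set<String> iterate(BlobStore blobStore) throws Exception {
        Iterator<String> ids = ((GarbageCollectableBlobStore) blobStore).getAllChunkIds(0);
        Set<String> existing = new HashSet<>();
        while (ids.hasNext()) {
            existing.add(ids.next());
        }
        return existing;
    }
}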
Use of org.apache.jackrabbit.oak.spi.blob.BlobStore in project jackrabbit-oak by apache.
Class DataStoreTrackerGCTest, method clusterGCInternal.
private void clusterGCInternal(Cluster cluster1, Cluster cluster2, boolean same) throws Exception {
    BlobStore s1 = cluster1.blobStore;
    BlobIdTracker tracker1 = (BlobIdTracker) ((BlobTrackingStore) s1).getTracker();
    DataStoreState state1 = init(cluster1.nodeStore, 0);
    cluster1.nodeStore.runBackgroundOperations();
    ScheduledFuture<?> scheduledFuture1 =
        newSingleThreadScheduledExecutor().schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture1.get();
    // Add blobs on the second cluster node (cluster2)
    BlobStore s2 = cluster2.blobStore;
    BlobIdTracker tracker2 = (BlobIdTracker) ((BlobTrackingStore) s2).getTracker();
    cluster2.nodeStore.runBackgroundOperations();
    DataStoreState state2 = init(cluster2.nodeStore, 20);
    cluster2.nodeStore.runBackgroundOperations();
    cluster1.nodeStore.runBackgroundOperations();
    ScheduledFuture<?> scheduledFuture2 =
        newSingleThreadScheduledExecutor().schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture2.get();
    // If this is not the same cluster, references need to be marked on the other repository first
    if (!same) {
        cluster2.gc.collectGarbage(true);
    }
    // Do a gc on cluster1 with sweep
    cluster1.gc.collectGarbage(false);
    Set<String> existingAfterGC = iterate(s1);
    // Check the state of the blob store after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
    // Tracked blobs should reflect the deletions after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), retrieveTracked(tracker1));
    // Again create snapshots at both cluster nodes to synchronize the latest state of
    // local references with the datastore at each node
    scheduledFuture1 = newSingleThreadScheduledExecutor().schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture1.get();
    scheduledFuture2 = newSingleThreadScheduledExecutor().schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture2.get();
    // Capture logs for the second round of gc: no deletion errors should be reported
    LogCustomizer customLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.WARN)
        .filter(Level.WARN)
        .contains("Error occurred while deleting blob with id")
        .create();
    customLogs.starting();
    if (!same) {
        cluster2.gc.collectGarbage(true);
    }
    cluster1.gc.collectGarbage(false);
    existingAfterGC = iterate(s1);
    assertEquals(0, customLogs.getLogs().size());
    customLogs.finished();
    // Check the state of the blob store after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
}
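clusterGCInternal is a private driver shared by the public cluster gc tests; the same flag distinguishes whether the two Cluster instances represent the same repository or two repositories sharing a data store. A hypothetical caller, only to show how the flag is intended to be used (the test name and the way the second Cluster is constructed are illustrative, not taken from the actual test class):

@Test
public void gcAcrossTwoClusterNodes() throws Exception {
    // Hypothetical: two cluster nodes sharing one data store, so cluster2 must mark its
    // references (collectGarbage(true)) before cluster1 runs the sweep (collectGarbage(false)).
    Cluster cluster1 = new Cluster("cluster1");
    Cluster cluster2 = new Cluster("cluster2");
    clusterGCInternal(cluster1, cluster2, false);
}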