Example use of org.apache.jackrabbit.oak.spi.blob.BlobStore in the Apache jackrabbit-oak project: the method gcReconcileActiveDeletion of class DataStoreTrackerGCTest.
@Test
public void gcReconcileActiveDeletion() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore blobStore = cluster.blobStore;
    BlobIdTracker idTracker = (BlobIdTracker) ((BlobTrackingStore) blobStore).getTracker();
    DataStoreState dsState = init(cluster.nodeStore, 0);

    // Create extra blobs and actively delete two of them after init, without
    // running version gc, so references to the deleted blobs linger.
    List<String> extraBlobs = doActiveDelete(cluster.nodeStore, (DataStoreBlobStore) cluster.blobStore, idTracker, folder, 0, 2);
    List<String> surviving = Lists.newArrayList(extraBlobs.get(2), extraBlobs.get(3));
    List<String> activelyDeleted = Lists.newArrayList(extraBlobs.get(0), extraBlobs.get(1));
    dsState.blobsPresent.addAll(surviving);
    dsState.blobsAdded.addAll(surviving);

    cluster.gc.collectGarbage(false);

    // The blob store must end up holding exactly the surviving blobs
    Set<String> remaining = iterate(blobStore);
    assertEquals(dsState.blobsPresent, remaining);
    // The id tracker must have reconciled the gc deletions
    assertEquals(dsState.blobsPresent, retrieveTracked(idTracker));
    // The active-deletion tracker must have been refreshed with the deleted ids
    assertEquals(Sets.newHashSet(activelyDeleted), retrieveActiveDeleteTracked(idTracker, folder));
}
Example use of org.apache.jackrabbit.oak.spi.blob.BlobStore in the Apache jackrabbit-oak project: the method consistencyCheckDeletedWithActiveDeletion of class DataStoreTrackerGCTest.
@Test
public void consistencyCheckDeletedWithActiveDeletion() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore blobStore = cluster.blobStore;
    BlobIdTracker idTracker = (BlobIdTracker) ((BlobTrackingStore) blobStore).getTracker();
    DataStoreState dsState = init(cluster.nodeStore, 0);

    // Remove one chunk straight from the blob store to simulate a missing blob
    ArrayList<String> presentIds = Lists.newArrayList(dsState.blobsPresent);
    String missingId = presentIds.remove(0);
    ((DataStoreBlobStore) blobStore).deleteChunks(Lists.newArrayList(missingId), 0);
    dsState.blobsPresent = Sets.newHashSet(presentIds);

    // Drop the removed id from the tracker as well
    File idFile = folder.newFile();
    writeStrings(Lists.newArrayList(missingId).iterator(), idFile, false);
    idTracker.remove(idFile);

    // Add more blobs, two of which go through active deletion
    List<String> extraBlobs = doActiveDelete(cluster.nodeStore, (DataStoreBlobStore) cluster.blobStore, idTracker, folder, 0, 2);
    List<String> surviving = Lists.newArrayList(extraBlobs.get(2), extraBlobs.get(3));
    dsState.blobsPresent.addAll(surviving);
    dsState.blobsAdded.addAll(surviving);

    // The consistency check must flag only the directly-removed blob,
    // never the ones that went through active deletion
    assertEquals(1, cluster.gc.checkConsistency());
}
Example use of org.apache.jackrabbit.oak.spi.blob.BlobStore in the Apache jackrabbit-oak project: the method clusterGCInternal of class DataStoreTrackerGCTest.
/**
 * Shared scenario for data store gc across two cluster nodes: seeds blobs on
 * both nodes, runs mark-only gc on cluster2 (when the nodes are separate
 * repositories) followed by a full mark+sweep on cluster1, and verifies that
 * both the blob store contents and the tracked ids converge to the union of
 * the blobs surviving on either node. A second gc round confirms that the
 * refreshed snapshots do not cause spurious delete attempts.
 *
 * @param cluster1 node that performs the sweep phase
 * @param cluster2 second node; marks its references only when {@code same} is false
 * @param same true when both Cluster instances point at the same repository
 */
private void clusterGCInternal(Cluster cluster1, Cluster cluster2, boolean same) throws Exception {
BlobStore s1 = cluster1.blobStore;
BlobIdTracker tracker1 = (BlobIdTracker) ((BlobTrackingStore) s1).getTracker();
// Seed references on cluster1 and persist them
DataStoreState state1 = init(cluster1.nodeStore, 0);
cluster1.nodeStore.runBackgroundOperations();
// Snapshot so cluster1's locally tracked ids are synced with the datastore
ScheduledFuture<?> scheduledFuture1 = newSingleThreadScheduledExecutor().schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture1.get();
// Add blobs via cluster2 (init runs on cluster2's node store; 20 of them are deletion candidates)
BlobStore s2 = cluster2.blobStore;
BlobIdTracker tracker2 = (BlobIdTracker) ((BlobTrackingStore) s2).getTracker();
cluster2.nodeStore.runBackgroundOperations();
DataStoreState state2 = init(cluster2.nodeStore, 20);
// Run background ops on both nodes so each sees the other's changes
cluster2.nodeStore.runBackgroundOperations();
cluster1.nodeStore.runBackgroundOperations();
ScheduledFuture<?> scheduledFuture2 = newSingleThreadScheduledExecutor().schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture2.get();
// If not same cluster need to mark references on other repositories
if (!same) {
cluster2.gc.collectGarbage(true);
}
// do a gc on cluster1 with sweep
cluster1.gc.collectGarbage(false);
Set<String> existingAfterGC = iterate(s1);
// Check the state of the blob store after gc: union of both nodes' survivors
assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
// Tracked blobs should reflect deletions after gc
assertEquals(union(state1.blobsPresent, state2.blobsPresent), retrieveTracked(tracker1));
// Again create snapshots at both cluster nodes to synchronize the latest state of
// local references with datastore at each node
scheduledFuture1 = newSingleThreadScheduledExecutor().schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture1.get();
scheduledFuture2 = newSingleThreadScheduledExecutor().schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture2.get();
// Capture logs for the second round of gc: no delete failures may be logged,
// i.e. the refreshed snapshots must not resurrect already-swept ids
LogCustomizer customLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName()).enable(Level.WARN).filter(Level.WARN).contains("Error occurred while deleting blob with id").create();
customLogs.starting();
if (!same) {
cluster2.gc.collectGarbage(true);
}
cluster1.gc.collectGarbage(false);
existingAfterGC = iterate(s1);
assertEquals(0, customLogs.getLogs().size());
customLogs.finished();
// Check the state of the blob store after gc: still unchanged after round two
assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
}
Example use of org.apache.jackrabbit.oak.spi.blob.BlobStore in the Apache jackrabbit-oak project: the method gcForcedRetrieve of class DataStoreTrackerGCTest.
@Test
public void gcForcedRetrieve() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore blobStore = cluster.blobStore;
    BlobIdTracker idTracker = (BlobIdTracker) ((BlobTrackingStore) blobStore).getTracker();
    DataStoreState dsState = init(cluster.nodeStore, 0);

    ScheduledFuture<?> snapshot = newSingleThreadScheduledExecutor().schedule(idTracker.new SnapshotJob(), 0, MILLISECONDS);
    snapshot.get();
    // After the snapshot every added blob is tracked
    assertEquals(dsState.blobsAdded, retrieveTracked(idTracker));

    // Additions made after the snapshot are not tracked yet
    Set<String> lateAdditions = addNodeSpecialChars(cluster.nodeStore);
    dsState.blobsAdded.addAll(lateAdditions);
    dsState.blobsPresent.addAll(lateAdditions);
    assertEquals(Sets.difference(dsState.blobsAdded, retrieveTracked(idTracker)), lateAdditions);

    // Force gc to retrieve blob ids directly from the datastore
    cluster.gc.collectGarbage(false, true);
    Set<String> remaining = iterate(blobStore);
    // Blob store contents after gc
    assertEquals(dsState.blobsPresent, remaining);
    // Tracker now reflects both the gc deletions and the late additions
    assertEquals(dsState.blobsPresent, retrieveTracked(idTracker));

    // A fresh snapshot must not resurrect the deleted ids
    snapshot = newSingleThreadScheduledExecutor().schedule(idTracker.new SnapshotJob(), 0, MILLISECONDS);
    snapshot.get();
    assertEquals(dsState.blobsPresent, retrieveTracked(idTracker));
}
Aggregations