Example usage of org.apache.jackrabbit.oak.spi.blob.BlobStore in the Apache jackrabbit-oak project: class DataStoreTrackerGCTest, method gcWithInlined.
/**
 * Verifies that GC correctly handles a repository containing inlined (in-node) binaries:
 * after collection, both the blob store contents and the tracker must match the
 * expected surviving blobs.
 */
@Test
public void gcWithInlined() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore s = cluster.blobStore;
    BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker();
    DataStoreState state = init(cluster.nodeStore, 0);
    addInlined(cluster.nodeStore);
    // Take a tracker snapshot synchronously; shut the executor down afterwards so the
    // test does not leak its scheduler thread (the original never shut it down).
    java.util.concurrent.ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    try {
        executor.schedule(tracker.new SnapshotJob(), 0, MILLISECONDS).get();
    } finally {
        executor.shutdown();
    }
    // All blobs added should be tracked now
    assertEquals(state.blobsAdded, retrieveTracked(tracker));
    cluster.gc.collectGarbage(false);
    Set<String> existingAfterGC = iterate(s);
    // Check the state of the blob store after gc
    assertEquals(state.blobsPresent, existingAfterGC);
    // Tracked blobs should reflect deletions after gc
    assertEquals(state.blobsPresent, retrieveTracked(tracker));
}
Example usage of org.apache.jackrabbit.oak.spi.blob.BlobStore in the Apache jackrabbit-oak project: class DataStoreTrackerGCTest, method gc.
/**
 * Baseline GC test: adds blobs, snapshots the tracker, runs GC, then asserts that
 * both the blob store and the tracker reflect only the surviving blobs.
 */
@Test
public void gc() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore s = cluster.blobStore;
    BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker();
    DataStoreState state = init(cluster.nodeStore, 0);
    // Take a tracker snapshot synchronously; shut the executor down afterwards so the
    // test does not leak its scheduler thread (the original never shut it down).
    java.util.concurrent.ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    try {
        executor.schedule(tracker.new SnapshotJob(), 0, MILLISECONDS).get();
    } finally {
        executor.shutdown();
    }
    // All blobs added should be tracked now
    assertEquals(state.blobsAdded, retrieveTracked(tracker));
    cluster.gc.collectGarbage(false);
    Set<String> existingAfterGC = iterate(s);
    // Check the state of the blob store after gc
    assertEquals(state.blobsPresent, existingAfterGC);
    // Tracked blobs should reflect deletions after gc
    assertEquals(state.blobsPresent, retrieveTracked(tracker));
}
Example usage of org.apache.jackrabbit.oak.spi.blob.BlobStore in the Apache jackrabbit-oak project: class DataStoreTrackerGCTest, method clusterGCInternal.
/**
 * Shared driver for two-node cluster GC scenarios.
 *
 * @param cluster1 node that performs the final mark-and-sweep
 * @param cluster2 second node; when {@code same} is false it is treated as a separate
 *                 repository and must mark its references before cluster1 sweeps
 * @param same     whether both Cluster instances represent the same repository
 */
private void clusterGCInternal(Cluster cluster1, Cluster cluster2, boolean same) throws Exception {
    BlobStore s1 = cluster1.blobStore;
    BlobIdTracker tracker1 = (BlobIdTracker) ((BlobTrackingStore) s1).getTracker();
    DataStoreState state1 = init(cluster1.nodeStore, 0);
    cluster1.nodeStore.runBackgroundOperations();
    // Snapshot cluster1's tracked ids; each executor is shut down after use so the
    // test does not leak scheduler threads (the original never shut them down).
    java.util.concurrent.ScheduledExecutorService executor1 = newSingleThreadScheduledExecutor();
    try {
        executor1.schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS).get();
    } finally {
        executor1.shutdown();
    }
    // Add blobs to cluster1
    BlobStore s2 = cluster2.blobStore;
    BlobIdTracker tracker2 = (BlobIdTracker) ((BlobTrackingStore) s2).getTracker();
    cluster2.nodeStore.runBackgroundOperations();
    DataStoreState state2 = init(cluster2.nodeStore, 20);
    cluster2.nodeStore.runBackgroundOperations();
    cluster1.nodeStore.runBackgroundOperations();
    java.util.concurrent.ScheduledExecutorService executor2 = newSingleThreadScheduledExecutor();
    try {
        executor2.schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS).get();
    } finally {
        executor2.shutdown();
    }
    // If not same cluster need to mark references on other repositories
    if (!same) {
        cluster2.gc.collectGarbage(true);
    }
    // do a gc on cluster1 with sweep
    cluster1.gc.collectGarbage(false);
    Set<String> existingAfterGC = iterate(s1);
    // Check the state of the blob store after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
    // Tracked blobs should reflect deletions after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), retrieveTracked(tracker1));
    // Again create snapshots at both cluster nodes to synchronize the latest state of
    // local references with datastore at each node
    java.util.concurrent.ScheduledExecutorService executor3 = newSingleThreadScheduledExecutor();
    try {
        executor3.schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS).get();
    } finally {
        executor3.shutdown();
    }
    java.util.concurrent.ScheduledExecutorService executor4 = newSingleThreadScheduledExecutor();
    try {
        executor4.schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS).get();
    } finally {
        executor4.shutdown();
    }
    // Capture logs for the second round of gc. The second round must not attempt to
    // re-delete already-deleted blobs, so no deletion-error warnings may appear.
    LogCustomizer customLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.WARN)
        .filter(Level.WARN)
        .contains("Error occurred while deleting blob with id")
        .create();
    customLogs.starting();
    try {
        if (!same) {
            cluster2.gc.collectGarbage(true);
        }
        cluster1.gc.collectGarbage(false);
        existingAfterGC = iterate(s1);
        assertEquals(0, customLogs.getLogs().size());
    } finally {
        // Detach the log appender even if an assertion above fails, so later tests
        // are not affected (the original skipped finished() on failure).
        customLogs.finished();
    }
    // Check the state of the blob store after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
}
Example usage of org.apache.jackrabbit.oak.spi.blob.BlobStore in the Apache jackrabbit-oak project: class DataStoreTrackerGCTest, method gcForcedRetrieve.
/**
 * Verifies that GC with forced blob-id retrieval ({@code collectGarbage(false, true)})
 * bypasses the (stale) tracker snapshot, reads ids directly from the datastore, and
 * that a subsequent snapshot does not resurrect deleted ids.
 */
@Test
public void gcForcedRetrieve() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore s = cluster.blobStore;
    BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker();
    DataStoreState state = init(cluster.nodeStore, 0);
    // Take a tracker snapshot synchronously; shut the executor down afterwards so the
    // test does not leak its scheduler thread (the original never shut it down).
    java.util.concurrent.ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    try {
        executor.schedule(tracker.new SnapshotJob(), 0, MILLISECONDS).get();
    } finally {
        executor.shutdown();
    }
    // All blobs added should be tracked now
    assertEquals(state.blobsAdded, retrieveTracked(tracker));
    // Do addition and deletion which would not have been tracked as yet
    Set<String> newBlobs = addNodeSpecialChars(cluster.nodeStore);
    state.blobsAdded.addAll(newBlobs);
    state.blobsPresent.addAll(newBlobs);
    // The new blobs should not be found now as new snapshot not done
    assertEquals(Sets.difference(state.blobsAdded, retrieveTracked(tracker)), newBlobs);
    //force gc to retrieve blob ids from datastore
    cluster.gc.collectGarbage(false, true);
    Set<String> existingAfterGC = iterate(s);
    // Check the state of the blob store after gc
    assertEquals(state.blobsPresent, existingAfterGC);
    // Tracked blobs should reflect deletions after gc and also the additions after
    assertEquals(state.blobsPresent, retrieveTracked(tracker));
    // Create a snapshot
    java.util.concurrent.ScheduledExecutorService executor2 = newSingleThreadScheduledExecutor();
    try {
        executor2.schedule(tracker.new SnapshotJob(), 0, MILLISECONDS).get();
    } finally {
        executor2.shutdown();
    }
    // Tracked blobs should reflect deletions after gc and the deleted should not get resurrected
    assertEquals(state.blobsPresent, retrieveTracked(tracker));
}
Example usage of org.apache.jackrabbit.oak.spi.blob.BlobStore in the Apache jackrabbit-oak project: class MigrationFactory, method createSidegrade.
/**
 * Builds a {@link RepositorySidegrade} wiring the source node store and the migration
 * target to the same source blob store.
 *
 * @return a sidegrade configured for the source and destination stores
 * @throws IOException          if a store cannot be created
 * @throws CliArgumentException if the CLI configuration is invalid
 */
public RepositorySidegrade createSidegrade() throws IOException, CliArgumentException {
    // Resolve the source blob store first: both node stores share it, and everything
    // is registered with 'closer' for cleanup.
    BlobStore sourceBlobStore = datastores.getSrcBlobStore().create(closer);
    NodeStore sourceStore = stores.getSrcStore().create(sourceBlobStore, closer);
    NodeStore destinationStore = createTarget(closer, sourceBlobStore);
    return createSidegrade(sourceStore, destinationStore);
}
Aggregations