Use of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in project jackrabbit-oak by apache.
From the class RDBDocumentStoreSchemaUpgradeTest, method init01:
@Test
public void init01() {
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName())
            .enable(Level.INFO)
            .contains("to DB level 1")
            .create();
    logCustomizer.starting();
    RDBOptions op = new RDBOptions().tablePrefix("T01").initialSchema(0).upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        assertTrue(meta.hasVersion());
        // one "to DB level 1" upgrade message is expected per table
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(),
                RDBDocumentStore.getTableNames().size(), logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (rdb != null) {
            rdb.dispose();
        }
    }
}
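All of the examples on this page share the same lifecycle: build the customizer with forLogger(...).create(), call starting() before exercising the code under test, inspect getLogs(), and call finished() to detach the capturing appender again. A minimal, self-contained sketch of that pattern; the logger name, the message fragment, and the logging call are placeholders, not taken from the test above:

import java.util.List;

import org.apache.jackrabbit.oak.commons.junit.LogCustomizer;
import org.junit.Test;
import org.slf4j.LoggerFactory;

import ch.qos.logback.classic.Level;

import static org.junit.Assert.assertEquals;

public class LogCustomizerPatternTest {

    @Test
    public void capturesMatchingMessages() {
        // "com.example.Component" and "expected fragment" are placeholders
        LogCustomizer logs = LogCustomizer.forLogger("com.example.Component")
                .enable(Level.INFO)
                .contains("expected fragment")
                .create();
        logs.starting(); // attach the capturing appender
        try {
            // stand-in for the code under test
            LoggerFactory.getLogger("com.example.Component").info("expected fragment found");
            List<String> captured = logs.getLogs();
            assertEquals(1, captured.size());
        } finally {
            logs.finished(); // always detach, even if an assertion fails
        }
    }
}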
From the class ObservationQueueFullWarnTest, method warnOnQueueFull:
@Test
public void warnOnQueueFull() throws RepositoryException, InterruptedException, ExecutionException {
    LogCustomizer customLogs = LogCustomizer.forLogger(ChangeProcessor.class.getName())
            .filter(Level.WARN)
            .contains(OBS_QUEUE_FULL_WARN)
            .create();
    observationManager.addEventListener(listener, NODE_ADDED, TEST_PATH, true, null, null, false);
    try {
        customLogs.starting();
        addNodeToFillObsQueue();
        assertTrue("Observation queue full warning must get logged", customLogs.getLogs().size() > 0);
    } finally {
        // detach the appender even if the assertion fails
        customLogs.finished();
        observationManager.removeEventListener(listener);
    }
}
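Unlike the previous example, this test uses filter(Level.WARN) without enable(...). Reading across the usages on this page, enable(Level) appears to raise the logger's threshold so that events the current configuration would suppress still get recorded, while filter(Level) only restricts which emitted events are captured; treat that distinction as an inference from these tests rather than documented behavior. The two builder shapes side by side, with placeholder logger names and fragments:

// Capture WARN events the logger already emits (placeholder logger name):
LogCustomizer warnOnly = LogCustomizer.forLogger("com.example.Component")
        .filter(Level.WARN)
        .contains("queue full") // placeholder fragment
        .create();

// Force TRACE on and capture only TRACE events, as the GC tests below do:
LogCustomizer traceAll = LogCustomizer.forLogger("com.example.Component")
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();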
From the class SegmentDataStoreBlobGCIT, method checkMark:
@Test
public void checkMark() throws Exception {
    LogCustomizer customLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    DataStoreState state = setUp(10);
    log.info("{} blobs available : {}", state.blobsPresent.size(), state.blobsPresent);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    String rootFolder = folder.newFolder().getAbsolutePath();
    MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
    gcObj.collectGarbage(true);
    customLogs.finished();
    assertBlobReferenceRecords(state.blobsPresent, rootFolder);
}
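One thing to note when reproducing this test standalone: the ThreadPoolExecutor created for the collector is never shut down here (the surrounding test class presumably handles cleanup). A hedged variant that releases the pool explicitly, with init(...) standing in for the test's own helper:

ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
try {
    MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
    gcObj.collectGarbage(true);
} finally {
    executor.shutdown(); // not in the original test; added for standalone use
}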
From the class DataStoreTrackerGCTest, method clusterGCInternal:
private void clusterGCInternal(Cluster cluster1, Cluster cluster2, boolean same) throws Exception {
    BlobStore s1 = cluster1.blobStore;
    BlobIdTracker tracker1 = (BlobIdTracker) ((BlobTrackingStore) s1).getTracker();
    DataStoreState state1 = init(cluster1.nodeStore, 0);
    cluster1.nodeStore.runBackgroundOperations();
    ScheduledFuture<?> scheduledFuture1 =
            newSingleThreadScheduledExecutor().schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture1.get();
    // Add blobs to cluster2
    BlobStore s2 = cluster2.blobStore;
    BlobIdTracker tracker2 = (BlobIdTracker) ((BlobTrackingStore) s2).getTracker();
    cluster2.nodeStore.runBackgroundOperations();
    DataStoreState state2 = init(cluster2.nodeStore, 20);
    cluster2.nodeStore.runBackgroundOperations();
    cluster1.nodeStore.runBackgroundOperations();
    ScheduledFuture<?> scheduledFuture2 =
            newSingleThreadScheduledExecutor().schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture2.get();
    // If not the same cluster, references must be marked on the other repository first
    if (!same) {
        cluster2.gc.collectGarbage(true);
    }
    // do a gc on cluster1 with sweep
    cluster1.gc.collectGarbage(false);
    Set<String> existingAfterGC = iterate(s1);
    // Check the state of the blob store after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
    // Tracked blobs should reflect deletions after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), retrieveTracked(tracker1));
    // Again create snapshots at both cluster nodes to synchronize the latest state of
    // local references with the datastore at each node
    scheduledFuture1 = newSingleThreadScheduledExecutor().schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture1.get();
    scheduledFuture2 = newSingleThreadScheduledExecutor().schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture2.get();
    // Capture logs for the second round of gc: no deletion errors are expected
    LogCustomizer customLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.WARN)
            .filter(Level.WARN)
            .contains("Error occurred while deleting blob with id")
            .create();
    customLogs.starting();
    try {
        if (!same) {
            cluster2.gc.collectGarbage(true);
        }
        cluster1.gc.collectGarbage(false);
        existingAfterGC = iterate(s1);
        assertEquals(0, customLogs.getLogs().size());
    } finally {
        // detach the appender even if the assertion fails
        customLogs.finished();
    }
    // Check the state of the blob store after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
}
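The second round of gc above uses LogCustomizer in the inverse direction: instead of proving that a message was logged, it proves that an error message was not. A minimal sketch of that idiom; the logger name and message fragment are placeholders:

LogCustomizer noErrors = LogCustomizer.forLogger("com.example.Gc")
        .enable(Level.WARN)
        .filter(Level.WARN)
        .contains("Error occurred") // placeholder fragment
        .create();
noErrors.starting();
try {
    // ... run the operation that must complete without the error ...
    assertEquals("unexpected error entries: " + noErrors.getLogs(), 0, noErrors.getLogs().size());
} finally {
    noErrors.finished();
}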
From the class MongoBlobGCTest, method checkGcPathLogging:
@Test
public void checkGcPathLogging() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    LogCustomizer customLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    setUp(false);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
    gcObj.collectGarbage(true);
    customLogs.finished();
    assertBlobReferenceRecords(1, rootFolder);
}
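Every test above repeats the starting()/finished() bookkeeping by hand. LogCustomizer is not itself a JUnit rule, but if you want rule-style setup and teardown you could wrap it in an ExternalResource. The helper below is hypothetical and not part of oak-commons:

import java.util.List;

import org.apache.jackrabbit.oak.commons.junit.LogCustomizer;
import org.junit.rules.ExternalResource;

// Hypothetical convenience wrapper: attaches the customizer before each
// test method and detaches it afterwards, even when the test fails.
public class LogCustomizerRule extends ExternalResource {

    private final LogCustomizer customizer;

    public LogCustomizerRule(LogCustomizer customizer) {
        this.customizer = customizer;
    }

    @Override
    protected void before() {
        customizer.starting();
    }

    @Override
    protected void after() {
        customizer.finished();
    }

    public List<String> getLogs() {
        return customizer.getLogs();
    }
}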