
Example 6 with LogCustomizer

Use of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in project jackrabbit-oak by apache.

From the class RDBDocumentStoreSchemaUpgradeTest, method init01: verifies that a store created with initialSchema(0) and upgradeToSchema(1) upgrades its tables and logs one "to DB level 1" INFO entry per table.

@Test
public void init01() {
    // Capture INFO entries that report the upgrade to schema version 1.
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName())
            .enable(Level.INFO)
            .contains("to DB level 1")
            .create();
    logCustomizer.starting();
    // Create the tables at schema version 0, then upgrade them to version 1.
    RDBOptions op = new RDBOptions().tablePrefix("T01").initialSchema(0).upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        assertTrue(meta.hasVersion());
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(), RDBDocumentStore.getTableNames().size(), logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (rdb != null) {
            rdb.dispose();
        }
    }
}
Also used: RDBTableMetaData (org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.RDBTableMetaData), LogCustomizer (org.apache.jackrabbit.oak.commons.junit.LogCustomizer), Test (org.junit.Test)

Example 7 with LogCustomizer

Use of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in project jackrabbit-oak by apache.

From the class ObservationQueueFullWarnTest, method warnOnQueueFull: verifies that filling the observation queue causes ChangeProcessor to log the queue-full warning.

@Test
public void warnOnQueueFull() throws RepositoryException, InterruptedException, ExecutionException {
    LogCustomizer customLogs = LogCustomizer.forLogger(ChangeProcessor.class.getName())
            .filter(Level.WARN)
            .contains(OBS_QUEUE_FULL_WARN)
            .create();
    observationManager.addEventListener(listener, NODE_ADDED, TEST_PATH, true, null, null, false);
    try {
        customLogs.starting();
        addNodeToFillObsQueue();
        assertTrue("Observation queue full warning must get logged", customLogs.getLogs().size() > 0);
        customLogs.finished();
    } finally {
        observationManager.removeEventListener(listener);
    }
}
Also used: LogCustomizer (org.apache.jackrabbit.oak.commons.junit.LogCustomizer), Test (org.junit.Test), AbstractRepositoryTest (org.apache.jackrabbit.oak.jcr.AbstractRepositoryTest)
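
Note that this test calls customLogs.finished() inside the try block, so a failed assertion would leave the customizer attached; Example 6 above avoids that by calling finished() in the finally block. A minimal sketch of the same test with that adjustment, reusing the fields and helpers of the test class shown above (observationManager, listener, addNodeToFillObsQueue, OBS_QUEUE_FULL_WARN), might look like this:

@Test
public void warnOnQueueFull() throws RepositoryException, InterruptedException, ExecutionException {
    LogCustomizer customLogs = LogCustomizer.forLogger(ChangeProcessor.class.getName())
            .filter(Level.WARN)
            .contains(OBS_QUEUE_FULL_WARN)
            .create();
    observationManager.addEventListener(listener, NODE_ADDED, TEST_PATH, true, null, null, false);
    customLogs.starting();
    try {
        addNodeToFillObsQueue();
        assertTrue("Observation queue full warning must get logged", customLogs.getLogs().size() > 0);
    } finally {
        // Always detach the log customizer and the listener, even if the assertion fails.
        customLogs.finished();
        observationManager.removeEventListener(listener);
    }
}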

Example 8 with LogCustomizer

Use of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in project jackrabbit-oak by apache.

From the class SegmentDataStoreBlobGCIT, method checkMark: runs a mark-only blob garbage collection with TRACE logging captured and checks the blob reference records written to a temporary folder.

@Test
public void checkMark() throws Exception {
    LogCustomizer customLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    DataStoreState state = setUp(10);
    log.info("{} blobs available : {}", state.blobsPresent.size(), state.blobsPresent);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    String rootFolder = folder.newFolder().getAbsolutePath();
    MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
    gcObj.collectGarbage(true);
    customLogs.finished();
    assertBlobReferenceRecords(state.blobsPresent, rootFolder);
}
Also used: LogCustomizer (org.apache.jackrabbit.oak.commons.junit.LogCustomizer), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), MarkSweepGarbageCollector (org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector), Test (org.junit.Test)
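
Here the TRACE capture brackets the collection but the captured entries are never inspected; the test only checks the reference records written to rootFolder. If the log output itself is of interest, a hedged variant could also assert on it. The sketch below reuses the test's init and folder helpers and assumes the collector emits at least one TRACE entry during a mark-only run, which the original test does not itself verify:

LogCustomizer traceLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();
traceLogs.starting();
try {
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    String rootFolder = folder.newFolder().getAbsolutePath();
    MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
    gcObj.collectGarbage(true);
    // Assumption: a mark-only run produces at least one TRACE entry from the collector.
    assertTrue("expected TRACE output from the collector", traceLogs.getLogs().size() > 0);
} finally {
    traceLogs.finished();
}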

Example 9 with LogCustomizer

Use of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in project jackrabbit-oak by apache.

From the class DataStoreTrackerGCTest, method clusterGCInternal: runs blob garbage collection across two cluster nodes and asserts that a second GC round logs no blob-deletion errors while the tracked blob ids stay in sync with the surviving blobs.

private void clusterGCInternal(Cluster cluster1, Cluster cluster2, boolean same) throws Exception {
    BlobStore s1 = cluster1.blobStore;
    BlobIdTracker tracker1 = (BlobIdTracker) ((BlobTrackingStore) s1).getTracker();
    DataStoreState state1 = init(cluster1.nodeStore, 0);
    cluster1.nodeStore.runBackgroundOperations();
    ScheduledFuture<?> scheduledFuture1 = newSingleThreadScheduledExecutor().schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture1.get();
    // Add blobs on cluster2
    BlobStore s2 = cluster2.blobStore;
    BlobIdTracker tracker2 = (BlobIdTracker) ((BlobTrackingStore) s2).getTracker();
    cluster2.nodeStore.runBackgroundOperations();
    DataStoreState state2 = init(cluster2.nodeStore, 20);
    cluster2.nodeStore.runBackgroundOperations();
    cluster1.nodeStore.runBackgroundOperations();
    ScheduledFuture<?> scheduledFuture2 = newSingleThreadScheduledExecutor().schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture2.get();
    // If not the same cluster, mark references on the other repository first
    if (!same) {
        cluster2.gc.collectGarbage(true);
    }
    // do a gc on cluster1 with sweep
    cluster1.gc.collectGarbage(false);
    Set<String> existingAfterGC = iterate(s1);
    // Check the state of the blob store after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
    // Tracked blobs should reflect deletions after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), retrieveTracked(tracker1));
    // Again create snapshots at both cluster nodes to synchronize the latest state of
    // local references with datastore at each node
    scheduledFuture1 = newSingleThreadScheduledExecutor().schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture1.get();
    scheduledFuture2 = newSingleThreadScheduledExecutor().schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
    scheduledFuture2.get();
    // Capture logs for the second round of gc
    LogCustomizer customLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.WARN)
            .filter(Level.WARN)
            .contains("Error occurred while deleting blob with id")
            .create();
    customLogs.starting();
    if (!same) {
        cluster2.gc.collectGarbage(true);
    }
    cluster1.gc.collectGarbage(false);
    existingAfterGC = iterate(s1);
    assertEquals(0, customLogs.getLogs().size());
    customLogs.finished();
    // Check the state of the blob store after gc
    assertEquals(union(state1.blobsPresent, state2.blobsPresent), existingAfterGC);
}
Also used: LogCustomizer (org.apache.jackrabbit.oak.commons.junit.LogCustomizer), MarkSweepGarbageCollector (org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector), GarbageCollectableBlobStore (org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore), DataStoreUtils.getBlobStore (org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils.getBlobStore), BlobStore (org.apache.jackrabbit.oak.spi.blob.BlobStore)

Example 10 with LogCustomizer

Use of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in project jackrabbit-oak by apache.

From the class MongoBlobGCTest, method checkGcPathLogging: runs the same mark-only collection pattern as Example 8 against a Mongo-backed store and verifies that exactly one blob reference record is written.

@Test
public void checkGcPathLogging() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    LogCustomizer customLogs = LogCustomizer.forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    setUp(false);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
    gcObj.collectGarbage(true);
    customLogs.finished();
    assertBlobReferenceRecords(1, rootFolder);
}
Also used: LogCustomizer (org.apache.jackrabbit.oak.commons.junit.LogCustomizer), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), MarkSweepGarbageCollector (org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector), Test (org.junit.Test)
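
Across all of these examples the lifecycle is the same: build a customizer for one logger with forLogger(...), narrow it with enable/filter/contains, call create() and starting(), run the code under test, read getLogs(), and call finished() in a finally block. A minimal, self-contained sketch of that pattern follows; the captureLogs helper name is illustrative and not part of jackrabbit-oak, the Level import is assumed to be the logback Level used by these tests, and getLogs() is assumed to return a List<String>, as the size() and string-concatenation usage above suggests.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;

import ch.qos.logback.classic.Level;
import org.apache.jackrabbit.oak.commons.junit.LogCustomizer;

public class LogCaptureSketch {

    // Illustrative helper: capture entries at the given level that contain `fragment`
    // from `loggerName` while `action` runs, always detaching the customizer afterwards.
    public static List<String> captureLogs(String loggerName, Level level, String fragment, Callable<?> action) throws Exception {
        LogCustomizer logs = LogCustomizer.forLogger(loggerName)
                .enable(level)
                .filter(level)
                .contains(fragment)
                .create();
        logs.starting();
        try {
            action.call();
            // Copy defensively in case finished() clears the captured entries.
            return new ArrayList<>(logs.getLogs());
        } finally {
            logs.finished();
        }
    }
}

A test could then assert directly on the returned list instead of managing starting() and finished() inline.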

Aggregations

LogCustomizer (org.apache.jackrabbit.oak.commons.junit.LogCustomizer): 18 usages
Test (org.junit.Test): 16 usages
MarkSweepGarbageCollector (org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector): 5 usages
RDBTableMetaData (org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.RDBTableMetaData): 5 usages
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 4 usages
PropertyIndexEditorProvider (org.apache.jackrabbit.oak.plugins.index.property.PropertyIndexEditorProvider): 4 usages
MemoryNodeStore (org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore): 4 usages
NodeStore (org.apache.jackrabbit.oak.spi.state.NodeStore): 4 usages
ProxyNodeStore (org.apache.jackrabbit.oak.spi.state.ProxyNodeStore): 4 usages
AbstractDocumentStoreTest (org.apache.jackrabbit.oak.plugins.document.AbstractDocumentStoreTest): 3 usages
QueryCondition (org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.QueryCondition): 3 usages
NodeBuilder (org.apache.jackrabbit.oak.spi.state.NodeBuilder): 3 usages
Closeable (java.io.Closeable): 2 usages
Semaphore (java.util.concurrent.Semaphore): 2 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2 usages
AbstractRepositoryTest (org.apache.jackrabbit.oak.jcr.AbstractRepositoryTest): 2 usages
AsyncIndexStats (org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate.AsyncIndexStats): 2 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 2 usages
Method (java.lang.reflect.Method): 1 usage
Connection (java.sql.Connection): 1 usage