Example usage of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in the Apache jackrabbit-oak project: class RDBDocumentStoreSchemaUpgradeTest, method init12().
@Test
public void init12() {
    // Capture the INFO-level upgrade messages ("... to DB level 2") emitted
    // while the store migrates each table from schema 1 to schema 2.
    LogCustomizer upgradeLogs = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 2").create();
    upgradeLogs.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T12").initialSchema(1).upgradeToSchema(2)
            .dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        assertTrue(nodesMeta.hasSplitDocs());
        // The 1 -> 2 upgrade issues five DDL statements per table, each logged once.
        int expectedEntries = 5 * RDBDocumentStore.getTableNames().size();
        assertEquals("unexpected # of log entries: " + upgradeLogs.getLogs(), expectedEntries,
                upgradeLogs.getLogs().size());
    } finally {
        upgradeLogs.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
Example usage of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in the Apache jackrabbit-oak project: class RDBDocumentStoreSchemaUpgradeTest, method init01fail().
@Test
public void init01fail() {
    // Check the assumption FIRST: the failure injection below needs the wrapper
    // data source. Previously this ran after logCustomizer.starting(), so a
    // skipped test (AssumptionViolatedException) left the log customizer
    // attached with finished() never called.
    Assume.assumeTrue(ds instanceof RDBDataSourceWrapper);
    RDBDataSourceWrapper wds = (RDBDataSourceWrapper) ds;
    // Expect one "Attempted to upgrade" INFO message per table when the
    // ALTER TABLE statements are forced to fail.
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("Attempted to upgrade").create();
    logCustomizer.starting();
    wds.setFailAlterTableAddColumnStatements(true);
    RDBOptions op = new RDBOptions().tablePrefix("T01F").initialSchema(0).upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        // Upgrade was blocked, so the table must still be at schema 0 (no VERSION column).
        assertFalse(meta.hasVersion());
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(),
                RDBDocumentStore.getTableNames().size(), logCustomizer.getLogs().size());
        // Despite the failed upgrade the store must remain usable.
        UpdateOp testInsert = new UpdateOp(Utils.getIdFromPath("/foo"), true);
        assertTrue(rdb.create(Collection.NODES, Collections.singletonList(testInsert)));
    } finally {
        wds.setFailAlterTableAddColumnStatements(false);
        logCustomizer.finished();
        if (rdb != null) {
            rdb.dispose();
        }
    }
}
Example usage of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in the Apache jackrabbit-oak project: class RDBDocumentStoreSchemaUpgradeTest, method init22().
@Test
public void init22() {
    // Starting at schema 2 and "upgrading" to 2 is a no-op, so no
    // "to DB level" upgrade messages may appear.
    LogCustomizer upgradeLogs = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level").create();
    upgradeLogs.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T22").initialSchema(2).upgradeToSchema(2)
            .dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        // Schema 2 tables carry both the VERSION and the split-docs columns.
        assertTrue(nodesMeta.hasVersion());
        assertTrue(nodesMeta.hasSplitDocs());
        assertEquals("unexpected # of log entries: " + upgradeLogs.getLogs(), 0, upgradeLogs.getLogs().size());
    } finally {
        upgradeLogs.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
Example usage of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in the Apache jackrabbit-oak project: class ObservationQueueFullWarnTest, method warnOnRepeatedQueueFull().
@Test
public void warnOnRepeatedQueueFull() throws RepositoryException, InterruptedException, ExecutionException {
    // Two observers on ChangeProcessor's logger: one for the rate-limited WARN,
    // one for the always-on DEBUG variant of the queue-full message.
    LogCustomizer warnObserver = LogCustomizer.forLogger(ChangeProcessor.class.getName())
            .filter(Level.WARN).contains(OBS_QUEUE_FULL_WARN).create();
    LogCustomizer debugObserver = LogCustomizer.forLogger(ChangeProcessor.class.getName())
            .filter(Level.DEBUG).contains(OBS_QUEUE_FULL_WARN).create();
    // Force the logger down to DEBUG for the duration of the test.
    LogCustomizer debugEnabler = LogCustomizer.forLogger(ChangeProcessor.class.getName())
            .enable(Level.DEBUG).create();
    debugEnabler.starting();
    long savedWarnInterval = ChangeProcessor.QUEUE_FULL_WARN_INTERVAL;
    // Assumption is that 10 (virtual) minutes won't pass by the time we move
    // from one stage of queue fill to the next.
    ChangeProcessor.QUEUE_FULL_WARN_INTERVAL = TimeUnit.MINUTES.toMillis(10);
    Clock savedClock = ChangeProcessor.clock;
    Clock testClock = new Clock.Virtual();
    ChangeProcessor.clock = testClock;
    testClock.waitUntil(System.currentTimeMillis());
    // NOTE(review): the static mutations above happen before the try, so a
    // failure in addEventListener would leave them unrestored — confirm whether
    // that is acceptable for this fixture.
    observationManager.addEventListener(listener, NODE_ADDED, TEST_PATH, true, null, null, false);
    try {
        // Stage 1: trigger the first WARN by filling the queue once.
        addNodeToFillObsQueue();
        emptyObsQueue();
        // Stage 2: refill immediately — within the warn interval only DEBUG may fire.
        warnObserver.starting();
        debugObserver.starting();
        addNodeToFillObsQueue();
        assertTrue("Observation queue full warning must not logged until some time has past since last log", warnObserver.getLogs().size() == 0);
        assertTrue("Observation queue full warning should get logged on debug though in the mean time", debugObserver.getLogs().size() > 0);
        warnObserver.finished();
        debugObserver.finished();
        emptyObsQueue();
        // Stage 3: advance the virtual clock past the interval so WARN fires again.
        testClock.waitUntil(testClock.getTime() + ChangeProcessor.QUEUE_FULL_WARN_INTERVAL);
        warnObserver.starting();
        debugObserver.starting();
        addNodeToFillObsQueue();
        assertTrue("Observation queue full warning must get logged after some time has past since last log", warnObserver.getLogs().size() > 0);
        warnObserver.finished();
        debugObserver.finished();
    } finally {
        observationManager.removeEventListener(listener);
        ChangeProcessor.clock = savedClock;
        ChangeProcessor.QUEUE_FULL_WARN_INTERVAL = savedWarnInterval;
        debugEnabler.finished();
    }
}
Example usage of org.apache.jackrabbit.oak.commons.junit.LogCustomizer in the Apache jackrabbit-oak project: class ActiveDeletedBlobCollectorTest, method doDebugLogWhileErrorsWhileDeletingBlobs().
@Test
public void doDebugLogWhileErrorsWhileDeletingBlobs() throws Exception {
    // Captures DEBUG-level "Exception occurred while ..." messages from the
    // active-deleted-blob collector (renamed from warnLogCustomizer: it
    // observes DEBUG, not WARN).
    LogCustomizer debugLogs = LogCustomizer.forLogger(ActiveDeletedBlobCollectorFactory.class.getName())
            .enable(Level.DEBUG).contains("Exception occurred while ").create();
    BlobDeletionCallback bdc = adbc.getBlobDeletionCallback();
    bdc.deleted("blobId1", Collections.singleton("/a"));
    bdc.deleted("blobId2", Collections.singleton("/b"));
    bdc.deleted("blobId3", Collections.singleton("/c"));
    bdc.commitProgress(COMMIT_SUCCEDED);
    // Case 1: a chunk already removed out-of-band — purge logs once on DEBUG.
    List<String> removedChunks = Lists.newArrayList(blobStore.resolveChunks("blobId2"));
    blobStore.countDeleteChunks(removedChunks, 0);
    debugLogs.starting();
    adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore);
    blobStore.deletedChunkIds.removeAll(removedChunks);
    verifyBlobsDeleted("blobId1", "blobId3");
    assertEquals("Should log on debug", 1, debugLogs.getLogs().size());
    debugLogs.finished();
    // Case 2: the data store throws for one chunk — again a single DEBUG entry.
    bdc = adbc.getBlobDeletionCallback();
    bdc.deleted("blobId4", Collections.singleton("/d"));
    bdc.deleted("blobId5", Collections.singleton("/e"));
    bdc.commitProgress(COMMIT_SUCCEDED);
    blobStore.resetLists();
    blobStore.failWithDSEForChunkIds.addAll(Lists.newArrayList(blobStore.resolveChunks("blobId4")));
    debugLogs.starting();
    adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore);
    verifyBlobsDeleted("blobId3", "blobId5");
    assertEquals("Should log on debug", 1, debugLogs.getLogs().size());
    debugLogs.finished();
}
Aggregations