Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by Apache:
class CompactionAndCleanupIT, method offlineCompactionTool.
/**
 * Test for the offline compaction tool (OAK-5971).
 */
@Test
public void offlineCompactionTool() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder root = nodeStore.getRoot().builder();
        root.child("content");
        nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
    } finally {
        fileStore.close();
    }
    // Run the offline compaction tool against the now-closed store directory
    Compact.builder().withPath(getFileStoreFolder()).build().run();
    // Reopen the store and verify the content survived compaction
    fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        assertTrue(nodeStore.getRoot().hasChildNode("content"));
    } finally {
        fileStore.close();
    }
}
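The sequence to note here is close, run, reopen: the offline Compact tool works directly on the segment files, so every FileStore handle on the directory must be closed before it runs. A minimal sketch of the same sequence outside a test, where storeDir is a placeholder path and exception handling is elided:

// Sketch: offline compaction of a closed segment store directory.
File storeDir = new File("/path/to/segmentstore"); // placeholder
// No FileStore may be open on storeDir while the tool runs.
Compact.builder().withPath(storeDir).build().run();
// Reopen the store and resume normal operation.
FileStore store = fileStoreBuilder(storeDir).build();
try {
    SegmentNodeStore ns = SegmentNodeStoreBuilders.builder(store).build();
    // ... read and write as usual ...
} finally {
    store.close();
}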
Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by Apache:
class CompactionAndCleanupIT, method offlineCompactionBinR1.
/**
 * Create two binary nodes with the same content and the same blob
 * reference, and verify the de-duplication capabilities of compaction.
 */
@Test
public void offlineCompactionBinR1() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        int blobSize = 5 * 1024 * 1024;
        byte[] data = new byte[blobSize];
        new Random().nextBytes(data);
        Blob b = nodeStore.createBlob(new ByteArrayInputStream(data));
        // Two nodes referencing the same 5 MB blob
        NodeBuilder c1 = content.child("c1");
        c1.setProperty("blob1", b);
        NodeBuilder c2 = content.child("c2");
        c2.setProperty("blob2", b);
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }
        // ~5 MB: the two references to the blob are de-duplicated by the SegmentWriter
        long size1 = fileStore.getStats().getApproximateSize();
        fileStore.compact();
        fileStore.cleanup();
        long size2 = fileStore.getStats().getApproximateSize();
        assertSize("with compacted binaries", size2, 0, size1 * 11 / 10);
    } finally {
        fileStore.close();
    }
}
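Note that the two offline-compaction tests above never shut down the ScheduledExecutorService backing their DefaultStatisticsProvider, so they leak its thread. The concurrentWritesCleanupZeroReclaimedSize test below closes its scheduler with ExecutorCloser; a minimal sketch of that wiring:

// Sketch: tie the stats scheduler's lifetime to the store, closing it with
// ExecutorCloser as concurrentWritesCleanupZeroReclaimedSize does below.
ScheduledExecutorService scheduler = newSingleThreadScheduledExecutor();
FileStore store = fileStoreBuilder(getFileStoreFolder())
        .withStatisticsProvider(new DefaultStatisticsProvider(scheduler))
        .build();
try {
    // ... use the store ...
} finally {
    store.close();
    new ExecutorCloser(scheduler).close();
}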
Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by Apache:
class CompactionAndCleanupIT, method concurrentWritesCleanupZeroReclaimedSize.
@Test
public void concurrentWritesCleanupZeroReclaimedSize() throws Exception {
    ScheduledExecutorService scheduler = newSingleThreadScheduledExecutor();
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(scheduler);
    final FileStoreGCMonitor fileStoreGCMonitor = new FileStoreGCMonitor(Clock.SIMPLE);
    final FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withGCMonitor(fileStoreGCMonitor)
            .withStatisticsProvider(statsProvider)
            .withMaxFileSize(1)
            .withMemoryMapping(false)
            .build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    ExecutorService executorService = newFixedThreadPool(100);
    final AtomicInteger counter = new AtomicInteger();
    try {
        Callable<Void> concurrentWriteTask = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 25 * 25));
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                fileStore.flush();
                return null;
            }
        };
        List<Future<?>> results = newArrayList();
        for (int i = 0; i < 100; i++) {
            results.add(executorService.submit(concurrentWriteTask));
        }
        Thread.sleep(100);
        fileStore.cleanup();
        for (Future<?> result : results) {
            assertNull(result.get());
        }
        long reclaimedSize = fileStoreGCMonitor.getLastReclaimedSize();
        assertEquals("Reclaimed size expected is 0, but instead was: " + reclaimedSize, 0, reclaimedSize);
    } finally {
        new ExecutorCloser(executorService).close();
        fileStore.close();
        new ExecutorCloser(scheduler).close();
    }
}
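The createBlob(nodeStore, size) helper called from the write task is defined elsewhere in CompactionAndCleanupIT and is not part of this listing. A hypothetical equivalent, modeled on how offlineCompactionBinR1 above creates its blob (the real helper may differ):

// Hypothetical stand-in for the createBlob helper used above: random bytes
// streamed into the node store, as offlineCompactionBinR1 does inline.
private static Blob createBlob(NodeStore nodeStore, int size) throws IOException {
    byte[] data = new byte[size];
    new Random().nextBytes(data);
    return nodeStore.createBlob(new ByteArrayInputStream(data));
}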
Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by Apache:
class CompactionAndCleanupIT, method offlineCompactionCps.
/**
 * Create a lot of data nodes (no binaries) and a few checkpoints, and
 * verify that compacting the checkpoints does not cause the size to explode.
 */
@Test
public void offlineCompactionCps() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        // Create ~2MB of data
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        for (int i = 0; i < 10000; i++) {
            NodeBuilder c = content.child("c" + i);
            for (int j = 0; j < 1000; j++) {
                // note: the inner loop rewrites the same property, so each
                // child ends up with a single property
                c.setProperty("p" + i, "v" + i);
            }
        }
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        fileStore.compact();
        fileStore.cleanup();
        // Compacts to ~548 kB
        long size0 = fileStore.getStats().getApproximateSize();
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }
        long size1 = fileStore.getStats().getApproximateSize();
        assertTrue("the size should grow or stay the same", size1 >= size0);
        // TODO the following assertion doesn't say anything useful. The
        // conveyed message is "the repository can shrink, grow or stay the
        // same, as long as it remains in a 10% margin of the previous size
        // that I took out of thin air". It has to be fixed or removed.
        // fileStore.compact();
        // fileStore.cleanup();
        // long size2 = fileStore.getStats().getApproximateSize();
        // assertSize("with checkpoints compacted", size2, size1 * 9 / 10, size1 * 11 / 10);
    } finally {
        fileStore.close();
    }
}
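The checkpoints are what keep the size from shrinking here: a live checkpoint pins the segments of the generation it references. A sketch of releasing them before compacting, under the assumption that released checkpoints make those segments reclaimable (nodeStore, fileStore, and cps as in the test above):

// Sketch: release the checkpoints so the segments they pin can be reclaimed
// by the following compact/cleanup pass.
for (String cp : cps) {
    nodeStore.release(cp);
}
fileStore.compact();
fileStore.cleanup();
long sizeAfterRelease = fileStore.getStats().getApproximateSize();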
Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by Apache:
class ExternalBlobIT, method getNodeStore.
protected SegmentNodeStore getNodeStore(BlobStore blobStore) throws Exception {
    if (nodeStore == null) {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        store = fileStoreBuilder(getWorkDir())
                .withBlobStore(blobStore)
                .withMaxFileSize(1)
                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                .build();
        nodeStore = SegmentNodeStoreBuilders.builder(store).build();
    }
    return nodeStore;
}
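A sketch of how a caller might use this lazily-initializing accessor, assuming an in-memory blob store (MemoryBlobStore is a stand-in; the actual ExternalBlobIT tests may pass a different BlobStore implementation):

// Sketch: binaries written through the returned store are kept in the
// supplied BlobStore rather than inline in the segments (above the store's
// inline-size threshold). MemoryBlobStore here is only a stand-in.
SegmentNodeStore ns = getNodeStore(new MemoryBlobStore());
NodeBuilder builder = ns.getRoot().builder();
builder.setProperty("blob", ns.createBlob(new ByteArrayInputStream(new byte[1024 * 1024])));
ns.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);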