Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.
Class CachingDataStoreTest, method init.
private void init(int i, int cacheSize, int uploadSplit) throws Exception {
    LOG.info("Starting init");
    // create executor
    taskLatch = new CountDownLatch(1);
    callbackLatch = new CountDownLatch(1);
    afterExecuteLatch = new CountDownLatch(i);
    TestExecutor listeningExecutor = new TestExecutor(1, taskLatch, callbackLatch, afterExecuteLatch);
    // stats
    ScheduledExecutorService statsExecutor = Executors.newSingleThreadScheduledExecutor();
    closer.register(new ExecutorCloser(statsExecutor, 500, TimeUnit.MILLISECONDS));
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(statsExecutor);
    scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
    closer.register(new ExecutorCloser(scheduledExecutor, 500, TimeUnit.MILLISECONDS));
    final File datastoreRoot = folder.newFolder();
    final TestMemoryBackend testBackend = new TestMemoryBackend(datastoreRoot);
    this.backend = testBackend;
    dataStore = new AbstractSharedCachingDataStore() {

        @Override
        protected AbstractSharedBackend createBackend() {
            return testBackend;
        }

        @Override
        public int getMinRecordLength() {
            return 0;
        }
    };
    dataStore.setStatisticsProvider(statsProvider);
    dataStore.setCacheSize(cacheSize);
    dataStore.setStagingSplitPercentage(uploadSplit);
    dataStore.listeningExecutor = listeningExecutor;
    dataStore.schedulerExecutor = scheduledExecutor;
    dataStore.executor = sameThreadExecutor();
    dataStore.init(root.getAbsolutePath());
    LOG.info("Finished init");
}
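The pattern above is shared by every usage on this page: DefaultStatisticsProvider is constructed around a ScheduledExecutorService that drives its time-series aggregation, and the executor remains owned by the caller, who must shut it down. A minimal standalone sketch of that lifecycle, assuming only the Oak stats API already imported above (the meter name "example.meter" is made up for illustration):

private static void statsProviderLifecycle() {
    ScheduledExecutorService statsExecutor = Executors.newSingleThreadScheduledExecutor();
    try {
        StatisticsProvider statsProvider = new DefaultStatisticsProvider(statsExecutor);
        // Consumers obtain stats instances through the StatisticsProvider interface.
        MeterStats meter = statsProvider.getMeter("example.meter", StatsOptions.DEFAULT);
        meter.mark();
    } finally {
        // The provider does not own its executor; the caller shuts it down,
        // here mirroring the ExecutorCloser used in the test above.
        new ExecutorCloser(statsExecutor, 500, TimeUnit.MILLISECONDS).close();
    }
}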
Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.
Class CompactionAndCleanupIT, method offlineCompactionBinC1.
@Test
public void offlineCompactionBinC1() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline().withBinaryDeduplication();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        int blobSize = 5 * 1024 * 1024;
        byte[] data = new byte[blobSize];
        new Random().nextBytes(data);
        NodeBuilder c1 = content.child("c1");
        Blob b1 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c1.setProperty("blob1", b1);
        NodeBuilder c2 = content.child("c2");
        Blob b2 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c2.setProperty("blob2", b2);
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }
        long size1 = fileStore.getStats().getApproximateSize();
        fileStore.compact();
        fileStore.cleanup();
        long size2 = fileStore.getStats().getApproximateSize();
        assertSize("with compacted binaries", size2, 0, size1 - blobSize);
    } finally {
        fileStore.close();
    }
}
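The test writes the same 5MB byte array as two distinct blobs; with withBinaryDeduplication() enabled, offline compaction is expected to keep a single copy, which is what the final bound (at least one blobSize below the pre-compaction size) verifies. The assertSize helper is not part of this excerpt; a plausible reconstruction, consistent with how it is called here and in offlineCompactionBinC2 below, would be:

// Hypothetical reconstruction of the assertSize helper used above; the real
// implementation lives in CompactionAndCleanupIT and may differ in wording.
private static void assertSize(String info, long size, long lower, long upper) {
    // Fail unless the measured size falls inside the inclusive [lower, upper] interval.
    assertTrue("Store size " + info + " expected in [" + lower + ", " + upper
            + "] but was " + size, size >= lower && size <= upper);
}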
Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.
Class CompactionAndCleanupIT, method offlineCompaction.
@Test
public void offlineCompaction() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        // 5MB blob
        int blobSize = 5 * 1024 * 1024;
        // Create ~2MB of data
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        for (int i = 0; i < 10000; i++) {
            NodeBuilder c = content.child("c" + i);
            // Note: the inner loop overwrites the same property ("p" + i) on
            // every iteration, so each child ends up with a single property.
            for (int j = 0; j < 1000; j++) {
                c.setProperty("p" + i, "v" + i);
            }
        }
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size1 = fileStore.getStats().getApproximateSize();
        log.debug("File store size {}", byteCountToDisplaySize(size1));
        // Create a property with a 5MB blob
        NodeBuilder builder = nodeStore.getRoot().builder();
        builder.setProperty("blob1", createBlob(nodeStore, blobSize));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size2 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size2 > size1);
        assertTrue("the store should grow by at least the size of the blob", size2 - size1 > blobSize);
        // Now remove the property. No gc yet -> size doesn't shrink
        builder = nodeStore.getRoot().builder();
        builder.removeProperty("blob1");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size3 = fileStore.getStats().getApproximateSize();
        assertTrue("the size should grow", size3 > size2);
        // 1st gc cycle -> the first blob should get collected
        fileStore.compact();
        fileStore.cleanup();
        long size4 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should shrink", size4 < size3);
        assertTrue("the store should shrink by at least the size of the blob", size3 - size4 >= blobSize);
        // Add another 5MB binary
        builder = nodeStore.getRoot().builder();
        builder.setProperty("blob2", createBlob(nodeStore, blobSize));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size5 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size5 > size4);
        assertTrue("the store should grow by at least the size of the blob", size5 - size4 > blobSize);
        // 2nd gc cycle -> the second blob should *not* be collected
        fileStore.compact();
        fileStore.cleanup();
        long size6 = fileStore.getStats().getApproximateSize();
        assertTrue("the blob should not be collected", Math.abs(size5 - size6) < blobSize);
        // 3rd gc cycle -> no significant change
        fileStore.compact();
        fileStore.cleanup();
        long size7 = fileStore.getStats().getApproximateSize();
        assertTrue("the blob should not be collected", Math.abs(size6 - size7) < blobSize);
        // No data loss
        byte[] blob = ByteStreams.toByteArray(nodeStore.getRoot()
                .getProperty("blob2").getValue(Type.BINARY).getNewStream());
        assertEquals(blobSize, blob.length);
    } finally {
        fileStore.close();
    }
}
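The createBlob helper called above is private to CompactionAndCleanupIT and is not reproduced on this page. Based on the inline blob creation in offlineCompactionBinC1, a plausible reconstruction is:

// Hypothetical reconstruction of the createBlob helper referenced above,
// modeled on the inline blob creation in offlineCompactionBinC1; the actual
// helper in CompactionAndCleanupIT may differ.
private static Blob createBlob(NodeStore nodeStore, int size) throws IOException {
    byte[] data = new byte[size];
    new Random().nextBytes(data);
    return nodeStore.createBlob(new ByteArrayInputStream(data));
}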
Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.
Class CompactionAndCleanupIT, method offlineCompactionBinC2.
/**
 * Create 2 binary nodes with the same content but not the same reference.
 * Reduce the max size of de-duplicated binaries below the binary length.
 * Verify the de-duplication capabilities of compaction.
 */
@Test
public void offlineCompactionBinC2() throws Exception {
    int blobSize = 5 * 1024 * 1024;
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline()
            .withBinaryDeduplication()
            .setBinaryDeduplicationMaxSize(blobSize / 2);
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        byte[] data = new byte[blobSize];
        new Random().nextBytes(data);
        NodeBuilder c1 = content.child("c1");
        Blob b1 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c1.setProperty("blob1", b1);
        NodeBuilder c2 = content.child("c2");
        Blob b2 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c2.setProperty("blob2", b2);
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }
        long size1 = fileStore.getStats().getApproximateSize();
        fileStore.compact();
        fileStore.cleanup();
        long size2 = fileStore.getStats().getApproximateSize();
        // not expected to reduce the size too much, as the binaries are
        // above the threshold
        assertSize("with compacted binaries", size2, size1 * 9 / 10, size1 * 11 / 10);
    } finally {
        fileStore.close();
    }
}
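The only difference from offlineCompactionBinC1 is the setBinaryDeduplicationMaxSize(blobSize / 2) cap: both binaries are larger than the cap, so compaction keeps both copies and the store size is only expected to drift within the asserted 10% window. The arithmetic, spelled out as an illustrative sanity check (values copied from the test above, the check itself is not part of the original code):

// Each 5MB binary exceeds the 2.5MB de-duplication cap, so neither is
// de-duplicated and the post-compaction size stays within
// [size1 * 9 / 10, size1 * 11 / 10].
int blobSize = 5 * 1024 * 1024;     // 5,242,880 bytes per binary
int dedupMax = blobSize / 2;        // 2,621,440 bytes de-duplication cap
assert blobSize > dedupMax;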
Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.
Class CompactionAndCleanupIT, method concurrentWritesCleanupNoNewGen.
/**
 * Test asserting OAK-4669: no new generation of tar files should be created
 * when the segments are the same and when various indices are created.
 */
@Test
public void concurrentWritesCleanupNoNewGen() throws Exception {
    ScheduledExecutorService scheduler = newSingleThreadScheduledExecutor();
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(scheduler);
    final FileStoreGCMonitor fileStoreGCMonitor = new FileStoreGCMonitor(Clock.SIMPLE);
    File fileStoreFolder = getFileStoreFolder();
    final FileStore fileStore = fileStoreBuilder(fileStoreFolder)
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withGCMonitor(fileStoreGCMonitor)
            .withStatisticsProvider(statsProvider)
            .withMaxFileSize(1)
            .withMemoryMapping(false)
            .build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    ExecutorService executorService = newFixedThreadPool(5);
    final AtomicInteger counter = new AtomicInteger();
    try {
        Callable<Void> concurrentWriteTask = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 512 * 512));
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                fileStore.flush();
                return null;
            }
        };
        List<Future<?>> results = newArrayList();
        for (int i = 0; i < 5; i++) {
            results.add(executorService.submit(concurrentWriteTask));
        }
        for (Future<?> result : results) {
            assertNull(result.get());
        }
        fileStore.cleanup();
        for (String fileName : fileStoreFolder.list()) {
            if (fileName.endsWith(".tar")) {
                int pos = fileName.length() - "a.tar".length();
                char generation = fileName.charAt(pos);
                assertTrue("Expected generation is 'a', but instead was: '" + generation
                        + "' for file " + fileName, generation == 'a');
            }
        }
    } finally {
        new ExecutorCloser(executorService).close();
        fileStore.close();
        new ExecutorCloser(scheduler).close();
    }
}
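The final loop relies on the segment tar naming scheme, in which the character immediately before the .tar suffix encodes the cleanup generation (for example, data00000a.tar is generation 'a', and a rewrite during cleanup would produce data00000b.tar). A small standalone sketch of that extraction, assuming only the name layout exercised by the assertion above, not an official Oak API:

// Hypothetical helper extracting the generation character from a tar file
// name such as "data00000a.tar"; assumes the naming layout used in the
// assertion above.
static char tarGeneration(String fileName) {
    if (!fileName.endsWith(".tar")) {
        throw new IllegalArgumentException("not a tar file: " + fileName);
    }
    // The generation character sits immediately before the ".tar" suffix.
    return fileName.charAt(fileName.length() - "a.tar".length());
}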