
Example 11 with DefaultStatisticsProvider

Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.

Class FileStoreStatsTest, method testJournalWriteStats.

@Test
public void testJournalWriteStats() throws Exception {
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(executor);
    FileStore fileStore = fileStoreBuilder(segmentFolder.newFolder()).withStatisticsProvider(statsProvider).build();
    FileStoreStats stats = new FileStoreStats(statsProvider, fileStore, 0);
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    for (int i = 0; i < 10; i++) {
        NodeBuilder root = nodeStore.getRoot().builder();
        root.setProperty("count", i);
        nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
    }
    assertEquals(10, stats.getJournalWriteStatsAsCount());
}
Also used: DefaultStatisticsProvider(org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider) SegmentNodeStore(org.apache.jackrabbit.oak.segment.SegmentNodeStore) NodeBuilder(org.apache.jackrabbit.oak.spi.state.NodeBuilder) StatisticsProvider(org.apache.jackrabbit.oak.stats.StatisticsProvider) Test(org.junit.Test)
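The excerpt above references an executor field that is declared elsewhere in FileStoreStatsTest. A minimal, self-contained sketch of that missing lifecycle, using only calls that already appear in these examples; segmentFolder.newFolder() and the static imports fileStoreBuilder and newSingleThreadScheduledExecutor are assumed to come from the test class:

ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
StatisticsProvider statsProvider = new DefaultStatisticsProvider(executor);
FileStore fileStore = fileStoreBuilder(segmentFolder.newFolder()).withStatisticsProvider(statsProvider).build();
try {
    // exercise the store as in testJournalWriteStats above
} finally {
    fileStore.close();
    // stop the scheduler that backs DefaultStatisticsProvider, as Example 15 does
    new ExecutorCloser(executor).close();
}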

Example 12 with DefaultStatisticsProvider

Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.

Class CompactionAndCleanupIT, method compactionNoBinaryClone.

@Test
public void compactionNoBinaryClone() throws Exception {
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withGCOptions(defaultGCOptions().setRetainedGenerations(2)).withStatisticsProvider(new DefaultStatisticsProvider(executor)).withMaxFileSize(1).build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        // 5MB blob
        int blobSize = 5 * 1024 * 1024;
        // Create ~2MB of data
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        for (int i = 0; i < 10000; i++) {
            NodeBuilder c = content.child("c" + i);
            for (int j = 0; j < 1000; j++) {
                c.setProperty("p" + i, "v" + i);
            }
        }
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size1 = fileStore.getStats().getApproximateSize();
        log.debug("File store size {}", byteCountToDisplaySize(size1));
        // Create a property with 5 MB blob
        NodeBuilder builder = nodeStore.getRoot().builder();
        builder.setProperty("blob1", createBlob(nodeStore, blobSize));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size2 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size2 > size1);
        assertTrue("the store should grow of at least the size of the blob", size2 - size1 >= blobSize);
        // Now remove the property. No gc yet -> size doesn't shrink
        builder = nodeStore.getRoot().builder();
        builder.removeProperty("blob1");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size3 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size3 > size2);
        // 1st gc cycle -> no reclaimable garbage...
        fileStore.compact();
        fileStore.cleanup();
        long size4 = fileStore.getStats().getApproximateSize();
        // Add another 5MB binary doubling the blob size
        builder = nodeStore.getRoot().builder();
        builder.setProperty("blob2", createBlob(nodeStore, blobSize));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size5 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size5 > size4);
        assertTrue("the store should grow of at least the size of the blob", size5 - size4 >= blobSize);
        // 2st gc cycle -> 1st blob should get collected
        fileStore.compact();
        fileStore.cleanup();
        long size6 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should shrink", size6 < size5);
        assertTrue("the store should shrink of at least the size of the blob", size5 - size6 >= blobSize);
        // 3rtd gc cycle -> no  significant change
        fileStore.compact();
        fileStore.cleanup();
        long size7 = fileStore.getStats().getApproximateSize();
        // No data loss
        byte[] blob = ByteStreams.toByteArray(nodeStore.getRoot().getProperty("blob2").getValue(Type.BINARY).getNewStream());
        assertEquals(blobSize, blob.length);
    } finally {
        fileStore.close();
    }
}
Also used: FileStore(org.apache.jackrabbit.oak.segment.file.FileStore) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) DefaultStatisticsProvider(org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider) NodeBuilder(org.apache.jackrabbit.oak.spi.state.NodeBuilder) Test(org.junit.Test)

Example 13 with DefaultStatisticsProvider

Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.

Class CompactionAndCleanupIT, method offlineCompactionTool.

/**
 * Test for the Offline compaction tool (OAK-5971)
 */
@Test
public void offlineCompactionTool() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(1).withGCOptions(gcOptions).withStatisticsProvider(new DefaultStatisticsProvider(executor)).build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder root = nodeStore.getRoot().builder();
        root.child("content");
        nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
    } finally {
        fileStore.close();
    }
    Compact.builder().withPath(getFileStoreFolder()).build().run();
    fileStore = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(1).withGCOptions(gcOptions).withStatisticsProvider(new DefaultStatisticsProvider(executor)).build();
    nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        assertTrue(nodeStore.getRoot().hasChildNode("content"));
    } finally {
        fileStore.close();
    }
}
Also used: FileStore(org.apache.jackrabbit.oak.segment.file.FileStore) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) SegmentGCOptions(org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions) DefaultStatisticsProvider(org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider) NodeBuilder(org.apache.jackrabbit.oak.spi.state.NodeBuilder) Test(org.junit.Test)

Example 14 with DefaultStatisticsProvider

Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.

Class CompactionAndCleanupIT, method offlineCompactionBinR1.

/**
 * Create two binary nodes with the same content and the same reference. Verify
 * the de-duplication capabilities of compaction.
 */
@Test
public void offlineCompactionBinR1() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(1).withGCOptions(gcOptions).withStatisticsProvider(new DefaultStatisticsProvider(executor)).build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        int blobSize = 5 * 1024 * 1024;
        byte[] data = new byte[blobSize];
        new Random().nextBytes(data);
        Blob b = nodeStore.createBlob(new ByteArrayInputStream(data));
        NodeBuilder c1 = content.child("c1");
        c1.setProperty("blob1", b);
        NodeBuilder c2 = content.child("c2");
        c2.setProperty("blob2", b);
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }
        // 5MB, de-duplicated by the SegmentWriter
        long size1 = fileStore.getStats().getApproximateSize();
        fileStore.compact();
        fileStore.cleanup();
        long size2 = fileStore.getStats().getApproximateSize();
        assertSize("with compacted binaries", size2, 0, size1 * 11 / 10);
    } finally {
        fileStore.close();
    }
}
Also used: ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Blob(org.apache.jackrabbit.oak.api.Blob) SegmentGCOptions(org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions) DefaultStatisticsProvider(org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider) NodeBuilder(org.apache.jackrabbit.oak.spi.state.NodeBuilder) FileStore(org.apache.jackrabbit.oak.segment.file.FileStore) Random(java.util.Random) ByteArrayInputStream(java.io.ByteArrayInputStream) HashSet(java.util.HashSet) Test(org.junit.Test)
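A side note on the four checkpoints created above: they are held for 60 seconds and never released inside the test, so the content they pin stays reachable during compaction. If the intent were to let that content become reclaimable, the checkpoints would be released first. A hedged sketch, assuming the standard NodeStore checkpoint API (release is not used anywhere in these examples):

// release every checkpoint so cleanup() can reclaim the segments it pinned
for (String cp : cps) {
    nodeStore.release(cp);
}
fileStore.compact();
fileStore.cleanup();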

Example 15 with DefaultStatisticsProvider

Use of org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider in project jackrabbit-oak by apache.

Class CompactionAndCleanupIT, method concurrentWritesCleanupZeroReclaimedSize.

@Test
public void concurrentWritesCleanupZeroReclaimedSize() throws Exception {
    ScheduledExecutorService scheduler = newSingleThreadScheduledExecutor();
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(scheduler);
    final FileStoreGCMonitor fileStoreGCMonitor = new FileStoreGCMonitor(Clock.SIMPLE);
    final FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withGCOptions(defaultGCOptions().setRetainedGenerations(2)).withGCMonitor(fileStoreGCMonitor).withStatisticsProvider(statsProvider).withMaxFileSize(1).withMemoryMapping(false).build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    ExecutorService executorService = newFixedThreadPool(100);
    final AtomicInteger counter = new AtomicInteger();
    try {
        Callable<Void> concurrentWriteTask = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 25 * 25));
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                fileStore.flush();
                return null;
            }
        };
        List<Future<?>> results = newArrayList();
        for (int i = 0; i < 100; i++) {
            results.add(executorService.submit(concurrentWriteTask));
        }
        Thread.sleep(100);
        fileStore.cleanup();
        for (Future<?> result : results) {
            assertNull(result.get());
        }
        long reclaimedSize = fileStoreGCMonitor.getLastReclaimedSize();
        assertEquals("Reclaimed size expected is 0, but instead was: " + reclaimedSize, 0, reclaimedSize);
    } finally {
        new ExecutorCloser(executorService).close();
        fileStore.close();
        new ExecutorCloser(scheduler).close();
    }
}
Also used: ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) FileStoreGCMonitor(org.apache.jackrabbit.oak.segment.file.FileStoreGCMonitor) DefaultStatisticsProvider(org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider) NodeBuilder(org.apache.jackrabbit.oak.spi.state.NodeBuilder) StatisticsProvider(org.apache.jackrabbit.oak.stats.StatisticsProvider) Callable(java.util.concurrent.Callable) FileStore(org.apache.jackrabbit.oak.segment.file.FileStore) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) ExecutorCloser(org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser) Test(org.junit.Test)

Aggregations

DefaultStatisticsProvider (org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider): 20 usages
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 15 usages
Test (org.junit.Test): 12 usages
NodeBuilder (org.apache.jackrabbit.oak.spi.state.NodeBuilder): 10 usages
FileStore (org.apache.jackrabbit.oak.segment.file.FileStore): 9 usages
ExecutorCloser (org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser): 6 usages
SegmentGCOptions (org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions): 6 usages
StatisticsProvider (org.apache.jackrabbit.oak.stats.StatisticsProvider): 6 usages
File (java.io.File): 5 usages
HashSet (java.util.HashSet): 4 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 4 usages
Blob (org.apache.jackrabbit.oak.api.Blob): 4 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 3 usages
Random (java.util.Random): 3 usages
Callable (java.util.concurrent.Callable): 2 usages
ExecutorService (java.util.concurrent.ExecutorService): 2 usages
Future (java.util.concurrent.Future): 2 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2 usages
FileStoreBuilder (org.apache.jackrabbit.oak.segment.file.FileStoreBuilder): 2 usages
FileStoreGCMonitor (org.apache.jackrabbit.oak.segment.file.FileStoreGCMonitor): 2 usages
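Read together, the aggregation reflects the wiring pattern that repeats across these tests: a single-threaded ScheduledExecutorService backs a DefaultStatisticsProvider, the provider is handed to the FileStoreBuilder, and an ExecutorCloser shuts the scheduler down after the FileStore is closed. A condensed sketch of that pattern, inside a method that declares throws Exception as the tests do, assuming the static imports used throughout the examples (fileStoreBuilder, defaultGCOptions, newSingleThreadScheduledExecutor) and an illustrative store directory:

ScheduledExecutorService scheduler = newSingleThreadScheduledExecutor();
StatisticsProvider statsProvider = new DefaultStatisticsProvider(scheduler);
FileStore fileStore = fileStoreBuilder(new File("segmentstore")) // directory name is illustrative
        .withGCOptions(defaultGCOptions())
        .withStatisticsProvider(statsProvider)
        .withMaxFileSize(1)
        .build();
SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
try {
    // read and write through nodeStore; compact and clean up through fileStore, as in Examples 11-15
} finally {
    fileStore.close();
    new ExecutorCloser(scheduler).close();
}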