Example 1 with SharedDataStore

Use of org.apache.jackrabbit.oak.plugins.blob.SharedDataStore in project jackrabbit-oak by Apache.

From class DocumentNodeStoreService, method registerNodeStore:

private void registerNodeStore() throws IOException {
    String uri = PropertiesUtil.toString(prop(PROP_URI, FWK_PROP_URI), DEFAULT_URI);
    String db = PropertiesUtil.toString(prop(PROP_DB, FWK_PROP_DB), DEFAULT_DB);
    boolean soKeepAlive = PropertiesUtil.toBoolean(prop(PROP_SO_KEEP_ALIVE, FWK_PROP_SO_KEEP_ALIVE), DEFAULT_SO_KEEP_ALIVE);
    int cacheSize = toInteger(prop(PROP_CACHE), DEFAULT_CACHE);
    int nodeCachePercentage = toInteger(prop(PROP_NODE_CACHE_PERCENTAGE), DEFAULT_NODE_CACHE_PERCENTAGE);
    int prevDocCachePercentage = toInteger(prop(PROP_PREV_DOC_CACHE_PERCENTAGE), DEFAULT_PREV_DOC_CACHE_PERCENTAGE);
    int childrenCachePercentage = toInteger(prop(PROP_CHILDREN_CACHE_PERCENTAGE), DEFAULT_CHILDREN_CACHE_PERCENTAGE);
    int diffCachePercentage = toInteger(prop(PROP_DIFF_CACHE_PERCENTAGE), DEFAULT_DIFF_CACHE_PERCENTAGE);
    int blobCacheSize = toInteger(prop(PROP_BLOB_CACHE_SIZE), DEFAULT_BLOB_CACHE_SIZE);
    String persistentCache = getPath(PROP_PERSISTENT_CACHE, DEFAULT_PERSISTENT_CACHE);
    String journalCache = getPath(PROP_JOURNAL_CACHE, DEFAULT_JOURNAL_CACHE);
    int cacheSegmentCount = toInteger(prop(PROP_CACHE_SEGMENT_COUNT), DEFAULT_CACHE_SEGMENT_COUNT);
    int cacheStackMoveDistance = toInteger(prop(PROP_CACHE_STACK_MOVE_DISTANCE), DEFAULT_CACHE_STACK_MOVE_DISTANCE);
    boolean bundlingDisabled = toBoolean(prop(PROP_BUNDLING_DISABLED), DEFAULT_BUNDLING_DISABLED);
    boolean prefetchExternalChanges = toBoolean(prop(PROP_PREFETCH_EXTERNAL_CHANGES), false);
    int updateLimit = toInteger(prop(PROP_UPDATE_LIMIT), DocumentMK.UPDATE_LIMIT);
    DocumentMK.Builder mkBuilder = new DocumentMK.Builder()
            .setStatisticsProvider(statisticsProvider)
            .memoryCacheSize(cacheSize * MB)
            .memoryCacheDistribution(nodeCachePercentage, prevDocCachePercentage,
                    childrenCachePercentage, diffCachePercentage)
            .setCacheSegmentCount(cacheSegmentCount)
            .setCacheStackMoveDistance(cacheStackMoveDistance)
            .setBundlingDisabled(bundlingDisabled)
            .setJournalPropertyHandlerFactory(journalPropertyHandlerFactory)
            .setLeaseCheck(!ClusterNodeInfo.DEFAULT_LEASE_CHECK_DISABLED)
            .setLeaseFailureHandler(new LeaseFailureHandler() {

        @Override
        public void handleLeaseFailure() {
            try {
                // plan A: try stopping oak-core
                log.error("handleLeaseFailure: stopping oak-core...");
                Bundle bundle = context.getBundleContext().getBundle();
                bundle.stop(Bundle.STOP_TRANSIENT);
                log.error("handleLeaseFailure: stopped oak-core.");
            // plan A worked, perfect!
            } catch (BundleException e) {
                log.error("handleLeaseFailure: exception while stopping oak-core: " + e, e);
                // plan B: stop only DocumentNodeStoreService (to stop the background threads)
                log.error("handleLeaseFailure: stopping DocumentNodeStoreService...");
                context.disableComponent(DocumentNodeStoreService.class.getName());
                log.error("handleLeaseFailure: stopped DocumentNodeStoreService");
            // plan B succeeded.
            }
        }
    }).setPrefetchExternalChanges(prefetchExternalChanges).setUpdateLimit(updateLimit);
    if (!Strings.isNullOrEmpty(persistentCache)) {
        mkBuilder.setPersistentCache(persistentCache);
    }
    if (!Strings.isNullOrEmpty(journalCache)) {
        mkBuilder.setJournalCache(journalCache);
    }
    boolean wrappingCustomBlobStore = customBlobStore && blobStore instanceof BlobStoreWrapper;
    //Set blobstore before setting the DB
    if (customBlobStore && !wrappingCustomBlobStore) {
        checkNotNull(blobStore, "Use of custom BlobStore enabled via [%s] but blobStore reference not initialized", CUSTOM_BLOB_STORE);
        mkBuilder.setBlobStore(blobStore);
    }
    if (documentStoreType == DocumentStoreType.RDB) {
        checkNotNull(dataSource, "DataStore type set [%s] but DataSource reference not initialized", PROP_DS_TYPE);
        if (!customBlobStore) {
            checkNotNull(blobDataSource, "DataStore type set [%s] but BlobDataSource reference not initialized", PROP_DS_TYPE);
            mkBuilder.setRDBConnection(dataSource, blobDataSource);
            log.info("Connected to datasources {} {}", dataSource, blobDataSource);
        } else {
            if (blobDataSource != null && blobDataSource != dataSource) {
                log.info("Ignoring blobDataSource {} as custom blob store takes precedence.", blobDataSource);
            }
            mkBuilder.setRDBConnection(dataSource);
            log.info("Connected to datasource {}", dataSource);
        }
    } else {
        MongoClientURI mongoURI = new MongoClientURI(uri);
        if (log.isInfoEnabled()) {
            // Take care around not logging the uri directly as it
            // might contain passwords
            log.info("Starting DocumentNodeStore with host={}, db={}, cache size (MB)={}, persistentCache={}, " + "journalCache={}, blobCacheSize (MB)={}, maxReplicationLagInSecs={}", mongoURI.getHosts(), db, cacheSize, persistentCache, journalCache, blobCacheSize, maxReplicationLagInSecs);
            log.info("Mongo Connection details {}", MongoConnection.toString(mongoURI.getOptions()));
        }
        mkBuilder.setMaxReplicationLag(maxReplicationLagInSecs, TimeUnit.SECONDS);
        mkBuilder.setSocketKeepAlive(soKeepAlive);
        mkBuilder.setMongoDB(uri, db, blobCacheSize);
        log.info("Connected to database '{}'", db);
    }
    if (!customBlobStore) {
        defaultBlobStore = mkBuilder.getBlobStore();
        log.info("Registering the BlobStore with ServiceRegistry");
        blobStoreReg = context.getBundleContext().registerService(BlobStore.class.getName(), defaultBlobStore, null);
    }
    //Set wrapping blob store after setting the DB
    if (wrappingCustomBlobStore) {
        ((BlobStoreWrapper) blobStore).setBlobStore(mkBuilder.getBlobStore());
        mkBuilder.setBlobStore(blobStore);
    }
    mkBuilder.setExecutor(executor);
    // attach GCMonitor
    final GCMonitorTracker gcMonitor = new GCMonitorTracker();
    gcMonitor.start(whiteboard);
    closer.register(asCloseable(gcMonitor));
    mkBuilder.setGCMonitor(gcMonitor);
    nodeStore = mkBuilder.getNodeStore();
    // ensure a clusterId is initialized 
    // and expose it as 'oak.clusterid' repository descriptor
    GenericDescriptors clusterIdDesc = new GenericDescriptors();
    clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY,
            new SimpleValueFactory().createValue(ClusterRepositoryInfo.getOrCreateId(nodeStore)), true, false);
    whiteboard.register(Descriptors.class, clusterIdDesc, Collections.emptyMap());
    // If a shared data store register the repo id in the data store
    if (SharedDataStoreUtils.isShared(blobStore)) {
        String repoId = null;
        try {
            repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore);
            ((SharedDataStore) blobStore).addMetadataRecord(new ByteArrayInputStream(new byte[0]),
                    SharedDataStoreUtils.SharedStoreRecordType.REPOSITORY.getNameFromId(repoId));
        } catch (Exception e) {
            throw new IOException("Could not register a unique repositoryId", e);
        }
        if (blobStore instanceof BlobTrackingStore) {
            final long trackSnapshotInterval = toLong(prop(PROP_BLOB_SNAPSHOT_INTERVAL), DEFAULT_BLOB_SNAPSHOT_INTERVAL);
            String root = getRepositoryHome();
            BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore;
            if (trackingStore.getTracker() != null) {
                trackingStore.getTracker().close();
            }
            trackingStore.addTracker(new BlobIdTracker(root, repoId, trackSnapshotInterval,
                    (SharedDataStore) blobStore));
        }
    }
    registerJMXBeans(nodeStore, mkBuilder);
    registerLastRevRecoveryJob(nodeStore);
    registerJournalGC(nodeStore);
    if (!isNodeStoreProvider()) {
        observerTracker = new ObserverTracker(nodeStore);
        observerTracker.start(context.getBundleContext());
    }
    journalPropertyHandlerFactory.start(whiteboard);
    DocumentStore ds = nodeStore.getDocumentStore();
    // OAK-2682: time difference detection applied at startup with a default
    // max time diff of 2000 millis (2sec)
    final long maxDiff = Long.parseLong(System.getProperty("oak.documentMK.maxServerTimeDiffMillis", "2000"));
    try {
        if (maxDiff >= 0) {
            final long timeDiff = ds.determineServerTimeDifferenceMillis();
            log.info("registerNodeStore: server time difference: {}ms (max allowed: {}ms)", timeDiff, maxDiff);
            if (Math.abs(timeDiff) > maxDiff) {
                throw new AssertionError("Server clock seems off (" + timeDiff + "ms) by more than configured amount (" + maxDiff + "ms)");
            }
        }
    } catch (RuntimeException e) {
        // no checked exception
        // in case of a RuntimeException, just log but continue
        log.warn("registerNodeStore: got RuntimeException while trying to determine time difference to server: " + e, e);
    }
    String[] serviceClasses;
    if (isNodeStoreProvider()) {
        registerNodeStoreProvider(nodeStore);
        serviceClasses = new String[] { DocumentNodeStore.class.getName(), Clusterable.class.getName() };
    } else {
        serviceClasses = new String[] { NodeStore.class.getName(), DocumentNodeStore.class.getName(), Clusterable.class.getName() };
    }
    Dictionary<String, Object> props = new Hashtable<String, Object>();
    props.put(Constants.SERVICE_PID, DocumentNodeStore.class.getName());
    props.put(DESCRIPTION, getMetadata(ds));
    // OAK-2844: in order to allow DocumentDiscoveryLiteService to directly
    // require a service DocumentNodeStore (instead of having to do an 'instanceof')
    // the registration is now done for both NodeStore and DocumentNodeStore here.
    nodeStoreReg = context.getBundleContext().registerService(serviceClasses, nodeStore, props);
}
Also used: com.mongodb.MongoClientURI, java.io.ByteArrayInputStream, java.io.IOException, java.util.Hashtable, org.apache.jackrabbit.commons.SimpleValueFactory, org.apache.jackrabbit.oak.plugins.blob.BlobTrackingStore, org.apache.jackrabbit.oak.plugins.blob.SharedDataStore, org.apache.jackrabbit.oak.plugins.blob.datastore.BlobIdTracker, org.apache.jackrabbit.oak.spi.blob.BlobStoreWrapper, org.apache.jackrabbit.oak.spi.commit.ObserverTracker, org.apache.jackrabbit.oak.spi.descriptors.GenericDescriptors, org.apache.jackrabbit.oak.spi.gc.GCMonitorTracker, org.apache.jackrabbit.oak.spi.state.Clusterable, org.apache.jackrabbit.oak.spi.state.NodeStore, org.osgi.framework.Bundle, org.osgi.framework.BundleException
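
The shared-data-store step buried in this method is the pattern every later example repeats: write an empty metadata record, named after the repository id, into the SharedDataStore so that all repositories sharing the store can discover each other before blob garbage collection runs. A minimal sketch of that pattern factored into a standalone helper (the class and method names are hypothetical, package locations assumed from Oak 1.8; the Oak calls are the ones used above):

import java.io.ByteArrayInputStream;
import java.io.IOException;

import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils;
import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
import org.apache.jackrabbit.oak.spi.state.NodeStore;

public final class SharedRepositoryRegistration {

    private SharedRepositoryRegistration() {
    }

    // Registers this repository in a shared data store and returns its id,
    // or null when the blob store is not shared. The empty stream is
    // intentional: only the record's name (type prefix + repository id)
    // carries information.
    public static String registerRepositoryId(NodeStore nodeStore, BlobStore blobStore)
            throws IOException {
        if (!SharedDataStoreUtils.isShared(blobStore)) {
            return null;
        }
        try {
            String repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore);
            ((SharedDataStore) blobStore).addMetadataRecord(new ByteArrayInputStream(new byte[0]),
                    SharedDataStoreUtils.SharedStoreRecordType.REPOSITORY.getNameFromId(repoId));
            return repoId;
        } catch (Exception e) {
            throw new IOException("Could not register a unique repositoryId", e);
        }
    }
}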

Example 2 with SharedDataStore

Use of org.apache.jackrabbit.oak.plugins.blob.SharedDataStore in project jackrabbit-oak by Apache.

From class SharedBlobStoreGCTest, method setUp:

@Before
public void setUp() throws Exception {
    log.debug("In setUp()");
    clock = new Clock.Virtual();
    clock.waitUntil(Revision.getCurrentTimestamp());
    DataStoreUtils.time = clock.getTime();
    File rootFolder = folder.newFolder();
    BlobStore blobStore1 = getBlobStore(rootFolder);
    DocumentNodeStore ds1 = new DocumentMK.Builder().setAsyncDelay(0)
            .setDocumentStore(new MemoryDocumentStore()).setBlobStore(blobStore1)
            .clock(clock).getNodeStore();
    String repoId1 = ClusterRepositoryInfo.getOrCreateId(ds1);
    // Register the unique repository id in the data store
    ((SharedDataStore) blobStore1).addMetadataRecord(new ByteArrayInputStream(new byte[0]),
            SharedStoreRecordType.REPOSITORY.getNameFromId(repoId1));
    BlobStore blobStore2 = getBlobStore(rootFolder);
    DocumentNodeStore ds2 = new DocumentMK.Builder().setAsyncDelay(0)
            .setDocumentStore(new MemoryDocumentStore()).setBlobStore(blobStore2)
            .clock(clock).getNodeStore();
    String repoId2 = ClusterRepositoryInfo.getOrCreateId(ds2);
    // Register the unique repository id in the data store
    ((SharedDataStore) blobStore2).addMetadataRecord(new ByteArrayInputStream(new byte[0]),
            SharedStoreRecordType.REPOSITORY.getNameFromId(repoId2));
    cluster1 = new Cluster(ds1, repoId1, 20);
    cluster1.init();
    log.debug("Initialized {}", cluster1);
    cluster2 = new Cluster(ds2, repoId2, 100);
    cluster2.init();
    log.debug("Initialized {}", cluster2);
}
Also used: java.io.ByteArrayInputStream, java.io.File, org.apache.jackrabbit.oak.plugins.blob.SharedDataStore, org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore, org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore, org.apache.jackrabbit.oak.spi.blob.BlobStore, org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore, org.apache.jackrabbit.oak.stats.Clock, org.junit.Before
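
A matching teardown would remove the marker records again so a re-run starts from a clean shared store. A hedged sketch, assuming deleteMetadataRecord(String), which the SharedDataStore interface declares alongside addMetadataRecord (the helper name is hypothetical):

import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils;
import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils.SharedStoreRecordType;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;

// Inverse of the registration in setUp(): delete the per-repository
// marker record identified by the same getNameFromId(repoId) name.
private static void unregisterRepository(BlobStore blobStore, String repoId) {
    if (SharedDataStoreUtils.isShared(blobStore)) {
        ((SharedDataStore) blobStore).deleteMetadataRecord(
                SharedStoreRecordType.REPOSITORY.getNameFromId(repoId));
    }
}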

Example 3 with SharedDataStore

Use of org.apache.jackrabbit.oak.plugins.blob.SharedDataStore in project jackrabbit-oak by Apache.

From class MongoBlobGCTest, method init:

private MarkSweepGarbageCollector init(int blobGcMaxAgeInSecs, ThreadPoolExecutor executor, String root) throws Exception {
    DocumentNodeStore store = mk.getNodeStore();
    String repoId = null;
    if (SharedDataStoreUtils.isShared(store.getBlobStore())) {
        repoId = ClusterRepositoryInfo.getOrCreateId(store);
        ((SharedDataStore) store.getBlobStore()).addMetadataRecord(new ByteArrayInputStream(new byte[0]),
                REPOSITORY.getNameFromId(repoId));
    }
    if (Strings.isNullOrEmpty(root)) {
        root = folder.newFolder().getAbsolutePath();
    }
    MarkSweepGarbageCollector gc = new MarkSweepGarbageCollector(new DocumentBlobReferenceRetriever(store),
            (GarbageCollectableBlobStore) store.getBlobStore(), executor, root, 5, blobGcMaxAgeInSecs, repoId);
    return gc;
}
Also used: java.io.ByteArrayInputStream, org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector, org.apache.jackrabbit.oak.plugins.blob.SharedDataStore
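
A typical call site for this helper mirrors the test in the next example: build a small thread pool, run a full mark-and-sweep, and shut the pool down. A sketch with illustrative values (the test method name is hypothetical; init(...) is the method above):

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector;
import org.junit.Test;

@Test
public void gcAllUnreferencedBlobs() throws Exception {
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    try {
        // blobGcMaxAgeInSecs = 0: every unreferenced blob is old enough to collect
        MarkSweepGarbageCollector gc = init(0, executor, null);
        // false = run both the mark and the sweep phase, not mark-only
        gc.collectGarbage(false);
    } finally {
        executor.shutdown();
    }
}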

Example 4 with SharedDataStore

Use of org.apache.jackrabbit.oak.plugins.blob.SharedDataStore in project jackrabbit-oak by Apache.

From class MongoBlobGCTest, method gcLongRunningBlobCollection:

@Test
public void gcLongRunningBlobCollection() throws Exception {
    DataStoreState state = setUp(true);
    log.info("{} Blobs added {}", state.blobsAdded.size(), state.blobsAdded);
    log.info("{} Blobs should be present {}", state.blobsPresent.size(), state.blobsPresent);
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    DocumentNodeStore store = mk.getNodeStore();
    String repoId = null;
    if (SharedDataStoreUtils.isShared(store.getBlobStore())) {
        repoId = ClusterRepositoryInfo.getOrCreateId(store);
        ((SharedDataStore) store.getBlobStore()).addMetadataRecord(new ByteArrayInputStream(new byte[0]),
                REPOSITORY.getNameFromId(repoId));
    }
    TestGarbageCollector gc = new TestGarbageCollector(new DocumentBlobReferenceRetriever(store),
            (GarbageCollectableBlobStore) store.getBlobStore(), executor,
            folder.newFolder().getAbsolutePath(), 5, 5000, repoId);
    gc.collectGarbage(false);
    Set<String> existingAfterGC = iterate();
    log.info("{} Blobs existing after gc {}", existingAfterGC.size(), existingAfterGC);
    assertTrue(Sets.difference(state.blobsPresent, existingAfterGC).isEmpty());
    assertEquals(gc.additionalBlobs, Sets.symmetricDifference(state.blobsPresent, existingAfterGC));
}
Also used: java.io.ByteArrayInputStream, java.util.concurrent.ThreadPoolExecutor, org.apache.jackrabbit.oak.plugins.blob.SharedDataStore, org.junit.Test

Example 5 with SharedDataStore

Use of org.apache.jackrabbit.oak.plugins.blob.SharedDataStore in project jackrabbit-oak by Apache.

From class SegmentDataStoreBlobGCIT, method init:

private MarkSweepGarbageCollector init(long blobGcMaxAgeInSecs, ThreadPoolExecutor executor, String root) throws Exception {
    String repoId = null;
    if (SharedDataStoreUtils.isShared(store.getBlobStore())) {
        repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore);
        ((SharedDataStore) store.getBlobStore()).addMetadataRecord(new ByteArrayInputStream(new byte[0]),
                REPOSITORY.getNameFromId(repoId));
    }
    MarkSweepGarbageCollector gc = new MarkSweepGarbageCollector(new SegmentBlobReferenceRetriever(store),
            (GarbageCollectableBlobStore) store.getBlobStore(), executor, root, 2048, blobGcMaxAgeInSecs, repoId);
    return gc;
}
Also used: java.io.ByteArrayInputStream, org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector, org.apache.jackrabbit.oak.plugins.blob.SharedDataStore
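
Since each participant writes a REPOSITORY-typed record, the shared store can also be asked which repositories are registered, which is how blob GC decides whether every peer has checked in. A hedged sketch, assuming getAllMetadataRecords(String) on the SharedDataStore interface and the record-type prefix from SharedStoreRecordType.getType() (the helper name is hypothetical):

import java.util.List;

import org.apache.jackrabbit.core.data.DataRecord;
import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils.SharedStoreRecordType;

// Lists every repository marker record in the shared data store; shared
// blob GC is only safe once each of these repositories has also
// contributed its references records.
private static void printRegisteredRepositories(SharedDataStore dataStore) {
    List<DataRecord> repositories =
            dataStore.getAllMetadataRecords(SharedStoreRecordType.REPOSITORY.getType());
    for (DataRecord record : repositories) {
        System.out.println("registered repository record: " + record.getIdentifier());
    }
}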
