Use of org.apache.jackrabbit.core.data.DataStore in project jackrabbit-oak by apache.

The class AbstractDataStoreService, method activate. The OSGi activation callback builds the delegate DataStore, wraps it in a DataStoreBlobStore, and registers the wrapper as a service:
protected void activate(ComponentContext context, Map<String, Object> config) throws RepositoryException {
    // Change to a mutable map; it may be modified in the createDataStore call.
    config = Maps.newHashMap(config);
    DataStore ds = createDataStore(context, config);
    boolean encodeLengthInId = PropertiesUtil.toBoolean(config.get(PROP_ENCODE_LENGTH), true);
    int cacheSizeInMB = PropertiesUtil.toInteger(config.get(PROP_CACHE_SIZE), DataStoreBlobStore.DEFAULT_CACHE_SIZE);
    String homeDir = lookup(context, PROP_HOME);
    if (homeDir != null) {
        log.info("Initializing the DataStore with homeDir [{}]", homeDir);
    }
    // Map the remaining configuration entries onto the delegate's bean setters, then initialize it.
    PropertiesUtil.populate(ds, config, false);
    ds.init(homeDir);

    BlobStoreStats stats = new BlobStoreStats(getStatisticsProvider());
    this.dataStore = new DataStoreBlobStore(ds, encodeLengthInId, cacheSizeInMB);
    this.dataStore.setBlobStatsCollector(stats);
    PropertiesUtil.populate(dataStore, config, false);

    Dictionary<String, Object> props = new Hashtable<String, Object>();
    props.put(Constants.SERVICE_PID, ds.getClass().getName());
    props.put(DESCRIPTION, getDescription());
    if (context.getProperties().get(PROP_SPLIT_BLOBSTORE) != null) {
        props.put(PROP_SPLIT_BLOBSTORE, context.getProperties().get(PROP_SPLIT_BLOBSTORE));
    }
    // Register the wrapper under both the BlobStore and GarbageCollectableBlobStore interfaces.
    reg = context.getBundleContext().registerService(
            new String[] { BlobStore.class.getName(), GarbageCollectableBlobStore.class.getName() },
            dataStore, props);
    mbeanReg = registerMBeans(context.getBundleContext(), dataStore, stats);
}
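For context, activate() is driven by the abstract createDataStore hook that subclasses implement. A minimal sketch of such a subclass, assuming only the hook signature used above; the service name is illustrative, and OakFileDataStore is the file-backed delegate that also appears in the test below:

// Hypothetical subclass for illustration; only createDataStore(...) and
// OakFileDataStore come from the surrounding code, the rest is assumed.
public class ExampleFileDataStoreService extends AbstractDataStoreService {

    @Override
    protected DataStore createDataStore(ComponentContext context, Map<String, Object> config) {
        OakFileDataStore ds = new OakFileDataStore();
        // activate() later calls PropertiesUtil.populate(ds, config, false),
        // which maps remaining entries such as "path" or "minRecordLength"
        // onto matching bean setters, and then calls ds.init(homeDir).
        return ds;
    }
}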
Use of org.apache.jackrabbit.core.data.DataStore in project jackrabbit-oak by apache.

The class DataStoreCheckTest, method setup. The @Before fixture selects whichever backend is configured (S3, Azure, or a local file data store), wraps it in a DataStoreBlobStore, and seeds a segment store with ten blobs:
@Before
public void setup() throws Exception {
    if (S3DataStoreUtils.isS3Configured()) {
        Properties props = S3DataStoreUtils.getS3Config();
        props.setProperty("cacheSize", "0");
        container = props.getProperty(S3Constants.S3_BUCKET);
        DataStore ds = S3DataStoreUtils.getS3DataStore(S3DataStoreUtils.getFixtures().get(0), props,
                temporaryFolder.newFolder().getAbsolutePath());
        setupDataStore = new DataStoreBlobStore(ds);
        cfgFilePath = createTempConfig(temporaryFolder.newFile(), props);
        dsOption = "s3ds";
    } else if (AzureDataStoreUtils.isAzureConfigured()) {
        Properties props = AzureDataStoreUtils.getAzureConfig();
        props.setProperty("cacheSize", "0");
        container = props.getProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME);
        DataStore ds = AzureDataStoreUtils.getAzureDataStore(props,
                temporaryFolder.newFolder().getAbsolutePath());
        setupDataStore = new DataStoreBlobStore(ds);
        cfgFilePath = createTempConfig(temporaryFolder.newFile(), props);
        dsOption = "azureblobds";
    } else {
        // Fall back to a local file-based data store when no cloud backend is configured.
        OakFileDataStore delegate = new OakFileDataStore();
        dsPath = temporaryFolder.newFolder().getAbsolutePath();
        delegate.setPath(dsPath);
        delegate.init(null);
        setupDataStore = new DataStoreBlobStore(delegate);
        File cfgFile = temporaryFolder.newFile();
        Properties props = new Properties();
        props.put("path", dsPath);
        props.put("minRecordLength", 4096L); // avoids the deprecated new Long(...) constructor
        cfgFilePath = createTempConfig(cfgFile, props);
        dsOption = "fds";
    }

    File storeFile = temporaryFolder.newFolder();
    storePath = storeFile.getAbsolutePath();
    FileStore fileStore = FileStoreBuilder.fileStoreBuilder(storeFile)
            .withBlobStore(setupDataStore)
            .withMaxFileSize(256)
            .withSegmentCacheSize(64)
            .build();
    NodeStore store = SegmentNodeStoreBuilders.builder(fileStore).build();

    // Create nodes with blobs stored in the data store, recording every chunk id added.
    NodeBuilder a = store.getRoot().builder();
    int numBlobs = 10;
    blobsAdded = Sets.newHashSet();
    for (int i = 0; i < numBlobs; i++) {
        SegmentBlob b = (SegmentBlob) store.createBlob(randomStream(i, 18342));
        Iterator<String> idIter = setupDataStore.resolveChunks(b.getBlobId());
        while (idIter.hasNext()) {
            blobsAdded.add(idIter.next());
        }
        a.child("c" + i).setProperty("x", b);
    }
    store.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    log.info("Created blobs : {}", blobsAdded);
    fileStore.close();
}
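After this setup, a test would typically run the datastore consistency check against the prepared fixtures, using the dsOption, cfgFilePath, and storePath fields populated above. A minimal sketch of such an invocation, assuming DataStoreCheck exposes a main(String[]) entry point; the flag names below are assumptions for illustration, not confirmed options:

// Illustrative invocation only: the flag names are assumptions.
String dump = temporaryFolder.newFolder().getAbsolutePath();
DataStoreCheck.main(new String[] {
        "--id", "--ref", "--consistency",   // which listings/checks to run (assumed flags)
        "--" + dsOption, cfgFilePath,       // backend chosen in setup(): fds, s3ds, or azureblobds
        "--store", storePath,               // segment store created in setup()
        "--dump", dump                      // output directory for the generated files
});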