Use of org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore in project jackrabbit-oak by apache.
From class MongoAzureDataStoreBlobGCTest, method setUpConnection:
@Before
@Override
public void setUpConnection() throws Exception {
    Properties props = AzureDataStoreUtils.getAzureConfig();
    startDate = new Date();
    mongoConnection = connectionFactory.getConnection();
    MongoUtils.dropCollections(mongoConnection.getDB());
    File root = folder.newFolder();
    bucket = root.getName();
    props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, bucket);
    props.setProperty("cacheSize", "0");
    blobStore = new DataStoreBlobStore(AzureDataStoreUtils.getAzureDataStore(props, root.getAbsolutePath()));
    mk = new DocumentMK.Builder()
            .clock(getTestClock())
            .setMongoDB(mongoConnection.getDB())
            .setBlobStore(blobStore)
            .open();
}
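Once wrapped, the Azure-backed data store is used like any other Oak BlobStore. A minimal follow-on sketch (hypothetical binary content; writeBlob and getInputStream come from the BlobStore interface, the streams from java.io):

String blobId = blobStore.writeBlob(new ByteArrayInputStream(new byte[] { 1, 2, 3 }));
try (InputStream in = blobStore.getInputStream(blobId)) {
    // the binary is read back through the DataStoreBlobStore delegate
}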
Use of org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore in project jackrabbit-oak by apache.
From class ActiveDeletedBlobCollectionIT, method createRepository:
@Override
protected ContentRepository createRepository() {
    adbc = new ActiveDeletedBlobCollectorImpl(clock,
            new File(blobCollectionRoot.getRoot(), "deleted-blobs"), executorService);
    IndexCopier copier = createIndexCopier();
    editorProvider = new LuceneIndexEditorProvider(copier, null,
            new ExtractedTextCache(10 * FileUtils.ONE_MB, 100), null,
            Mounts.defaultMountInfoProvider(), adbc);
    provider = new LuceneIndexProvider(copier);
    mongoConnection = connectionFactory.getConnection();
    MongoUtils.dropCollections(mongoConnection.getDB());
    if (dataStoreType == DataStoreType.WITHOUT_FDS) {
        MongoBlobStore blobStore = new MongoBlobStore(mongoConnection.getDB());
        blobStore.setBlockSize(128);
        blobStore.setBlockSizeMin(48);
        this.blobStore = new CountingBlobStore(blobStore);
    } else {
        FileDataStore fds = new FileDataStore();
        fds.init(fileDataStoreRoot.getRoot().getAbsolutePath());
        DataStoreBlobStore dsbs = new DataStoreBlobStore(fds);
        dsbs.setBlockSize(128);
        this.blobStore = new CountingBlobStore(dsbs);
    }
    nodeStore = new DocumentMK.Builder()
            .setMongoDB(mongoConnection.getDB())
            .setBlobStore(this.blobStore)
            .getNodeStore();
    asyncIndexUpdate = new AsyncIndexUpdate("async", nodeStore, editorProvider);
    return new Oak(nodeStore)
            .with(new InitialContent())
            .with(new OpenSecurityProvider())
            .with((QueryIndexProvider) provider)
            .with((Observer) provider)
            .with(editorProvider)
            .createContentRepository();
}
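Both branches yield a blob store that the DocumentNodeStore writes binaries through. Tests that build a node store over a custom blob store typically release it when done; a sketch with assumed variable names (dispose() is DocumentNodeStore's shutdown method):

DocumentNodeStore ns = new DocumentMK.Builder()
        .setMongoDB(mongoConnection.getDB())
        .setBlobStore(blobStore)
        .getNodeStore();
try {
    // exercise the repository
} finally {
    ns.dispose();
}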
Use of org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore in project jackrabbit-oak by apache.
From class MultiplexingIndexWriterTest, method writesInDefaultMountBlobStore:
@Test
public void writesInDefaultMountBlobStore() throws Exception {
    CachingFileDataStore ds = DataStoreUtils.createCachingFDS(
            folder.newFolder().getAbsolutePath(), folder.newFolder().getAbsolutePath());
    LuceneIndexWriterFactory factory = new DefaultIndexWriterFactory(mip, null, new DataStoreBlobStore(ds));
    LuceneIndexWriter writer = factory.newInstance(defn, builder, true);
    // 1. Add entry in foo mount
    writer.updateDocument("/libs/config", newDoc("/libs/config"));
    writer.close(0);
    List<String> names = getIndexDirNodes();
    // Only dirNode for mount foo should be present
    assertThat(names, contains(indexDirName(fooMount)));
    // 2. Add entry in default mount
    writer = factory.newInstance(defn, builder, true);
    writer.updateDocument("/content", newDoc("/content"));
    writer.close(0);
    names = getIndexDirNodes();
    // Dir names for both mounts should be present
    assertThat(names, containsInAnyOrder(indexDirName(fooMount), indexDirName(defaultMount)));
}
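The assertions rely on a MountInfoProvider (mip) in which a mount named foo covers /libs, so the /libs/config document lands in foo's index directory while /content falls back to the default mount. A plausible setup, assumed rather than shown in the snippet (Mounts.newBuilder() is Oak's mount builder):

MountInfoProvider mip = Mounts.newBuilder()
        .mount("foo", "/libs")
        .build();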
Use of org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore in project jackrabbit-oak by apache.
From class Utils, method bootstrapDataStore:
@Nullable
public static GarbageCollectableBlobStore bootstrapDataStore(String[] args, Closer closer)
        throws IOException, RepositoryException {
    OptionParser parser = new OptionParser();
    parser.allowsUnrecognizedOptions();
    ArgumentAcceptingOptionSpec<String> s3dsConfig =
            parser.accepts("s3ds", "S3DataStore config").withRequiredArg().ofType(String.class);
    ArgumentAcceptingOptionSpec<String> fdsConfig =
            parser.accepts("fds", "FileDataStore config").withRequiredArg().ofType(String.class);
    ArgumentAcceptingOptionSpec<String> azureBlobDSConfig =
            parser.accepts("azureblobds", "AzureBlobStorageDataStore config").withRequiredArg().ofType(String.class);
    OptionSet options = parser.parse(args);
    if (!options.has(s3dsConfig) && !options.has(fdsConfig) && !options.has(azureBlobDSConfig)) {
        return null;
    }
    DataStore delegate;
    if (options.has(s3dsConfig)) {
        // --s3ds: shared S3 data store configured from the given properties file
        SharedS3DataStore s3ds = new SharedS3DataStore();
        String cfgPath = s3dsConfig.value(options);
        Properties props = loadAndTransformProps(cfgPath);
        s3ds.setProperties(props);
        File homeDir = Files.createTempDir();
        closer.register(asCloseable(homeDir));
        s3ds.init(homeDir.getAbsolutePath());
        delegate = s3ds;
    } else if (options.has(azureBlobDSConfig)) {
        // --azureblobds: Azure Blob Storage data store
        AzureDataStore azureds = new AzureDataStore();
        String cfgPath = azureBlobDSConfig.value(options);
        Properties props = loadAndTransformProps(cfgPath);
        azureds.setProperties(props);
        File homeDir = Files.createTempDir();
        azureds.init(homeDir.getAbsolutePath());
        closer.register(asCloseable(homeDir));
        delegate = azureds;
    } else {
        // --fds: local OakFileDataStore configured via bean properties
        delegate = new OakFileDataStore();
        String cfgPath = fdsConfig.value(options);
        Properties props = loadAndTransformProps(cfgPath);
        populate(delegate, asMap(props), true);
        delegate.init(null);
    }
    DataStoreBlobStore blobStore = new DataStoreBlobStore(delegate);
    closer.register(Utils.asCloseable(blobStore));
    return blobStore;
}
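A hypothetical invocation, assuming a FileDataStore properties file at a made-up path (the --fds flag corresponds to the fds option declared above; Closer is com.google.common.io.Closer):

Closer closer = Closer.create();
GarbageCollectableBlobStore blobStore = Utils.bootstrapDataStore(
        new String[] { "--fds", "/path/to/datastore.properties" }, closer);
// ... use the blob store ...
closer.close();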
Use of org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore in project jackrabbit-oak by apache.
From class BlobStoreFixtureProvider, method create:
@CheckForNull
public static BlobStoreFixture create(Options options) throws Exception {
    BlobStoreOptions bsopts = options.getOptionBean(BlobStoreOptions.class);
    if (bsopts == null) {
        return null;
    }
    Type bsType = bsopts.getBlobStoreType();
    if (bsType == Type.NONE) {
        return null;
    }
    Closer closer = Closer.create();
    DataStore delegate;
    if (bsType == Type.S3) {
        SharedS3DataStore s3ds = new SharedS3DataStore();
        Properties props = loadAndTransformProps(bsopts.getS3ConfigPath());
        s3ds.setProperties(props);
        File homeDir = Files.createTempDir();
        closer.register(asCloseable(homeDir));
        s3ds.init(homeDir.getAbsolutePath());
        delegate = s3ds;
    } else if (bsType == Type.AZURE) {
        AzureDataStore azureds = new AzureDataStore();
        String cfgPath = bsopts.getAzureConfigPath();
        Properties props = loadAndTransformProps(cfgPath);
        azureds.setProperties(props);
        File homeDir = Files.createTempDir();
        azureds.init(homeDir.getAbsolutePath());
        closer.register(asCloseable(homeDir));
        delegate = azureds;
    } else if (bsType == Type.FAKE) {
        FileDataStore fakeDs = new DummyDataStore();
        fakeDs.setPath(bsopts.getFakeDataStorePath());
        fakeDs.init(null);
        delegate = fakeDs;
    } else {
        FileDataStore fds = new OakFileDataStore();
        delegate = fds;
        if (bsopts.getFDSPath() != null) {
            fds.setPath(bsopts.getFDSPath());
        } else {
            String cfgPath = bsopts.getFDSConfigPath();
            Properties props = loadAndTransformProps(cfgPath);
            populate(delegate, asMap(props), true);
        }
        delegate.init(null);
    }
    DataStoreBlobStore blobStore = new DataStoreBlobStore(delegate);
    return new DataStoreFixture(blobStore, closer, !options.getCommonOpts().isReadWrite());
}
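The fixture wraps a DataStoreBlobStore, which also implements GarbageCollectableBlobStore, so blob garbage collection can enumerate its chunks. A sketch of that use (0 means no last-modified cut-off; getAllChunkIds is part of the GarbageCollectableBlobStore interface):

Iterator<String> chunkIds = blobStore.getAllChunkIds(0);
while (chunkIds.hasNext()) {
    String chunkId = chunkIds.next();
    // each id identifies a stored chunk in the delegate DataStore
}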