use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by apache.
the class CacheTest method test.
@Test
public void test() throws Exception {
    FileUtils.deleteDirectory(new File("target/cacheTest"));
    PersistentCache cache = new PersistentCache("target/cacheTest,size=1,-compress");
    try {
        MemoryBlobStore mem = new MemoryBlobStore();
        mem.setBlockSizeMin(100);
        BlobStore b = cache.wrapBlobStore(mem);
        Random r = new Random();
        for (int i = 0; i < 10000; i++) {
            byte[] data = new byte[100];
            r.nextBytes(data);
            String id = b.writeBlob(new ByteArrayInputStream(data));
            b.readBlob(id, 0, new byte[1], 0, 1);
        }
    } finally {
        cache.close();
    }
}
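For context, here is a minimal, standalone sketch of the MemoryBlobStore round trip that the test above exercises through the PersistentCache wrapper. It is not part of the Oak test suite, uses only the public BlobStore API shown above, and assumes Java 11+ for InputStream.readAllBytes():

import java.io.ByteArrayInputStream;
import java.util.Arrays;
import java.util.Random;
import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore;

public class MemoryBlobStoreRoundTrip {
    public static void main(String[] args) throws Exception {
        MemoryBlobStore store = new MemoryBlobStore();
        // force small blocks so even a 100 byte blob is split into several chunks
        store.setBlockSizeMin(48);
        byte[] data = new byte[100];
        new Random(42).nextBytes(data);
        // writeBlob returns the blob id that all later reads refer to
        String id = store.writeBlob(new ByteArrayInputStream(data));
        // read back a single byte at offset 0, as the cache test does
        byte[] first = new byte[1];
        store.readBlob(id, 0, first, 0, 1);
        // or stream the whole blob back and verify the round trip
        byte[] read = store.getInputStream(id).readAllBytes();
        if (!Arrays.equals(data, read)) {
            throw new AssertionError("round trip failed");
        }
        System.out.println("stored and read back " + read.length + " bytes as " + id);
    }
}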
use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by apache.
the class CSVFileBinaryResourceProviderTest method testGetBinaries.
@Test
public void testGetBinaries() throws Exception {
    StringBuilder sb = new StringBuilder();
    CSVPrinter p = new CSVPrinter(sb, CSVFileBinaryResourceProvider.FORMAT);
    // BLOB_ID, LENGTH, JCR_MIMETYPE, JCR_ENCODING, JCR_PATH
    p.printRecord("a", 123, "text/plain", null, "/a");
    p.printRecord("a2", 123, "text/plain", null, "/a/c");
    p.printRecord("b", null, "text/plain", null, "/b");
    p.printRecord(null, null, "text/plain", null, "/c");
    File dataFile = temporaryFolder.newFile();
    Files.write(sb, dataFile, Charsets.UTF_8);
    CSVFileBinaryResourceProvider provider = new CSVFileBinaryResourceProvider(dataFile, new MemoryBlobStore());
    Map<String, BinaryResource> binaries = provider.getBinaries("/").uniqueIndex(BinarySourceMapper.BY_BLOBID);
    // the record without a blob id ("/c") is not reported, so only 3 binaries remain
    assertEquals(3, binaries.size());
    assertEquals("a", binaries.get("a").getBlobId());
    assertEquals("/a", binaries.get("a").getPath());
    binaries = provider.getBinaries("/a").uniqueIndex(BinarySourceMapper.BY_BLOBID);
    assertEquals(1, binaries.size());
    provider.close();
}
use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by apache.
the class NodeStoreBinaryResourceProviderTest method countBinaries.
@Test
public void countBinaries() throws Exception {
    NodeBuilder builder = root.builder();
    createFileNode(builder, "a", new IdBlob("hello", null), "text/plain");
    createFileNode(builder, "b", new IdBlob("hello", "id1"), "text/plain");
    createFileNode(builder.child("a2"), "c", new IdBlob("hello", "id2"), "text/foo")
            .setProperty(JcrConstants.JCR_ENCODING, "bar");
    NodeStore store = new MemoryNodeStore(builder.getNodeState());
    BlobStore blobStore = new MemoryBlobStore();
    NodeStoreBinaryResourceProvider extractor = new NodeStoreBinaryResourceProvider(store, blobStore);
    assertEquals(2, extractor.getBinaries("/").size());
    assertEquals(1, extractor.getBinaries("/a2").size());
    BinaryResource bs = extractor.getBinaries("/a2").first().get();
    assertEquals("text/foo", bs.getMimeType());
    assertEquals("bar", bs.getEncoding());
    assertEquals("id2", bs.getBlobId());
}
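The helper createFileNode used above is defined elsewhere in NodeStoreBinaryResourceProviderTest and is not shown on this page. The following is only a plausible sketch of what it does, assuming it builds an nt:file node and returns the jcr:content builder (which is why the caller can chain setProperty(JcrConstants.JCR_ENCODING, ...) on the result):

import org.apache.jackrabbit.JcrConstants;
import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.api.Type;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;

// Hypothetical reconstruction; the real helper may differ in detail.
private static NodeBuilder createFileNode(NodeBuilder parent, String name, Blob blob, String mimeType) {
    NodeBuilder file = parent.child(name);
    file.setProperty(JcrConstants.JCR_PRIMARYTYPE, JcrConstants.NT_FILE, Type.NAME);
    // the jcr:content child carries the binary and its metadata
    NodeBuilder content = file.child(JcrConstants.JCR_CONTENT);
    content.setProperty(JcrConstants.JCR_DATA, blob);
    content.setProperty(JcrConstants.JCR_MIMETYPE, mimeType);
    return content;
}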
use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by apache.
the class NodeStoreBinaryResourceProviderTest method csvGenerator.
@Test
public void csvGenerator() throws Exception {
    File csv = new File(temporaryFolder.getRoot(), "test.csv");
    BlobStore blobStore = new MemoryBlobStore();
    NodeBuilder builder = root.builder();
    createFileNode(builder, "a", blobOf("foo", blobStore), "text/plain");
    createFileNode(builder, "b", blobOf("hello", blobStore), "text/plain");
    NodeStore store = new MemoryNodeStore(builder.getNodeState());
    NodeStoreBinaryResourceProvider extractor = new NodeStoreBinaryResourceProvider(store, blobStore);
    CSVFileGenerator generator = new CSVFileGenerator(csv);
    generator.generate(extractor.getBinaries("/"));
    CSVFileBinaryResourceProvider csvbrp = new CSVFileBinaryResourceProvider(csv, blobStore);
    assertEquals(2, csvbrp.getBinaries("/").size());
    csvbrp.close();
}
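Similarly, blobOf is a helper from the same test class that is not shown here. A minimal sketch of what it presumably does is given below: write the string's bytes to the given BlobStore and wrap the resulting id so it can be used as a jcr:data value. The use of BlobStoreBlob is an assumption based on the Oak API, not copied from the test:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.plugins.blob.BlobStoreBlob;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;

// Hypothetical reconstruction of the blobOf helper used above.
private static Blob blobOf(String content, BlobStore bs) throws IOException {
    // store the content and keep only its id; the Blob resolves back through the store
    String id = bs.writeBlob(new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)));
    return new BlobStoreBlob(bs, id);
}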
use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by apache.
the class ActiveDeletedBlobCollectorMBeanImplTest method clonedNSWithSharedDS.
@Test
public void clonedNSWithSharedDS() throws Exception {
    MemoryBlobStore bs = new MemoryBlobStore();
    bs.setBlockSizeMin(48);
    MemoryDocumentStore mds1 = new MemoryDocumentStore();
    DocumentNodeStore dns1 = builderProvider.newBuilder().setDocumentStore(mds1).setBlobStore(bs).build();
    // Create the initial repo with InitialContent; it has enough data to create blobs.
    LuceneIndexEditorProvider editorProvider = new LuceneIndexEditorProvider();
    ContentRepository repository = new Oak(dns1)
            .with(new InitialContent())
            .with(new OpenSecurityProvider())
            .with(editorProvider)
            .with(new PropertyIndexEditorProvider())
            .with(new NodeTypeIndexProvider())
            .createContentRepository();
    ContentSession session = repository.login(null, null);
    Root root = session.getLatestRoot();
    TestUtil.createFulltextIndex(root.getTree("/"), "testIndex");
    root.commit();
    // pause active deletion
    IndexPathService indexPathService = new IndexPathServiceImpl(dns1);
    AsyncIndexInfoService asyncIndexInfoService = MockRegistrar.getAsyncIndexInfoService(
            newArrayList(new IndexMBeanInfoSupplier("foo-async", () -> STATUS_DONE, () -> 2L)));
    ActiveDeletedBlobCollectorMBeanImpl bean = new ActiveDeletedBlobCollectorMBeanImpl(
            ActiveDeletedBlobCollectorFactory.NOOP, wb, dns1, indexPathService, asyncIndexInfoService,
            new MemoryBlobStore(), sameThreadExecutor());
    bean.clock = clock;
    bean.flagActiveDeletionUnsafeForCurrentState();
    // We create some churn here and want some files to be created at dns1
    // BUT deleted at dns2; "segments_1" is one such file.
    // Since the churn is assumed, assert that dns1 has "segments_1"
    // (and later that dns2 doesn't have it).
    root = session.getLatestRoot();
    assertTrue("First pass indexing should generate segments_1",
            root.getTree("/oak:index/testIndex/:data/segments_1").exists());
    // shut down the first instance
    dns1.dispose();
    // clone
    MemoryDocumentStore mds2 = mds1.copy();
    DocumentNodeStore dns2 = builderProvider.newBuilder().setDocumentStore(mds2).setBlobStore(bs).build();
    // Create some churn to delete some index files using the cloned store;
    // set up the lucene editor with an active deletion collector.
    DeletedFileTrackingADBC deletedFileTrackingADBC =
            new DeletedFileTrackingADBC(new File(temporaryFolder.getRoot(), "adbc-workdir"));
    editorProvider = new LuceneIndexEditorProvider(null, null, new ExtractedTextCache(0, 0), null,
            Mounts.defaultMountInfoProvider(), deletedFileTrackingADBC);
    repository = new Oak(dns2)
            .with(new OpenSecurityProvider())
            .with(editorProvider)
            .with(new PropertyIndexEditorProvider())
            .with(new NodeTypeIndexProvider())
            .createContentRepository();
    session = repository.login(null, null);
    root = session.getLatestRoot();
    Tree rootTree = root.getTree("/");
    for (int i = 0; i < 20; i++) {
        Tree child = rootTree.addChild("a" + i);
        for (int j = 0; j < 20; j++) {
            child.setProperty("foo" + j, "bar" + j);
        }
    }
    root.commit();
    // Since our index is not async, we cannot track commit progress automatically;
    // in other words, we need to play the role of AsyncIndexUpdate explicitly.
    deletedFileTrackingADBC.blobDeletionCallback.commitProgress(COMMIT_SUCCEDED);
    deletedFileTrackingADBC.purgeBlobsDeleted(Clock.SIMPLE.getTime() + TimeUnit.SECONDS.toMillis(1), bs);
    root = session.getLatestRoot();
    assertFalse("Churn created via dns2 should delete segments_1",
            root.getTree("/oak:index/testIndex/:data/segments_1").exists());
    dns2.dispose();
    // Validate the index using dns1, which should still have valid index data
    // even after dns2's churn.
    dns1 = builderProvider.newBuilder().setDocumentStore(mds1).setBlobStore(bs).build();
    IndexConsistencyChecker checker = new IndexConsistencyChecker(dns1.getRoot(), "/oak:index/testIndex",
            new File(temporaryFolder.getRoot(), "checker-workdir"));
    IndexConsistencyChecker.Result result = checker.check(IndexConsistencyChecker.Level.BLOBS_ONLY);
    assertFalse("Nodestore1 can't read blobs: " + result.missingBlobIds + " while reading index",
            result.missingBlobs);
}