Use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by Apache.
From the class ActiveDeletedBlobCollectorMBeanImplTest, method failureToFlagAllIndexFilesShouldAutoResume:
@Test
public void failureToFlagAllIndexFilesShouldAutoResume() {
    IndexPathService indexPathService = MockRegistrar.getIndexPathsService(indexPaths);
    AsyncIndexInfoService asyncIndexInfoService = MockRegistrar.getAsyncIndexInfoService(
            newArrayList(new IndexMBeanInfoSupplier("foo-async", () -> STATUS_DONE, () -> 2L)));
    // a node store whose merge always fails, so flagging the index files can never succeed
    NodeStore failingNodeStore = new MemoryNodeStore() {
        @Nonnull
        @Override
        public synchronized NodeState merge(@Nonnull NodeBuilder builder,
                @Nonnull CommitHook commitHook, @Nonnull CommitInfo info)
                throws CommitFailedException {
            throw new CommitFailedException("TestFail", 1, "We must never merge");
        }
    };
    ActiveDeletedBlobCollectorMBeanImpl bean = new ActiveDeletedBlobCollectorMBeanImpl(
            ActiveDeletedBlobCollectorFactory.NOOP, wb, failingNodeStore,
            indexPathService, asyncIndexInfoService, new MemoryBlobStore(),
            sameThreadExecutor());
    bean.clock = clock;
    bean.flagActiveDeletionUnsafeForCurrentState();
    assertFalse("Failure to update head index files didn't resume marking blobs",
            bean.isActiveDeletionUnsafe());
}
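MemoryBlobStore keeps every written binary on the heap, which is why these tests use it as a lightweight stand-in for a production blob store. For reference, a minimal round trip through the BlobStore API looks like the sketch below (the class name and sample data are ours, not part of the test above):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore;

public class MemoryBlobStoreRoundTrip {
    public static void main(String[] args) throws Exception {
        MemoryBlobStore blobStore = new MemoryBlobStore();
        byte[] data = "hello, blob".getBytes(StandardCharsets.UTF_8);
        // writeBlob stores the binary in memory and returns its id
        String blobId = blobStore.writeBlob(new ByteArrayInputStream(data));
        System.out.println("stored " + blobStore.getBlobLength(blobId) + " bytes as " + blobId);
        // stream the binary back out and buffer it
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (InputStream in = blobStore.getInputStream(blobId)) {
            byte[] buffer = new byte[4096];
            for (int n; (n = in.read(buffer)) != -1; ) {
                out.write(buffer, 0, n);
            }
        }
        System.out.println(new String(out.toByteArray(), StandardCharsets.UTF_8));
    }
}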
Use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by Apache.
From the class SecondaryStoreCacheServiceTest, method configureDefaultServices:
@Before
public void configureDefaultServices() {
    context.registerService(BlobStore.class, new MemoryBlobStore());
    context.registerService(NodeStoreProvider.class, new NodeStoreProvider() {
        @Override
        public NodeStore getNodeStore() {
            return secondaryStore;
        }
    }, ImmutableMap.<String, Object>of("role", "secondary"));
    context.registerService(Executor.class, Executors.newSingleThreadExecutor());
    context.registerService(StatisticsProvider.class, StatisticsProvider.NOOP);
    MockOsgi.injectServices(cacheService, context.bundleContext());
}
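Since NodeStoreProvider exposes just the single getNodeStore() method, the anonymous class above can also be registered as a lambda. A sketch of that alternative, under the assumption that the interface is indeed functional, reusing the same context rule and secondaryStore field:

// hypothetical lambda form of the NodeStoreProvider registration above;
// the "role" = "secondary" property marks this provider for the secondary store
context.registerService(NodeStoreProvider.class, () -> secondaryStore,
        ImmutableMap.<String, Object>of("role", "secondary"));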
Use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by Apache.
From the class JournalIT, method doLargeCleanupTest:
private void doLargeCleanupTest(int offset, int size) throws Exception {
    Clock clock = new Clock.Virtual();
    DocumentMK mk1 = createMK(0, /* clusterId: 0 => uses clusterNodes collection */
            0, new MemoryDocumentStore(), new MemoryBlobStore());
    DocumentNodeStore ns1 = mk1.getNodeStore();
    // make sure we're visible and marked as active
    renewClusterIdLease(ns1);
    JournalGarbageCollector gc = new JournalGarbageCollector(ns1, 0);
    clock.getTimeIncreasing();
    clock.getTimeIncreasing();
    // clean up everything that might still be there
    gc.gc();
    // create entries as parametrized:
    for (int i = offset; i < size + offset; i++) {
        mk1.commit("/", "+\"regular" + i + "\": {}", null, null);
        // always run background ops to 'flush' the change
        // into the journal:
        ns1.runBackgroundOperations();
    }
    // sleep 100 millis
    Thread.sleep(100);
    // should now be able to clean up everything
    assertEquals(size, gc.gc());
}
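The createMK and renewClusterIdLease helpers are private to JournalIT and not shown on this page. Judging from the builder calls in the other snippets here, the four-argument createMK overload probably looks roughly like the following hypothetical reconstruction (the actual body may differ):

// hypothetical reconstruction, not the actual JournalIT code
private static DocumentMK createMK(int clusterId, int asyncDelay,
        DocumentStore ds, BlobStore bs) {
    return new DocumentMK.Builder()
            .setDocumentStore(ds).setBlobStore(bs)
            .setClusterId(clusterId).setAsyncDelay(asyncDelay)
            .open();
}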
Use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by Apache.
From the class DocumentSplitTest, method manyRevisions:
// OAK-1233
@Test
public void manyRevisions() {
    final int numMKs = 3;
    MemoryDocumentStore ds = new MemoryDocumentStore();
    MemoryBlobStore bs = new MemoryBlobStore();
    List<Set<String>> changes = new ArrayList<Set<String>>();
    List<DocumentMK> mks = new ArrayList<DocumentMK>();
    for (int i = 1; i <= numMKs; i++) {
        DocumentMK.Builder builder = new DocumentMK.Builder();
        builder.setDocumentStore(ds).setBlobStore(bs).setAsyncDelay(0);
        DocumentMK mk = builder.setClusterId(i).open();
        mks.add(mk);
        changes.add(new HashSet<String>());
        if (i == 1) {
            mk.commit("/", "+\"test\":{}", null, null);
            mk.runBackgroundOperations();
        }
    }
    List<String> propNames = Arrays.asList("prop1", "prop2", "prop3");
    Random random = new Random(0);
    for (int i = 0; i < 1000; i++) {
        int mkIdx = random.nextInt(mks.size());
        // pick mk
        DocumentMK mk = mks.get(mkIdx);
        DocumentNodeStore ns = mk.getNodeStore();
        // pick property name to update
        String name = propNames.get(random.nextInt(propNames.size()));
        // need to sync?
        for (int j = 0; j < changes.size(); j++) {
            Set<String> c = changes.get(j);
            if (c.contains(name)) {
                syncMKs(mks, j);
                c.clear();
                break;
            }
        }
        // read current value
        NodeDocument doc = ds.find(NODES, Utils.getIdFromPath("/test"));
        assertNotNull(doc);
        RevisionVector head = ns.getHeadRevision();
        Revision lastRev = ns.getPendingModifications().get("/test");
        DocumentNodeState n = doc.getNodeAtRevision(mk.getNodeStore(), head, lastRev);
        assertNotNull(n);
        String value = n.getPropertyAsString(name);
        // set or increment
        if (value == null) {
            value = String.valueOf(0);
        } else {
            value = String.valueOf(Integer.parseInt(value) + 1);
        }
        mk.commit("/test", "^\"" + name + "\":" + value, null, null);
        changes.get(mkIdx).add(name);
    }
    for (DocumentMK mk : mks) {
        mk.dispose();
    }
}
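The syncMKs helper is not shown in this excerpt. From its call sites (the store at index j has pending changes that every other cluster node must pick up), a plausible hypothetical reconstruction is:

// hypothetical reconstruction: publish the writer's changes first,
// then let every other cluster node pull them in
private void syncMKs(List<DocumentMK> mks, int idx) {
    mks.get(idx).runBackgroundOperations();
    for (int i = 0; i < mks.size(); i++) {
        if (i != idx) {
            mks.get(i).runBackgroundOperations();
        }
    }
}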
Use of org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore in project jackrabbit-oak by Apache.
From the class RandomizedClusterTest, method createMK:
private DocumentMK createMK(int clusterId) {
    DocumentMK.Builder builder = new DocumentMK.Builder();
    builder.setAsyncDelay(0);
    if (MONGO_DB) {
        MongoConnection c = connectionFactory.getConnection();
        MongoUtils.dropCollections(c.getDBName());
        builder.setMongoDB(c.getMongoClient(), c.getDBName());
    } else {
        if (ds == null) {
            ds = new MemoryDocumentStore();
        }
        if (bs == null) {
            bs = new MemoryBlobStore();
        }
        builder.setDocumentStore(ds).setBlobStore(bs);
    }
    return builder.setClusterId(clusterId + 1).open();
}
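Note how the lazily initialized ds and bs fields are shared across all cluster ids: every DocumentMK created this way reads and writes the same in-memory stores, which is what makes the in-memory branch behave like a multi-node cluster. A minimal standalone sketch of that pattern, using only builder calls that appear in the snippets above (variable names are ours):

// two cluster nodes backed by the same in-memory stores; a change made
// through one becomes visible to the other once background ops have run
MemoryDocumentStore sharedDs = new MemoryDocumentStore();
MemoryBlobStore sharedBs = new MemoryBlobStore();
DocumentMK mk1 = new DocumentMK.Builder()
        .setDocumentStore(sharedDs).setBlobStore(sharedBs)
        .setAsyncDelay(0).setClusterId(1).open();
DocumentMK mk2 = new DocumentMK.Builder()
        .setDocumentStore(sharedDs).setBlobStore(sharedBs)
        .setAsyncDelay(0).setClusterId(2).open();
mk1.commit("/", "+\"shared\":{}", null, null);
mk1.runBackgroundOperations();
mk2.runBackgroundOperations(); // mk2 now sees /shared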