use of org.apache.jackrabbit.oak.plugins.document.Document in project jackrabbit-oak by apache.
In the class RevisionsCommandTest, the method reset:
@Test
public void reset() throws Exception {
    // Run the version GC once so that the "versionGC" settings document is created.
    ns.getVersionGarbageCollector().gc(1, TimeUnit.HOURS);
    Document settings = ns.getDocumentStore().find(Collection.SETTINGS, "versionGC");
    assertNotNull(settings);
    ns.dispose();
    // The reset command must announce what it is doing on stdout ...
    String out = captureSystemOut(new RevisionsCmd("reset"));
    assertTrue(out.contains("resetting recommendations and statistics"));
    // ... and after reconnecting, the settings document must be gone.
    MongoConnection connection = connectionFactory.getConnection();
    ns = builderProvider.newBuilder().setMongoDB(connection.getDB()).getNodeStore();
    settings = ns.getDocumentStore().find(Collection.SETTINGS, "versionGC");
    assertNull(settings);
}
use of org.apache.jackrabbit.oak.plugins.document.Document in project jackrabbit-oak by apache.
In the class RDBDocumentStore, the method internalQuery:
/**
 * Executes a range query on the given collection over a read-only
 * connection, serving individual results from the nodes cache where the
 * cached copy is provably up to date.
 *
 * @param collection the collection to query
 * @param fromKey lower bound (exclusive semantics defined by the DB layer)
 * @param toKey upper bound
 * @param excludeKeyPatterns key patterns to exclude from the result
 * @param conditions additional conditions; every referenced property must
 *            be in {@code INDEXEDPROPERTIES}
 * @param limit maximum number of results
 * @return the matching documents
 * @throws DocumentStoreException on unsupported conditions or SQL failure
 */
private <T extends Document> List<T> internalQuery(Collection<T> collection, String fromKey, String toKey, List<String> excludeKeyPatterns, List<QueryCondition> conditions, int limit) {
Connection connection = null;
RDBTableMetaData tmd = getTable(collection);
// Reject conditions on properties that are not backed by an index.
for (QueryCondition cond : conditions) {
if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond + "'; supported properties are " + INDEXEDPROPERTIES;
LOG.info(message);
throw new DocumentStoreException(message);
}
}
final Stopwatch watch = startWatch();
int resultSize = 0;
// The tracker records concurrent cache changes between query start and
// cache population, so stale rows are not written into the cache later.
try (CacheChangesTracker tracker = obtainTracker(collection, fromKey, toKey)) {
long now = System.currentTimeMillis();
connection = this.ch.getROConnection();
// MIN_ID_VALUE/MAX_ID_VALUE are sentinel bounds for NODES; translate
// them to null so the DB layer runs an open-ended range scan.
String from = collection == Collection.NODES && NodeDocument.MIN_ID_VALUE.equals(fromKey) ? null : fromKey;
String to = collection == Collection.NODES && NodeDocument.MAX_ID_VALUE.equals(toKey) ? null : toKey;
// OAK-6839: only populate the cache with *new* entries if the query
// isn't open-ended (something done by GC processes)
boolean populateCache = to != null;
List<RDBRow> dbresult = db.query(connection, tmd, from, to, excludeKeyPatterns, conditions, limit);
connection.commit();
int size = dbresult.size();
List<T> result = new ArrayList<T>(size);
for (int i = 0; i < size; i++) {
// free RDBRow as early as possible
RDBRow row = dbresult.set(i, null);
// Cache lookup keyed on id + modcount; a hit means the cached
// document matches this exact persisted revision.
T doc = getIfCached(collection, row.getId(), row.getModcount());
if (doc == null) {
// parse DB contents into document if and only if it's not
// already in the cache
doc = convertFromDBObject(collection, row);
} else {
// we got a document from the cache, thus collection is NODES
// and a tracker is present
long lastmodified = modifiedOf(doc);
if (lastmodified == row.getModified() && lastmodified >= 1) {
try (CacheLock lock = acquireLockFor(row.getId())) {
// Only refresh the freshness stamp if no concurrent cache
// change may have touched this entry since the tracker started.
if (!tracker.mightBeenAffected(row.getId())) {
// otherwise mark it as fresh
((NodeDocument) doc).markUpToDate(now);
}
}
} else {
// we need a fresh document instance
doc = convertFromDBObject(collection, row);
}
}
result.add(doc);
}
if (collection == Collection.NODES) {
if (populateCache) {
nodesCache.putNonConflictingDocs(tracker, castAsNodeDocumentList(result));
} else {
// Open-ended query (e.g. GC): don't add entries, only invalidate
// cached documents that are older than what we just read.
Map<String, ModificationStamp> invMap = Maps.newHashMap();
for (Document doc : result) {
invMap.put(doc.getId(), new ModificationStamp(modcountOf(doc), modifiedOf(doc)));
}
nodesCache.invalidateOutdated(invMap);
}
}
resultSize = result.size();
return result;
} catch (Exception ex) {
LOG.error("SQL exception on query", ex);
throw asDocumentStoreException(ex, "SQL exception on query");
} finally {
// Connection is returned to the pool even on failure; stats record
// elapsed time and result size for monitoring.
this.ch.closeConnection(connection);
stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, fromKey, toKey, !conditions.isEmpty(), resultSize, -1, false);
}
}
use of org.apache.jackrabbit.oak.plugins.document.Document in project jackrabbit-oak by apache.
In the class MongoDocumentTraverser, the method getAllDocuments:
/**
 * Streams every document of the given collection, lazily converting the
 * raw Mongo objects to {@link Document}s and populating the node document
 * cache for NODES. Only ids accepted by {@code filter} are returned.
 */
public <T extends Document> CloseableIterable<T> getAllDocuments(Collection<T> collection, Predicate<String> filter) {
    if (!disableReadOnlyCheck) {
        checkState(mongoStore.isReadOnly(), "Traverser can only be used with readOnly store");
    }
    MongoCollection<BasicDBObject> dbCollection = mongoStore.getDBCollection(collection);
    // TODO This may lead to reads being routed to a secondary depending on
    // the MongoURI, so the caller must ensure it is safe to read from one.
    CloseableIterable<BasicDBObject> rawDocs = CloseableIterable.wrap(
            dbCollection.withReadPreference(mongoStore.getConfiguredReadPreference(collection)).find());
    @SuppressWarnings("Guava")
    Iterable<T> converted = FluentIterable.from(rawDocs)
            .filter(o -> filter.test((String) o.get(Document.ID)))
            .transform(o -> {
                T doc = mongoStore.convertFromDBObject(collection, o);
                // TODO Review the cache update approach where tracker has to track *all* docs
                if (collection == Collection.NODES) {
                    getNodeDocCache().put((NodeDocument) doc);
                }
                return doc;
            });
    return CloseableIterable.wrap(converted, rawDocs);
}
use of org.apache.jackrabbit.oak.plugins.document.Document in project jackrabbit-oak by apache.
In the class ReadOnlyDocumentStoreWrapperTest, the method testPassthrough:
@Test
public void testPassthrough() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
final List<String> disallowedMethods = Lists.newArrayList("create", "update", "remove", "createOrUpdate", "findAndUpdate");
InvocationHandler handler = new InvocationHandler() {
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
String methodName = method.getName();
if (disallowedMethods.contains(methodName)) {
Assert.fail(String.format("Invalid passthrough of method (%s) with params %s", method, Arrays.toString(args)));
}
if ("determineServerTimeDifferenceMillis".equals(methodName)) {
return new Long(0);
} else {
return null;
}
}
};
DocumentStore proxyStore = (DocumentStore) Proxy.newProxyInstance(DocumentStore.class.getClassLoader(), new Class[] { DocumentStore.class }, handler);
DocumentStore readOnlyStore = ReadOnlyDocumentStoreWrapperFactory.getInstance(proxyStore);
Collection<? extends Document>[] collections = new Collection[] { Collection.CLUSTER_NODES, Collection.JOURNAL, Collection.NODES, Collection.SETTINGS };
for (Collection collection : collections) {
readOnlyStore.find(collection, null);
readOnlyStore.find(collection, null, 0);
readOnlyStore.query(collection, null, null, 0);
readOnlyStore.query(collection, null, null, null, 0, 0);
boolean uoeThrown = false;
try {
readOnlyStore.remove(collection, "");
} catch (UnsupportedOperationException uoe) {
// catch uoe thrown by read only wrapper
uoeThrown = true;
}
assertTrue("remove must throw UnsupportedOperationException", uoeThrown);
uoeThrown = false;
try {
readOnlyStore.remove(collection, Lists.<String>newArrayList());
} catch (UnsupportedOperationException uoe) {
// catch uoe thrown by read only wrapper
uoeThrown = true;
}
assertTrue("remove must throw UnsupportedOperationException", uoeThrown);
uoeThrown = false;
try {
readOnlyStore.remove(collection, Maps.<String, Long>newHashMap());
} catch (UnsupportedOperationException uoe) {
// catch uoe thrown by read only wrapper
uoeThrown = true;
}
assertTrue("remove must throw UnsupportedOperationException", uoeThrown);
uoeThrown = false;
try {
readOnlyStore.create(collection, null);
} catch (UnsupportedOperationException uoe) {
// catch uoe thrown by read only wrapper
uoeThrown = true;
}
assertTrue("create must throw UnsupportedOperationException", uoeThrown);
uoeThrown = false;
try {
readOnlyStore.createOrUpdate(collection, (UpdateOp) null);
} catch (UnsupportedOperationException uoe) {
// catch uoe thrown by read only wrapper
uoeThrown = true;
}
assertTrue("createOrUpdate must throw UnsupportedOperationException", uoeThrown);
uoeThrown = false;
try {
readOnlyStore.createOrUpdate(collection, Lists.<UpdateOp>newArrayList());
} catch (UnsupportedOperationException uoe) {
// catch uoe thrown by read only wrapper
uoeThrown = true;
}
assertTrue("createOrUpdate must throw UnsupportedOperationException", uoeThrown);
uoeThrown = false;
try {
readOnlyStore.findAndUpdate(collection, null);
} catch (UnsupportedOperationException uoe) {
// catch uoe thrown by read only wrapper
uoeThrown = true;
}
assertTrue("findAndUpdate must throw UnsupportedOperationException", uoeThrown);
readOnlyStore.invalidateCache(collection, null);
readOnlyStore.getIfCached(collection, null);
}
readOnlyStore.invalidateCache();
readOnlyStore.invalidateCache(null);
readOnlyStore.dispose();
readOnlyStore.setReadWriteMode(null);
readOnlyStore.getCacheStats();
readOnlyStore.getMetadata();
readOnlyStore.determineServerTimeDifferenceMillis();
}
use of org.apache.jackrabbit.oak.plugins.document.Document in project jackrabbit-oak by apache.
In the class MongoDocumentStore, the method getModStamps:
/**
 * Looks up the {@link Document#MOD_COUNT} and
 * {@link NodeDocument#MODIFIED_IN_SECS} values of the documents with the
 * given {@code keys}. Non-existing documents have no entry in the returned
 * map. A document without a modCount field maps to -1; the same default
 * applies to the modified field.
 *
 * @param keys the keys of the documents.
 * @return map with key to modification stamp mapping.
 * @throws MongoException if the call fails
 */
@Nonnull
private Map<String, ModificationStamp> getModStamps(Iterable<String> keys) throws MongoException {
    // Project only the fields we need: id, modCount and modified.
    final BasicDBObject projection = new BasicDBObject(Document.ID, 1);
    projection.put(Document.MOD_COUNT, 1);
    projection.put(NodeDocument.MODIFIED_IN_SECS, 1);
    Map<String, ModificationStamp> stamps = Maps.newHashMap();
    // Read from the primary to get authoritative stamps.
    nodes.withReadPreference(ReadPreference.primary())
            .find(Filters.in(Document.ID, keys))
            .projection(projection)
            .forEach((Block<BasicDBObject>) obj -> {
                String id = (String) obj.get(Document.ID);
                Long modCount = Utils.asLong((Number) obj.get(Document.MOD_COUNT));
                Long modified = Utils.asLong((Number) obj.get(NodeDocument.MODIFIED_IN_SECS));
                stamps.put(id, new ModificationStamp(
                        modCount == null ? -1L : modCount,
                        modified == null ? -1L : modified));
            });
    return stamps;
}
Aggregations