Use of org.apache.jackrabbit.core.persistence.util.NodeInfo in project jackrabbit by apache:
the class BundleDbPersistenceManager, method getAllNodeInfos.
/**
 * {@inheritDoc}
 *
 * Reads node infos in key order, starting strictly after {@code bigger}
 * (or from the beginning when {@code bigger} is {@code null}).
 * A {@code maxCount} of 0 means "no limit". At most {@code maxCount}
 * entries are returned, in key order, in an insertion-ordered map.
 */
@Override
public synchronized Map<NodeId, NodeInfo> getAllNodeInfos(NodeId bigger, int maxCount) throws ItemStateException {
    ResultSet rs = null;
    try {
        String sql = bundleSelectAllBundlesSQL;
        NodeId lowId = null;
        Object[] keys = new Object[0];
        if (bigger != null) {
            // resume the scan after the given id
            sql = bundleSelectAllBundlesFromSQL;
            lowId = bigger;
            keys = getKey(bigger);
        }
        if (getStorageModel() == SM_LONGLONG_KEYS && maxCount > 0) {
            // get some more rows, in case the first row is smaller
            // only required for SM_LONGLONG_KEYS
            // probability is very low to get the wrong first key, < 1 : 2^64
            // see also bundleSelectAllIdsFrom SQL statement
            maxCount += 10;
        }
        rs = conHelper.exec(sql, keys, false, maxCount);
        Map<NodeId, NodeInfo> result = new LinkedHashMap<NodeId, NodeInfo>(maxCount);
        // maxCount == 0 means unlimited; otherwise stop once the batch is full
        while ((maxCount == 0 || result.size() < maxCount) && rs.next()) {
            NodeId current;
            if (getStorageModel() == SM_BINARY_KEYS) {
                current = new NodeId(rs.getBytes(1));
            } else {
                // two-column (high, low) key layout
                long high = rs.getLong(1);
                long low = rs.getLong(2);
                current = new NodeId(high, low);
            }
            if (getStorageModel() == SM_LONGLONG_KEYS && lowId != null) {
                // skip the keys that are smaller or equal (see above, maxCount += 10)
                if (current.compareTo(lowId) <= 0) {
                    continue;
                }
            }
            // bundle data starts at column 3 (two key columns) or 2 (one key column)
            NodePropBundle bundle = readBundle(current, rs, getStorageModel() == SM_LONGLONG_KEYS ? 3 : 2);
            NodeInfo nodeInfo = new NodeInfo(bundle);
            result.put(nodeInfo.getId(), nodeInfo);
        }
        return result;
    } catch (SQLException e) {
        // fixed: message previously said "getAllNodeIds failed." (copy-paste)
        String msg = "getAllNodeInfos failed.";
        log.error(msg, e);
        throw new ItemStateException(msg, e);
    } finally {
        DbUtility.close(rs);
    }
}
Use of org.apache.jackrabbit.core.persistence.util.NodeInfo in project jackrabbit by apache:
the class GarbageCollector, method scanPersistenceManagersByNodeInfos.
/**
 * Scans every iterable persistence manager batch by batch and touches all
 * BINARY property values of nodes that have blobs in the data store, so
 * that each blob's last-modified date is refreshed during the scan.
 */
private void scanPersistenceManagersByNodeInfos() throws RepositoryException, ItemStateException {
    int pmIndex = 0;
    for (IterablePersistenceManager pm : pmList) {
        pmIndex++;
        int analyzed = 0;
        // page through the node infos, resuming after the last id of each batch
        Map<NodeId, NodeInfo> infos = pm.getAllNodeInfos(null, NODESATONCE);
        while (!infos.isEmpty()) {
            NodeId resumeAfter = null;
            for (NodeInfo info : infos.values()) {
                analyzed++;
                if (analyzed % 1000 == 0) {
                    LOG.debug(pm.toString() + " (" + pmIndex + "/" + pmList.length + "): analyzed " + analyzed + " nodes...");
                }
                resumeAfter = info.getId();
                if (callback != null) {
                    callback.beforeScanning(null);
                }
                // nodes without data-store blobs need no further inspection
                if (!info.hasBlobsInDataStore()) {
                    continue;
                }
                try {
                    NodeState state = pm.load(info.getId());
                    for (Name name : state.getPropertyNames()) {
                        PropertyState property = pm.load(new PropertyId(info.getId(), name));
                        if (property.getType() != PropertyType.BINARY) {
                            continue;
                        }
                        for (InternalValue value : property.getValues()) {
                            // getLength will update the last modified date
                            // if the persistence manager scan is running
                            value.getLength();
                        }
                    }
                } catch (NoSuchItemStateException ignored) {
                    // the node may have been deleted in the meantime
                }
            }
            infos = pm.getAllNodeInfos(resumeAfter, NODESATONCE);
        }
    }
    NodeInfo.clearPool();
}
Use of org.apache.jackrabbit.core.persistence.util.NodeInfo in project jackrabbit by apache:
the class ConsistencyCheckerImpl, method internalCheckConsistency.
/**
 * Checks bundle consistency, either for the whole repository or for an
 * explicit set of node ids.
 *
 * @param uuids     node ids to check, or {@code null} to check all nodes
 * @param recursive when checking explicit ids, also descend into children
 * @return the number of bundles that were checked
 * @throws RepositoryException if the nodes could not be loaded
 */
private int internalCheckConsistency(String[] uuids, boolean recursive) throws RepositoryException {
    int count = 0;
    if (uuids == null) {
        // check all nodes
        try {
            // page through all node infos, accumulating them into one map
            Map<NodeId, NodeInfo> batch = pm.getAllNodeInfos(null, NODESATONCE);
            Map<NodeId, NodeInfo> allInfos = batch;
            NodeId lastId = null;
            while (!batch.isEmpty()) {
                for (Map.Entry<NodeId, NodeInfo> entry : batch.entrySet()) {
                    lastId = entry.getKey();
                    count++;
                    if (count % 1000 == 0) {
                        log.info(pm + ": loaded " + count + " infos...");
                    }
                }
                batch = pm.getAllNodeInfos(lastId, NODESATONCE);
                allInfos.putAll(batch);
            }
            // lastId == null means the repository yielded no nodes at all;
            // guard it so pm.exists(null) is never called (previously an NPE risk)
            if (lastId == null || pm.exists(lastId)) {
                for (Map.Entry<NodeId, NodeInfo> entry : allInfos.entrySet()) {
                    checkBundleConsistency(entry.getKey(), entry.getValue(), allInfos);
                }
            } else {
                // the last node vanished during the scan: the snapshot is stale
                log.info("Failed to read all nodes, starting over");
                // propagate the retry's count (previously the result was discarded)
                return internalCheckConsistency(uuids, recursive);
            }
        } catch (ItemStateException e) {
            throw new RepositoryException("Error loading nodes", e);
        } finally {
            NodeInfo.clearPool();
        }
    } else {
        // check only given uuids, handle recursive flag
        List<NodeId> idList = new ArrayList<NodeId>(uuids.length);
        for (final String uuid : uuids) {
            try {
                idList.add(new NodeId(uuid));
            } catch (IllegalArgumentException e) {
                error(uuid, "Invalid id for consistency check, skipping: '" + uuid + "': " + e);
            }
        }
        // idList may grow while iterating: recursive mode appends child ids
        for (int i = 0; i < idList.size(); i++) {
            NodeId id = idList.get(i);
            try {
                final NodePropBundle bundle = pm.loadBundle(id);
                if (bundle == null) {
                    // virtual nodes have no backing bundle; only report real misses
                    if (!isVirtualNode(id)) {
                        error(id.toString(), "No bundle found for id '" + id + "'");
                    }
                } else {
                    checkBundleConsistency(id, new NodeInfo(bundle), Collections.<NodeId, NodeInfo>emptyMap());
                    if (recursive) {
                        for (NodePropBundle.ChildNodeEntry entry : bundle.getChildNodeEntries()) {
                            idList.add(entry.getId());
                        }
                    }
                    count++;
                    if (count % 1000 == 0 && listener == null) {
                        log.info(pm + ": checked " + count + "/" + idList.size() + " bundles...");
                    }
                }
            } catch (ItemStateException ignored) {
                // problem already logged by loadBundle / error reporting
            }
        }
    }
    log.info(pm + ": checked " + count + " bundles.");
    return count;
}
Aggregations