Use of org.apache.jackrabbit.core.cluster.ClusterNode in project Jackrabbit (Apache):
class SearchIndex, method getChangeLogRecords.
/**
 * Polls the underlying journal for events of type {@link ChangeLogRecord}
 * that happened after a given revision, on a given workspace.
 *
 * @param revision the revision to start reading records after
 * @param workspace the workspace name to match records against
 * @return the matching change log records; an empty list when this
 *         repository is not part of a cluster or nothing matched
 */
private List<ChangeLogRecord> getChangeLogRecords(long revision, final String workspace) {
    log.debug("Get changes from the Journal for revision {} and workspace {}.", revision, workspace);
    ClusterNode clusterNode = getContext().getClusterNode();
    if (clusterNode == null) {
        // not clustered, so there is no journal to poll
        return Collections.emptyList();
    }
    final List<ChangeLogRecord> events = new ArrayList<ChangeLogRecord>();
    // collects only the change log records belonging to the requested workspace
    final ClusterRecordProcessor collector = new ClusterRecordProcessor() {
        public void process(ChangeLogRecord record) {
            String recordWorkspace = record.getWorkspace();
            // null-safe comparison: both null counts as a match
            if (recordWorkspace == null ? workspace == null : recordWorkspace.equals(workspace)) {
                events.add(record);
            }
        }
        public void process(LockRecord record) {
        }
        public void process(NamespaceRecord record) {
        }
        public void process(NodeTypeRecord record) {
        }
        public void process(PrivilegeRecord record) {
        }
        public void process(WorkspaceRecord record) {
        }
    };
    ClusterRecordDeserializer deserializer = new ClusterRecordDeserializer();
    RecordIterator iterator = null;
    try {
        iterator = clusterNode.getJournal().getRecords(revision);
        while (iterator.hasNext()) {
            Record record = iterator.nextRecord();
            // only consider records that this very cluster node produced
            if (!record.getProducerId().equals(clusterNode.getId())) {
                continue;
            }
            ClusterRecord clusterRecord = null;
            try {
                clusterRecord = deserializer.deserialize(record);
            } catch (JournalException e) {
                log.error("Unable to read revision '" + record.getRevision() + "'.", e);
            }
            if (clusterRecord != null) {
                clusterRecord.process(collector);
            }
        }
    } catch (JournalException e1) {
        log.error(e1.getMessage(), e1);
    } finally {
        if (iterator != null) {
            iterator.close();
        }
    }
    return events;
}
Use of org.apache.jackrabbit.core.cluster.ClusterNode in project Jackrabbit (Apache):
class SearchIndex, method checkPendingJournalChanges.
/**
 * In the case of an initial index build operation, checks whether some new
 * nodes are pending in the journal and tries to preemptively delete them,
 * to keep the index consistent.
 *
 * See JCR-3162.
 *
 * @param context the query handler context providing the cluster node and
 *                workspace name
 */
private void checkPendingJournalChanges(QueryHandlerContext context) {
    ClusterNode clusterNode = context.getClusterNode();
    if (clusterNode == null) {
        // not clustered: no pending journal records can exist
        return;
    }
    long revision = clusterNode.getRevision();
    List<NodeId> addedIds = new ArrayList<NodeId>();
    // gather the ids of all nodes added after the current revision
    for (ChangeLogRecord record : getChangeLogRecords(revision, context.getWorkspace())) {
        for (ItemState state : record.getChanges().addedStates()) {
            if (state.isNode()) {
                addedIds.add((NodeId) state.getId());
            }
        }
    }
    if (addedIds.isEmpty()) {
        return;
    }
    Collection<NodeState> noStates = Collections.emptyList();
    try {
        // "update" with an empty add-iterator effectively removes the nodes
        updateNodes(addedIds.iterator(), noStates.iterator());
    } catch (Exception e) {
        // best-effort cleanup: log and keep going with the index build
        log.error(e.getMessage(), e);
    }
}
Use of org.apache.jackrabbit.core.cluster.ClusterNode in project Jackrabbit (Apache):
class FileJournalTest, method testClusterInitIncompleteMissingParam.
/**
 * Verify that <code>ClusterNode.stop</code> can be invoked even when
 * <code>ClusterNode.init</code> throws because the journal cannot be
 * initialized. This is provoked by omitting the required
 * <code>directory</code> parameter.
 *
 * @throws Exception if an unexpected error occurs
 */
public void testClusterInitIncompleteMissingParam() throws Exception {
    JournalFactory journalFactory = new JournalFactory() {
        public Journal getJournal(NamespaceResolver resolver) throws RepositoryException {
            try {
                FileJournal journal = new FileJournal();
                // setDirectory() deliberately not called: init() must fail
                journal.init(CLUSTER_NODE_ID, resolver);
                return journal;
            } catch (JournalException e) {
                throw new RepositoryException("Expected failure", e);
            }
        }
    };
    ClusterConfig config = new ClusterConfig(CLUSTER_NODE_ID, SYNC_DELAY, journalFactory);
    SimpleClusterContext context = new SimpleClusterContext(config);
    ClusterNode clusterNode = new ClusterNode();
    try {
        clusterNode.init(context);
        fail("Bad cluster configuration.");
    } catch (Exception expected) {
        // init() is expected to fail: the journal directory is missing
    }
    // must not throw even though init() failed
    clusterNode.stop();
}
Use of org.apache.jackrabbit.core.cluster.ClusterNode in project Jackrabbit (Apache):
class ConsistencyCheckerImplTest, method createClusterNode.
/**
 * Creates and initializes a cluster node backed by an in-memory journal
 * that is pre-loaded with this test's {@code records}.
 *
 * @param id the cluster node (and journal) identifier
 * @return the initialized cluster node
 * @throws Exception if journal or cluster node initialization fails
 */
private ClusterNode createClusterNode(String id) throws Exception {
    // journal that re-syncs whenever new records show up
    final MemoryJournal journal = new MemoryJournal() {
        protected boolean syncAgainOnNewRecords() {
            return true;
        }
    };
    JournalFactory journalFactory = new JournalFactory() {
        public Journal getJournal(NamespaceResolver resolver) throws RepositoryException {
            return journal;
        }
    };
    ClusterConfig config = new ClusterConfig(id, SYNC_DELAY, journalFactory);
    SimpleClusterContext context = new SimpleClusterContext(config);
    journal.setRepositoryHome(context.getRepositoryHome());
    journal.init(id, context.getNamespaceResolver());
    // seed the journal with the test's prepared records
    journal.setRecords(records);
    ClusterNode clusterNode = new ClusterNode();
    clusterNode.init(context);
    return clusterNode;
}
Use of org.apache.jackrabbit.core.cluster.ClusterNode in project Jackrabbit (Apache):
class ConsistencyCheck, method doubleCheckErrors.
/**
 * Double-checks the consistency errors found so far and removes false
 * positives from the error list.
 * <p>
 * If this repository is part of a cluster, the cluster node is synced
 * first so the check runs against the latest repository state. Errors for
 * which {@code doubleCheck} no longer reports a problem are removed via
 * {@code Iterator.remove()} (safe removal during iteration).
 */
public void doubleCheckErrors() {
    if (!errors.isEmpty()) {
        log.info("Double checking errors");
        final ClusterNode clusterNode = handler.getContext().getClusterNode();
        if (clusterNode != null) {
            try {
                clusterNode.sync();
            } catch (ClusterException e) {
                // fix: pass the exception so the cause/stack trace is logged,
                // instead of silently dropping it
                log.error("Could not sync cluster node for double checking errors", e);
            }
        }
        final Iterator<ConsistencyCheckError> iterator = errors.iterator();
        while (iterator.hasNext()) {
            try {
                final ConsistencyCheckError error = iterator.next();
                if (!error.doubleCheck(handler, stateMgr)) {
                    // the error no longer reproduces: drop it as a false positive
                    log.info("False positive: " + error.toString());
                    iterator.remove();
                }
            } catch (RepositoryException e) {
                log.error("Failed to double check consistency error", e);
            } catch (IOException e) {
                log.error("Failed to double check consistency error", e);
            }
        }
    }
}
Aggregations