Use of org.apache.jackrabbit.core.cluster.ClusterRecordDeserializer in project jackrabbit by apache.
From the class SearchIndex, method getChangeLogRecords:
/**
 * Polls the underlying journal for events of type ChangeLogRecord that
 * happened after a given revision on a given workspace.
 *
 * @param revision
 *            starting revision
 * @param workspace
 *            the workspace name
 * @return the change log records found, or an empty list if this node is
 *         not part of a cluster
 */
private List<ChangeLogRecord> getChangeLogRecords(long revision, final String workspace) {
    log.debug("Get changes from the Journal for revision {} and workspace {}.", revision, workspace);
    ClusterNode cn = getContext().getClusterNode();
    if (cn == null) {
        return Collections.emptyList();
    }
    Journal journal = cn.getJournal();
    final List<ChangeLogRecord> events = new ArrayList<ChangeLogRecord>();
    ClusterRecordDeserializer deserializer = new ClusterRecordDeserializer();
    RecordIterator records = null;
    try {
        records = journal.getRecords(revision);
        while (records.hasNext()) {
            Record record = records.nextRecord();
            if (!record.getProducerId().equals(cn.getId())) {
                continue;
            }
            ClusterRecord r = null;
            try {
                r = deserializer.deserialize(record);
            } catch (JournalException e) {
                log.error("Unable to read revision '" + record.getRevision() + "'.", e);
            }
            if (r == null) {
                continue;
            }
            r.process(new ClusterRecordProcessor() {
                public void process(ChangeLogRecord record) {
                    String eventW = record.getWorkspace();
                    if (eventW != null ? eventW.equals(workspace) : workspace == null) {
                        events.add(record);
                    }
                }

                public void process(LockRecord record) {
                }

                public void process(NamespaceRecord record) {
                }

                public void process(NodeTypeRecord record) {
                }

                public void process(PrivilegeRecord record) {
                }

                public void process(WorkspaceRecord record) {
                }
            });
        }
    } catch (JournalException e1) {
        log.error(e1.getMessage(), e1);
    } finally {
        if (records != null) {
            records.close();
        }
    }
    return events;
}
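
Both usages on this page follow the same pattern: iterate over a Journal's records, deserialize each raw Record into a typed ClusterRecord with ClusterRecordDeserializer, and react to the record types of interest through a ClusterRecordProcessor callback, since Jackrabbit dispatches on the concrete record subtype via ClusterRecord.process. The sketch below isolates that pattern in a minimal, self-contained form. It is illustrative only: the ChangeLogCollector class name is an assumption, and the producer-id and workspace filters used by SearchIndex above are deliberately omitted.

import java.util.ArrayList;
import java.util.List;

import org.apache.jackrabbit.core.cluster.ChangeLogRecord;
import org.apache.jackrabbit.core.cluster.ClusterRecord;
import org.apache.jackrabbit.core.cluster.ClusterRecordDeserializer;
import org.apache.jackrabbit.core.cluster.ClusterRecordProcessor;
import org.apache.jackrabbit.core.cluster.LockRecord;
import org.apache.jackrabbit.core.cluster.NamespaceRecord;
import org.apache.jackrabbit.core.cluster.NodeTypeRecord;
import org.apache.jackrabbit.core.cluster.PrivilegeRecord;
import org.apache.jackrabbit.core.cluster.WorkspaceRecord;
import org.apache.jackrabbit.core.journal.Journal;
import org.apache.jackrabbit.core.journal.JournalException;
import org.apache.jackrabbit.core.journal.RecordIterator;

public class ChangeLogCollector {

    /**
     * Collects every ChangeLogRecord that appears in the journal after the
     * given revision. A simplified sketch of the pattern used above; the
     * producer-id and workspace checks are intentionally left out.
     */
    public static List<ChangeLogRecord> collect(Journal journal, long revision)
            throws JournalException {
        final List<ChangeLogRecord> events = new ArrayList<ChangeLogRecord>();
        ClusterRecordDeserializer deserializer = new ClusterRecordDeserializer();
        RecordIterator records = journal.getRecords(revision);
        try {
            while (records.hasNext()) {
                // deserialize the raw journal record into a typed cluster record
                ClusterRecord cr = deserializer.deserialize(records.nextRecord());
                // double dispatch: only the ChangeLogRecord callback collects
                cr.process(new ClusterRecordProcessor() {
                    public void process(ChangeLogRecord record) {
                        events.add(record);
                    }
                    public void process(LockRecord record) { }
                    public void process(NamespaceRecord record) { }
                    public void process(NodeTypeRecord record) { }
                    public void process(PrivilegeRecord record) { }
                    public void process(WorkspaceRecord record) { }
                });
            }
        } finally {
            records.close();
        }
        return events;
    }
}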
Use of org.apache.jackrabbit.core.cluster.ClusterRecordDeserializer in project jackrabbit by apache.
From the class EventJournalImpl, method refill:
/**
 * Refills the {@link #eventBundleBuffer}.
 */
private void refill() {
    assert eventBundleBuffer.isEmpty();
    try {
        RecordProcessor processor = new RecordProcessor();
        ClusterRecordDeserializer deserializer = new ClusterRecordDeserializer();
        RecordIterator records;
        if (lastRevision != null) {
            log.debug("refilling event bundle buffer starting at revision {}", lastRevision);
            records = journal.getRecords(lastRevision.longValue());
        } else {
            log.debug("refilling event bundle buffer starting at journal beginning");
            records = journal.getRecords();
        }
        try {
            while (processor.getNumEvents() < MIN_BUFFER_SIZE && records.hasNext()) {
                Record record = records.nextRecord();
                if (record.getProducerId().equals(producerId)) {
                    ClusterRecord cr = deserializer.deserialize(record);
                    if (!session.getWorkspace().getName().equals(cr.getWorkspace())) {
                        continue;
                    }
                    cr.process(processor);
                    lastRevision = new Long(cr.getRevision());
                }
            }
            if (processor.getNumEvents() >= MIN_BUFFER_SIZE) {
                // remember in skip map
                SortedMap<Long, Long> skipMap = getSkipMap();
                Long timestamp = new Long(processor.getLastTimestamp());
                synchronized (skipMap) {
                    if (log.isDebugEnabled()) {
                        DateFormat df = DateFormat.getDateTimeInstance();
                        log.debug("remember record in skip map: {} -> {}",
                                df.format(new Date(timestamp.longValue())), lastRevision);
                    }
                    skipMap.put(timestamp, lastRevision);
                }
            }
        } finally {
            records.close();
        }
    } catch (JournalException e) {
        log.warn("Unable to read journal records", e);
    }
}
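
The skip map that refill() updates maps the timestamp of the last buffered event to the journal revision reached at that point, so a later lookup by date can jump close to the right revision instead of replaying the whole journal. The consuming side is not shown on this page; as a hedged sketch under that assumption, a lookup could pick the greatest checkpoint at or before the requested timestamp:

import java.util.SortedMap;
import java.util.TreeMap;

public class SkipMapLookup {

    /**
     * Returns the journal revision to start reading from when looking for
     * events at or after the given timestamp, or 0 (journal beginning) when
     * no earlier checkpoint exists. Illustrative only; EventJournalImpl's
     * actual lookup logic is not part of the snippet above.
     */
    static long startingRevision(SortedMap<Long, Long> skipMap, long timestamp) {
        synchronized (skipMap) {
            // all checkpoints recorded at or before the requested timestamp
            SortedMap<Long, Long> head = skipMap.headMap(Long.valueOf(timestamp + 1));
            return head.isEmpty() ? 0L : head.get(head.lastKey()).longValue();
        }
    }

    public static void main(String[] args) {
        SortedMap<Long, Long> skipMap = new TreeMap<Long, Long>();
        skipMap.put(Long.valueOf(1000L), Long.valueOf(42L)); // timestamp 1000 -> revision 42
        skipMap.put(Long.valueOf(2000L), Long.valueOf(99L)); // timestamp 2000 -> revision 99
        System.out.println(startingRevision(skipMap, 1500L)); // prints 42
    }
}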