Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLog in project hadoop by apache.
Example: the class JournalNodeSyncer, method syncWithJournalAtIndex.
private void syncWithJournalAtIndex(int index) {
  LOG.info("Syncing Journal " + jn.getBoundIpcAddress().getAddress()
      + ":" + jn.getBoundIpcAddress().getPort() + " with "
      + otherJNProxies.get(index) + ", journal id: " + jid);
  final QJournalProtocolPB jnProxy = otherJNProxies.get(index).jnProxy;
  if (jnProxy == null) {
    LOG.error("JournalNode Proxy not found.");
    return;
  }

  // Manifest of finalized edit log segments held by this JournalNode.
  List<RemoteEditLog> thisJournalEditLogs;
  try {
    thisJournalEditLogs = journal.getEditLogManifest(0, false).getLogs();
  } catch (IOException e) {
    LOG.error("Exception in getting local edit log manifest", e);
    return;
  }

  // Manifest of finalized segments held by the remote JournalNode at 'index'.
  GetEditLogManifestResponseProto editLogManifest;
  try {
    editLogManifest = jnProxy.getEditLogManifest(null,
        GetEditLogManifestRequestProto.newBuilder().setJid(jidProto)
            .setSinceTxId(0).setInProgressOk(false).build());
  } catch (ServiceException e) {
    LOG.error("Could not sync with Journal at "
        + otherJNProxies.get(journalNodeIndexForSync), e);
    return;
  }

  // Compare the two manifests and download any segments missing locally.
  getMissingLogSegments(thisJournalEditLogs, editLogManifest,
      otherJNProxies.get(index));
}
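The actual comparison and download happens in getMissingLogSegments(), which is not shown here. As a rough, standalone sketch of the comparison step it implies (the class and method below are invented for illustration and are not part of JournalNodeSyncer), one could collect the remote RemoteEditLog entries whose start transaction id has no local counterpart:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;

// Hypothetical helper, for illustration only: returns the remote segments
// whose start txid is not present in the local manifest. The real
// getMissingLogSegments() also schedules downloads of those segments.
final class MissingSegmentSketch {
  static List<RemoteEditLog> findMissing(List<RemoteEditLog> local,
      List<RemoteEditLog> remote) {
    Set<Long> localStartTxIds = new HashSet<>();
    for (RemoteEditLog log : local) {
      localStartTxIds.add(log.getStartTxId());
    }
    List<RemoteEditLog> missing = new ArrayList<>();
    for (RemoteEditLog log : remote) {
      if (!localStartTxIds.contains(log.getStartTxId())) {
        missing.add(log);
      }
    }
    return missing;
  }
}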
Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLog in project hadoop by apache.
Example: the class JournalSet, method getEditLogManifest.
/**
 * Return a manifest of what finalized edit logs are available. All available
 * edit logs are returned starting from the transaction id passed. If
 * 'fromTxId' falls in the middle of a log, that log is returned as well.
 *
 * @param fromTxId Starting transaction id to read the logs.
 * @return RemoteEditLogManifest object.
 */
public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) {
  // Collect RemoteEditLogs available from each FileJournalManager
  List<RemoteEditLog> allLogs = Lists.newArrayList();
  for (JournalAndStream j : journals) {
    if (j.getManager() instanceof FileJournalManager) {
      FileJournalManager fjm = (FileJournalManager) j.getManager();
      try {
        allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, false));
      } catch (Throwable t) {
        LOG.warn("Cannot list edit logs in " + fjm, t);
      }
    }
  }

  // Group logs by their starting txid
  ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId =
      Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID);
  long curStartTxId = fromTxId;

  List<RemoteEditLog> logs = Lists.newArrayList();
  while (true) {
    ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId);
    if (logGroup.isEmpty()) {
      // we have a gap in logs - for example because we recovered some old
      // storage directory with ancient logs. Clear out any logs we've
      // accumulated so far, and then skip to the next segment of logs
      // after the gap.
      SortedSet<Long> startTxIds = Sets.newTreeSet(logsByStartTxId.keySet());
      startTxIds = startTxIds.tailSet(curStartTxId);
      if (startTxIds.isEmpty()) {
        break;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Found gap in logs at " + curStartTxId + ": "
              + "not returning previous logs in manifest.");
        }
        logs.clear();
        curStartTxId = startTxIds.first();
        continue;
      }
    }

    // Find the one that extends the farthest forward
    RemoteEditLog bestLog = Collections.max(logGroup);
    logs.add(bestLog);
    // And then start looking from after that point
    curStartTxId = bestLog.getEndTxId() + 1;
  }

  RemoteEditLogManifest ret = new RemoteEditLogManifest(logs,
      curStartTxId - 1);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Generated manifest for logs since " + fromTxId + ":" + ret);
  }
  return ret;
}
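The loop above always prefers the segment that extends farthest forward and, when it hits a gap, discards everything collected so far. For example, with segments [1..100], [1..150], [151..200] and [301..400] and fromTxId = 1, only [301..400] ends up in the manifest. A caller can therefore rely on the returned segments being finalized, non-overlapping and contiguous. The following check is not part of the Hadoop code, just a sketch of that contract:

import java.util.List;

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;

// Illustration only, not part of JournalSet: verifies that the segments in a
// manifest are contiguous, i.e. each one starts right after the previous one
// ends.
final class ManifestContractSketch {
  static void assertContiguous(RemoteEditLogManifest manifest) {
    long expectedStartTxId = -1;
    List<RemoteEditLog> logs = manifest.getLogs();
    for (RemoteEditLog log : logs) {
      if (expectedStartTxId != -1 && log.getStartTxId() != expectedStartTxId) {
        throw new IllegalStateException(
            "Unexpected gap before txid " + log.getStartTxId());
      }
      expectedStartTxId = log.getEndTxId() + 1;
    }
  }
}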