use of alluxio.proto.journal.Journal.JournalEntry in project alluxio by Alluxio.
the class AbstractJournalFormatterTest method read.
/**
 * De-serializes a {@link JournalEntry} from file.
 *
 * @return the {@link JournalEntry} de-serialized
 */
protected JournalEntry read() throws IOException {
  JournalInputStream jis = mFormatter.deserialize(mIs);
  JournalEntry entry = jis.getNextEntry();
  Assert.assertEquals(TEST_SEQUENCE_NUMBER, jis.getLatestSequenceNumber());
  return entry;
}
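The write() counterpart used by entryTest in the next snippet is not included here. A minimal sketch of what it might look like, assuming the test also holds an output stream mOs paired with mIs and that the formatter exposes serialize(entry, outputStream):

protected void write(JournalEntry entry) throws IOException {
  // Serialize the entry with the formatter under test; mOs and the
  // serialize(entry, outputStream) signature are assumptions here.
  mFormatter.serialize(entry, mOs);
  mOs.flush();
}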
use of alluxio.proto.journal.Journal.JournalEntry in project alluxio by Alluxio.
the class AbstractJournalFormatterTest method entryTest.
/**
 * Tests serialization and deserialization for a {@link JournalEntry}.
 *
 * @param entry the {@link JournalEntry} to be tested
 */
protected void entryTest(JournalEntry entry) throws IOException {
  write(entry);
  JournalEntry readEntry = read();
  assertSameEntry(entry, readEntry);
}
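For illustration only, a concrete test case might build a JournalEntry with its protobuf builder and round-trip it through entryTest; the test name and the choice of fields are assumptions, not taken from the Alluxio test suite:

@Test
public void sequenceNumberOnlyEntry() throws IOException {
  // Minimal protobuf entry; TEST_SEQUENCE_NUMBER is the constant asserted in read() above.
  JournalEntry entry = JournalEntry.newBuilder()
      .setSequenceNumber(TEST_SEQUENCE_NUMBER)
      .build();
  // Serialize, deserialize, and compare the two entries.
  entryTest(entry);
}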
use of alluxio.proto.journal.Journal.JournalEntry in project alluxio by Alluxio.
the class JournalTailer method processNextJournalLogFiles.
/**
 * Processes all of the completed journal log files that are available. This method returns
 * when the next completed file is not found.
 *
 * {@link #processJournalCheckpoint(boolean)} must have been called previously.
 *
 * @return the number of completed log files processed
 * @throws IOException if an I/O error occurs
 */
public int processNextJournalLogFiles() throws IOException {
  int numFilesProcessed = 0;
  while (mReader.isValid()) {
    // Process the next completed log file, if one exists.
    JournalInputStream inputStream = mReader.getNextInputStream();
    if (inputStream != null) {
      LOG.info("{}: Processing a completed log file.", mMaster.getName());
      JournalEntry entry;
      while ((entry = inputStream.getNextEntry()) != null) {
        mMaster.processJournalEntry(entry);
        // Update the latest sequence number seen.
        mLatestSequenceNumber = inputStream.getLatestSequenceNumber();
      }
      inputStream.close();
      numFilesProcessed++;
      LOG.info("{}: Finished processing the log file.", mMaster.getName());
    } else {
      return numFilesProcessed;
    }
  }
  LOG.info("{}: The checkpoint is out of date. Must reload checkpoint file {}.",
      mMaster.getName(), mJournal.getCheckpointFilePath());
  return numFilesProcessed;
}
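A rough sketch of how a standby master could drive this method, assuming the JournalTailer constructor takes the master and its journal, and using a hypothetical mStandby flag and poll interval; none of this is taken from the Alluxio sources:

private void tailJournalWhileStandby(Master master, Journal journal)
    throws IOException, InterruptedException {
  JournalTailer tailer = new JournalTailer(master, journal); // constructor shape assumed
  // The checkpoint must be replayed before any log files are processed (see the contract above).
  tailer.processJournalCheckpoint(true);
  while (mStandby) { // hypothetical flag cleared when the standby is promoted
    int processed = tailer.processNextJournalLogFiles();
    if (processed == 0) {
      Thread.sleep(1000); // no newly completed log file yet; poll again shortly
    }
  }
}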
use of alluxio.proto.journal.Journal.JournalEntry in project alluxio by Alluxio.
the class JournalTool method main.
/**
 * Reads a journal via
 * {@code java -cp \
 *   assembly/target/alluxio-assemblies-<ALLUXIO-VERSION>-jar-with-dependencies.jar \
 *   alluxio.master.journal.JournalTool < journal/FileSystemMaster/log.out}.
 *
 * @param args arguments passed to the tool
 * @throws IOException if a non-Alluxio related exception occurs
 */
public static void main(String[] args) throws IOException {
  if (!parseInputArgs(args)) {
    usage();
    System.exit(EXIT_FAILED);
  }
  if (sHelp) {
    usage();
    System.exit(EXIT_SUCCEEDED);
  }
  if (!sNoTimeout && !stdinHasData()) {
    System.exit(EXIT_FAILED);
  }
  JournalFormatter formatter = new ProtoBufJournalFormatter();
  JournalInputStream journalStream = formatter.deserialize(System.in);
  JournalEntry entry;
  while ((entry = journalStream.getNextEntry()) != null) {
    System.out.print(entry);
    System.out.println(ENTRY_SEPARATOR);
  }
}
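The same formatter API can also be used programmatically, for example to count the entries in a log file instead of piping it through stdin. A minimal sketch, assuming the file at the placeholder path is in the format ProtoBufJournalFormatter expects:

try (InputStream in = new FileInputStream("journal/FileSystemMaster/log.out")) {
  JournalInputStream stream = new ProtoBufJournalFormatter().deserialize(in);
  long count = 0;
  JournalEntry entry;
  while ((entry = stream.getNextEntry()) != null) {
    count++;
  }
  System.out.println(count + " entries, last sequence number "
      + stream.getLatestSequenceNumber());
}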
use of alluxio.proto.journal.Journal.JournalEntry in project alluxio by Alluxio.
the class BackupManager method initFromBackup.
/**
 * Restores master state from the specified backup.
 *
 * @param is an input stream to read the backup from
 */
public void initFromBackup(InputStream is) throws IOException {
  try (GzipCompressorInputStream gzIn = new GzipCompressorInputStream(is);
       JournalEntryStreamReader reader = new JournalEntryStreamReader(gzIn)) {
    List<Master> masters = mRegistry.getServers();
    // Executor for applying the backup.
    CompletionService<Boolean> completionService = new ExecutorCompletionService<>(
        Executors.newFixedThreadPool(2, ThreadFactoryUtils.build("master-backup-%d", true)));
    // List of active tasks.
    Set<Future<?>> activeTasks = new HashSet<>();
    // The entry queue is used as a buffer and synchronization point between the reader
    // and the applier.
    LinkedBlockingQueue<JournalEntry> journalEntryQueue = new LinkedBlockingQueue<>(
        ServerConfiguration.getInt(PropertyKey.MASTER_BACKUP_ENTRY_BUFFER_COUNT));
    // Whether the backup is still being read.
    AtomicBoolean readingActive = new AtomicBoolean(true);
    // Index masters by name.
    Map<String, Master> mastersByName = Maps.uniqueIndex(masters, Master::getName);
    // Tracks how many entries have been applied.
    AtomicLong appliedEntryCount = new AtomicLong(0);
    // Progress executor.
    ScheduledExecutorService traceExecutor = Executors.newScheduledThreadPool(1,
        ThreadFactoryUtils.build("master-backup-tracer-%d", true));
    traceExecutor.scheduleAtFixedRate(() -> {
      LOG.info("{} entries from backup applied so far...", appliedEntryCount.get());
    }, 30, 30, TimeUnit.SECONDS);
    // Start the timer for backup metrics.
    long startRestoreTime = System.currentTimeMillis();
    // Create the backup reader task.
    activeTasks.add(completionService.submit(() -> {
      try {
        JournalEntry entry;
        while ((entry = reader.readEntry()) != null) {
          journalEntryQueue.put(entry);
        }
        // Put a termination entry to signal the applier.
        journalEntryQueue.put(
            JournalEntry.newBuilder().setSequenceNumber(TERMINATION_SEQ).build());
        return true;
      } catch (InterruptedException ie) {
        // Continue the interrupt chain.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Thread interrupted while reading from backup stream.", ie);
      } finally {
        readingActive.set(false);
      }
    }));
    // Create the applier task.
    activeTasks.add(completionService.submit(() -> {
      try {
        // Apply entries while the reader is still active or the queue is non-empty.
        while (readingActive.get() || journalEntryQueue.size() > 0) {
          // Drain the currently queued elements. Draining entries allows the reader to keep
          // writing while the current batch is being applied.
          List<JournalEntry> drainedEntries = new LinkedList<>();
          if (0 == journalEntryQueue.drainTo(drainedEntries)) {
            // No elements at the moment. Fall back to polling.
            JournalEntry entry = journalEntryQueue.poll(10, TimeUnit.MILLISECONDS);
            if (entry == null) {
              // No entry yet.
              continue;
            }
            drainedEntries.add(entry);
          }
          // Apply the drained entries.
          // Map for storing journal contexts; they should be closed after the drained
          // entries have been applied.
          Map<Master, JournalContext> masterJCMap = new HashMap<>();
          try {
            for (Master master : masters) {
              masterJCMap.put(master, master.createJournalContext());
            }
            // Apply entries.
            for (JournalEntry entry : drainedEntries) {
              // Check for the termination entry.
              if (entry.getSequenceNumber() == TERMINATION_SEQ) {
                // Reading finished.
                return true;
              }
              String masterName;
              try {
                masterName = JournalEntryAssociation.getMasterForEntry(entry);
              } catch (IllegalStateException ise) {
                ProcessUtils.fatalError(LOG, ise, "Unrecognized journal entry: %s", entry);
                throw ise;
              }
              try {
                Master master = mastersByName.get(masterName);
                master.applyAndJournal(masterJCMap.get(master), entry);
                appliedEntryCount.incrementAndGet();
              } catch (Exception e) {
                JournalUtils.handleJournalReplayFailure(LOG, e,
                    "Failed to apply journal entry to master %s. Entry: %s", masterName, entry);
              }
            }
          } finally {
            // Close the journal contexts so applied entries are flushed before the next round.
            for (JournalContext journalContext : masterJCMap.values()) {
              journalContext.close();
            }
          }
        }
        return true;
      } catch (InterruptedException ie) {
        // Continue the interrupt chain.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Thread interrupted while applying backup content.", ie);
      }
    }));
    // Wait until the backup tasks are complete, then stop the metrics timer.
    try {
      safeWaitTasks(activeTasks, completionService);
    } finally {
      mRestoreTimeMs = System.currentTimeMillis() - startRestoreTime;
      mRestoreEntriesCount = appliedEntryCount.get();
      traceExecutor.shutdownNow();
    }
    LOG.info("Restored {} entries from backup", appliedEntryCount.get());
  }
}
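The write side of this path is not shown here. A rough sketch of producing a stream the reader above could consume, assuming the backup format is gzip-compressed, length-delimited protobuf entries (the writeDelimitedTo framing); the helper name and its parameters are hypothetical:

void writeBackup(Iterator<JournalEntry> entries, OutputStream os) throws IOException {
  try (GzipCompressorOutputStream gzOut = new GzipCompressorOutputStream(os)) {
    while (entries.hasNext()) {
      // writeDelimitedTo prefixes each entry with its length, so the reader can
      // recover entry boundaries from the compressed stream.
      entries.next().writeDelimitedTo(gzOut);
    }
  }
}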