use of alluxio.master.journal.JournalContext in project alluxio by Alluxio.
the class BackupManager method initFromBackup.
/**
 * Restores master state from the specified backup.
 *
 * @param is the input stream from which to read the backup
 */
public void initFromBackup(InputStream is) throws IOException {
  try (GzipCompressorInputStream gzIn = new GzipCompressorInputStream(is);
      JournalEntryStreamReader reader = new JournalEntryStreamReader(gzIn)) {
    List<Master> masters = mRegistry.getServers();
    // Executor for applying the backup.
    CompletionService<Boolean> completionService = new ExecutorCompletionService<>(
        Executors.newFixedThreadPool(2, ThreadFactoryUtils.build("master-backup-%d", true)));
    // List of active tasks.
    Set<Future<?>> activeTasks = new HashSet<>();
    // The entry queue is used as a buffer and synchronization point between the reader
    // and the applier.
    LinkedBlockingQueue<JournalEntry> journalEntryQueue = new LinkedBlockingQueue<>(
        ServerConfiguration.getInt(PropertyKey.MASTER_BACKUP_ENTRY_BUFFER_COUNT));
    // Whether the backup is still being read.
    AtomicBoolean readingActive = new AtomicBoolean(true);
    // Index masters by name.
    Map<String, Master> mastersByName = Maps.uniqueIndex(masters, Master::getName);
    // Tracks how many entries have been applied.
    AtomicLong appliedEntryCount = new AtomicLong(0);
    // Progress executor for periodic logging.
    ScheduledExecutorService traceExecutor = Executors.newScheduledThreadPool(1,
        ThreadFactoryUtils.build("master-backup-tracer-%d", true));
    traceExecutor.scheduleAtFixedRate(() -> {
      LOG.info("{} entries from backup applied so far...", appliedEntryCount.get());
    }, 30, 30, TimeUnit.SECONDS);
    // Start the timer for backup metrics.
    long startRestoreTime = System.currentTimeMillis();
    // Create the backup reader task.
    activeTasks.add(completionService.submit(() -> {
      try {
        JournalEntry entry;
        while ((entry = reader.readEntry()) != null) {
          journalEntryQueue.put(entry);
        }
        // Put a termination entry to signal the applier.
        journalEntryQueue.put(
            JournalEntry.newBuilder().setSequenceNumber(TERMINATION_SEQ).build());
        return true;
      } catch (InterruptedException ie) {
        // Continue the interrupt chain.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Thread interrupted while reading from backup stream.", ie);
      } finally {
        readingActive.set(false);
      }
    }));
    // Create the applier task.
    activeTasks.add(completionService.submit(() -> {
      try {
        // Read entries from the backup.
        while (readingActive.get() || journalEntryQueue.size() > 0) {
          // Drain the currently buffered elements. Draining lets the reader keep
          // writing while the current batch is being applied.
          List<JournalEntry> drainedEntries = new LinkedList<>();
          if (0 == journalEntryQueue.drainTo(drainedEntries)) {
            // No elements at the moment. Fall back to polling.
            JournalEntry entry = journalEntryQueue.poll(10, TimeUnit.MILLISECONDS);
            if (entry == null) {
              // No entry yet.
              continue;
            }
            drainedEntries.add(entry);
          }
          // Apply the drained entries.
          // Map for storing journal contexts.
          Map<Master, JournalContext> masterJCMap = new HashMap<>();
          try {
            // These contexts must be closed after the drained entries are applied.
            for (Master master : masters) {
              masterJCMap.put(master, master.createJournalContext());
            }
            // Apply entries.
            for (JournalEntry entry : drainedEntries) {
              // Check for the termination entry.
              if (entry.getSequenceNumber() == TERMINATION_SEQ) {
                // Reading finished.
                return true;
              }
              String masterName;
              try {
                masterName = JournalEntryAssociation.getMasterForEntry(entry);
              } catch (IllegalStateException ise) {
                ProcessUtils.fatalError(LOG, ise, "Unrecognized journal entry: %s", entry);
                throw ise;
              }
              try {
                Master master = mastersByName.get(masterName);
                master.applyAndJournal(masterJCMap.get(master), entry);
                appliedEntryCount.incrementAndGet();
              } catch (Exception e) {
                JournalUtils.handleJournalReplayFailure(LOG, e,
                    "Failed to apply journal entry to master %s. Entry: %s", masterName, entry);
              }
            }
          } finally {
            // Close the journal contexts so applied entries are flushed before the next round.
            for (JournalContext journalContext : masterJCMap.values()) {
              journalContext.close();
            }
          }
        }
        return true;
      } catch (InterruptedException ie) {
        // Continue the interrupt chain.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Thread interrupted while applying backup content.", ie);
      }
    }));
    // Wait until the backup tasks complete, then stop the metrics timer.
    try {
      safeWaitTasks(activeTasks, completionService);
    } finally {
      mRestoreTimeMs = System.currentTimeMillis() - startRestoreTime;
      mRestoreEntriesCount = appliedEntryCount.get();
      traceExecutor.shutdownNow();
    }
    LOG.info("Restored {} entries from backup", appliedEntryCount.get());
  }
}
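The reader/applier split above is a bounded producer-consumer pipeline that uses a sentinel entry for termination. Below is a minimal, self-contained sketch of the same pattern; the Entry type, sentinel value, queue capacity, and entry counts are illustrative stand-ins, not Alluxio's types or configuration.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;

public class SentinelPipeline {
  private static final long TERMINATION_SEQ = -1;

  record Entry(long sequenceNumber) {}

  public static void main(String[] args) throws Exception {
    // Bounded queue: the reader blocks when the applier falls behind.
    BlockingQueue<Entry> queue = new LinkedBlockingQueue<>(10_000);
    ExecutorService pool = Executors.newFixedThreadPool(2);

    // Reader: enqueue entries, then a sentinel to signal end-of-stream.
    pool.submit(() -> {
      for (long seq = 0; seq < 100_000; seq++) {
        queue.put(new Entry(seq));
      }
      queue.put(new Entry(TERMINATION_SEQ));
      return null;
    });

    // Applier: drain in batches so new entries can be buffered while the
    // current batch is applied; stop when the sentinel is seen.
    Future<Long> applied = pool.submit(() -> {
      long count = 0;
      List<Entry> batch = new ArrayList<>();
      while (true) {
        batch.clear();
        if (queue.drainTo(batch) == 0) {
          Entry entry = queue.poll(10, TimeUnit.MILLISECONDS);
          if (entry == null) {
            continue; // nothing buffered yet
          }
          batch.add(entry);
        }
        for (Entry entry : batch) {
          if (entry.sequenceNumber() == TERMINATION_SEQ) {
            return count;
          }
          count++; // a real applier would apply the entry here
        }
      }
    });

    System.out.println("Applied " + applied.get() + " entries");
    pool.shutdown();
  }
}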
use of alluxio.master.journal.JournalContext in project alluxio by Alluxio.
the class DefaultBlockMaster method getNewContainerId.
/**
 * @return a new block container id
 */
@Override
public long getNewContainerId() throws UnavailableException {
  synchronized (mBlockContainerIdGenerator) {
    long containerId = mBlockContainerIdGenerator.getNewContainerId();
    if (containerId < mJournaledNextContainerId) {
      // This container id is within the journaled reservation, so it can be returned
      // without having to write anything to the journal.
      return containerId;
    }
    // This container id is not safe with respect to the last journaled container id.
    // Therefore, journal the new state of the container id. This implies that when a master
    // crashes, the container ids within the reservation which have not been used yet will
    // never be used. This is a tradeoff between fully utilizing the container id space, vs.
    // improving master scalability.
    // TODO(gpang): investigate if dynamic reservation sizes could be effective
    // Set the next id to journal with a reservation of container ids, to avoid having to write
    // to the journal for ids within the reservation.
    mJournaledNextContainerId = containerId + CONTAINER_ID_RESERVATION_SIZE;
    try (JournalContext journalContext = createJournalContext()) {
      // This must be flushed while holding the lock on mBlockContainerIdGenerator, in order to
      // prevent subsequent calls from returning ids that have not been journaled and flushed.
      journalContext.append(getContainerIdJournalEntry());
    }
    return containerId;
  }
}
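The reservation scheme above pays one journal write per CONTAINER_ID_RESERVATION_SIZE ids instead of one per id, at the cost of leaking any unused ids from a reservation when the master crashes. A toy version of the same high-water-mark pattern, with the journal stubbed out and the reservation size chosen arbitrarily:

public class ReservingIdGenerator {
  private static final long RESERVATION_SIZE = 1000; // illustrative size

  private long mNextId = 0;
  private long mJournaledNextId = 0;

  /** Stand-in for appending and flushing a journal entry. */
  private void journalNextId(long nextId) {
    System.out.println("journal: next-id high-water mark = " + nextId);
  }

  public synchronized long getNewId() {
    long id = mNextId++;
    if (id < mJournaledNextId) {
      // Within the journaled reservation; no journal write needed.
      return id;
    }
    // Journal a new reservation. After a crash, recovery resumes from the
    // journaled mark, so unused ids in the old reservation are skipped.
    mJournaledNextId = id + RESERVATION_SIZE;
    journalNextId(mJournaledNextId);
    return id;
  }
}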
use of alluxio.master.journal.JournalContext in project alluxio by Alluxio.
the class DefaultBlockMaster method commitBlockInUFS.
@Override
public void commitBlockInUFS(long blockId, long length) throws UnavailableException {
  LOG.debug("Commit block in ufs. blockId: {}, length: {}", blockId, length);
  try (JournalContext journalContext = createJournalContext();
      LockResource r = lockBlock(blockId)) {
    if (mBlockStore.getBlock(blockId).isPresent()) {
      // Block metadata already exists, so there is no need to create a new entry.
      return;
    }
    mBlockStore.putBlock(blockId, BlockMeta.newBuilder().setLength(length).build());
    BlockInfoEntry blockInfo =
        BlockInfoEntry.newBuilder().setBlockId(blockId).setLength(length).build();
    journalContext.append(JournalEntry.newBuilder().setBlockInfo(blockInfo).build());
  }
}
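This method is a compact example of the check-then-apply-then-journal shape: under the per-block lock, an existing record makes the commit a no-op; otherwise the in-memory store and the journal are updated together, and closing the journal context flushes the entry. A schematic sketch of that shape with the lock, store, and journal all stubbed (none of these are Alluxio's real types):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class UfsBlockCommitter {
  /** Minimal stand-in for a journal context: append buffers, close flushes. */
  static class StubJournalContext implements AutoCloseable {
    void append(String entry) { /* buffer the entry */ }
    @Override public void close() { /* flush buffered entries */ }
  }

  private final Map<Long, Long> mBlockLengths = new ConcurrentHashMap<>();

  public void commitBlockInUfs(long blockId, long length) {
    try (StubJournalContext journalContext = new StubJournalContext()) {
      synchronized (this) { // stands in for the per-block lock
        if (mBlockLengths.containsKey(blockId)) {
          // Metadata already exists; the commit is idempotent.
          return;
        }
        mBlockLengths.put(blockId, length);
        journalContext.append("BlockInfo{id=" + blockId + ", length=" + length + "}");
      }
    }
  }
}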
use of alluxio.master.journal.JournalContext in project alluxio by Alluxio.
the class MountTable method add.
/**
 * Mounts the given UFS path at the given Alluxio path. The Alluxio path should not be nested
 * under an existing mount point.
 *
 * @param journalContext the journal context
 * @param alluxioUri an Alluxio path URI
 * @param ufsUri a UFS path URI
 * @param mountId the mount id
 * @param options the mount options
 * @throws FileAlreadyExistsException if the mount point already exists
 * @throws InvalidPathException if an invalid path is encountered
 */
public void add(Supplier<JournalContext> journalContext, AlluxioURI alluxioUri, AlluxioURI ufsUri,
    long mountId, MountPOptions options) throws FileAlreadyExistsException, InvalidPathException {
  String alluxioPath = alluxioUri.getPath().isEmpty() ? "/" : alluxioUri.getPath();
  LOG.info("Mounting {} at {}", ufsUri, alluxioPath);
  try (LockResource r = new LockResource(mWriteLock)) {
    if (mState.getMountTable().containsKey(alluxioPath)) {
      throw new FileAlreadyExistsException(
          ExceptionMessage.MOUNT_POINT_ALREADY_EXISTS.getMessage(alluxioPath));
    }
    // Make sure the UFS path being mounted is not a prefix
    // or suffix of any existing mount path.
    for (Map.Entry<String, MountInfo> entry : mState.getMountTable().entrySet()) {
      AlluxioURI mountedUfsUri = entry.getValue().getUfsUri();
      if ((ufsUri.getScheme() == null || ufsUri.getScheme().equals(mountedUfsUri.getScheme()))
          && (ufsUri.getAuthority().toString().equals(mountedUfsUri.getAuthority().toString()))) {
        String ufsPath = ufsUri.getPath().isEmpty() ? "/" : ufsUri.getPath();
        String mountedUfsPath = mountedUfsUri.getPath().isEmpty() ? "/" : mountedUfsUri.getPath();
        if (PathUtils.hasPrefix(ufsPath, mountedUfsPath)) {
          throw new InvalidPathException(ExceptionMessage.MOUNT_POINT_PREFIX_OF_ANOTHER
              .getMessage(mountedUfsUri.toString(), ufsUri.toString()));
        }
        if (PathUtils.hasPrefix(mountedUfsPath, ufsPath)) {
          throw new InvalidPathException(ExceptionMessage.MOUNT_POINT_PREFIX_OF_ANOTHER
              .getMessage(ufsUri.toString(), mountedUfsUri.toString()));
        }
      }
    }
    Map<String, String> properties = options.getPropertiesMap();
    mState.applyAndJournal(journalContext, AddMountPointEntry.newBuilder()
        .addAllProperties(properties.entrySet().stream()
            .map(entry -> StringPairEntry.newBuilder()
                .setKey(entry.getKey()).setValue(entry.getValue()).build())
            .collect(Collectors.toList()))
        .setAlluxioPath(alluxioPath)
        .setMountId(mountId)
        .setReadOnly(options.getReadOnly())
        .setShared(options.getShared())
        .setUfsPath(ufsUri.toString())
        .build());
  }
}
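The two PathUtils.hasPrefix checks are symmetric: when schemes and authorities match, the new UFS path may neither sit under an existing mounted UFS path nor contain one. A standalone sketch of that validation, using a hypothetical segment-aware hasPrefix rather than Alluxio's PathUtils:

import java.util.Map;

public class MountValidator {
  /** True if {@code path} equals {@code prefix} or is nested under it, by path segment. */
  static boolean hasPrefix(String path, String prefix) {
    if (prefix.equals("/")) {
      return true;
    }
    return path.equals(prefix) || path.startsWith(prefix + "/");
  }

  /** Rejects a new UFS path that nests under, or swallows, an existing mount. */
  static void checkNoNesting(String newUfsPath, Map<String, String> mountedUfsPaths) {
    for (Map.Entry<String, String> e : mountedUfsPaths.entrySet()) {
      String mounted = e.getValue();
      if (hasPrefix(newUfsPath, mounted)) {
        throw new IllegalArgumentException(mounted + " is a prefix of " + newUfsPath);
      }
      if (hasPrefix(mounted, newUfsPath)) {
        throw new IllegalArgumentException(newUfsPath + " is a prefix of " + mounted);
      }
    }
  }
}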
use of alluxio.master.journal.JournalContext in project alluxio by Alluxio.
the class InodeTtlChecker method heartbeat.
@Override
public void heartbeat() throws InterruptedException {
  Set<TtlBucket> expiredBuckets = mTtlBuckets.getExpiredBuckets(System.currentTimeMillis());
  for (TtlBucket bucket : expiredBuckets) {
    for (Inode inode : bucket.getInodes()) {
      // Throw if interrupted.
      if (Thread.interrupted()) {
        throw new InterruptedException("InodeTtlChecker interrupted.");
      }
      AlluxioURI path = null;
      try (LockedInodePath inodePath =
          mInodeTree.lockFullInodePath(inode.getId(), LockPattern.READ)) {
        path = inodePath.getUri();
      } catch (FileDoesNotExistException e) {
        // The inode has already been deleted, so nothing needs to be done.
        continue;
      } catch (Exception e) {
        LOG.error("Exception trying to clean up {} for ttl check: {}", inode.toString(),
            e.toString());
      }
      if (path != null) {
        try {
          TtlAction ttlAction = inode.getTtlAction();
          LOG.info("Path {} TTL has expired, performing action {}", path.getPath(), ttlAction);
          switch (ttlAction) {
            case FREE:
              // Free directories recursively; free files directly.
              if (inode.isDirectory()) {
                mFileSystemMaster.free(path, FreeContext.mergeFrom(
                    FreePOptions.newBuilder().setForced(true).setRecursive(true)));
              } else {
                mFileSystemMaster.free(path,
                    FreeContext.mergeFrom(FreePOptions.newBuilder().setForced(true)));
              }
              try (JournalContext journalContext = mFileSystemMaster.createJournalContext()) {
                // Reset the TTL state so the inode is not processed again.
                mInodeTree.updateInode(journalContext, UpdateInodeEntry.newBuilder()
                    .setId(inode.getId())
                    .setTtl(Constants.NO_TTL)
                    .setTtlAction(ProtobufUtils.toProtobuf(TtlAction.DELETE))
                    .build());
              }
              mTtlBuckets.remove(inode);
              break;
            case DELETE: // Default if not set is DELETE.
              // Delete directories recursively; delete files directly.
              if (inode.isDirectory()) {
                mFileSystemMaster.delete(path,
                    DeleteContext.mergeFrom(DeletePOptions.newBuilder().setRecursive(true)));
              } else {
                mFileSystemMaster.delete(path, DeleteContext.defaults());
              }
              break;
            default:
              LOG.error("Unknown ttl action {}", ttlAction);
          }
        } catch (Exception e) {
          LOG.error("Exception trying to clean up {} for ttl check", inode.toString(), e);
        }
      }
    }
  }
  mTtlBuckets.removeBuckets(expiredBuckets);
}
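What makes this heartbeat cheap is TTL bucketing: inodes are grouped into fixed-width expiry intervals, so each pass inspects only the buckets whose interval has fully elapsed rather than every inode. A simplified bucket store illustrating the idea; the interval width and types are illustrative, not Alluxio's TtlBucket implementation:

import java.util.*;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;

public class TtlBuckets<T> {
  private static final long BUCKET_INTERVAL_MS = 60 * 60 * 1000; // one hour, illustrative

  // Bucket start time -> items expiring within [start, start + interval).
  private final ConcurrentSkipListMap<Long, Set<T>> mBuckets = new ConcurrentSkipListMap<>();

  public void add(T item, long expiryMs) {
    long bucketStart = expiryMs - (expiryMs % BUCKET_INTERVAL_MS);
    mBuckets.computeIfAbsent(bucketStart, k -> Collections.synchronizedSet(new HashSet<>()))
        .add(item);
  }

  /** Removes and returns all items in buckets whose whole interval is before {@code nowMs}. */
  public List<T> pollExpired(long nowMs) {
    List<T> expired = new ArrayList<>();
    // A bucket [start, start + interval) is fully expired when start <= now - interval.
    ConcurrentNavigableMap<Long, Set<T>> head =
        mBuckets.headMap(nowMs - BUCKET_INTERVAL_MS, true);
    for (Iterator<Map.Entry<Long, Set<T>>> it = head.entrySet().iterator(); it.hasNext(); ) {
      expired.addAll(it.next().getValue());
      it.remove();
    }
    return expired;
  }
}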