Example usage of alluxio.master.file.meta.LockedInodePath in the Alluxio project:
class InodeSyncStream, method loadFileMetadataInternal.
/**
 * Loads metadata for the file identified by the given path from UFS into Alluxio.
 *
 * This method doesn't require any specific type of locking on inodePath. If the path needs to be
 * loaded, we will acquire a write-edge lock.
 *
 * @param rpcContext the rpc context
 * @param inodePath the path for which metadata should be loaded
 * @param resolution the UFS resolution of path
 * @param context the load metadata context
 * @param fsMaster the file system master used to create and complete the file in the inode tree
 * @throws BlockInfoException if the block information is invalid
 * @throws FileDoesNotExistException if the file does not exist when completing it
 * @throws InvalidPathException if the path is invalid
 * @throws FileAlreadyCompletedException if the file has already been completed
 * @throws InvalidFileSizeException if the file size reported by the UFS is invalid
 * @throws IOException if an I/O error occurs talking to the UFS
 */
static void loadFileMetadataInternal(RpcContext rpcContext, LockedInodePath inodePath, MountTable.Resolution resolution, LoadMetadataContext context, DefaultFileSystemMaster fsMaster) throws BlockInfoException, FileDoesNotExistException, InvalidPathException, FileAlreadyCompletedException, InvalidFileSizeException, IOException {
if (inodePath.fullPathExists()) {
// The file is already present in Alluxio; nothing to load.
return;
}
AlluxioURI ufsUri = resolution.getUri();
long ufsBlockSizeByte;
long ufsLength;
AccessControlList acl = null;
// Query the UFS for the file's status, block size, and (optionally) its ACL.
try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
UnderFileSystem ufs = ufsResource.get();
if (context.getUfsStatus() == null) {
context.setUfsStatus(ufs.getExistingFileStatus(ufsUri.toString()));
}
ufsLength = ((UfsFileStatus) context.getUfsStatus()).getContentLength();
long blockSize = ((UfsFileStatus) context.getUfsStatus()).getBlockSize();
// Fall back to the UFS-wide block size when the status does not carry one.
ufsBlockSizeByte = blockSize != UfsFileStatus.UNKNOWN_BLOCK_SIZE ? blockSize : ufs.getBlockSizeByte(ufsUri.toString());
if (fsMaster.isAclEnabled()) {
Pair<AccessControlList, DefaultAccessControlList> aclPair = ufs.getAclPair(ufsUri.toString());
if (aclPair != null) {
acl = aclPair.getFirst();
// DefaultACL should be null, because it is a file
if (aclPair.getSecond() != null) {
LOG.warn("File {} has default ACL in the UFS", inodePath.getUri());
}
}
}
}
// Metadata loaded from UFS has no TTL set.
CreateFileContext createFileContext = CreateFileContext.defaults();
createFileContext.getOptions().setBlockSizeBytes(ufsBlockSizeByte);
createFileContext.getOptions().setRecursive(context.getOptions().getCreateAncestors());
createFileContext.getOptions().setCommonOptions(FileSystemMasterCommonPOptions.newBuilder().setTtl(context.getOptions().getCommonOptions().getTtl()).setTtlAction(context.getOptions().getCommonOptions().getTtlAction()));
// set as through since already in UFS
createFileContext.setWriteType(WriteType.THROUGH);
createFileContext.setMetadataLoad(true);
createFileContext.setOwner(context.getUfsStatus().getOwner());
createFileContext.setGroup(context.getUfsStatus().getGroup());
createFileContext.setXAttr(context.getUfsStatus().getXAttr());
short ufsMode = context.getUfsStatus().getMode();
Mode mode = new Mode(ufsMode);
Long ufsLastModified = context.getUfsStatus().getLastModifiedTime();
if (resolution.getShared()) {
// On a shared mount, grant "other" users the same bits as the owner.
mode.setOtherBits(mode.getOtherBits().or(mode.getOwnerBits()));
}
createFileContext.getOptions().setMode(mode.toProto());
if (acl != null) {
createFileContext.setAcl(acl.getEntries());
}
if (ufsLastModified != null) {
// Preserve the UFS modification time as the operation time in Alluxio.
createFileContext.setOperationTimeMs(ufsLastModified);
}
// Take a write-edge lock so create+complete happen atomically; the MergeJournalContext
// folds the create and complete journal entries together via mergeCreateComplete.
try (LockedInodePath writeLockedPath = inodePath.lockFinalEdgeWrite();
MergeJournalContext merger = new MergeJournalContext(rpcContext.getJournalContext(), writeLockedPath.getUri(), InodeSyncStream::mergeCreateComplete)) {
// We do not want to close this wrapRpcContext because it uses elements from another context
RpcContext wrapRpcContext = new RpcContext(rpcContext.getBlockDeletionContext(), merger, rpcContext.getOperationContext());
fsMaster.createFileInternal(wrapRpcContext, writeLockedPath, createFileContext);
CompleteFileContext completeContext = CompleteFileContext.mergeFrom(CompleteFilePOptions.newBuilder().setUfsLength(ufsLength)).setUfsStatus(context.getUfsStatus());
if (ufsLastModified != null) {
completeContext.setOperationTimeMs(ufsLastModified);
}
fsMaster.completeFileInternal(wrapRpcContext, writeLockedPath, completeContext);
} catch (FileAlreadyExistsException e) {
// This may occur if a thread created or loaded the file before we got the write lock.
// The file already exists, so nothing needs to be loaded.
LOG.debug("Failed to load file metadata: {}", e.toString());
}
// Re-traverse the path to pick up any newly created inodes.
inodePath.traverse();
}
Example usage of alluxio.master.file.meta.LockedInodePath in the Alluxio project:
class InodeTtlChecker, method heartbeat.
/**
 * Scans all TTL buckets that have expired and applies each inode's configured TTL action
 * (FREE or DELETE), then drops the processed buckets.
 *
 * @throws InterruptedException if the checker thread is interrupted mid-scan
 */
@Override
public void heartbeat() throws InterruptedException {
Set<TtlBucket> expiredBuckets = mTtlBuckets.getExpiredBuckets(System.currentTimeMillis());
for (TtlBucket bucket : expiredBuckets) {
for (Inode inode : bucket.getInodes()) {
// Throw if interrupted.
if (Thread.interrupted()) {
throw new InterruptedException("InodeTtlChecker interrupted.");
}
AlluxioURI path = null;
try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(inode.getId(), LockPattern.READ)) {
path = inodePath.getUri();
} catch (FileDoesNotExistException e) {
// The inode has already been deleted, nothing needs to be done.
continue;
} catch (Exception e) {
// Pass the throwable itself (not e.toString()) so the full stack trace is logged,
// consistent with the error handling below. path stays null, so the inode is skipped.
LOG.error("Exception trying to clean up {} for ttl check", inode.toString(), e);
}
if (path != null) {
try {
TtlAction ttlAction = inode.getTtlAction();
LOG.info("Path {} TTL has expired, performing action {}", path.getPath(), ttlAction);
switch (ttlAction) {
case FREE:
// Directories are freed recursively; files are freed individually.
if (inode.isDirectory()) {
mFileSystemMaster.free(path, FreeContext.mergeFrom(FreePOptions.newBuilder().setForced(true).setRecursive(true)));
} else {
mFileSystemMaster.free(path, FreeContext.mergeFrom(FreePOptions.newBuilder().setForced(true)));
}
try (JournalContext journalContext = mFileSystemMaster.createJournalContext()) {
// Reset TTL state so the inode is not processed again on later heartbeats.
mInodeTree.updateInode(journalContext, UpdateInodeEntry.newBuilder().setId(inode.getId()).setTtl(Constants.NO_TTL).setTtlAction(ProtobufUtils.toProtobuf(TtlAction.DELETE)).build());
}
mTtlBuckets.remove(inode);
break;
case DELETE: // Default if not set is DELETE
// Directories are deleted recursively; files are deleted individually.
if (inode.isDirectory()) {
mFileSystemMaster.delete(path, DeleteContext.mergeFrom(DeletePOptions.newBuilder().setRecursive(true)));
} else {
mFileSystemMaster.delete(path, DeleteContext.defaults());
}
break;
default:
LOG.error("Unknown ttl action {}", ttlAction);
}
} catch (Exception e) {
LOG.error("Exception trying to clean up {} for ttl check", inode.toString(), e);
}
}
}
}
// All expired buckets have been processed; discard them.
mTtlBuckets.removeBuckets(expiredBuckets);
}
Example usage of alluxio.master.file.meta.LockedInodePath in the Alluxio project:
class AccessTimeUpdaterTest, method updateAccessTimeImmediately.
/**
 * Verifies that with all intervals set to zero, an access-time update is journaled
 * synchronously and reflected in the inode store right away.
 */
@Test
public void updateAccessTimeImmediately() throws Exception {
// Zero intervals force the updater to flush access-time changes immediately.
mAccessTimeUpdater = new AccessTimeUpdater(mFileSystemMaster, mInodeTree, mContext.getJournalSystem(), 0, 0, 0);
mAccessTimeUpdater.start();
String path = "/foo";
JournalContext mockJournalContext = mock(JournalContext.class);
when(mockJournalContext.get()).thenReturn(mockJournalContext);
createInode(path, CreateFileContext.defaults());
long newAccessTime = CommonUtils.getCurrentMs() + 100L;
long targetInodeId;
try (LockedInodePath lockedPath = mInodeTree.lockFullInodePath(new AlluxioURI(path), InodeTree.LockPattern.READ)) {
mAccessTimeUpdater.updateAccessTime(mockJournalContext, lockedPath.getInode(), newAccessTime);
targetInodeId = lockedPath.getInode().getId();
}
// The update-inode journal entry must have been appended synchronously.
ArgumentCaptor<Journal.JournalEntry> entryCaptor = ArgumentCaptor.forClass(Journal.JournalEntry.class);
verify(mockJournalContext).append(entryCaptor.capture());
Journal.JournalEntry logged = entryCaptor.getValue();
assertTrue(logged.hasUpdateInode());
assertEquals(targetInodeId, logged.getUpdateInode().getId());
assertEquals(newAccessTime, logged.getUpdateInode().getLastAccessTimeMs());
// The inode store must also carry the new access time.
assertEquals(newAccessTime, mInodeStore.get(targetInodeId).get().getLastAccessTimeMs());
}
Example usage of alluxio.master.file.meta.LockedInodePath in the Alluxio project:
class AccessTimeUpdaterTest, method updateAccessTimeAsyncOnShutdown.
/**
 * Verifies that with a non-zero flush interval, the access-time update is applied to the
 * inode store eagerly but only journaled once the journal system shuts down.
 */
@Test
public void updateAccessTimeAsyncOnShutdown() throws Exception {
// A 10-second flush interval means no journal append until a flush is triggered.
mAccessTimeUpdater = new AccessTimeUpdater(mFileSystemMaster, mInodeTree, mContext.getJournalSystem(), 10 * Constants.SECOND_MS, 0, 0);
mAccessTimeUpdater.start(mScheduler);
String path = "/foo";
createInode(path, CreateFileContext.defaults());
JournalContext mockJournalContext = mock(JournalContext.class);
when(mockJournalContext.get()).thenReturn(mockJournalContext);
when(mFileSystemMaster.createJournalContext()).thenReturn(mockJournalContext);
long newAccessTime = CommonUtils.getCurrentMs() + 100L;
long targetInodeId;
try (LockedInodePath lockedPath = mInodeTree.lockFullInodePath(new AlluxioURI(path), InodeTree.LockPattern.READ)) {
mAccessTimeUpdater.updateAccessTime(mockJournalContext, lockedPath.getInode(), newAccessTime);
targetInodeId = lockedPath.getInode().getId();
}
// The in-memory inode is updated right away.
assertEquals(newAccessTime, mInodeStore.get(targetInodeId).get().getLastAccessTimeMs());
mScheduler.jumpAndExecute(1, TimeUnit.SECONDS);
// One simulated second is below the flush interval, so nothing is journaled yet.
verify(mockJournalContext, never()).append(any(Journal.JournalEntry.class));
// Stopping the journal system forces the pending update to flush.
mContext.getJournalSystem().stop();
// The flushed entry must carry the inode id and the new access time.
ArgumentCaptor<Journal.JournalEntry> entryCaptor = ArgumentCaptor.forClass(Journal.JournalEntry.class);
verify(mockJournalContext).append(entryCaptor.capture());
Journal.JournalEntry logged = entryCaptor.getValue();
assertTrue(logged.hasUpdateInode());
assertEquals(targetInodeId, logged.getUpdateInode().getId());
assertEquals(newAccessTime, logged.getUpdateInode().getLastAccessTimeMs());
}
Example usage of alluxio.master.file.meta.LockedInodePath in the Alluxio project:
class AccessTimeUpdaterTest, helper method createInode.
/**
 * Creates an inode for the given path in the inode tree and persists it to the inode store.
 *
 * @param path the Alluxio path to create
 * @param context options controlling how the file is created
 */
private void createInode(String path, CreateFileContext context) throws Exception {
// A write-edge lock is required to add new inodes along the path.
try (LockedInodePath lockedPath = mInodeTree.lockInodePath(new AlluxioURI(path), InodeTree.LockPattern.WRITE_EDGE)) {
List<Inode> created = mInodeTree.createPath(RpcContext.NOOP, lockedPath, context);
// Persist the deepest inode created for the path (the file itself).
long newInodeId = created.get(created.size() - 1).getId();
MutableInode<?> newInode = mInodeStore.getMutable(newInodeId).get();
mInodeStore.writeInode(newInode);
}
}
Aggregations