Search in sources :

Example 1 with BlocksMapUpdateInfo

use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

In the class FSNamesystem, the method delete.

/**
 * Remove the indicated file from the namespace.
 *
 * @see ClientProtocol#delete(String, boolean) for a detailed description
 * and the exceptions that may be thrown
 */
boolean delete(String src, boolean recursive, boolean logRetryCache) throws IOException {
    final String operationName = "delete";
    boolean deleted = false;
    BlocksMapUpdateInfo blocksToRemove = null;
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot delete " + src);
        blocksToRemove = FSDirDeleteOp.delete(this, src, recursive, logRetryCache);
        deleted = (blocksToRemove != null);
    } catch (AccessControlException e) {
        // Audit the denied attempt before propagating the failure.
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    // Sync the edit log and release blocks after dropping the write lock.
    getEditLog().logSync();
    if (blocksToRemove != null) {
        // Incremental deletion of blocks
        removeBlocks(blocksToRemove);
    }
    logAuditEvent(true, operationName, src);
    return deleted;
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)

Example 2 with BlocksMapUpdateInfo

use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

In the class FSEditLogLoader, the method applyEditLogOp.

/**
 * Replay a single edit-log operation against the in-memory namespace.
 * Dispatches on {@code op.opCode} and mutates the directory tree, lease
 * manager, block manager, snapshot manager, cache manager or delegation
 * token manager to mirror the logged operation.
 *
 * @param op          the deserialized edit log operation to apply
 * @param fsDir       the directory tree to mutate
 * @param startOpt    startup option; consulted to detect rolling-upgrade rollback
 * @param logVersion  layout version of the edit log being replayed
 * @param lastInodeId the highest inode id seen so far during this load
 * @return the inode id allocated by this op, or
 *         {@code HdfsConstants.GRANDFATHER_INODE_ID} if none was allocated
 * @throws IOException if the op code is unrecognized or the op cannot be applied
 */
@SuppressWarnings("deprecation")
private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, StartupOption startOpt, int logVersion, long lastInodeId) throws IOException {
    long inodeId = HdfsConstants.GRANDFATHER_INODE_ID;
    if (LOG.isTraceEnabled()) {
        LOG.trace("replaying edit log: " + op);
    }
    // Ops that carry RPC ids are re-recorded in the retry cache so retried
    // client calls remain idempotent after a restart/failover.
    final boolean toAddRetryCache = fsNamesys.hasRetryCache() && op.hasRpcIds();
    switch(op.opCode) {
        case OP_ADD:
            {
                AddCloseOp addCloseOp = (AddCloseOp) op;
                final String path = renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
                if (FSNamesystem.LOG.isDebugEnabled()) {
                    FSNamesystem.LOG.debug(op.opCode + ": " + path + " numblocks : " + addCloseOp.blocks.length + " clientHolder " + addCloseOp.clientName + " clientMachine " + addCloseOp.clientMachine);
                }
                // There are 3 cases here:
                // 1. OP_ADD to create a new file
                // 2. OP_ADD to update file blocks
                // 3. OP_ADD to open file for append (old append)
                // See if the file already exists (persistBlocks call)
                INodesInPath iip = fsDir.getINodesInPath(path, DirOp.WRITE);
                INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
                if (oldFile != null && addCloseOp.overwrite) {
                    // This is OP_ADD with overwrite
                    FSDirDeleteOp.deleteForEditLog(fsDir, iip, addCloseOp.mtime);
                    iip = INodesInPath.replace(iip, iip.length() - 1, null);
                    oldFile = null;
                }
                INodeFile newFile = oldFile;
                if (oldFile == null) {
                    // this is OP_ADD on a new file (case 1)
                    // versions > 0 support per file replication
                    // get name and replication
                    final short replication = fsNamesys.getBlockManager().adjustReplication(addCloseOp.replication);
                    assert addCloseOp.blocks.length == 0;
                    // add to the file tree
                    inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion, lastInodeId);
                    newFile = FSDirWriteFileOp.addFileForEditLog(fsDir, inodeId, iip.getExistingINodes(), iip.getLastLocalName(), addCloseOp.permissions, addCloseOp.aclEntries, addCloseOp.xAttrs, replication, addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true, addCloseOp.clientName, addCloseOp.clientMachine, addCloseOp.storagePolicyId);
                    assert newFile != null;
                    iip = INodesInPath.replace(iip, iip.length() - 1, newFile);
                    fsNamesys.leaseManager.addLease(addCloseOp.clientName, newFile.getId());
                    // add the op into retry cache if necessary
                    if (toAddRetryCache) {
                        HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(fsDir, iip);
                        fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, addCloseOp.rpcCallId, stat);
                    }
                } else {
                    // This is OP_ADD on an existing file (old append)
                    if (!oldFile.isUnderConstruction()) {
                        // This is case 3: a call to append() on an already-closed file.
                        if (FSNamesystem.LOG.isDebugEnabled()) {
                            FSNamesystem.LOG.debug("Reopening an already-closed file " + "for append");
                        }
                        LocatedBlock lb = FSDirAppendOp.prepareFileForAppend(fsNamesys, iip, addCloseOp.clientName, addCloseOp.clientMachine, false, false, false);
                        // add the op into retry cache if necessary
                        if (toAddRetryCache) {
                            HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(fsDir, iip);
                            fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, addCloseOp.rpcCallId, new LastBlockWithStatus(lb, stat));
                        }
                    }
                }
                // Fall-through for case 2.
                // Regardless of whether it's a new file or an updated file,
                // update the block list.
                // Update the salient file attributes.
                newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
                newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
                ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsDir.getFSNamesystem(), iip);
                updateBlocks(fsDir, addCloseOp, iip, newFile, ecPolicy);
                break;
            }
        case OP_CLOSE:
            {
                AddCloseOp addCloseOp = (AddCloseOp) op;
                final String path = renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
                if (FSNamesystem.LOG.isDebugEnabled()) {
                    FSNamesystem.LOG.debug(op.opCode + ": " + path + " numblocks : " + addCloseOp.blocks.length + " clientHolder " + addCloseOp.clientName + " clientMachine " + addCloseOp.clientMachine);
                }
                final INodesInPath iip = fsDir.getINodesInPath(path, DirOp.READ);
                final INodeFile file = INodeFile.valueOf(iip.getLastINode(), path);
                // Update the salient file attributes.
                file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
                file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
                ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsDir.getFSNamesystem(), iip);
                updateBlocks(fsDir, addCloseOp, iip, file, ecPolicy);
                // Now close the file
                // NOTE(review): an OP_CLOSE for a file that is not under construction on
                // a post-HDFS-2991 layout indicates a corrupt/inconsistent log.
                if (!file.isUnderConstruction() && logVersion <= LayoutVersion.BUGFIX_HDFS_2991_VERSION) {
                    // should be fixed, so we should treat it as an error.
                    throw new IOException("File is not under construction: " + path);
                }
                // but OP_CLOSE doesn't serialize the holder. So, remove the inode.
                if (file.isUnderConstruction()) {
                    fsNamesys.getLeaseManager().removeLease(file.getId());
                    file.toCompleteFile(file.getModificationTime(), 0, fsNamesys.getBlockManager().getMinReplication());
                }
                break;
            }
        case OP_APPEND:
            {
                AppendOp appendOp = (AppendOp) op;
                final String path = renameReservedPathsOnUpgrade(appendOp.path, logVersion);
                if (FSNamesystem.LOG.isDebugEnabled()) {
                    FSNamesystem.LOG.debug(op.opCode + ": " + path + " clientName " + appendOp.clientName + " clientMachine " + appendOp.clientMachine + " newBlock " + appendOp.newBlock);
                }
                INodesInPath iip = fsDir.getINodesInPath(path, DirOp.WRITE);
                INodeFile file = INodeFile.valueOf(iip.getLastINode(), path);
                // Reopen only if the file is currently closed; a file already under
                // construction needs no preparation here.
                if (!file.isUnderConstruction()) {
                    LocatedBlock lb = FSDirAppendOp.prepareFileForAppend(fsNamesys, iip, appendOp.clientName, appendOp.clientMachine, appendOp.newBlock, false, false);
                    // add the op into retry cache if necessary
                    if (toAddRetryCache) {
                        HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(fsDir, iip);
                        fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId, appendOp.rpcCallId, new LastBlockWithStatus(lb, stat));
                    }
                }
                break;
            }
        case OP_UPDATE_BLOCKS:
            {
                UpdateBlocksOp updateOp = (UpdateBlocksOp) op;
                final String path = renameReservedPathsOnUpgrade(updateOp.path, logVersion);
                if (FSNamesystem.LOG.isDebugEnabled()) {
                    FSNamesystem.LOG.debug(op.opCode + ": " + path + " numblocks : " + updateOp.blocks.length);
                }
                INodesInPath iip = fsDir.getINodesInPath(path, DirOp.READ);
                INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
                // Update in-memory data structures
                ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsDir.getFSNamesystem(), iip);
                updateBlocks(fsDir, updateOp, iip, oldFile, ecPolicy);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId);
                }
                break;
            }
        case OP_ADD_BLOCK:
            {
                AddBlockOp addBlockOp = (AddBlockOp) op;
                String path = renameReservedPathsOnUpgrade(addBlockOp.getPath(), logVersion);
                if (FSNamesystem.LOG.isDebugEnabled()) {
                    FSNamesystem.LOG.debug(op.opCode + ": " + path + " new block id : " + addBlockOp.getLastBlock().getBlockId());
                }
                INodesInPath iip = fsDir.getINodesInPath(path, DirOp.READ);
                INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
                // add the new block to the INodeFile
                ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsDir.getFSNamesystem(), iip);
                addNewBlock(addBlockOp, oldFile, ecPolicy);
                break;
            }
        case OP_SET_REPLICATION:
            {
                SetReplicationOp setReplicationOp = (SetReplicationOp) op;
                String src = renameReservedPathsOnUpgrade(setReplicationOp.path, logVersion);
                INodesInPath iip = fsDir.getINodesInPath(src, DirOp.WRITE);
                // Clamp the logged replication to the configured min/max before applying.
                short replication = fsNamesys.getBlockManager().adjustReplication(setReplicationOp.replication);
                FSDirAttrOp.unprotectedSetReplication(fsDir, iip, replication);
                break;
            }
        case OP_CONCAT_DELETE:
            {
                ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp) op;
                String trg = renameReservedPathsOnUpgrade(concatDeleteOp.trg, logVersion);
                String[] srcs = new String[concatDeleteOp.srcs.length];
                for (int i = 0; i < srcs.length; i++) {
                    srcs[i] = renameReservedPathsOnUpgrade(concatDeleteOp.srcs[i], logVersion);
                }
                INodesInPath targetIIP = fsDir.getINodesInPath(trg, DirOp.WRITE);
                INodeFile[] srcFiles = new INodeFile[srcs.length];
                for (int i = 0; i < srcs.length; i++) {
                    INodesInPath srcIIP = fsDir.getINodesInPath(srcs[i], DirOp.WRITE);
                    srcFiles[i] = srcIIP.getLastINode().asFile();
                }
                FSDirConcatOp.unprotectedConcat(fsDir, targetIIP, srcFiles, concatDeleteOp.timestamp);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(concatDeleteOp.rpcClientId, concatDeleteOp.rpcCallId);
                }
                break;
            }
        case OP_RENAME_OLD:
            {
                RenameOldOp renameOp = (RenameOldOp) op;
                final String src = renameReservedPathsOnUpgrade(renameOp.src, logVersion);
                final String dst = renameReservedPathsOnUpgrade(renameOp.dst, logVersion);
                FSDirRenameOp.renameForEditLog(fsDir, src, dst, renameOp.timestamp);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(renameOp.rpcClientId, renameOp.rpcCallId);
                }
                break;
            }
        case OP_DELETE:
            {
                DeleteOp deleteOp = (DeleteOp) op;
                final String src = renameReservedPathsOnUpgrade(deleteOp.path, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(src, DirOp.WRITE_LINK);
                FSDirDeleteOp.deleteForEditLog(fsDir, iip, deleteOp.timestamp);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId);
                }
                break;
            }
        case OP_MKDIR:
            {
                MkdirOp mkdirOp = (MkdirOp) op;
                inodeId = getAndUpdateLastInodeId(mkdirOp.inodeId, logVersion, lastInodeId);
                FSDirMkdirOp.mkdirForEditLog(fsDir, inodeId, renameReservedPathsOnUpgrade(mkdirOp.path, logVersion), mkdirOp.permissions, mkdirOp.aclEntries, mkdirOp.timestamp);
                break;
            }
        case OP_SET_GENSTAMP_V1:
            {
                SetGenstampV1Op setGenstampV1Op = (SetGenstampV1Op) op;
                blockManager.getBlockIdManager().setLegacyGenerationStamp(setGenstampV1Op.genStampV1);
                break;
            }
        case OP_SET_PERMISSIONS:
            {
                SetPermissionsOp setPermissionsOp = (SetPermissionsOp) op;
                final String src = renameReservedPathsOnUpgrade(setPermissionsOp.src, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(src, DirOp.WRITE);
                FSDirAttrOp.unprotectedSetPermission(fsDir, iip, setPermissionsOp.permissions);
                break;
            }
        case OP_SET_OWNER:
            {
                SetOwnerOp setOwnerOp = (SetOwnerOp) op;
                final String src = renameReservedPathsOnUpgrade(setOwnerOp.src, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(src, DirOp.WRITE);
                FSDirAttrOp.unprotectedSetOwner(fsDir, iip, setOwnerOp.username, setOwnerOp.groupname);
                break;
            }
        case OP_SET_NS_QUOTA:
            {
                SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp) op;
                final String src = renameReservedPathsOnUpgrade(setNSQuotaOp.src, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(src, DirOp.WRITE);
                // Only the namespace quota is set; the diskspace quota is left untouched.
                FSDirAttrOp.unprotectedSetQuota(fsDir, iip, setNSQuotaOp.nsQuota, HdfsConstants.QUOTA_DONT_SET, null);
                break;
            }
        case OP_CLEAR_NS_QUOTA:
            {
                ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp) op;
                final String src = renameReservedPathsOnUpgrade(clearNSQuotaOp.src, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(src, DirOp.WRITE);
                FSDirAttrOp.unprotectedSetQuota(fsDir, iip, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET, null);
                break;
            }
        case OP_SET_QUOTA:
            {
                SetQuotaOp setQuotaOp = (SetQuotaOp) op;
                final String src = renameReservedPathsOnUpgrade(setQuotaOp.src, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(src, DirOp.WRITE);
                FSDirAttrOp.unprotectedSetQuota(fsDir, iip, setQuotaOp.nsQuota, setQuotaOp.dsQuota, null);
                break;
            }
        case OP_SET_QUOTA_BY_STORAGETYPE:
            {
                FSEditLogOp.SetQuotaByStorageTypeOp setQuotaByStorageTypeOp = (FSEditLogOp.SetQuotaByStorageTypeOp) op;
                final String src = renameReservedPathsOnUpgrade(setQuotaByStorageTypeOp.src, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(src, DirOp.WRITE);
                FSDirAttrOp.unprotectedSetQuota(fsDir, iip, HdfsConstants.QUOTA_DONT_SET, setQuotaByStorageTypeOp.dsQuota, setQuotaByStorageTypeOp.type);
                break;
            }
        case OP_TIMES:
            {
                TimesOp timesOp = (TimesOp) op;
                final String src = renameReservedPathsOnUpgrade(timesOp.path, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(src, DirOp.WRITE);
                FSDirAttrOp.unprotectedSetTimes(fsDir, iip, timesOp.mtime, timesOp.atime, true);
                break;
            }
        case OP_SYMLINK:
            {
                if (!FileSystem.areSymlinksEnabled()) {
                    throw new IOException("Symlinks not supported - please remove symlink before upgrading to this version of HDFS");
                }
                SymlinkOp symlinkOp = (SymlinkOp) op;
                inodeId = getAndUpdateLastInodeId(symlinkOp.inodeId, logVersion, lastInodeId);
                final String path = renameReservedPathsOnUpgrade(symlinkOp.path, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(path, DirOp.WRITE_LINK);
                FSDirSymlinkOp.unprotectedAddSymlink(fsDir, iip.getExistingINodes(), iip.getLastLocalName(), inodeId, symlinkOp.value, symlinkOp.mtime, symlinkOp.atime, symlinkOp.permissionStatus);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(symlinkOp.rpcClientId, symlinkOp.rpcCallId);
                }
                break;
            }
        case OP_RENAME:
            {
                RenameOp renameOp = (RenameOp) op;
                FSDirRenameOp.renameForEditLog(fsDir, renameReservedPathsOnUpgrade(renameOp.src, logVersion), renameReservedPathsOnUpgrade(renameOp.dst, logVersion), renameOp.timestamp, renameOp.options);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(renameOp.rpcClientId, renameOp.rpcCallId);
                }
                break;
            }
        case OP_GET_DELEGATION_TOKEN:
            {
                GetDelegationTokenOp getDelegationTokenOp = (GetDelegationTokenOp) op;
                fsNamesys.getDelegationTokenSecretManager().addPersistedDelegationToken(getDelegationTokenOp.token, getDelegationTokenOp.expiryTime);
                break;
            }
        case OP_RENEW_DELEGATION_TOKEN:
            {
                RenewDelegationTokenOp renewDelegationTokenOp = (RenewDelegationTokenOp) op;
                fsNamesys.getDelegationTokenSecretManager().updatePersistedTokenRenewal(renewDelegationTokenOp.token, renewDelegationTokenOp.expiryTime);
                break;
            }
        case OP_CANCEL_DELEGATION_TOKEN:
            {
                CancelDelegationTokenOp cancelDelegationTokenOp = (CancelDelegationTokenOp) op;
                fsNamesys.getDelegationTokenSecretManager().updatePersistedTokenCancellation(cancelDelegationTokenOp.token);
                break;
            }
        case OP_UPDATE_MASTER_KEY:
            {
                UpdateMasterKeyOp updateMasterKeyOp = (UpdateMasterKeyOp) op;
                fsNamesys.getDelegationTokenSecretManager().updatePersistedMasterKey(updateMasterKeyOp.key);
                break;
            }
        case OP_REASSIGN_LEASE:
            {
                ReassignLeaseOp reassignLeaseOp = (ReassignLeaseOp) op;
                Lease lease = fsNamesys.leaseManager.getLease(reassignLeaseOp.leaseHolder);
                final String path = renameReservedPathsOnUpgrade(reassignLeaseOp.path, logVersion);
                INodeFile pendingFile = fsDir.getINode(path, DirOp.READ).asFile();
                Preconditions.checkState(pendingFile.isUnderConstruction());
                fsNamesys.reassignLeaseInternal(lease, reassignLeaseOp.newHolder, pendingFile);
                break;
            }
        case OP_START_LOG_SEGMENT:
        case OP_END_LOG_SEGMENT:
            {
                // no data in here currently.
                break;
            }
        case OP_CREATE_SNAPSHOT:
            {
                CreateSnapshotOp createSnapshotOp = (CreateSnapshotOp) op;
                final String snapshotRoot = renameReservedPathsOnUpgrade(createSnapshotOp.snapshotRoot, logVersion);
                INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE);
                String path = fsNamesys.getSnapshotManager().createSnapshot(iip, snapshotRoot, createSnapshotOp.snapshotName);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntryWithPayload(createSnapshotOp.rpcClientId, createSnapshotOp.rpcCallId, path);
                }
                break;
            }
        case OP_DELETE_SNAPSHOT:
            {
                DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
                // Collect blocks and inodes freed by the snapshot deletion so they can
                // be removed from the blocks map and inode map below.
                BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
                List<INode> removedINodes = new ChunkedArrayList<INode>();
                final String snapshotRoot = renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot, logVersion);
                INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE);
                fsNamesys.getSnapshotManager().deleteSnapshot(iip, deleteSnapshotOp.snapshotName, new INode.ReclaimContext(fsNamesys.dir.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, null));
                fsNamesys.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
                collectedBlocks.clear();
                fsNamesys.dir.removeFromInodeMap(removedINodes);
                removedINodes.clear();
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(deleteSnapshotOp.rpcClientId, deleteSnapshotOp.rpcCallId);
                }
                break;
            }
        case OP_RENAME_SNAPSHOT:
            {
                RenameSnapshotOp renameSnapshotOp = (RenameSnapshotOp) op;
                final String snapshotRoot = renameReservedPathsOnUpgrade(renameSnapshotOp.snapshotRoot, logVersion);
                INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE);
                fsNamesys.getSnapshotManager().renameSnapshot(iip, snapshotRoot, renameSnapshotOp.snapshotOldName, renameSnapshotOp.snapshotNewName);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(renameSnapshotOp.rpcClientId, renameSnapshotOp.rpcCallId);
                }
                break;
            }
        case OP_ALLOW_SNAPSHOT:
            {
                AllowSnapshotOp allowSnapshotOp = (AllowSnapshotOp) op;
                final String snapshotRoot = renameReservedPathsOnUpgrade(allowSnapshotOp.snapshotRoot, logVersion);
                fsNamesys.getSnapshotManager().setSnapshottable(snapshotRoot, false);
                break;
            }
        case OP_DISALLOW_SNAPSHOT:
            {
                DisallowSnapshotOp disallowSnapshotOp = (DisallowSnapshotOp) op;
                final String snapshotRoot = renameReservedPathsOnUpgrade(disallowSnapshotOp.snapshotRoot, logVersion);
                fsNamesys.getSnapshotManager().resetSnapshottable(snapshotRoot);
                break;
            }
        case OP_SET_GENSTAMP_V2:
            {
                SetGenstampV2Op setGenstampV2Op = (SetGenstampV2Op) op;
                blockManager.getBlockIdManager().setGenerationStamp(setGenstampV2Op.genStampV2);
                break;
            }
        case OP_ALLOCATE_BLOCK_ID:
            {
                AllocateBlockIdOp allocateBlockIdOp = (AllocateBlockIdOp) op;
                if (BlockIdManager.isStripedBlockID(allocateBlockIdOp.blockId)) {
                    // ALLOCATE_BLOCK_ID is added for sequential block id, thus if the id
                    // is negative, it must belong to striped blocks
                    blockManager.getBlockIdManager().setLastAllocatedStripedBlockId(allocateBlockIdOp.blockId);
                } else {
                    blockManager.getBlockIdManager().setLastAllocatedContiguousBlockId(allocateBlockIdOp.blockId);
                }
                break;
            }
        case OP_ROLLING_UPGRADE_START:
            {
                if (startOpt == StartupOption.ROLLINGUPGRADE) {
                    final RollingUpgradeStartupOption rollingUpgradeOpt = startOpt.getRollingUpgradeStartupOption();
                    if (rollingUpgradeOpt == RollingUpgradeStartupOption.ROLLBACK) {
                        // Rolling back: stop replaying at the upgrade-start marker.
                        throw new RollingUpgradeOp.RollbackException();
                    }
                }
                // start rolling upgrade
                final long startTime = ((RollingUpgradeOp) op).getTime();
                fsNamesys.startRollingUpgradeInternal(startTime);
                fsNamesys.triggerRollbackCheckpoint();
                break;
            }
        case OP_ROLLING_UPGRADE_FINALIZE:
            {
                final long finalizeTime = ((RollingUpgradeOp) op).getTime();
                if (fsNamesys.isRollingUpgrade()) {
                    // Only do it when NN is actually doing rolling upgrade.
                    // We can get FINALIZE without corresponding START, if NN is restarted
                    // before this op is consumed and a new checkpoint is created.
                    fsNamesys.finalizeRollingUpgradeInternal(finalizeTime);
                }
                fsNamesys.getFSImage().updateStorageVersion();
                fsNamesys.getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE);
                break;
            }
        case OP_ADD_CACHE_DIRECTIVE:
            {
                AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op;
                CacheDirectiveInfo result = fsNamesys.getCacheManager().addDirectiveFromEditLog(addOp.directive);
                if (toAddRetryCache) {
                    Long id = result.getId();
                    fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, id);
                }
                break;
            }
        case OP_MODIFY_CACHE_DIRECTIVE:
            {
                ModifyCacheDirectiveInfoOp modifyOp = (ModifyCacheDirectiveInfoOp) op;
                fsNamesys.getCacheManager().modifyDirectiveFromEditLog(modifyOp.directive);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);
                }
                break;
            }
        case OP_REMOVE_CACHE_DIRECTIVE:
            {
                RemoveCacheDirectiveInfoOp removeOp = (RemoveCacheDirectiveInfoOp) op;
                fsNamesys.getCacheManager().removeDirective(removeOp.id, null);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);
                }
                break;
            }
        case OP_ADD_CACHE_POOL:
            {
                AddCachePoolOp addOp = (AddCachePoolOp) op;
                fsNamesys.getCacheManager().addCachePool(addOp.info);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);
                }
                break;
            }
        case OP_MODIFY_CACHE_POOL:
            {
                ModifyCachePoolOp modifyOp = (ModifyCachePoolOp) op;
                fsNamesys.getCacheManager().modifyCachePool(modifyOp.info);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);
                }
                break;
            }
        case OP_REMOVE_CACHE_POOL:
            {
                RemoveCachePoolOp removeOp = (RemoveCachePoolOp) op;
                fsNamesys.getCacheManager().removeCachePool(removeOp.poolName);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);
                }
                break;
            }
        case OP_SET_ACL:
            {
                SetAclOp setAclOp = (SetAclOp) op;
                INodesInPath iip = fsDir.getINodesInPath(setAclOp.src, DirOp.WRITE);
                FSDirAclOp.unprotectedSetAcl(fsDir, iip, setAclOp.aclEntries, true);
                break;
            }
        case OP_SET_XATTR:
            {
                SetXAttrOp setXAttrOp = (SetXAttrOp) op;
                INodesInPath iip = fsDir.getINodesInPath(setXAttrOp.src, DirOp.WRITE);
                FSDirXAttrOp.unprotectedSetXAttrs(fsDir, iip, setXAttrOp.xAttrs, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId);
                }
                break;
            }
        case OP_REMOVE_XATTR:
            {
                RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
                FSDirXAttrOp.unprotectedRemoveXAttrs(fsDir, removeXAttrOp.src, removeXAttrOp.xAttrs);
                if (toAddRetryCache) {
                    fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId, removeXAttrOp.rpcCallId);
                }
                break;
            }
        case OP_TRUNCATE:
            {
                TruncateOp truncateOp = (TruncateOp) op;
                INodesInPath iip = fsDir.getINodesInPath(truncateOp.src, DirOp.WRITE);
                FSDirTruncateOp.unprotectedTruncate(fsNamesys, iip, truncateOp.clientName, truncateOp.clientMachine, truncateOp.newLength, truncateOp.timestamp, truncateOp.truncateBlock);
                break;
            }
        case OP_SET_STORAGE_POLICY:
            {
                SetStoragePolicyOp setStoragePolicyOp = (SetStoragePolicyOp) op;
                final String path = renameReservedPathsOnUpgrade(setStoragePolicyOp.path, logVersion);
                final INodesInPath iip = fsDir.getINodesInPath(path, DirOp.WRITE);
                FSDirAttrOp.unprotectedSetStoragePolicy(fsDir, fsNamesys.getBlockManager(), iip, setStoragePolicyOp.policyId);
                break;
            }
        default:
            throw new IOException("Invalid operation read " + op.opCode);
    }
    return inodeId;
}
Also used : RollingUpgradeStartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) AddCloseOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp) SymlinkOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp) ClearNSQuotaOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) SetGenstampV2Op(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op) UpdateBlocksOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp) ChunkedArrayList(org.apache.hadoop.util.ChunkedArrayList) List(java.util.List) AllowSnapshotOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp) SetXAttrOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp) RemoveCachePoolOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp) CreateSnapshotOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CreateSnapshotOp) SetStoragePolicyOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetStoragePolicyOp) SetNSQuotaOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp) RollingUpgradeOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp) RenewDelegationTokenOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp) DeleteOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp) ConcatDeleteOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp) TimesOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp) SetPermissionsOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp) BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) AddBlockOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp) AllocateBlockIdOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp) 
DeleteSnapshotOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteSnapshotOp) RenameOldOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp) UpdateMasterKeyOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp) RenameSnapshotOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp) CancelDelegationTokenOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp) AddCachePoolOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp) SetGenstampV1Op(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op) CacheDirectiveInfo(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) ModifyCacheDirectiveInfoOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp) RemoveXAttrOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp) MkdirOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp) ReassignLeaseOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp) RemoveCacheDirectiveInfoOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp) SetOwnerOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp) TruncateOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TruncateOp) SetAclOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp) Lease(org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease) LastBlockWithStatus(org.apache.hadoop.hdfs.protocol.LastBlockWithStatus) SetReplicationOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) RenameOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp) GetDelegationTokenOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp) ConcatDeleteOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp) DisallowSnapshotOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp) 
AppendOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AppendOp) ModifyCachePoolOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp) AddCacheDirectiveInfoOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp) SetQuotaOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp)

Example 3 with BlocksMapUpdateInfo

use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

In the class FSNamesystem, the method startFileInt:

/**
 * Internal implementation of file creation: resolves and validates the
 * target path, performs the encryption-zone handshake when an encryption
 * provider is configured, and delegates to
 * {@code FSDirWriteFileOp.startFile} under the namesystem and directory
 * write locks.
 *
 * @param src path of the file to create
 * @param permissions permission status to apply to the new file
 * @param holder lease holder (client name)
 * @param clientMachine machine the client is running on (for audit/lease)
 * @param flag create flags (e.g. CREATE, OVERWRITE)
 * @param createParent whether missing parent directories should be created
 * @param replication requested replication factor
 * @param blockSize requested block size; must be >= the configured minimum
 * @param supportedVersions crypto protocol versions the client supports
 * @param logRetryCache whether to record this call in the retry cache
 * @return status of the newly created (or overwritten) file
 * @throws InvalidPathException if {@code src} is not a valid, creatable path
 * @throws IOException if the namenode is in safe mode, the block size is
 *         below the configured minimum, or the underlying create fails
 */
private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, String holder, String clientMachine, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, CryptoProtocolVersion[] supportedVersions, boolean logRetryCache) throws IOException {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        StringBuilder builder = new StringBuilder();
        builder.append("DIR* NameSystem.startFile: src=").append(src).append(", holder=").append(holder).append(", clientMachine=").append(clientMachine).append(", createParent=").append(createParent).append(", replication=").append(replication).append(", createFlag=").append(flag).append(", blockSize=").append(blockSize).append(", supportedVersions=").append(Arrays.toString(supportedVersions));
        NameNode.stateChangeLog.debug(builder.toString());
    }
    // Reject malformed paths and direct use of the reserved namespace
    // (".reserved" itself; ".reserved/raw" and ".reserved/.inodes" remain
    // usable as prefixes).
    if (!DFSUtil.isValidName(src) || FSDirectory.isExactReservedName(src) || (FSDirectory.isReservedName(src) && !FSDirectory.isReservedRawName(src) && !FSDirectory.isReservedInodesName(src))) {
        throw new InvalidPathException(src);
    }
    FSPermissionChecker pc = getPermissionChecker();
    INodesInPath iip = null;
    // until we do something that might create edits
    boolean skipSync = true;
    HdfsFileStatus stat = null;
    BlocksMapUpdateInfo toRemoveBlocks = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        // Fixed message: trailing space before the path, matching the
        // "Cannot delete " / "Cannot truncate for " messages elsewhere.
        checkNameNodeSafeMode("Cannot create file " + src);
        iip = FSDirWriteFileOp.resolvePathForStartFile(dir, pc, src, flag, createParent);
        if (!FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip)) {
            // Replication factor only applies to non-EC files.
            blockManager.verifyReplication(src, replication, clientMachine);
        }
        if (blockSize < minBlockSize) {
            throw new IOException("Specified block size is less than configured" + " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY + "): " + blockSize + " < " + minBlockSize);
        }
        FileEncryptionInfo feInfo = null;
        if (provider != null) {
            EncryptionKeyInfo ezInfo = FSDirEncryptionZoneOp.getEncryptionKeyInfo(this, iip, supportedVersions);
            // and/or EZ has not mutated
            if (ezInfo != null) {
                checkOperation(OperationCategory.WRITE);
                // Re-resolve: key generation may have dropped/reacquired
                // the lock, so the path must be validated again.
                iip = FSDirWriteFileOp.resolvePathForStartFile(dir, pc, iip.getPath(), flag, createParent);
                feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(dir, iip, ezInfo);
            }
        }
        // following might generate edits
        skipSync = false;
        toRemoveBlocks = new BlocksMapUpdateInfo();
        dir.writeLock();
        try {
            stat = FSDirWriteFileOp.startFile(this, iip, permissions, holder, clientMachine, flag, createParent, replication, blockSize, feInfo, toRemoveBlocks, logRetryCache);
        } catch (IOException e) {
            // A standby rejection generated no edits; anything else may have.
            skipSync = e instanceof StandbyException;
            throw e;
        } finally {
            dir.writeUnlock();
        }
    } finally {
        writeUnlock("create");
        // They need to be sync'ed even when an exception was thrown.
        if (!skipSync) {
            getEditLog().logSync();
            if (toRemoveBlocks != null) {
                // Blocks of an overwritten file are deleted incrementally
                // outside the namesystem lock.
                removeBlocks(toRemoveBlocks);
                toRemoveBlocks.clear();
            }
        }
    }
    return stat;
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) StandbyException(org.apache.hadoop.ipc.StandbyException) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) IOException(java.io.IOException) EncryptionKeyInfo(org.apache.hadoop.hdfs.server.namenode.FSDirEncryptionZoneOp.EncryptionKeyInfo) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) InvalidPathException(org.apache.hadoop.fs.InvalidPathException)

Example 4 with BlocksMapUpdateInfo

use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

In the class FSNamesystem, the method truncate:

/**
 * Truncate file to a lower length.
 * Truncate cannot be reverted / recovered from as it causes data loss.
 * Truncation at block boundary is atomic, otherwise it requires
 * block recovery to truncate the last block of the file.
 *
 * @param src path of the file to truncate
 * @param clientName name of the client issuing the truncate
 * @param clientMachine machine the client is running on
 * @param mtime modification time to record for the truncation
 * @return true if client does not need to wait for block recovery,
 *         false if client needs to wait for block recovery.
 * @throws HadoopIllegalArgumentException if {@code newLength} is negative
 * @throws AccessControlException if the caller lacks permission (audited)
 * @throws IOException if the namenode is in safe mode or the truncate fails
 */
boolean truncate(String src, long newLength, String clientName, String clientMachine, long mtime) throws IOException, UnresolvedLinkException {
    final String operationName = "truncate";
    requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE);
    final FSDirTruncateOp.TruncateResult r;
    try {
        NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
        if (newLength < 0) {
            throw new HadoopIllegalArgumentException("Cannot truncate to a negative file size: " + newLength + ".");
        }
        final FSPermissionChecker pc = getPermissionChecker();
        checkOperation(OperationCategory.WRITE);
        // Allocate the block-removal accumulator BEFORE taking the write
        // lock: the original allocated it between writeLock() and try, so
        // an allocation failure (e.g. OOME) would have leaked the lock.
        BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
        writeLock();
        try {
            checkOperation(OperationCategory.WRITE);
            checkNameNodeSafeMode("Cannot truncate for " + src);
            r = FSDirTruncateOp.truncate(this, src, newLength, clientName, clientMachine, mtime, toRemoveBlocks, pc);
        } finally {
            writeUnlock(operationName);
        }
        // Persist the edit before releasing blocks to the client-visible world.
        getEditLog().logSync();
        if (!toRemoveBlocks.getToDeleteList().isEmpty()) {
            // Incremental block deletion happens outside the namesystem lock.
            removeBlocks(toRemoveBlocks);
            toRemoveBlocks.clear();
        }
        logAuditEvent(true, operationName, src, null, r.getFileStatus());
    } catch (AccessControlException e) {
        // Audit the denial before propagating it to the caller.
        logAuditEvent(false, operationName, src);
        throw e;
    }
    return r.getResult();
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)

Example 5 with BlocksMapUpdateInfo

use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

In the class FSDirDeleteOp, the method deleteForEditLog:

/**
 * Delete a path from the name space and update the count at each
 * ancestor directory with quota.
 * <br>
 * Note: This is to be used by
 * {@link org.apache.hadoop.hdfs.server.namenode.FSEditLog} only.
 * <br>
 *
 * @param fsd the FSDirectory instance
 * @param iip the already-resolved {@code INodesInPath} for the path being
 *            deleted (the original javadoc documented a {@code src} String,
 *            but the method takes a resolved path)
 * @param mtime the time the inode is removed
 * @throws IOException if the snapshot check rejects the deletion
 */
static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime) throws IOException {
    // Edit-log replay path: the caller must already hold the write lock.
    assert fsd.hasWriteLock();
    FSNamesystem fsn = fsd.getFSNamesystem();
    // Accumulators for everything this delete reclaims: blocks to purge,
    // inodes removed from the tree, and under-construction files whose
    // leases must be released.
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    List<INode> removedINodes = new ChunkedArrayList<>();
    List<Long> removedUCFiles = new ChunkedArrayList<>();
    if (!deleteAllowed(iip)) {
        // Path does not resolve to a deletable inode; replay is a no-op
        // (no exception — the edit was already validated when first logged).
        return;
    }
    List<INodeDirectory> snapshottableDirs = new ArrayList<>();
    // Collects snapshottable dirs under the target; presumably throws if a
    // dir still has live snapshots — TODO confirm against FSDirSnapshotOp.
    FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
    boolean filesRemoved = unprotectedDelete(fsd, iip, new ReclaimContext(fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, removedUCFiles), mtime);
    fsn.removeSnapshottableDirs(snapshottableDirs);
    if (filesRemoved) {
        // Release leases and inodes first, then remove the collected blocks
        // and adjust the safe-mode block totals in one pass.
        fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
        fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
    }
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) ChunkedArrayList(org.apache.hadoop.util.ChunkedArrayList) ArrayList(java.util.ArrayList) ReclaimContext(org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext) ChunkedArrayList(org.apache.hadoop.util.ChunkedArrayList)

Aggregations

BlocksMapUpdateInfo (org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo)11 SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)4 AccessControlException (org.apache.hadoop.security.AccessControlException)4 ChunkedArrayList (org.apache.hadoop.util.ChunkedArrayList)3 IOException (java.io.IOException)2 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)2 ArrayList (java.util.ArrayList)1 List (java.util.List)1 HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException)1 FileEncryptionInfo (org.apache.hadoop.fs.FileEncryptionInfo)1 InvalidPathException (org.apache.hadoop.fs.InvalidPathException)1 Block (org.apache.hadoop.hdfs.protocol.Block)1 CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo)1 ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)1 LastBlockWithStatus (org.apache.hadoop.hdfs.protocol.LastBlockWithStatus)1 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)1 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)1 RollingUpgradeStartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption)1 EncryptionKeyInfo (org.apache.hadoop.hdfs.server.namenode.FSDirEncryptionZoneOp.EncryptionKeyInfo)1 AddBlockOp (org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp)1