Example 41 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class TestRpcProgramNfs3 method testGetattr.

@Test(timeout = 60000)
public void testGetattr() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
    long dirId = status.getFileId();
    FileHandle handle = new FileHandle(dirId);
    XDR xdr_req = new XDR();
    GETATTR3Request req = new GETATTR3Request(handle);
    req.serialize(xdr_req);
    // An attempt by an unprivileged user should fail.
    GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
    // An attempt by a privileged user should pass.
    GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus());
}
Also used : FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) GETATTR3Response(org.apache.hadoop.nfs.nfs3.response.GETATTR3Response) GETATTR3Request(org.apache.hadoop.nfs.nfs3.request.GETATTR3Request) Test(org.junit.Test)
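
For comparison, the same file attributes can be read through the public HDFS client API instead of the NFS3 GETATTR path. A minimal companion sketch, assuming cluster is the MiniDFSCluster this test class starts in its setup (the snippet is illustrative, not part of the original test):

// Hypothetical companion check: fetch the same attributes via the public
// FileSystem API (org.apache.hadoop.fs). Assumes cluster is in scope.
FileSystem fs = cluster.getFileSystem();
FileStatus st = fs.getFileStatus(new Path("/tmp/bar"));
System.out.println("len=" + st.getLen() + ", owner=" + st.getOwner());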

Example 42 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class FSDirAppendOp method appendFile.

/**
   * Append to an existing file.
   * <p>
   *
   * The method returns the last block of the file if this is a partial block,
   * which can still be used for writing more data. The client uses the
   * returned block locations to form the data pipeline for this block.<br>
   * The {@link LocatedBlock} will be null if the last block is full.
   * The client then allocates a new block with the next call using
   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock}.
   * <p>
   *
   * For description of parameters and exceptions thrown see
   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append}
   *
   * @param fsn namespace
   * @param srcArg path name
   * @param pc permission checker to check fs permission
   * @param holder client name
   * @param clientMachine client machine info
   * @param newBlock if the data is appended to a new block
   * @param logRetryCache whether to record RPC ids in editlog for retry cache
   *                      rebuilding
   * @return the last block with status
   */
static LastBlockWithStatus appendFile(final FSNamesystem fsn, final String srcArg, final FSPermissionChecker pc, final String holder, final String clientMachine, final boolean newBlock, final boolean logRetryCache) throws IOException {
    assert fsn.hasWriteLock();
    final LocatedBlock lb;
    final FSDirectory fsd = fsn.getFSDirectory();
    final INodesInPath iip;
    fsd.writeLock();
    try {
        iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
        // Verify that the destination does not exist as a directory already
        final INode inode = iip.getLastINode();
        final String path = iip.getPath();
        if (inode != null && inode.isDirectory()) {
            throw new FileAlreadyExistsException("Cannot append to directory " + path + "; already exists as a directory.");
        }
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.WRITE);
        }
        if (inode == null) {
            throw new FileNotFoundException("Failed to append to non-existent file " + path + " for client " + clientMachine);
        }
        final INodeFile file = INodeFile.valueOf(inode, path, true);
        // Appending to files with striped blocks is not supported.
        if (file.isStriped()) {
            throw new UnsupportedOperationException("Cannot append to files with striped block " + path);
        }
        BlockManager blockManager = fsd.getBlockManager();
        final BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST");
        if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
            throw new UnsupportedOperationException("Cannot append to lazy persist file " + path);
        }
        // Opening an existing file for append - may need to recover lease.
        fsn.recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, path, holder, clientMachine, false);
        final BlockInfo lastBlock = file.getLastBlock();
        // Check that the block has at least minimum replication.
        if (lastBlock != null) {
            if (lastBlock.getBlockUCState() == BlockUCState.COMMITTED) {
                throw new RetriableException(new NotReplicatedYetException("append: lastBlock=" + lastBlock + " of src=" + path + " is COMMITTED but not yet COMPLETE."));
            } else if (lastBlock.isComplete() && !blockManager.isSufficientlyReplicated(lastBlock)) {
                throw new IOException("append: lastBlock=" + lastBlock + " of src=" + path + " is not sufficiently replicated yet.");
            }
        }
        lb = prepareFileForAppend(fsn, iip, holder, clientMachine, newBlock, true, logRetryCache);
    } catch (IOException ie) {
        NameNode.stateChangeLog.warn("DIR* NameSystem.append: " + ie.getMessage());
        throw ie;
    } finally {
        fsd.writeUnlock();
    }
    HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
    if (lb != null) {
        NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file {} for {} at {} block {} block size {}", srcArg, holder, clientMachine, lb.getBlock(), lb.getBlock().getNumBytes());
    }
    return new LastBlockWithStatus(lb, stat);
}
Also used : FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) LastBlockWithStatus(org.apache.hadoop.hdfs.protocol.LastBlockWithStatus) FileNotFoundException(java.io.FileNotFoundException) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) RetriableException(org.apache.hadoop.ipc.RetriableException)
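
On the client side, the path into this method is the ordinary append call. A minimal sketch of that usage, assuming fs is an HDFS FileSystem handle and /tmp/log already exists (names are illustrative, not taken from the original example):

// Illustrative client-side append; assumes fs and an existing /tmp/log.
try (FSDataOutputStream out = fs.append(new Path("/tmp/log"))) {
    // If the file's last block is partial, the NameNode returns it via
    // LastBlockWithStatus and the client keeps writing into it; if it is
    // full, the client allocates a new block via ClientProtocol#addBlock.
    out.write("more data".getBytes(StandardCharsets.UTF_8));
    out.hflush(); // make the appended bytes visible to new readers
}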

Example 43 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class FSDirStatAndListingOp method getListing.

/**
   * Get a partial listing of the indicated directory
   *
   * We will stop when any of the following conditions is met:
   * 1) this.lsLimit files have been added
   * 2) needLocation is true AND enough files have been added such
   * that at least this.lsLimit block locations are in the response
   *
   * @param fsd FSDirectory
   * @param iip the INodesInPath instance containing all the INodes along the
   *            path
   * @param startAfter the name to start listing after
   * @param needLocation whether block locations are returned
   * @param includeStoragePolicy whether the storage policy is returned
   * @return a partial listing starting after startAfter
   */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip, byte[] startAfter, boolean needLocation, boolean includeStoragePolicy) throws IOException {
    if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
        return getReservedListing(fsd);
    }
    fsd.readLock();
    try {
        if (iip.isDotSnapshotDir()) {
            return getSnapshotsListing(fsd, iip, startAfter);
        }
        final int snapshot = iip.getPathSnapshotId();
        final INode targetNode = iip.getLastINode();
        if (targetNode == null) {
            return null;
        }
        byte parentStoragePolicy = includeStoragePolicy ? targetNode.getStoragePolicyID() : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
        if (!targetNode.isDirectory()) {
            // target INode
            return new DirectoryListing(new HdfsFileStatus[] { createFileStatus(fsd, iip, null, parentStoragePolicy, needLocation) }, 0);
        }
        final INodeDirectory dirInode = targetNode.asDirectory();
        final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
        int startChild = INodeDirectory.nextChild(contents, startAfter);
        int totalNumChildren = contents.size();
        int numOfListing = Math.min(totalNumChildren - startChild, fsd.getLsLimit());
        int locationBudget = fsd.getLsLimit();
        int listingCnt = 0;
        HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
        for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
            INode child = contents.get(startChild + i);
            byte childStoragePolicy = (includeStoragePolicy && !child.isSymlink()) ? getStoragePolicyID(child.getLocalStoragePolicyID(), parentStoragePolicy) : parentStoragePolicy;
            listing[i] = createFileStatus(fsd, iip, child, childStoragePolicy, needLocation);
            listingCnt++;
            if (listing[i] instanceof HdfsLocatedFileStatus) {
                // Once we hit lsLimit locations, stop.
                // This helps to prevent excessively large response payloads.
                // Approximate #locations with locatedBlockCount() * repl_factor
                LocatedBlocks blks = ((HdfsLocatedFileStatus) listing[i]).getBlockLocations();
                locationBudget -= (blks == null) ? 0 : blks.locatedBlockCount() * listing[i].getReplication();
            }
        }
        // truncate return array if necessary
        if (listingCnt < numOfListing) {
            listing = Arrays.copyOf(listing, listingCnt);
        }
        return new DirectoryListing(listing, totalNumChildren - startChild - listingCnt);
    } finally {
        fsd.readUnlock();
    }
}
Also used : DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) HdfsLocatedFileStatus(org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks)
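
The lsLimit bound above is what makes directory listings paginated on the wire. A sketch of the consuming loop, assuming namenode is a ClientProtocol proxy (for example from DFSClient#getNamenode()) and an illustrative path:

// Page through a large directory using DirectoryListing's cursor contract.
byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
DirectoryListing page;
do {
    page = namenode.getListing("/big/dir", startAfter, false);
    if (page == null) {
        // null mirrors the targetNode == null case above: path not found
        break;
    }
    for (HdfsFileStatus st : page.getPartialListing()) {
        System.out.println(st.getLocalName());
    }
    startAfter = page.getLastName(); // cursor for the next request
} while (page.hasMore());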

Example 44 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class TestPersistBlocks method testRestartDfsWithAbandonedBlock.

@Test
public void testRestartDfsWithAbandonedBlock() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // Turn off persistent IPC, so that the DFSClient can survive NN restart
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
    MiniDFSCluster cluster = null;
    long len = 0;
    FSDataOutputStream stream;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        FileSystem fs = cluster.getFileSystem();
        // Create the file with BLOCK_SIZE (4096) so multiple blocks are written
        stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
        stream.write(DATA_BEFORE_RESTART);
        stream.hflush();
        // Wait for all of the blocks to get through
        while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
            FileStatus status = fs.getFileStatus(FILE_PATH);
            len = status.getLen();
            Thread.sleep(100);
        }
        // Abandon the last block
        DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
        HdfsFileStatus fileStatus = dfsclient.getNamenode().getFileInfo(FILE_NAME);
        LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(FILE_NAME, 0, BLOCK_SIZE * NUM_BLOCKS);
        assertEquals(NUM_BLOCKS, blocks.getLocatedBlocks().size());
        LocatedBlock b = blocks.getLastLocatedBlock();
        dfsclient.getNamenode().abandonBlock(b.getBlock(), fileStatus.getFileId(), FILE_NAME, dfsclient.clientName);
        // explicitly do NOT close the file.
        cluster.restartNameNode();
        // Check the file length: the surviving blocks were persisted to the
        // edit log, while the abandoned block drops off, so the length is
        // exactly one block short of the pre-restart length.
        FileStatus status = fs.getFileStatus(FILE_PATH);
        assertTrue("Length incorrect: " + status.getLen(), status.getLen() == len - BLOCK_SIZE);
        // Verify the data showed up from before restart, sans abandoned block.
        FSDataInputStream readStream = fs.open(FILE_PATH);
        try {
            byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
            IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
            byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
            System.arraycopy(DATA_BEFORE_RESTART, 0, expectedBuf, 0, expectedBuf.length);
            assertArrayEquals(expectedBuf, verifyBuf);
        } finally {
            IOUtils.closeStream(readStream);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
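
The Thread.sleep polling loop in the middle of this test can also be expressed with Hadoop's test utility GenericTestUtils.waitFor (org.apache.hadoop.test). A sketch, assuming fs, FILE_PATH, BLOCK_SIZE, and NUM_BLOCKS from the test class are in scope:

// Equivalent wait: poll every 100 ms, give up after 60 s.
GenericTestUtils.waitFor(() -> {
    try {
        return fs.getFileStatus(FILE_PATH).getLen() >= BLOCK_SIZE * (NUM_BLOCKS - 1);
    } catch (IOException e) {
        return false; // transient lookup failure: keep polling
    }
}, 100, 60000);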

Example 45 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class TestBlockPlacementPolicyRackFaultTolerant method doTestChooseTargetSpecialCase.

/**
   * Test more randomly, so that special cases are covered: for example,
   * when some racks already have 2 replicas while other racks have none,
   * the policy should choose the racks that have none.
   */
private void doTestChooseTargetSpecialCase() throws Exception {
    String clientMachine = "client.foo.com";
    // One test file; the shuffle loop below repeats the check 5 times
    String src = "/testfile_1_";
    // Create the file with client machine
    HdfsFileStatus fileStatus = namesystem.startFile(src, perm, clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true, (short) 20, DEFAULT_BLOCK_SIZE, null, false);
    // Test chooseTarget for a new file
    LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine, null, null, fileStatus.getFileId(), null, null);
    doTestLocatedBlock(20, locatedBlock);
    DatanodeInfo[] locs = locatedBlock.getLocations();
    String[] storageIDs = locatedBlock.getStorageIDs();
    for (int time = 0; time < 5; time++) {
        shuffle(locs, storageIDs);
        for (int i = 1; i < locs.length; i++) {
            DatanodeInfo[] partLocs = new DatanodeInfo[i];
            String[] partStorageIDs = new String[i];
            System.arraycopy(locs, 0, partLocs, 0, i);
            System.arraycopy(storageIDs, 0, partStorageIDs, 0, i);
            for (int j = 1; j < 20 - i; j++) {
                LocatedBlock additionalLocatedBlock = nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(), locatedBlock.getBlock(), partLocs, partStorageIDs, new DatanodeInfo[0], j, clientMachine);
                doTestLocatedBlock(i + j, additionalLocatedBlock);
            }
        }
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
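
The policy being exercised is selected through NameNode configuration. A sketch of that setup, mirroring the kind of configuration the test class applies in its setup (standard key and classes, but treat the snippet as illustrative):

// Select the rack-fault-tolerant placement policy before cluster startup.
Configuration conf = new HdfsConfiguration();
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
    BlockPlacementPolicyRackFaultTolerant.class,
    BlockPlacementPolicy.class);
// The policy spreads replicas so that no rack ends up with more than
// ceil(replication / numRacks) of them.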

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus) 124
Test (org.junit.Test) 51
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle) 34
IOException (java.io.IOException) 28
InetSocketAddress (java.net.InetSocketAddress) 28
XDR (org.apache.hadoop.oncrpc.XDR) 28
AccessControlException (org.apache.hadoop.security.AccessControlException) 26
Path (org.apache.hadoop.fs.Path) 23
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException) 23
FileNotFoundException (java.io.FileNotFoundException) 16
DFSClient (org.apache.hadoop.hdfs.DFSClient) 11
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing) 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission) 10
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 9
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 8
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3) 8
FileStatus (org.apache.hadoop.fs.FileStatus) 7
Matchers.anyString (org.mockito.Matchers.anyString) 7
Configuration (org.apache.hadoop.conf.Configuration) 6