
Example 41 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

In class FSDirWriteFileOp, method storeAllocatedBlock.

/**
   * Part II of getAdditionalBlock().
   * Should repeat the same analysis of the file state as in Part I,
   * but under the write lock.
   * If the conditions still hold, then allocate a new block with
   * the new targets, add it to the INode and to the BlocksMap.
   */
static LocatedBlock storeAllocatedBlock(FSNamesystem fsn, String src, long fileId, String clientName, ExtendedBlock previous, DatanodeStorageInfo[] targets) throws IOException {
    long offset;
    // Run the full analysis again, since things could have changed
    // while chooseTarget() was executing.
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    INodesInPath iip = fsn.dir.resolvePath(null, src, fileId);
    FileState fileState = analyzeFileState(fsn, iip, fileId, clientName, previous, onRetryBlock);
    final INodeFile pendingFile = fileState.inode;
    src = fileState.path;
    if (onRetryBlock[0] != null) {
        if (onRetryBlock[0].getLocations().length > 0) {
            // This is a retry. Just return the last block if it has locations.
            return onRetryBlock[0];
        } else {
            // add new chosen targets to already allocated block and return
            BlockInfo lastBlockInFile = pendingFile.getLastBlock();
            lastBlockInFile.getUnderConstructionFeature().setExpectedLocations(lastBlockInFile, targets, pendingFile.getBlockType());
            offset = pendingFile.computeFileSize();
            return makeLocatedBlock(fsn, lastBlockInFile, targets, offset);
        }
    }
    // commit the last block and complete it if it has minimum replicas
    fsn.commitOrCompleteLastBlock(pendingFile, fileState.iip, ExtendedBlock.getLocalBlock(previous));
    // allocate new block, record block locations in INode.
    final BlockType blockType = pendingFile.getBlockType();
    Block newBlock = fsn.createNewBlock(blockType);
    INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
    saveAllocatedBlock(fsn, src, inodesInPath, newBlock, targets, blockType);
    persistNewBlock(fsn, src, pendingFile);
    offset = pendingFile.computeFileSize();
    // Return located block
    return makeLocatedBlock(fsn, fsn.getStoredBlock(newBlock), targets, offset);
}
Also used : BlockType(org.apache.hadoop.hdfs.protocol.BlockType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
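
The Javadoc above describes a check-then-recheck pattern: the file state is analyzed once outside the write lock (Part I) and re-validated under it (Part II) before any mutation. A minimal generic sketch of that pattern follows; it is hypothetical, with invented names, and is not taken from the Hadoop source:

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical illustration of the two-phase pattern behind
// getAdditionalBlock(): run the analysis without the write lock, then
// repeat it under the lock, since state may have changed in between.
class TwoPhaseAllocator {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private long nextBlockId;

    long allocateBlock(long expectedId) {
        // Part I: validate under the read lock only (concurrent writers
        // may still change the state after we release it).
        lock.readLock().lock();
        try {
            checkState(expectedId);
        } finally {
            lock.readLock().unlock();
        }
        // Part II: repeat the same validation under the write lock;
        // mutate only if the conditions still hold.
        lock.writeLock().lock();
        try {
            checkState(expectedId);
            return nextBlockId++;
        } finally {
            lock.writeLock().unlock();
        }
    }

    private void checkState(long expectedId) {
        if (nextBlockId != expectedId) {
            throw new IllegalStateException("state changed; caller should retry");
        }
    }
}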

Example 42 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

In class FSDirWriteFileOp, method makeLocatedBlock.

static LocatedBlock makeLocatedBlock(FSNamesystem fsn, BlockInfo blk, DatanodeStorageInfo[] locs, long offset) throws IOException {
    LocatedBlock lBlk = BlockManager.newLocatedBlock(fsn.getExtendedBlock(new Block(blk)), blk, locs, offset);
    fsn.getBlockManager().setBlockToken(lBlk, BlockTokenIdentifier.AccessMode.WRITE);
    return lBlk;
}
Also used : LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
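
makeLocatedBlock is the NameNode-side factory; the resulting LocatedBlock travels back to the client over RPC. A hedged sketch (hypothetical helper name, not from the Hadoop source) of the fields a caller typically inspects on it:

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Hypothetical helper: dump the block identity, file offset, and replica
// locations carried by a LocatedBlock returned from the NameNode.
static void printLocatedBlock(LocatedBlock lb) {
    ExtendedBlock block = lb.getBlock();
    System.out.println("block=" + block.getBlockName()
        + " size=" + block.getNumBytes()
        + " fileOffset=" + lb.getStartOffset());
    for (DatanodeInfo dn : lb.getLocations()) {
        System.out.println("  replica on " + dn.getXferAddr());
    }
}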

Example 43 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

In class DFSTestUtil, method verifyFileReplicasOnStorageType.

/**
   * Helper function that verifies the blocks of a file are placed on the
   * expected storage type.
   *
   * @param fs The file system containing the file.
   * @param client The DFS client used to access the file.
   * @param path Path to the file to verify.
   * @param storageType The expected storage type.
   * @return true if the file exists and its blocks are located on the
   *         expected storage type; false otherwise.
   */
public static boolean verifyFileReplicasOnStorageType(FileSystem fs, DFSClient client, Path path, StorageType storageType) throws IOException {
    if (!fs.exists(path)) {
        LOG.info("verifyFileReplicasOnStorageType: file " + path + "does not exist");
        return false;
    }
    long fileLength = client.getFileInfo(path.toString()).getLen();
    LocatedBlocks locatedBlocks = client.getLocatedBlocks(path.toString(), 0, fileLength);
    for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
        if (locatedBlock.getStorageTypes()[0] != storageType) {
            LOG.info("verifyFileReplicasOnStorageType: for file " + path + ". Expect blk" + locatedBlock + " on Type: " + storageType + ". Actual Type: " + locatedBlock.getStorageTypes()[0]);
            return false;
        }
    }
    return true;
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
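
A minimal sketch of how this helper might be invoked from a test. The cluster fixture, the file name, and the choice of SSD storage are assumptions for illustration, not part of the original:

// Hypothetical test snippet: write a small file, then assert that all of
// its replicas landed on SSD storage. Assumes `cluster` is a running
// MiniDFSCluster whose DataNodes were configured with SSD volumes.
FileSystem fs = cluster.getFileSystem();
Path testFile = new Path("/verifyStorageType.dat");
DFSTestUtil.createFile(fs, testFile, 1024L, (short) 1, 0xBEEFL);
DFSClient client = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
assertTrue(DFSTestUtil.verifyFileReplicasOnStorageType(
    fs, client, testFile, StorageType.SSD));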

Example 44 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

In class TestPersistBlocks, method testRestartDfsWithAbandonedBlock.

@Test
public void testRestartDfsWithAbandonedBlock() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // Turn off persistent IPC, so that the DFSClient can survive NN restart
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
    MiniDFSCluster cluster = null;
    long len = 0;
    FSDataOutputStream stream;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        FileSystem fs = cluster.getFileSystem();
        // Create the file with a 4096-byte block size so the write spans multiple blocks
        stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
        stream.write(DATA_BEFORE_RESTART);
        stream.hflush();
        // Wait for all of the blocks to get through
        while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
            FileStatus status = fs.getFileStatus(FILE_PATH);
            len = status.getLen();
            Thread.sleep(100);
        }
        // Abandon the last block
        DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
        HdfsFileStatus fileStatus = dfsclient.getNamenode().getFileInfo(FILE_NAME);
        LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(FILE_NAME, 0, BLOCK_SIZE * NUM_BLOCKS);
        assertEquals(NUM_BLOCKS, blocks.getLocatedBlocks().size());
        LocatedBlock b = blocks.getLastLocatedBlock();
        dfsclient.getNamenode().abandonBlock(b.getBlock(), fileStatus.getFileId(), FILE_NAME, dfsclient.clientName);
        // explicitly do NOT close the file.
        cluster.restartNameNode();
        // Check that the file length equals the pre-restart length minus the
        // abandoned block, which means the remaining blocks were successfully
        // persisted to the edit log
        FileStatus status = fs.getFileStatus(FILE_PATH);
        assertTrue("Length incorrect: " + status.getLen(), status.getLen() == len - BLOCK_SIZE);
        // Verify the data showed up from before restart, sans abandoned block.
        FSDataInputStream readStream = fs.open(FILE_PATH);
        try {
            byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
            IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
            byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
            System.arraycopy(DATA_BEFORE_RESTART, 0, expectedBuf, 0, expectedBuf.length);
            assertArrayEquals(expectedBuf, verifyBuf);
        } finally {
            IOUtils.closeStream(readStream);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Example 45 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

In class TestPipelines, method pipeline_01.

/**
   * Creates and closes a file of a certain length.
   * Calls append to allow the next write() operation to add to the end of it.
   * After the write() invocation, calls hflush() to make sure that data has
   * sunk through the pipeline, then checks the state of the last block's
   * replica. It is supposed to be in the RBW state.
   *
   * @throws IOException in case of an error
   */
@Test
public void pipeline_01() throws IOException {
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + METHOD_NAME);
    }
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
    if (LOG.isDebugEnabled()) {
        LOG.debug("Invoking append but doing nothing otherwise...");
    }
    FSDataOutputStream ofs = fs.append(filePath);
    ofs.writeBytes("Some more stuff to write");
    ((DFSOutputStream) ofs.getWrappedStream()).hflush();
    List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
    for (DataNode dn : cluster.getDataNodes()) {
        Replica r = cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
        assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
        assertEquals("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()", HdfsServerConstants.ReplicaState.RBW, r.getState());
    }
    ofs.close();
}
Also used : Path(org.apache.hadoop.fs.Path) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Replica(org.apache.hadoop.hdfs.server.datanode.Replica) Test(org.junit.Test)
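
As a hedged follow-up, here is a hypothetical helper (not part of DFSTestUtil or the test above) that factors out the per-DataNode replica-state assertion; after the output stream is closed, one would expect the state to move from RBW to FINALIZED:

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.Replica;

// Hypothetical helper: assert that every DataNode's replica of the given
// block is in the expected state, e.g. RBW right after hflush() or
// FINALIZED once the output stream has been closed.
static void assertReplicaStateOnAllNodes(MiniDFSCluster cluster,
        ExtendedBlock block, HdfsServerConstants.ReplicaState expected)
        throws IOException {
    for (DataNode dn : cluster.getDataNodes()) {
        Replica r = cluster.getFsDatasetTestUtils(dn).fetchReplica(block);
        assertEquals("Unexpected replica state on " + dn, expected, r.getState());
    }
}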

Aggregations

LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 196 usages
Test (org.junit.Test): 92 usages
Path (org.apache.hadoop.fs.Path): 86 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 72 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 49 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 49 usages
Configuration (org.apache.hadoop.conf.Configuration): 40 usages
IOException (java.io.IOException): 34 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 34 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 33 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 33 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 25 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 25 usages
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 24 usages
StorageType (org.apache.hadoop.fs.StorageType): 23 usages
ArrayList (java.util.ArrayList): 22 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 16 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 15 usages
InetSocketAddress (java.net.InetSocketAddress): 11 usages
File (java.io.File): 9 usages