
Example 46 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class FSDirWriteFileOp method storeAllocatedBlock.

/**
   * Part II of getAdditionalBlock().
   * Should repeat the same analysis of the file state as in Part I,
   * but under the write lock.
   * If the conditions still hold, then allocate a new block with
   * the new targets, add it to the INode and to the BlocksMap.
   */
static LocatedBlock storeAllocatedBlock(FSNamesystem fsn, String src, long fileId, String clientName, ExtendedBlock previous, DatanodeStorageInfo[] targets) throws IOException {
    long offset;
    // Run the full analysis again, since things could have changed
    // while chooseTarget() was executing.
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    INodesInPath iip = fsn.dir.resolvePath(null, src, fileId);
    FileState fileState = analyzeFileState(fsn, iip, fileId, clientName, previous, onRetryBlock);
    final INodeFile pendingFile = fileState.inode;
    src = fileState.path;
    if (onRetryBlock[0] != null) {
        if (onRetryBlock[0].getLocations().length > 0) {
            // This is a retry. Just return the last block if it already has locations.
            return onRetryBlock[0];
        } else {
            // add new chosen targets to already allocated block and return
            BlockInfo lastBlockInFile = pendingFile.getLastBlock();
            lastBlockInFile.getUnderConstructionFeature().setExpectedLocations(lastBlockInFile, targets, pendingFile.getBlockType());
            offset = pendingFile.computeFileSize();
            return makeLocatedBlock(fsn, lastBlockInFile, targets, offset);
        }
    }
    // commit the last block and complete it if it has minimum replicas
    fsn.commitOrCompleteLastBlock(pendingFile, fileState.iip, ExtendedBlock.getLocalBlock(previous));
    // allocate new block, record block locations in INode.
    final BlockType blockType = pendingFile.getBlockType();
    Block newBlock = fsn.createNewBlock(blockType);
    INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
    saveAllocatedBlock(fsn, src, inodesInPath, newBlock, targets, blockType);
    persistNewBlock(fsn, src, pendingFile);
    offset = pendingFile.computeFileSize();
    // Return located block
    return makeLocatedBlock(fsn, fsn.getStoredBlock(newBlock), targets, offset);
}
Also used : BlockType(org.apache.hadoop.hdfs.protocol.BlockType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
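
The method above receives the previous block as an ExtendedBlock (a block pool ID plus a pool-local Block) and unwraps it with ExtendedBlock.getLocalBlock() before committing it. A minimal sketch of that relationship, using made-up block pool and block values and assuming nothing beyond the ExtendedBlock and Block classes themselves:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

// Illustrative sketch only, not taken from the Hadoop sources; the block
// pool ID, block ID, length, and generation stamp are made up.
public class ExtendedBlockSketch {
    public static void main(String[] args) {
        // A pool-local block: block ID, number of bytes, generation stamp.
        Block local = new Block(1073741825L, 134217728L, 1001L);
        // An ExtendedBlock qualifies that block with its block pool ID.
        ExtendedBlock previous = new ExtendedBlock("BP-1-127.0.0.1-1400000000000", local);
        // storeAllocatedBlock() strips the pool context the same way before
        // committing the previous block.
        Block unwrapped = ExtendedBlock.getLocalBlock(previous);
        System.out.println(previous + " -> " + unwrapped);
    }
}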

Example 47 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class DFSTestUtil method createStripedFile.

/**
   * Creates the metadata of a file in striped layout. This method only
   * manipulates the NameNode state without injecting data into DataNodes.
   * You should disable the periodic heartbeat before using this.
   * @param file Path of the file to create
   * @param dir Parent path of the file
   * @param numBlocks Number of striped block groups to add to the file
   * @param numStripesPerBlk Number of striped cells in each block
   * @param toMkdir whether to create the parent directory and set the erasure
   *                coding policy on it before creating the file
   * @param ecPolicy erasure coding policy to apply to the created file. A null
   *                 value means using the default erasure coding policy.
   */
public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir, ErasureCodingPolicy ecPolicy) throws Exception {
    DistributedFileSystem dfs = cluster.getFileSystem();
    // If outer test already set EC policy, dir should be left as null
    if (toMkdir) {
        assert dir != null;
        dfs.mkdirs(dir);
        try {
            dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy.getName());
        } catch (IOException e) {
            if (!e.getMessage().contains("non-empty directory")) {
                throw e;
            }
        }
    }
    cluster.getNameNodeRpc().create(file.toString(), new FsPermission((short) 0755), dfs.getClient().getClientName(), new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)), false, (short) 1, 128 * 1024 * 1024L, null);
    FSNamesystem ns = cluster.getNamesystem();
    FSDirectory fsdir = ns.getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    ExtendedBlock previous = null;
    for (int i = 0; i < numBlocks; i++) {
        Block newBlock = addBlockToFile(true, cluster.getDataNodes(), dfs, ns, file.toString(), fileNode, dfs.getClient().getClientName(), previous, numStripesPerBlk, 0);
        previous = new ExtendedBlock(ns.getBlockPoolId(), newBlock);
    }
    dfs.getClient().namenode.complete(file.toString(), dfs.getClient().getClientName(), previous, fileNode.getId());
}
Also used : ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
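
A hedged usage sketch of the helper above; the cluster size, paths, heartbeat setting, and the choice of the built-in RS-6-3 policy are assumptions for illustration, not taken from an actual Hadoop test:

// Hypothetical caller of DFSTestUtil.createStripedFile(); all names and
// sizes here are assumptions. Needs the imports listed under "Also used"
// plus DFSConfigKeys and SystemErasureCodingPolicies.
static void createStripedFileSketch() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Push DataNode heartbeats far out, as the Javadoc above advises.
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3600L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    try {
        cluster.waitActive();
        Path dir = new Path("/ecDir");
        Path file = new Path(dir, "striped");
        ErasureCodingPolicy rs63 =
            SystemErasureCodingPolicies.getByID(SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
        // Two block groups, four stripes per group; create the directory and
        // set the policy on it as part of the call.
        DFSTestUtil.createStripedFile(cluster, file, dir, 2, 4, true, rs63);
    } finally {
        cluster.shutdown();
    }
}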

Example 48 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class DFSTestUtil method addBlockToFile.

/**
   * Adds a block or a striped block group to a file.
   * This method only manipulates the NameNode state of the file and the
   * block, without injecting data into DataNodes. It does, however, mimic
   * incremental block reports.
   * You should disable the periodic heartbeat before using this.
   * @param isStripedBlock whether the added block is a striped block group
   * @param dataNodes List of DataNodes to host the block or block group
   * @param previous Previous block in the file
   * @param numStripes Number of stripes in each block group
   * @param len block size of the added block when it is not striped
   * @return The added block or block group
   */
public static Block addBlockToFile(boolean isStripedBlock, List<DataNode> dataNodes, DistributedFileSystem fs, FSNamesystem ns, String file, INodeFile fileNode, String clientName, ExtendedBlock previous, int numStripes, int len) throws Exception {
    fs.getClient().namenode.addBlock(file, clientName, previous, null, fileNode.getId(), null, null);
    final BlockInfo lastBlock = fileNode.getLastBlock();
    final int groupSize = fileNode.getPreferredBlockReplication();
    assert dataNodes.size() >= groupSize;
    // 1. RECEIVING_BLOCK IBR
    for (int i = 0; i < groupSize; i++) {
        DataNode dn = dataNodes.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i, 0, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
        StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, storage);
        for (StorageReceivedDeletedBlocks report : reports) {
            ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
        }
    }
    final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(new Path(file));
    // 2. RECEIVED_BLOCK IBR
    long blockSize = isStripedBlock ? numStripes * ecPolicy.getCellSize() : len;
    for (int i = 0; i < groupSize; i++) {
        DataNode dn = dataNodes.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i, blockSize, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
        StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
        for (StorageReceivedDeletedBlocks report : reports) {
            ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
        }
    }
    long bytes = isStripedBlock ? numStripes * ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() : len;
    lastBlock.setNumBytes(bytes);
    return lastBlock;
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks)
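
The two size computations in addBlockToFile() are easy to check with concrete numbers. A worked example, assuming the geometry of the built-in RS-6-3 policy (1 MiB cells, 6 data units); substitute the geometry of whatever policy the file actually uses:

// Illustrative arithmetic only; the policy geometry is an assumption,
// not read from a running cluster.
int numStripes = 4;
int cellSize = 1024 * 1024;                                        // 1 MiB cells
int numDataUnits = 6;
long perReplicaBytes = (long) numStripes * cellSize;               // 4 MiB: what each internal block reports (blockSize above)
long logicalBytes = (long) numStripes * cellSize * numDataUnits;   // 24 MiB: what the block group records (bytes above)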

Example 49 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestLeaseRecovery method testBlockRecoveryWithLessMetafile.

/**
   * Block recovery when the meta file does not have CRCs for all chunks in
   * the block file.
   */
@Test
public void testBlockRecoveryWithLessMetafile() throws Exception {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, UserGroupInformation.getCurrentUser().getShortUserName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    Path file = new Path("/testRecoveryFile");
    DistributedFileSystem dfs = cluster.getFileSystem();
    FSDataOutputStream out = dfs.create(file);
    final int FILE_SIZE = 2 * 1024 * 1024;
    int count = 0;
    while (count < FILE_SIZE) {
        out.writeBytes("Data");
        count += 4;
    }
    out.hsync();
    // abort the original stream
    ((DFSOutputStream) out.getWrappedStream()).abort();
    LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(file.toString(), 0, count);
    ExtendedBlock block = locations.get(0).getBlock();
    // Calculate meta file size
    // From DataNode.java, the checksum section size is given by:
    // (length of data + BYTES_PER_CHECKSUM - 1) / BYTES_PER_CHECKSUM * CHECKSUM_SIZE
    // where CHECKSUM_SIZE is 4 bytes for both CRC32 and CRC32C.
    final int CHECKSUM_SIZE = 4;
    final int bytesPerChecksum = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    // The meta file header is 8 bytes.
    final int metaFileSize = (FILE_SIZE + bytesPerChecksum - 1) / bytesPerChecksum * CHECKSUM_SIZE + 8;
    final int newMetaFileSize = metaFileSize - CHECKSUM_SIZE;
    // Corrupt the block meta file by dropping checksum for bytesPerChecksum
    // bytes. Lease recovery is expected to recover the uncorrupted file length.
    cluster.truncateMeta(0, block, newMetaFileSize);
    // Restart the DataNode so the replica moves to the RWR state.
    DataNodeProperties dnProp = cluster.stopDataNode(0);
    cluster.restartDataNode(dnProp, true);
    // try to recover the lease
    DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem.newInstance(cluster.getConfiguration(0));
    count = 0;
    while (++count < 10 && !newdfs.recoverLease(file)) {
        Thread.sleep(1000);
    }
    assertTrue("File should be closed", newdfs.recoverLease(file));
    // Verify file length after lease recovery. The new file length should not
    // include the bytes with corrupted checksum.
    final long expectedNewFileLen = FILE_SIZE - bytesPerChecksum;
    final long newFileLen = newdfs.getFileStatus(file).getLen();
    assertEquals(expectedNewFileLen, newFileLen);
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
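
Plugging in the defaults makes the meta-file arithmetic in the test concrete. A sketch, assuming the default dfs.bytes-per-checksum of 512 bytes; the numbers are illustrative only:

// Worked numbers for the test above, under the assumed 512-byte default.
int fileSize = 2 * 1024 * 1024;                                      // 2,097,152 data bytes
int bytesPerChecksum = 512;
int checksumSize = 4;                                                // CRC32 / CRC32C
int chunks = (fileSize + bytesPerChecksum - 1) / bytesPerChecksum;   // 4,096 checksum chunks
int metaFileSize = chunks * checksumSize + 8;                        // 16,384 + 8-byte header = 16,392
int newMetaFileSize = metaFileSize - checksumSize;                   // 16,388: the last chunk's CRC is dropped
long expectedNewFileLen = fileSize - bytesPerChecksum;               // 2,096,640 bytes survive lease recovery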

Example 50 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestBlocksWithNotEnoughRacks method testSufficientlySingleReplBlockUsesNewRack.

/*
   * Like the previous test, but the block starts with a single replica,
   * so it does not start off needing additional replicas.
   */
@Test
public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 1;
    final Path filePath = new Path("/testFile");
    String[] racks = { "/rack1", "/rack1", "/rack1", "/rack2" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    try {
        // Create a file with one block with a replication factor of 1
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
        REPLICATION_FACTOR = 2;
        NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208
Test (org.junit.Test): 124
Path (org.apache.hadoop.fs.Path): 91
Configuration (org.apache.hadoop.conf.Configuration): 71
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63
FileSystem (org.apache.hadoop.fs.FileSystem): 62
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53
IOException (java.io.IOException): 41
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41
Block (org.apache.hadoop.hdfs.protocol.Block): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32
File (java.io.File): 22
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18
InetSocketAddress (java.net.InetSocketAddress): 17
ArrayList (java.util.ArrayList): 17