
Example 6 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

the class TestErasureCodingPolicyWithSnapshot method testSnapshotsOnErasureCodingDirAfterNNRestart.

/**
   * Tests that the erasure coding policy is still present after restarting
   * the NameNode.
   */
@Test(timeout = 120000)
public void testSnapshotsOnErasureCodingDirAfterNNRestart() throws Exception {
    final Path ecDir = new Path("/ecdir");
    fs.mkdirs(ecDir);
    fs.allowSnapshot(ecDir);
    // set erasure coding policy
    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
    final Path snap1 = fs.createSnapshot(ecDir, "snap1");
    ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap1);
    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy, ecSnap);
    // save namespace, restart namenode, and check ec policy correctness.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNode(true);
    ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(snap1);
    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy, ecSnap1);
    assertEquals("Got unexpected ecSchema", ecSnap.getSchema(), ecSnap1.getSchema());
}
Also used: Path (org.apache.hadoop.fs.Path), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), Test (org.junit.Test)
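
The test relies on fields (cluster, fs, sysDefaultPolicy) that the test class initializes elsewhere. A minimal sketch of the scaffolding it assumes, using MiniDFSCluster; the field and method names below are illustrative rather than copied from the real test class, and ErasureCodingPolicyManager.getSystemPolicies() is borrowed from Example 7 in this same codebase:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.junit.After;
import org.junit.Before;

public class TestErasureCodingPolicyWithSnapshotSketch {

    private MiniDFSCluster cluster;
    private DistributedFileSystem fs;
    private ErasureCodingPolicy sysDefaultPolicy;

    @Before
    public void setupCluster() throws IOException {
        // Pick the first system policy as the "default", as Example 7 does.
        sysDefaultPolicy = ErasureCodingPolicyManager.getSystemPolicies()[0];
        Configuration conf = new HdfsConfiguration();
        // A full block group needs data + parity DataNodes to be hosted.
        int numDNs = sysDefaultPolicy.getNumDataUnits()
            + sysDefaultPolicy.getNumParityUnits();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
    }

    @After
    public void shutdownCluster() {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}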

Example 7 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

the class TestBlockRecovery method testSafeLength.

@Test(timeout = 60000)
public void testSafeLength() throws Exception {
    // hard-coded policy, to work with the hard-coded test suite
    ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemPolicies()[0];
    RecoveringStripedBlock rBlockStriped = new RecoveringStripedBlock(rBlock, new byte[9], ecPolicy);
    BlockRecoveryWorker recoveryWorker = new BlockRecoveryWorker(dn);
    BlockRecoveryWorker.RecoveryTaskStriped recoveryTask = recoveryWorker.new RecoveryTaskStriped(rBlockStriped);
    for (int i = 0; i < blockLengthsSuite.length; i++) {
        int[] blockLengths = blockLengthsSuite[i][0];
        int safeLength = blockLengthsSuite[i][1][0];
        Map<Long, BlockRecord> syncList = new HashMap<>();
        for (int id = 0; id < blockLengths.length; id++) {
            ReplicaRecoveryInfo rInfo = new ReplicaRecoveryInfo(id, blockLengths[id], 0, null);
            syncList.put((long) id, new BlockRecord(null, null, rInfo));
        }
        Assert.assertEquals("BLOCK_LENGTHS_SUITE[" + i + "]", safeLength, recoveryTask.getSafeLength(syncList));
    }
}
Also used: RecoveringStripedBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock), HashMap (java.util.HashMap), ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), BlockRecord (org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), Matchers.anyLong (org.mockito.Matchers.anyLong), Test (org.junit.Test)
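
The assertion exercises RecoveryTaskStriped.getSafeLength. The computation behind it amounts to the following: sort the reported internal block lengths, take the d-th largest (d = number of data units), and count how many full cells it contains; the safe length is that stripe count times the full stripe width d * cellSize. A standalone sketch of that logic (it mirrors StripedBlockUtil.getSafeLength in this codebase; treat that delegation detail as an assumption, since it is not visible in this excerpt):

import java.util.Arrays;

// Sketch of the striped safe-length computation.
// Assumes blockLens.length >= numDataUnits.
static long safeLength(long[] blockLens, int numDataUnits, int cellSize) {
    long[] sorted = Arrays.copyOf(blockLens, blockLens.length);
    Arrays.sort(sorted);
    // The d-th largest internal block length bounds the number of stripes
    // that still have at least d full cells, i.e. are fully recoverable.
    long dthLargest = sorted[sorted.length - numDataUnits];
    long fullStripes = dthLargest / cellSize;
    return fullStripes * numDataUnits * cellSize;
}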

Example 8 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

the class FSDirWriteFileOp method addFileForEditLog.

static INodeFile addFileForEditLog(FSDirectory fsd, long id,
        INodesInPath existing, byte[] localName, PermissionStatus permissions,
        List<AclEntry> aclEntries, List<XAttr> xAttrs, short replication,
        long modificationTime, long atime, long preferredBlockSize,
        boolean underConstruction, String clientName, String clientMachine,
        byte storagePolicyId) {
    final INodeFile newNode;
    Preconditions.checkNotNull(existing);
    assert fsd.hasWriteLock();
    try {
        // check if the file has an EC policy
        boolean isStriped = false;
        ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
        if (ecPolicy != null) {
            isStriped = true;
        }
        final BlockType blockType = isStriped ? BlockType.STRIPED : BlockType.CONTIGUOUS;
        final Short replicationFactor = (!isStriped ? replication : null);
        final Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);
        if (underConstruction) {
            newNode = newINodeFile(id, permissions, modificationTime, modificationTime, replicationFactor, ecPolicyID, preferredBlockSize, storagePolicyId, blockType);
            newNode.toUnderConstruction(clientName, clientMachine);
        } else {
            newNode = newINodeFile(id, permissions, modificationTime, atime, replicationFactor, ecPolicyID, preferredBlockSize, storagePolicyId, blockType);
        }
        newNode.setLocalName(localName);
        INodesInPath iip = fsd.addINode(existing, newNode, permissions.getPermission());
        if (iip != null) {
            if (aclEntries != null) {
                AclStorage.updateINodeAcl(newNode, aclEntries, CURRENT_STATE_ID);
            }
            if (xAttrs != null) {
                XAttrStorage.updateINodeXAttrs(newNode, xAttrs, CURRENT_STATE_ID);
            }
            return newNode;
        }
    } catch (IOException e) {
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedAddFile: exception when add " + existing.getPath() + " to the file system", e);
        if (e instanceof FSLimitException.MaxDirectoryItemsExceededException) {
            NameNode.stateChangeLog.warn("Please increase " + "dfs.namenode.fs-limits.max-directory-items and make it " + "consistent across all NameNodes.");
        }
    }
    return null;
}
Also used: FSLimitException (org.apache.hadoop.hdfs.protocol.FSLimitException), BlockType (org.apache.hadoop.hdfs.protocol.BlockType), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), IOException (java.io.IOException)
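
The key bookkeeping above is that the replication factor and the EC policy ID are mutually exclusive on the new INodeFile: a striped file carries an ecPolicyID and a null replicationFactor, a contiguous file the reverse. A hypothetical helper restating that invariant (not part of FSDirWriteFileOp):

import org.apache.hadoop.hdfs.protocol.BlockType;

// Hypothetical check restating the invariant addFileForEditLog establishes:
// exactly one of replicationFactor / ecPolicyID is non-null, matching the
// file's block type.
static void checkLayoutFields(BlockType blockType, Short replicationFactor,
        Byte ecPolicyID) {
    if (blockType == BlockType.STRIPED) {
        assert replicationFactor == null && ecPolicyID != null
            : "striped files carry an EC policy ID, not a replication factor";
    } else {
        assert replicationFactor != null && ecPolicyID == null
            : "contiguous files carry a replication factor, not an EC policy ID";
    }
}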

Example 9 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

the class DFSTestUtil method addBlockToFile.

/**
   * Adds a block or a striped block group to a file. This method only
   * manipulates the NameNode state of the file and the block, without
   * injecting any data into the DataNodes. It does, however, mimic the
   * incremental block reports. Disable the periodic heartbeat before
   * using this method.
   * @param isStripedBlock whether the added block is a striped block
   * @param dataNodes DataNodes to host the block or block group
   * @param previous Previous block in the file
   * @param numStripes Number of stripes in each block group
   * @param len block size, if a non-striped block is added
   * @return The added block or block group
   */
public static Block addBlockToFile(boolean isStripedBlock,
        List<DataNode> dataNodes, DistributedFileSystem fs, FSNamesystem ns,
        String file, INodeFile fileNode, String clientName,
        ExtendedBlock previous, int numStripes, int len) throws Exception {
    fs.getClient().namenode.addBlock(file, clientName, previous, null, fileNode.getId(), null, null);
    final BlockInfo lastBlock = fileNode.getLastBlock();
    final int groupSize = fileNode.getPreferredBlockReplication();
    assert dataNodes.size() >= groupSize;
    // 1. RECEIVING_BLOCK IBR
    for (int i = 0; i < groupSize; i++) {
        DataNode dn = dataNodes.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i, 0, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
        StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, storage);
        for (StorageReceivedDeletedBlocks report : reports) {
            ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
        }
    }
    final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(new Path(file));
    // 2. RECEIVED_BLOCK IBR
    long blockSize = isStripedBlock ? numStripes * ecPolicy.getCellSize() : len;
    for (int i = 0; i < groupSize; i++) {
        DataNode dn = dataNodes.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i, blockSize, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
        StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
        for (StorageReceivedDeletedBlocks report : reports) {
            ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
        }
    }
    long bytes = isStripedBlock ? numStripes * ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() : len;
    lastBlock.setNumBytes(bytes);
    return lastBlock;
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks)
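
The two size computations in the method deserve spelling out: each internal block of a striped group holds one cell per stripe, so it reports numStripes * cellSize bytes, while the file-visible length of the whole group is numStripes * cellSize * numDataUnits (parity cells do not count toward file length). A small worked example; the 6-data-unit layout and 64 KiB cell size are assumptions chosen for illustration, not values read from this excerpt:

// Worked example of the block-group size arithmetic used above.
public class BlockGroupSizeExample {
    public static void main(String[] args) {
        final int numDataUnits = 6;        // data blocks per group (assumed)
        final int cellSize = 64 * 1024;    // bytes per cell (assumed)
        final int numStripes = 4;          // stripes written into the group

        // Each internal block reports one cell per stripe.
        long internalBlockSize = (long) numStripes * cellSize;  // 262144
        // Bytes visible at the file level: data cells only.
        long groupBytes = internalBlockSize * numDataUnits;     // 1572864

        System.out.println("internal block size = " + internalBlockSize);
        System.out.println("block group bytes   = " + groupBytes);
    }
}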

Example 10 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

the class DFSClient method getFileChecksum.

/**
   * Get the checksum of the whole file or of a range of the file. Note that
   * the range always starts at the beginning of the file. The file can be in
   * replicated or striped form. The checksum can be used to compare two
   * replicated files, or two striped files, but it is not applicable to two
   * files with different block layouts.
   * @param src The file path
   * @param length the length of the range, i.e., the range is [0, length]
   * @return The checksum
   * @see DistributedFileSystem#getFileChecksum(Path)
   */
public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length) throws IOException {
    checkOpen();
    Preconditions.checkArgument(length >= 0);
    LocatedBlocks blockLocations = null;
    FileChecksumHelper.FileChecksumComputer maker = null;
    ErasureCodingPolicy ecPolicy = null;
    if (length > 0) {
        blockLocations = getBlockLocations(src, length);
        ecPolicy = blockLocations.getErasureCodingPolicy();
    }
    maker = ecPolicy != null
        ? new FileChecksumHelper.StripedFileNonStripedChecksumComputer(
            src, length, blockLocations, namenode, this, ecPolicy)
        : new FileChecksumHelper.ReplicatedFileChecksumComputer(
            src, length, blockLocations, namenode, this);
    maker.compute();
    return maker.getFileChecksum();
}
Also used: LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)
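
Applications normally reach this code through DistributedFileSystem rather than calling DFSClient directly. A usage sketch; the path and range length are hypothetical, and the configuration is assumed to point at an HDFS cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Usage sketch: checksum the first `length` bytes of a file.
// FileSystem#getFileChecksum(Path, long) is the public entry point that,
// on HDFS, ends up in the DFSClient method shown above.
public class FileChecksumExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();  // assumes HDFS defaults
        try (FileSystem fs = FileSystem.get(conf)) {
            Path file = new Path("/ecdir/somefile");  // hypothetical path
            long length = 1024L * 1024L;  // checksum the range [0, length]
            FileChecksum checksum = fs.getFileChecksum(file, length);
            System.out.println(checksum);
        }
    }
}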

Aggregations

ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy): 46
Path (org.apache.hadoop.fs.Path): 18
Test (org.junit.Test): 16
IOException (java.io.IOException): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 4
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 4
ServiceException (com.google.protobuf.ServiceException): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
BlockType (org.apache.hadoop.hdfs.protocol.BlockType): 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 3
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 3
ActionException (org.smartdata.action.ActionException): 3
ByteString (com.google.protobuf.ByteString): 2
HashSet (java.util.HashSet): 2
List (java.util.List): 2
Random (java.util.Random): 2