
Example 16 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class StripedFileTestUtil, method checkData.

static void checkData(DistributedFileSystem dfs, Path srcPath, int length, List<DatanodeInfo> killedList, List<Long> oldGSList, int blkGroupSize) throws IOException {
    StripedFileTestUtil.verifyLength(dfs, srcPath, length);
    List<List<LocatedBlock>> blockGroupList = new ArrayList<>();
    LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(srcPath.toString(), 0L, Long.MAX_VALUE);
    int expectedNumGroup = 0;
    if (length > 0) {
        expectedNumGroup = (length - 1) / blkGroupSize + 1;
    }
    assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
    final ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(srcPath);
    final int cellSize = ecPolicy.getCellSize();
    final int dataBlkNum = ecPolicy.getNumDataUnits();
    final int parityBlkNum = ecPolicy.getNumParityUnits();
    int index = 0;
    for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
        Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);
        final long gs = firstBlock.getBlock().getGenerationStamp();
        final long oldGS = oldGSList != null ? oldGSList.get(index++) : -1L;
        final String s = "gs=" + gs + ", oldGS=" + oldGS;
        LOG.info(s);
        Assert.assertTrue(s, gs >= oldGS);
        LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup((LocatedStripedBlock) firstBlock, cellSize, dataBlkNum, parityBlkNum);
        blockGroupList.add(Arrays.asList(blocks));
    }
    // test each block group
    for (int group = 0; group < blockGroupList.size(); group++) {
        final boolean isLastGroup = group == blockGroupList.size() - 1;
        final int groupSize = !isLastGroup ? blkGroupSize : length - (blockGroupList.size() - 1) * blkGroupSize;
        final int numCellInGroup = (groupSize - 1) / cellSize + 1;
        final int lastCellIndex = (numCellInGroup - 1) % dataBlkNum;
        final int lastCellSize = groupSize - (numCellInGroup - 1) * cellSize;
        // get the data of this block group
        List<LocatedBlock> blockList = blockGroupList.get(group);
        byte[][] dataBlockBytes = new byte[dataBlkNum][];
        byte[][] parityBlockBytes = new byte[parityBlkNum][];
        Set<Integer> checkSet = new HashSet<>();
        // for each block, use BlockReader to read data
        for (int i = 0; i < blockList.size(); i++) {
            // Parity blocks are sized like the first (longest) data block, so map any
            // parity index to data index 0 when computing the expected block size.
            final int j = i >= dataBlkNum ? 0 : i;
            final int numCellInBlock = (numCellInGroup - 1) / dataBlkNum + (j <= lastCellIndex ? 1 : 0);
            final int blockSize = numCellInBlock * cellSize + (isLastGroup && j == lastCellIndex ? lastCellSize - cellSize : 0);
            final byte[] blockBytes = new byte[blockSize];
            if (i < dataBlkNum) {
                dataBlockBytes[i] = blockBytes;
            } else {
                parityBlockBytes[i - dataBlkNum] = blockBytes;
            }
            final LocatedBlock lb = blockList.get(i);
            LOG.info("i,j=" + i + ", " + j + ", numCellInBlock=" + numCellInBlock + ", blockSize=" + blockSize + ", lb=" + lb);
            if (lb == null) {
                continue;
            }
            final ExtendedBlock block = lb.getBlock();
            assertEquals(blockSize, block.getNumBytes());
            if (block.getNumBytes() == 0) {
                continue;
            }
            DatanodeInfo dn = blockList.get(i).getLocations()[0];
            if (!killedList.contains(dn)) {
                final BlockReader blockReader = BlockReaderTestUtil.getBlockReader(dfs, lb, 0, block.getNumBytes());
                blockReader.readAll(blockBytes, 0, (int) block.getNumBytes());
                blockReader.close();
                checkSet.add(i);
            }
        }
        LOG.info("Internal blocks to check: " + checkSet);
        // check data
        final int groupPosInFile = group * blkGroupSize;
        for (int i = 0; i < dataBlockBytes.length; i++) {
            boolean killed = false;
            if (!checkSet.contains(i)) {
                killed = true;
            }
            final byte[] actual = dataBlockBytes[i];
            for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
                final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
                Assert.assertTrue(posInFile < length);
                final byte expected = getByte(posInFile);
                if (killed) {
                    actual[posInBlk] = expected;
                } else {
                    if (expected != actual[posInBlk]) {
                        String s = "expected=" + expected + " but actual=" + actual[posInBlk] + ", posInFile=" + posInFile + ", posInBlk=" + posInBlk + ". group=" + group + ", i=" + i;
                        Assert.fail(s);
                    }
                }
            }
        }
        // check parity
        verifyParityBlocks(dfs.getConf(), lbs.getLocatedBlocks().get(group).getBlockSize(), cellSize, dataBlockBytes, parityBlockBytes, checkSet, ecPolicy.getCodecName());
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) ArrayList(java.util.ArrayList) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) ArrayList(java.util.ArrayList) List(java.util.List) HashSet(java.util.HashSet)
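The verification loop in checkData maps each byte of an internal block back to its offset in the file via StripedBlockUtil.offsetInBlkToOffsetInBG before comparing it with the expected pattern from getByte. The sketch below is an illustrative reimplementation of that mapping under the usual round-robin cell layout, not the authoritative Hadoop code; the method name offsetInBlockGroup is made up for this example.

// Illustrative sketch: map an offset inside internal data block blkIdx of a striped
// block group to an offset inside the whole group, assuming cells are written
// round-robin across the dataBlkNum data blocks (cellSize bytes per cell).
static long offsetInBlockGroup(int cellSize, int dataBlkNum, long posInBlk, int blkIdx) {
    final long cellIdxInBlk = posInBlk / cellSize;  // full cells before this byte within the block
    final long offsetInCell = posInBlk % cellSize;  // position inside the current cell
    return cellIdxInBlk * cellSize * dataBlkNum     // bytes covered by all earlier full stripes
            + (long) blkIdx * cellSize              // earlier cells within the same stripe
            + offsetInCell;
}

checkData then adds groupPosInFile (group * blkGroupSize) to this group-relative offset to obtain the absolute file position whose expected byte is compared against the data read back from the DataNode.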

Example 17 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class TestUnsetAndChangeDirectoryEcPolicy, method testUnsetEcPolicy.

/*
   * Test unsetting the erasure coding policy on a directory.
   */
@Test
public void testUnsetEcPolicy() throws Exception {
    final int numBlocks = 1;
    final int fileLen = blockGroupSize * numBlocks;
    final Path dirPath = new Path("/striped");
    final Path ecFilePath = new Path(dirPath, "ec_file");
    final Path replicateFilePath = new Path(dirPath, "3x_file");
    fs.mkdirs(dirPath);
    // Test unset a directory which has no EC policy
    fs.unsetErasureCodingPolicy(dirPath);
    // Set EC policy on directory
    fs.setErasureCodingPolicy(dirPath, ecPolicy.getName());
    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
    fs.unsetErasureCodingPolicy(dirPath);
    DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 1, 0L);
    // ec_file should still have the EC policy it was created with
    ErasureCodingPolicy tempEcPolicy = fs.getErasureCodingPolicy(ecFilePath);
    Assert.assertTrue("Erasure coding policy mismatch!", tempEcPolicy.getName().equals(ecPolicy.getName()));
    // 3x_file (replicated) should not have an EC policy
    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
    Assert.assertNull("Replicate file should not have erasure coding policy!", tempEcPolicy);
    // Directory should not return erasure coding policy
    tempEcPolicy = fs.getErasureCodingPolicy(dirPath);
    Assert.assertNull("Directory should no have erasure coding policy set!", tempEcPolicy);
    fs.delete(dirPath, true);
}
Also used : Path(org.apache.hadoop.fs.Path) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Test(org.junit.Test)
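The TestUnsetAndChangeDirectoryEcPolicy methods in this and the next two examples use fields such as fs, ecPolicy and blockGroupSize that are initialized elsewhere in the test class. The sketch below shows one plausible setup; the field names, the RS_6_3_POLICY_ID constant, the two-cell block size and the single enabled policy are assumptions for illustration and may not match the real @Before method.

// Hypothetical setup sketch for the fields used by the tests above; not the actual code.
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private ErasureCodingPolicy ecPolicy;
private int blockGroupSize;

@Before
public void setup() throws IOException {
    // Assumed: an RS_6_3_POLICY_ID constant exists next to the RS_3_2 and XOR_2_1 IDs used elsewhere.
    ecPolicy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.RS_6_3_POLICY_ID);
    final long blockSize = 2L * ecPolicy.getCellSize();  // keep internal blocks small (two cells each)
    blockGroupSize = (int) (blockSize * ecPolicy.getNumDataUnits());
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    // testChangeRootDirEcPolicy would additionally need the RS(3,2) policy enabled here.
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, ecPolicy.getName());
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits())
        .build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
}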

Example 18 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class TestUnsetAndChangeDirectoryEcPolicy, method testChangeRootDirEcPolicy.

/*
  * Test changing the EC policy on the root directory.
  */
@Test
public void testChangeRootDirEcPolicy() throws Exception {
    final int numBlocks = 1;
    final int fileLen = blockGroupSize * numBlocks;
    final Path rootPath = new Path("/");
    final Path ec63FilePath = new Path(rootPath, "ec_6_3_file");
    final Path ec32FilePath = new Path(rootPath, "ec_3_2_file");
    final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID);
    fs.unsetErasureCodingPolicy(rootPath);
    fs.setErasureCodingPolicy(rootPath, ecPolicy.getName());
    // Create RS(6,3) EC policy file
    DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L);
    // Change EC policy from RS(6,3) to RS(3,2)
    fs.setErasureCodingPolicy(rootPath, ec32Policy.getName());
    DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L);
    // Verify the results
    // ec_6_3_file should still have the RS-6-3 EC policy set
    ErasureCodingPolicy tempEcPolicy = fs.getErasureCodingPolicy(ec63FilePath);
    Assert.assertTrue("Erasure coding policy mismatch!", tempEcPolicy.getName().equals(ecPolicy.getName()));
    // ec_3_2_file should have RS-3-2 policy
    tempEcPolicy = fs.getErasureCodingPolicy(ec32FilePath);
    Assert.assertTrue("Erasure coding policy mismatch!", tempEcPolicy.getName().equals(ec32Policy.getName()));
    // Root directory should have RS-3-2 policy
    tempEcPolicy = fs.getErasureCodingPolicy(rootPath);
    Assert.assertTrue("Directory should have erasure coding policy!", tempEcPolicy.getName().equals(ec32Policy.getName()));
    fs.delete(rootPath, true);
}
Also used : Path(org.apache.hadoop.fs.Path) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Test(org.junit.Test)
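Only the policy getters that already appear in these snippets (getName, getNumDataUnits, getNumParityUnits, getCellSize) are needed to see what changing the root directory from RS(6,3) to RS(3,2) actually trades off. The describe helper below is hypothetical and purely illustrative.

// Hypothetical helper: summarize the schema of an erasure coding policy.
static void describe(ErasureCodingPolicy policy) {
    final int data = policy.getNumDataUnits();      // 6 for RS(6,3), 3 for RS(3,2)
    final int parity = policy.getNumParityUnits();  // 3 for RS(6,3), 2 for RS(3,2)
    final int cellSize = policy.getCellSize();
    System.out.println(policy.getName() + ": " + data + " data + " + parity
        + " parity cells per stripe, " + (long) data * cellSize
        + " bytes of user data per full stripe, "
        + String.format("%.0f%%", 100.0 * parity / data) + " storage overhead");
}

For RS(6,3) the storage overhead is 50% and each block group spans nine DataNodes; RS(3,2) raises the overhead to roughly 67% but needs only five DataNodes per group, which is why smaller clusters often prefer it.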

Example 19 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class TestUnsetAndChangeDirectoryEcPolicy, method testDifferentReplicaFactor.

/*
   * Test files with different replication factors.
   */
@Test
public void testDifferentReplicaFactor() throws Exception {
    final int numBlocks = 1;
    final int fileLen = blockGroupSize * numBlocks;
    final Path ecDirPath = new Path("/striped");
    final Path ecFilePath = new Path(ecDirPath, "ec_file");
    final Path replicateFilePath = new Path(ecDirPath, "rep_file");
    final Path replicateFilePath2 = new Path(ecDirPath, "rep_file2");
    fs.mkdirs(ecDirPath);
    fs.setErasureCodingPolicy(ecDirPath, ecPolicy.getName());
    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
    fs.unsetErasureCodingPolicy(ecDirPath);
    DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 3, 0L);
    DFSTestUtil.createFile(fs, replicateFilePath2, fileLen, (short) 2, 0L);
    // ec_file should have the EC policy set
    ErasureCodingPolicy tempEcPolicy = fs.getErasureCodingPolicy(ecFilePath);
    Assert.assertTrue("Erasure coding policy mismatch!", tempEcPolicy.getName().equals(ecPolicy.getName()));
    // rep_file should not have EC policy set
    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
    Assert.assertNull("Replicate file should not have erasure coding policy!", tempEcPolicy);
    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath2);
    Assert.assertNull("Replicate file should not have erasure coding policy!", tempEcPolicy);
    // Directory should not return erasure coding policy
    tempEcPolicy = fs.getErasureCodingPolicy(ecDirPath);
    Assert.assertNull("Directory should not have erasure coding policy set!", tempEcPolicy);
    fs.delete(ecDirPath, true);
}
Also used : Path(org.apache.hadoop.fs.Path) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Test(org.junit.Test)
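As a complement to the policy assertions above, the block type and the requested replication factors can also be checked directly. The extra assertions below are illustrative and not part of the original test; they reuse only calls already shown in these snippets (getClient().getLocatedBlocks, getFileStatus).

// Illustrative follow-up checks (not in the original test):
// the EC file's first block is a striped block group, while the replicated
// files return plain located blocks and keep their requested replication factors.
LocatedBlocks ecBlocks = fs.getClient().getLocatedBlocks(ecFilePath.toString(), 0L, Long.MAX_VALUE);
Assert.assertTrue(ecBlocks.get(0) instanceof LocatedStripedBlock);
LocatedBlocks repBlocks = fs.getClient().getLocatedBlocks(replicateFilePath.toString(), 0L, Long.MAX_VALUE);
Assert.assertFalse(repBlocks.get(0) instanceof LocatedStripedBlock);
Assert.assertEquals(3, fs.getFileStatus(replicateFilePath).getReplication());
Assert.assertEquals(2, fs.getFileStatus(replicateFilePath2).getReplication());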

Example 20 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class TestOfflineImageViewer, method createOriginalFSImage.

// Create a populated namespace for later testing. Save its contents to a
// data structure and store its fsimage location.
// We only want to generate the fsimage file once and use it for
// multiple tests.
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    tempDir = Files.createTempDir();
    MiniDFSCluster cluster = null;
    try {
        final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID);
        Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
        conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, ecPolicy.getName());
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        // Create a reasonable namespace
        for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
            Path dir = new Path("/dir" + i);
            hdfs.mkdirs(dir);
            writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
            for (int j = 0; j < FILES_PER_DIR; j++) {
                Path file = new Path(dir, "file" + j);
                FSDataOutputStream o = hdfs.create(file);
                o.write(23);
                o.close();
                writtenFiles.put(file.toString(), pathToFileEntry(hdfs, file.toString()));
            }
        }
        // Create an empty directory
        Path emptydir = new Path("/emptydir");
        hdfs.mkdirs(emptydir);
        dirCount++;
        writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
        // Create a directory whose name should be escaped in XML
        Path invalidXMLDir = new Path("/dirContainingInvalidXMLCharhere");
        hdfs.mkdirs(invalidXMLDir);
        dirCount++;
        // Create a directory with sticky bits
        Path stickyBitDir = new Path("/stickyBit");
        hdfs.mkdirs(stickyBitDir);
        hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true));
        dirCount++;
        writtenFiles.put(stickyBitDir.toString(), hdfs.getFileStatus(stickyBitDir));
        // Get delegation tokens so we log the delegation token op
        Token<?>[] delegationTokens = hdfs.addDelegationTokens(TEST_RENEWER, null);
        for (Token<?> t : delegationTokens) {
            LOG.debug("got token " + t);
        }
        // Create INodeReference
        final Path src = new Path("/src");
        hdfs.mkdirs(src);
        dirCount++;
        writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
        // Create snapshot and snapshotDiff.
        final Path orig = new Path("/src/orig");
        hdfs.mkdirs(orig);
        final Path file1 = new Path("/src/file");
        FSDataOutputStream o = hdfs.create(file1);
        o.write(23);
        o.write(45);
        o.close();
        hdfs.allowSnapshot(src);
        hdfs.createSnapshot(src, "snapshot");
        final Path dst = new Path("/dst");
        // Rename a directory in the snapshot directory to add snapshotCopy
        // field to the dirDiff entry.
        hdfs.rename(orig, dst);
        dirCount++;
        writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));
        // Truncate a file in the snapshot directory to add snapshotCopy and
        // blocks fields to the fileDiff entry.
        hdfs.truncate(file1, 1);
        writtenFiles.put(file1.toString(), hdfs.getFileStatus(file1));
        // Set XAttrs so the fsimage contains XAttr ops
        final Path xattr = new Path("/xattr");
        hdfs.mkdirs(xattr);
        dirCount++;
        hdfs.setXAttr(xattr, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
        hdfs.setXAttr(xattr, "user.a2", new byte[] { 0x37, 0x38, 0x39 });
        // OIV should be able to handle empty value XAttrs
        hdfs.setXAttr(xattr, "user.a3", null);
        // OIV should be able to handle XAttr values that can't be expressed
        // as UTF8
        hdfs.setXAttr(xattr, "user.a4", new byte[] { -0x3d, 0x28 });
        writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
        // Set ACLs
        hdfs.setAcl(xattr, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
        // Create an Erasure Coded dir
        Path ecDir = new Path("/ec");
        hdfs.mkdirs(ecDir);
        dirCount++;
        hdfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
        writtenFiles.put(ecDir.toString(), hdfs.getFileStatus(ecDir));
        // Create an empty Erasure Coded file
        Path emptyECFile = new Path(ecDir, "EmptyECFile.txt");
        hdfs.create(emptyECFile).close();
        writtenFiles.put(emptyECFile.toString(), pathToFileEntry(hdfs, emptyECFile.toString()));
        filesECCount++;
        // Create a small Erasure Coded file
        Path smallECFile = new Path(ecDir, "SmallECFile.txt");
        FSDataOutputStream out = hdfs.create(smallECFile);
        Random r = new Random();
        byte[] bytes = new byte[1024 * 10];
        r.nextBytes(bytes);
        out.write(bytes);
        // Close the stream so the small EC file is finalized before the namespace is saved.
        out.close();
        writtenFiles.put(smallECFile.toString(), pathToFileEntry(hdfs, smallECFile.toString()));
        filesECCount++;
        // Write results to the fsimage file
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
        // Determine location of fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Token(org.apache.hadoop.security.token.Token) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Random(java.util.Random) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission) BeforeClass(org.junit.BeforeClass)
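The image saved here is consumed by the other tests in TestOfflineImageViewer. A hedged sketch of one such use is shown below; it assumes PBImageXmlWriter offers a (Configuration, PrintStream) constructor and a visit(RandomAccessFile) method (signatures may vary by release), and the final assertion is illustrative only.

// Sketch: dump the saved fsimage to XML with the offline image viewer.
ByteArrayOutputStream output = new ByteArrayOutputStream();
PrintStream o = new PrintStream(output);
PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
v.visit(new RandomAccessFile(originalFsimage, "r"));
o.close();
// The dump should at least be non-empty and ready for further inspection.
Assert.assertFalse(output.toString().isEmpty());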

Aggregations

ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy): 46
Path (org.apache.hadoop.fs.Path): 18
Test (org.junit.Test): 16
IOException (java.io.IOException): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 4
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 4
ServiceException (com.google.protobuf.ServiceException): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
BlockType (org.apache.hadoop.hdfs.protocol.BlockType): 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 3
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 3
ActionException (org.smartdata.action.ActionException): 3
ByteString (com.google.protobuf.ByteString): 2
HashSet (java.util.HashSet): 2
List (java.util.List): 2
Random (java.util.Random): 2