Example 81 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestFSImage method testHasNonEcBlockUsingStripedIDForLoadFile.

@Test
public void testHasNonEcBlockUsingStripedIDForLoadFile() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_loadfile";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loadfile";
        String clientMachine = "testMachine_loadfile";
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        TestINodeFile.toCompleteFile(file);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        //after the file carrying the non-EC block with a striped ID is deleted,
        //hasNonEcBlockUsingStripedID should be reset to false
        fs = cluster.getFileSystem();
        fs.delete(p, false);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertFalse(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Block (org.apache.hadoop.hdfs.protocol.Block), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
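
The pivot of this test is the block ID itself: HDFS allocates striped (erasure-coded) block-group IDs from the negative ID space, so the contiguous block injected above with blkId = -1 lands in that reserved range. A minimal sketch of the distinction, assuming the BlockIdManager helper from org.apache.hadoop.hdfs.server.blockmanagement (not part of the test above):

// Sketch only: how the NameNode tells the two ID spaces apart.
long legacyId = -1; // the same ID the test wires into BlockInfoContiguous
if (BlockIdManager.isStripedBlockID(legacyId)) {
    // The ID sits in the range reserved for EC block groups, yet the block
    // is contiguous, so fsimage loading records it via
    // hasNonEcBlockUsingStripedID to keep block-ID lookups unambiguous.
}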

Example 82 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestFSImage method testHasNonEcBlockUsingStripedIDForLoadSnapshot.

@Test
public void testHasNonEcBlockUsingStripedIDForLoadSnapshot() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_loadSnapshot";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loadSnapshot";
        String clientMachine = "testMachine_loadSnapshot";
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        Path d = new Path(testDir);
        fs.mkdir(d, new FsPermission("755"));
        fs.allowSnapshot(d);
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        TestINodeFile.toCompleteFile(file);
        fs.createSnapshot(d, "testHasNonEcBlockUsingStripeID");
        fs.truncate(p, 0);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Block (org.apache.hadoop.hdfs.protocol.Block), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
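
The snapshot variant hinges on fs.truncate(p, 0): the block disappears from the live file but stays referenced by the snapshot, so the flag has to be rebuilt from the snapshot sections of the fsimage. A hedged sketch of reaching that retained copy through the standard .snapshot path convention (FileStatus is org.apache.hadoop.fs.FileStatus; the names are taken from the test above):

// Sketch only: the pre-truncate file is still reachable via the snapshot.
Path inSnapshot = new Path(
    "/test_block_manager/.snapshot/testHasNonEcBlockUsingStripeID/testfile_loadSnapshot");
FileStatus snapshotCopy = fs.getFileStatus(inSnapshot);
// The snapshot pins the striped-ID block, which is why the assertion after
// the restart still expects hasNonEcBlockUsingStripedID() to be true.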

Example 83 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestFSImageWithAcl method testRootACLAfterLoadingFsImage.

@Test
public void testRootACLAfterLoadingFsImage() throws IOException {
    DistributedFileSystem fs = cluster.getFileSystem();
    Path rootdir = new Path("/");
    AclEntry e1 = new AclEntry.Builder().setName("foo").setPermission(ALL).setScope(ACCESS).setType(GROUP).build();
    AclEntry e2 = new AclEntry.Builder().setName("bar").setPermission(READ).setScope(ACCESS).setType(GROUP).build();
    fs.modifyAclEntries(rootdir, Lists.newArrayList(e1, e2));
    AclStatus s = cluster.getNamesystem().getAclStatus(rootdir.toString());
    AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
    Assert.assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ), aclEntry(ACCESS, GROUP, "foo", ALL) }, returned);
    // restart - hence save and load from fsimage
    restart(fs, true);
    s = cluster.getNamesystem().getAclStatus(rootdir.toString());
    returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
    Assert.assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ), aclEntry(ACCESS, GROUP, "foo", ALL) }, returned);
}
Also used: Path (org.apache.hadoop.fs.Path), AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
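
Three entries come back even though only two were added: once named entries exist, HDFS also surfaces the unnamed group entry, whose permission bits double as the ACL mask. The same check can be made through the public FileSystem API instead of FSNamesystem; a small sketch, using the types already listed above:

// Sketch only: reading the root ACL through DistributedFileSystem.
AclStatus status = fs.getAclStatus(new Path("/"));
for (AclEntry entry : status.getEntries()) {
    // Expected per the assertions above:
    // group::r-x, group:bar:r--, group:foo:rwx
    System.out.println(entry);
}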

Example 84 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestFsck method testFsckCorruptECFile.

@Test(timeout = 300000)
public void testFsckCorruptECFile() throws Exception {
    DistributedFileSystem fs = null;
    int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
    int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
    int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
    int totalSize = dataBlocks + parityBlocks;
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
    fs = cluster.getFileSystem();
    Map<Integer, Integer> dnIndices = new HashMap<>();
    ArrayList<DataNode> dnList = cluster.getDataNodes();
    for (int i = 0; i < totalSize; i++) {
        dnIndices.put(dnList.get(i).getIpcPort(), i);
    }
    // create file
    Path ecDirPath = new Path("/striped");
    fs.mkdir(ecDirPath, FsPermission.getDirDefault());
    fs.getClient().setErasureCodingPolicy(ecDirPath.toString(), StripedFileTestUtil.getDefaultECPolicy().getName());
    Path file = new Path(ecDirPath, "corrupted");
    final int length = cellSize * dataBlocks;
    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
    DFSTestUtil.writeFile(fs, file, bytes);
    LocatedStripedBlock lsb = (LocatedStripedBlock) fs.getClient().getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
    final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(lsb, cellSize, dataBlocks, parityBlocks);
    // make an unrecoverable ec file with corrupted blocks
    for (int i = 0; i < parityBlocks + 1; i++) {
        int ipcPort = blks[i].getLocations()[0].getIpcPort();
        int dnIndex = dnIndices.get(ipcPort);
        File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
        File blkFile = MiniDFSCluster.getBlockFile(storageDir, blks[i].getBlock());
        Assert.assertTrue("Block file does not exist", blkFile.exists());
        try (FileOutputStream out = new FileOutputStream(blkFile)) {
            // overwrite the block file so its contents no longer match the stored checksum
            out.write("corruption".getBytes());
        }
    }
    // Disable DataNode heartbeats so the corrupted block records are kept in the NameNode
    for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    // Read the file to trigger reportBadBlocks
    try {
        IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf, true);
    } catch (IOException ie) {
        assertTrue(ie.getMessage().contains("missingChunksNum=" + (parityBlocks + 1)));
    }
    waitForUnrecoverableBlockGroup(conf);
    String outStr = runFsck(conf, 1, true, "/");
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
    assertTrue(outStr.contains("Under-erasure-coded block groups:\t0"));
    outStr = runFsck(conf, -1, true, "/", "-list-corruptfileblocks");
    assertTrue(outStr.contains("has 1 CORRUPT files"));
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HashMap (java.util.HashMap), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), IOUtils (org.apache.hadoop.io.IOUtils), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileOutputStream (java.io.FileOutputStream), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), Test (org.junit.Test)
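
With the RS(6,3) layout behind StripedFileTestUtil.getDefaultECPolicy(), the loop corrupts parityBlocks + 1 = 4 blocks, one more than the policy can reconstruct, which is what makes the block group unrecoverable. The runFsck helper wraps the same Tool that backs the hdfs fsck command; a hedged sketch of invoking it directly (DFSck lives in org.apache.hadoop.hdfs.tools and ToolRunner in org.apache.hadoop.util; the wiring here is illustrative):

// Sketch only: programmatic equivalent of
// runFsck(conf, -1, true, "/", "-list-corruptfileblocks").
int exitCode = ToolRunner.run(new DFSck(conf),
        new String[] { "/", "-list-corruptfileblocks" });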

Example 85 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestFSImageWithXAttr method testXAttr.

private void testXAttr(boolean persistNamespace) throws IOException {
    Path path = new Path("/p");
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.create(path).close();
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
    restart(fs, persistNamespace);
    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(3, xattrs.size());
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));
    Assert.assertArrayEquals(value3, xattrs.get(name3));
    fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
    restart(fs, persistNamespace);
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(3, xattrs.size());
    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));
    Assert.assertArrayEquals(value3, xattrs.get(name3));
    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
    fs.removeXAttr(path, name3);
    restart(fs, persistNamespace);
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(0, xattrs.size());
}
Also used: Path (org.apache.hadoop.fs.Path), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)
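
The name and value constants (name1 through name3, value1, value2, value3, newValue1) are fields of the enclosing test class and are not shown here. Note that the null value passed for name3 still counts toward the three entries, apparently stored as a zero-length value, since the assertion compares it against value3. A hedged sketch of enumerating what survived each restart, using only documented FileSystem calls:

// Sketch only: list the xattrs that persisted across the restart.
for (String xattrName : fs.listXAttrs(path)) {
    byte[] value = fs.getXAttr(path, xattrName);
    System.out.println(xattrName + " = " + java.util.Arrays.toString(value));
}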

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252
Test (org.junit.Test): 175
Path (org.apache.hadoop.fs.Path): 169
Configuration (org.apache.hadoop.conf.Configuration): 126
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86
IOException (java.io.IOException): 63
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36
FileSystem (org.apache.hadoop.fs.FileSystem): 31
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26
URI (java.net.URI): 24
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
AccessControlException (org.apache.hadoop.security.AccessControlException): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18
Matchers.anyString (org.mockito.Matchers.anyString): 18
FileStatus (org.apache.hadoop.fs.FileStatus): 16
ArrayList (java.util.ArrayList): 14
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14