Example 31 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestFsck method testStoragePoliciesCK.

/**
   * Test storage policy display.
   */
@Test
public void testStoragePoliciesCK() throws Exception {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    writeFile(dfs, "/testhot", "file", "HOT");
    writeFile(dfs, "/testwarm", "file", "WARM");
    writeFile(dfs, "/testcold", "file", "COLD");
    String outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
    assertTrue(outStr.contains("DISK:3(HOT)"));
    assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
    assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
    assertTrue(outStr.contains("All blocks satisfy specified storage policy."));
    dfs.setStoragePolicy(new Path("/testhot"), "COLD");
    dfs.setStoragePolicy(new Path("/testwarm"), "COLD");
    outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
    assertTrue(outStr.contains("DISK:3(HOT)"));
    assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
    assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
    assertFalse(outStr.contains("All blocks satisfy specified storage policy."));
}
Also used: Path (org.apache.hadoop.fs.Path), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
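
writeFile is a private TestFsck helper that is not shown on this page. A minimal sketch of what such a helper needs to do (create the directory, set its storage policy, then write a small replicated file) might look like the following; the real helper may differ:

// Hypothetical reconstruction of the writeFile helper used above.
private void writeFile(DistributedFileSystem dfs, String dirName,
        String fileName, String storagePolicy) throws IOException {
    Path dir = new Path(dirName);
    dfs.mkdirs(dir);
    dfs.setStoragePolicy(dir, storagePolicy);   // "HOT", "WARM" or "COLD"
    // 1024-byte file, replication factor 3, fixed seed for deterministic data
    DFSTestUtil.createFile(dfs, new Path(dir, fileName), 1024L, (short) 3, 0L);
}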

Example 32 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestFsck method testUpgradeDomain.

private void testUpgradeDomain(boolean defineUpgradeDomain, boolean displayUpgradeDomain) throws Exception {
    final short replFactor = 1;
    final short numDN = 1;
    final long blockSize = 512;
    final long fileSize = 1024;
    final String upgradeDomain = "ud1";
    final String[] racks = { "/rack1" };
    final String[] hosts = { "127.0.0.1" };
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
    if (defineUpgradeDomain) {
        conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
        hostsFileWriter.initialize(conf, "temp/fsckupgradedomain");
    }
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDN).hosts(hosts).racks(racks).build();
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    // Configure the upgrade domain on the datanode
    if (defineUpgradeDomain) {
        DatanodeAdminProperties dnProp = new DatanodeAdminProperties();
        DatanodeID datanodeID = cluster.getDataNodes().get(0).getDatanodeId();
        dnProp.setHostName(datanodeID.getHostName());
        dnProp.setPort(datanodeID.getXferPort());
        dnProp.setUpgradeDomain(upgradeDomain);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { dnProp });
        cluster.getFileSystem().refreshNodes();
    }
    // create files
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L);
    DFSTestUtil.waitReplication(dfs, path, replFactor);
    try {
        String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", displayUpgradeDomain ? "-upgradedomains" : "-locations");
        assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
        String udValue = defineUpgradeDomain ? upgradeDomain : NamenodeFsck.UNDEFINED;
        assertEquals(displayUpgradeDomain, fsckOut.contains("(ud=" + udValue + ")"));
    } finally {
        if (defineUpgradeDomain) {
            hostsFileWriter.cleanup();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)
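
Every example here calls the runFsck helper. A sketch of it, assuming the stock DFSck tool (org.apache.hadoop.hdfs.tools.DFSck) and ToolRunner (org.apache.hadoop.util.ToolRunner); the actual TestFsck helper may differ in detail:

// Sketch of the runFsck helper: run DFSck with the given arguments, capture
// everything it prints, optionally assert on the exit code, and return the
// output so the test can make assertions against it.
static String runFsck(Configuration conf, int expectedErrCode,
        boolean checkErrorCode, String... path) throws Exception {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode) {
        assertEquals(expectedErrCode, errCode);
    }
    return bStream.toString();
}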

Example 33 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestFsck method testFsckWithMaintenanceReplicas.

/**
   * Test that blocks on maintenance hosts are not shown as missing.
   */
@Test(timeout = 90000)
public void testFsckWithMaintenanceReplicas() throws Exception {
    final short replFactor = 2;
    short numDn = 2;
    final long blockSize = 512;
    String[] hosts = { "host1", "host2" };
    String[] racks = { "/rack1", "/rack2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY, replFactor);
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    // create files
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    util.createFile(dfs, path, 1024, replFactor, 1000L);
    util.waitReplication(dfs, path, replFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
        sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
    }
    String[] bIds = sb.toString().split(" ");
    // make sure the datanode that has the replica is healthy before maintenance
    String outStr = runFsck(conf, 0, true, testFile);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    DatanodeManager dnm = bm.getDatanodeManager();
    DatanodeDescriptor dn = dnm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
    bm.getDatanodeManager().getDecomManager().startMaintenance(dn, Long.MAX_VALUE);
    final String dnName = dn.getXferAddr();
    // wait for the node to enter the ENTERING_MAINTENANCE state
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            DatanodeInfo datanodeInfo = null;
            try {
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isEnteringMaintenance()) {
                    // verify fsck returns Healthy status
                    String fsckOut = runFsck(conf, 0, true, testFile, "-maintenance");
                    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // Start a 3rd DataNode so the block can be re-replicated, then wait for
    // the node to reach the IN_MAINTENANCE state
    cluster.startDataNodes(conf, 1, true, null, new String[] { "/rack3" }, new String[] { "host3" }, null, false);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            DatanodeInfo datanodeInfo = null;
            try {
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isInMaintenance()) {
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // verify fsck returns Healthy status
    String fsckOut = runFsck(conf, 0, true, testFile, "-maintenance");
    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
    // verify fsck returns Healthy status even without maintenance option
    fsckOut = runFsck(conf, 0, true, testFile);
    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
}
Also used: Path (org.apache.hadoop.fs.Path), DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), IOException (java.io.IOException), ChecksumException (org.apache.hadoop.fs.ChecksumException), TimeoutException (java.util.concurrent.TimeoutException), UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Test (org.junit.Test)
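
On Java 8 and later, the two anonymous Supplier classes above collapse into lambdas. A behavior-preserving sketch of the second wait (for the IN_MAINTENANCE state) looks like this:

// Lambda form of the second GenericTestUtils.waitFor call above; the logic
// is unchanged, only the anonymous-class boilerplate is removed.
GenericTestUtils.waitFor(() -> {
    try {
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
            if (dnName.equals(info.getXferAddr()) && info.isInMaintenance()) {
                return true;
            }
        }
    } catch (IOException e) {
        LOG.warn("Unexpected exception: " + e);
    }
    return false;
}, 500, 30000);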

Example 34 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestINodeFile method testInodeIdBasedPaths.

/**
   * Tests for addressing files using /.reserved/.inodes/<inodeID> in file system
   * operations.
   */
@Test
public void testInodeIdBasedPaths() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
        // FileSystem#mkdirs "/testInodeIdBasedPaths"
        Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
        Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
        fs.mkdirs(baseDir);
        fs.exists(baseDir);
        long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
        // FileSystem#create file and FileSystem#close
        Path testFileInodePath = getInodePath(baseDirFileId, "test1");
        Path testFileRegularPath = new Path(baseDir, "test1");
        final int testFileBlockSize = 1024;
        FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
        assertTrue(fs.exists(testFileInodePath));
        // FileSystem#setPermission
        FsPermission perm = new FsPermission((short) 0666);
        fs.setPermission(testFileInodePath, perm);
        // FileSystem#getFileStatus and FileSystem#getPermission
        FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(perm, fileStatus.getPermission());
        // FileSystem#setOwner
        fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
        // FileSystem#setTimes
        fs.setTimes(testFileInodePath, 0, 0);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(0, fileStatus.getModificationTime());
        assertEquals(0, fileStatus.getAccessTime());
        // FileSystem#setReplication
        fs.setReplication(testFileInodePath, (short) 3);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(3, fileStatus.getReplication());
        fs.setReplication(testFileInodePath, (short) 1);
        // ClientProtocol#getPreferredBlockSize
        assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
        /*
       * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
       * following four methods. The calls below ensure that
       * /.reserved/.inodes paths work properly. No need to check return
       * values as these methods are tested elsewhere.
       */
        {
            fs.isFileClosed(testFileInodePath);
            fs.getAclStatus(testFileInodePath);
            fs.getXAttrs(testFileInodePath);
            fs.listXAttrs(testFileInodePath);
            fs.access(testFileInodePath, FsAction.READ_WRITE);
        }
        // symbolic link related tests
        // Reserved path is not allowed as a target
        String invalidTarget = new Path(baseDir, "invalidTarget").toString();
        String link = new Path(baseDir, "link").toString();
        testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
        // Test creating a link using reserved inode path
        String validTarget = "/validtarget";
        testValidSymlinkTarget(nnRpc, validTarget, link);
        // FileSystem#append
        fs.append(testFileInodePath);
        // DistributedFileSystem#recoverLease
        fs.recoverLease(testFileInodePath);
        // Namenode#getBlockLocations
        LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(), 0, Long.MAX_VALUE);
        LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(), 0, Long.MAX_VALUE);
        checkEquals(l1, l2);
        // FileSystem#rename - both the variants
        Path renameDst = getInodePath(baseDirFileId, "test2");
        fileStatus = fs.getFileStatus(testFileInodePath);
        // Rename variant 1: rename and rename back
        fs.rename(testFileInodePath, renameDst);
        fs.rename(renameDst, testFileInodePath);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // Rename variant 2: rename and rename back
        fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
        fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // FileSystem#getContentSummary
        assertEquals(fs.getContentSummary(testFileRegularPath).toString(), fs.getContentSummary(testFileInodePath).toString());
        // FileSystem#listFiles
        checkEquals(fs.listFiles(baseDirRegPath, false), fs.listFiles(baseDir, false));
        // FileSystem#delete
        fs.delete(testFileInodePath, true);
        assertFalse(fs.exists(testFileInodePath));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileStatus (org.apache.hadoop.fs.FileStatus), LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), Configuration (org.apache.hadoop.conf.Configuration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FsPermission (org.apache.hadoop.fs.permission.FsPermission), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
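
getInodePath is a small TestINodeFile helper. A minimal sketch, assuming it simply concatenates the documented /.reserved/.inodes prefix with the inode id and a child name, would be:

// Hypothetical reconstruction of the getInodePath helper; the real method in
// TestINodeFile may differ. It builds /.reserved/.inodes/<inodeId>/<name>.
private static Path getInodePath(long inodeId, String remainingPath) {
    return new Path("/.reserved/.inodes/" + inodeId + "/" + remainingPath);
}

For example, getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths") resolves to the same directory as the regular path /testInodeIdBasedPaths once it exists.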

Example 35 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestLeaseManager method testLeaseRestorationOnRestart.

/**
   * Make sure the lease is restored even if only the inode has the record.
   */
@Test
public void testLeaseRestorationOnRestart() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
        DistributedFileSystem dfs = cluster.getFileSystem();
        // Create an empty file
        String path = "/testLeaseRestorationOnRestart";
        FSDataOutputStream out = dfs.create(new Path(path));
        // Remove the lease from the lease manager, but leave it in the inode.
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        INodeFile file = dir.getINode(path).asFile();
        cluster.getNamesystem().leaseManager.removeLease(file.getFileUnderConstructionFeature().getClientName(), file);
        // Save a fsimage.
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        cluster.getNameNodeRpc().saveNamespace(0, 0);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Restart the namenode.
        cluster.restartNameNode(true);
        // Check whether the lease manager has the lease
        dir = cluster.getNamesystem().getFSDirectory();
        file = dir.getINode(path).asFile();
        assertTrue("Lease should exist.", cluster.getNamesystem().leaseManager.getLease(file) != null);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
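
The save-namespace step requires the NameNode to be in safe mode; the test drives it through the NameNode RPC, but the same checkpoint can also be forced through the client API (or, on a live cluster, with hdfs dfsadmin -safemode enter / -saveNamespace / -safemode leave):

// Client-side sketch of the same checkpoint sequence using only the
// DistributedFileSystem API instead of the NameNode RPC.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.saveNamespace();
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);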

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252 usages
Test (org.junit.Test): 175 usages
Path (org.apache.hadoop.fs.Path): 169 usages
Configuration (org.apache.hadoop.conf.Configuration): 126 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86 usages
IOException (java.io.IOException): 63 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 31 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26 usages
URI (java.net.URI): 24 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 usages
AccessControlException (org.apache.hadoop.security.AccessControlException): 19 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18 usages
Matchers.anyString (org.mockito.Matchers.anyString): 18 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 16 usages
ArrayList (java.util.ArrayList): 14 usages
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14 usages