Example 36 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestListCorruptFileBlocks, method testlistCorruptFileBlocksDFS.

/**
   * Test listCorruptFileBlocks in DistributedFileSystem.
   */
@Test(timeout = 300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    // datanode scans directories
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    FileSystem fs = null;
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
        util.createFiles(fs, "/corruptData");
        RemoteIterator<Path> corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
        int numCorrupt = countPaths(corruptFileBlocks);
        assertTrue(numCorrupt == 0);
        // delete the blocks
        String bpid = cluster.getNamesystem().getBlockPoolId();
        // loop through the data directories per datanode (2)
        for (int i = 0; i < 2; i++) {
            File storageDir = cluster.getInstanceStorageDir(0, i);
            File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
            List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
            if (metadataFiles == null)
                continue;
            for (File metadataFile : metadataFiles) {
                File blockFile = Block.metaToBlockFile(metadataFile);
                LOG.info("Deliberately removing file " + blockFile.getName());
                assertTrue("Cannot remove file.", blockFile.delete());
                LOG.info("Deliberately removing file " + metadataFile.getName());
                assertTrue("Cannot remove file.", metadataFile.delete());
            // break;
            }
        }
        int count = 0;
        corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
        numCorrupt = countPaths(corruptFileBlocks);
        while (numCorrupt < 3) {
            Thread.sleep(1000);
            corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
            numCorrupt = countPaths(corruptFileBlocks);
            count++;
            if (count > 30)
                break;
        }
        // Validate we get all the corrupt files
        LOG.info("Namenode has bad files. " + numCorrupt);
        assertTrue(numCorrupt == 3);
        util.cleanup(fs, "/corruptData");
        util.cleanup(fs, "/goodData");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
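
Note that the test calls a countPaths helper that this page does not show. A minimal sketch of what it presumably does, assuming it simply drains the RemoteIterator returned by listCorruptFileBlocks:

// Hypothetical reconstruction of the countPaths helper referenced above:
// drain the iterator and count the corrupt-file paths it yields.
private static int countPaths(RemoteIterator<Path> iter) throws IOException {
    int count = 0;
    while (iter.hasNext()) {
        iter.next();
        count++;
    }
    return count;
}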

Example 37 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestINodeFile, method testLocationLimitInListingOps.

@Test
public void testLocationLimitInListingOps() throws Exception {
    final Configuration conf = new Configuration();
    // 3 blocks * 3 replicas
    conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 9);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        final DistributedFileSystem hdfs = cluster.getFileSystem();
        ArrayList<String> source = new ArrayList<String>();
        // tmp1 holds files with 3 blocks, 3 replicas
        // tmp2 holds files with 3 blocks, 1 replica
        hdfs.mkdirs(new Path("/tmp1"));
        hdfs.mkdirs(new Path("/tmp2"));
        source.add("f1");
        source.add("f2");
        int numEntries = source.size();
        for (int j = 0; j < numEntries; j++) {
            DFSTestUtil.createFile(hdfs, new Path("/tmp1/" + source.get(j)), 4096, 3 * 1024 - 100, 1024, (short) 3, 0);
        }
        byte[] start = HdfsFileStatus.EMPTY_NAME;
        for (int j = 0; j < numEntries; j++) {
            DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp1", start, true);
            assertTrue(dl.getPartialListing().length == 1);
            for (int i = 0; i < dl.getPartialListing().length; i++) {
                source.remove(dl.getPartialListing()[i].getLocalName());
            }
            start = dl.getLastName();
        }
        // Verify we have listed all entries in the directory.
        assertTrue(source.size() == 0);
        // Now create 6 files, each with 3 locations. Should take 2 iterations of 3
        source.add("f1");
        source.add("f2");
        source.add("f3");
        source.add("f4");
        source.add("f5");
        source.add("f6");
        numEntries = source.size();
        for (int j = 0; j < numEntries; j++) {
            DFSTestUtil.createFile(hdfs, new Path("/tmp2/" + source.get(j)), 4096, 3 * 1024 - 100, 1024, (short) 1, 0);
        }
        start = HdfsFileStatus.EMPTY_NAME;
        for (int j = 0; j < numEntries / 3; j++) {
            DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp2", start, true);
            assertTrue(dl.getPartialListing().length == 3);
            for (int i = 0; i < dl.getPartialListing().length; i++) {
                source.remove(dl.getPartialListing()[i].getLocalName());
            }
            start = dl.getLastName();
        }
        // Verify we have listed all entries in tmp2.
        assertTrue(source.size() == 0);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
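
The DFS_LIST_LIMIT setting (dfs.ls.limit) caps how many entries, weighted by block locations, the NameNode returns per getListing RPC. Client code normally never calls getListing directly; a minimal sketch of the equivalent paged listing through the public API, assuming a FileSystem handle like the hdfs variable above:

// FileSystem#listStatusIterator pages through a directory transparently,
// issuing one getListing RPC per batch, so callers see a flat stream.
static void listAll(FileSystem fs, Path dir) throws IOException {
    RemoteIterator<FileStatus> it = fs.listStatusIterator(dir);
    while (it.hasNext()) {
        System.out.println(it.next().getPath().getName());
    }
}

For the test's directory this would be invoked as listAll(hdfs, new Path("/tmp2")).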

Example 38 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestSymlinkHdfsDisable, method testSymlinkHdfsDisable.

@Test(timeout = 60000)
public void testSymlinkHdfsDisable() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // disable symlink resolution
    conf.setBoolean(CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY, false);
    // spin up minicluster, get dfs and filecontext
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
    // Create test files/links
    FileContextTestHelper helper = new FileContextTestHelper("/tmp/TestSymlinkHdfsDisable");
    Path root = helper.getTestRootPath(fc);
    Path target = new Path(root, "target");
    Path link = new Path(root, "link");
    DFSTestUtil.createFile(dfs, target, 4096, (short) 1, 0xDEADDEAD);
    fc.createSymlink(target, link, false);
    // Try to resolve links with FileSystem and FileContext
    try {
        fc.open(link);
        fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
    try {
        dfs.open(link);
        fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
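
For contrast, a minimal sketch of the default behavior, where fs.client.resolve.remote.symlinks is left at true and opening through the link succeeds instead of throwing (same cluster setup as the test; paths and seed are illustrative):

// Hedged sketch: with remote symlink resolution enabled (the default),
// fc.open(link) resolves through to the target's data.
static void openThroughLink() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // set explicitly here only for contrast with the test above
    conf.setBoolean(CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DistributedFileSystem dfs = cluster.getFileSystem();
        FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
        Path target = new Path("/target");
        Path link = new Path("/link");
        DFSTestUtil.createFile(dfs, target, 4096, (short) 1, 0xDEADDEAD);
        fc.createSymlink(target, link, false);
        try (FSDataInputStream in = fc.open(link)) {
            in.read(); // resolution enabled: reads the target's first byte
        }
    } finally {
        cluster.shutdown();
    }
}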

Example 39 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestViewFsDefaultValue, method testGetQuotaUsage.

/**
   * Test that quota usage can be retrieved on the client side
   * through getQuotaUsage.
   */
@Test
public void testGetQuotaUsage() throws IOException {
    FileSystem hFs = cluster.getFileSystem(0);
    final DistributedFileSystem dfs = (DistributedFileSystem) hFs;
    dfs.setQuota(testFileDirPath, 100, 500);
    QuotaUsage qu = vfs.getQuotaUsage(testFileDirPath);
    assertEquals(100, qu.getQuota());
    assertEquals(500, qu.getSpaceQuota());
}
Also used: FileSystem(org.apache.hadoop.fs.FileSystem) QuotaUsage(org.apache.hadoop.fs.QuotaUsage) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
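
In the setQuota call above, 100 is the namespace quota (a limit on the number of files and directories under the path) and 500 is the space quota in raw bytes. A small follow-up sketch, reusing dfs and testFileDirPath from the test, showing how the reported usage relates to those limits:

// QuotaUsage carries both the configured limits and current consumption.
QuotaUsage usage = dfs.getQuotaUsage(testFileDirPath);
long namesRemaining = usage.getQuota() - usage.getFileAndDirectoryCount();
long bytesRemaining = usage.getSpaceQuota() - usage.getSpaceConsumed();
System.out.println("names left: " + namesRemaining + ", raw bytes left: " + bytesRemaining);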

Example 40 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestViewFsDefaultValue, method testGetQuotaUsageWithQuotaDefined.

/**
   * Test that quota usage can be retrieved on the client side
   * when no quota is defined.
   */
@Test
public void testGetQuotaUsageWithQuotaDefined() throws IOException {
    FileSystem hFs = cluster.getFileSystem(0);
    final DistributedFileSystem dfs = (DistributedFileSystem) hFs;
    dfs.setQuota(testFileDirPath, -1, -1);
    dfs.setQuotaByStorageType(testFileDirPath, StorageType.SSD, -1);
    dfs.setQuotaByStorageType(testFileDirPath, StorageType.DISK, -1);
    QuotaUsage qu = vfs.getQuotaUsage(testFileDirPath);
    assertEquals(-1, qu.getTypeQuota(StorageType.SSD));
    assertEquals(-1, qu.getQuota());
    assertEquals(-1, qu.getSpaceQuota());
    assertEquals(2, qu.getFileAndDirectoryCount());
    assertEquals(0, qu.getTypeConsumed(StorageType.SSD));
    assertTrue(qu.getSpaceConsumed() > 0);
}
Also used: FileSystem(org.apache.hadoop.fs.FileSystem) QuotaUsage(org.apache.hadoop.fs.QuotaUsage) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
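
The -1 arguments above clear the quotas rather than set them; HdfsConstants names this sentinel QUOTA_RESET. A minimal equivalent sketch using the named constant (assuming an import of org.apache.hadoop.hdfs.protocol.HdfsConstants):

// HdfsConstants.QUOTA_RESET (-1) removes a previously configured quota.
dfs.setQuota(testFileDirPath, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
dfs.setQuotaByStorageType(testFileDirPath, StorageType.SSD, HdfsConstants.QUOTA_RESET);
dfs.setQuotaByStorageType(testFileDirPath, StorageType.DISK, HdfsConstants.QUOTA_RESET);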

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252 uses
Test (org.junit.Test): 175 uses
Path (org.apache.hadoop.fs.Path): 169 uses
Configuration (org.apache.hadoop.conf.Configuration): 126 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86 uses
IOException (java.io.IOException): 63 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 31 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26 uses
URI (java.net.URI): 24 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 uses
AccessControlException (org.apache.hadoop.security.AccessControlException): 19 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18 uses
Matchers.anyString (org.mockito.Matchers.anyString): 18 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 16 uses
ArrayList (java.util.ArrayList): 14 uses
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14 uses