
Example 56 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestAuditLoggerWithCommands, method testGetEZForPath: a getEZForPath call that is denied by permissions must produce an audit log entry, and a call on an already closed file system must fail without adding further entries.

@Test
public void testGetEZForPath() throws Exception {
    Path path = new Path("/test");
    // fs is the superuser file system; fileSys acts as the unprivileged user1.
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    fs.mkdirs(path, new FsPermission((short) 0));
    String aceGetEzForPathPattern = ".*allowed=false.*ugi=theDoctor.*cmd=getEZForPath.*";
    try {
        ((DistributedFileSystem) fileSys).getEZForPath(path);
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
        // expected: user1 has no permission on /test
    }
    // The denied call must have been audit-logged.
    int length = verifyAuditLogs(aceGetEzForPathPattern);
    fileSys.close();
    try {
        ((DistributedFileSystem) fileSys).getEZForPath(path);
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // expected: the file system is already closed
    }
    // A call on a closed file system must not add new audit log entries.
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Also used : Path(org.apache.hadoop.fs.Path) AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
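
Outside of the test, the same HDFS-only call is reached by casting a FileSystem handle to DistributedFileSystem. A minimal sketch, not taken from the Hadoop sources, assuming fs.defaultFS points at an HDFS cluster; the path /secure/data and the method name are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

public static void printEncryptionZone() throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // getEZForPath returns the encryption zone containing the path,
        // or null if the path is not inside any encryption zone.
        EncryptionZone zone = dfs.getEZForPath(new Path("/secure/data"));
        System.out.println(zone == null ? "not in an encryption zone" : zone.getPath());
    }
}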

Example 57 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestAuditLoggerWithCommands, method testModifyCachePool: a modifyCachePool call denied for user1 must be audit-logged, and a call on an already closed file system must fail without adding further entries.

@Test
public void testModifyCachePool() throws Exception {
    removeExistingCachePools(null);
    // Create a cache pool as the superuser; user1 will then try to modify it.
    CachePoolInfo cacheInfo = new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
    ((DistributedFileSystem) fs).addCachePool(cacheInfo);
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        ((DistributedFileSystem) fileSys).modifyCachePool(cacheInfo);
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
        // expected: the modification is denied for user1
    }
    String aceModifyCachePoolPattern = ".*allowed=false.*ugi=theDoctor.*cmd=modifyCachePool.*";
    // The denied call must have been audit-logged.
    int length = verifyAuditLogs(aceModifyCachePoolPattern);
    try {
        fileSys.close();
        ((DistributedFileSystem) fileSys).modifyCachePool(cacheInfo);
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // expected: the file system is already closed
    }
    // A call on a closed file system must not add new audit log entries.
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Also used : AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) CachePoolInfo(org.apache.hadoop.hdfs.protocol.CachePoolInfo) Test(org.junit.Test)
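
For comparison, the cache pool calls exercised above are ordinary administrative operations on a DistributedFileSystem. A minimal sketch, not from the Hadoop sources; the pool name, mode and byte limit are placeholders, and the caller is assumed to hold HDFS superuser privileges:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public static void createAndTightenPool() throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // Create a pool, then change its mode and byte limit without recreating it.
    dfs.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0755)));
    dfs.modifyCachePool(new CachePoolInfo("pool1")
        .setMode(new FsPermission((short) 0700))
        .setLimit(64L * 1024 * 1024));
}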

Example 58 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestWebHdfsDataLocality, method testExcludeDataNodes: when a WebHDFS request carries a list of excluded datanodes, the namenode must choose a datanode that is not on that list for GETFILECHECKSUM, OPEN and APPEND.

@Test
public void testExcludeDataNodes() throws Exception {
    final Configuration conf = WebHdfsTestUtil.createConf();
    final String[] racks = { RACK0, RACK0, RACK1, RACK1, RACK2, RACK2 };
    final String[] hosts = { "DataNode1", "DataNode2", "DataNode3", "DataNode4", "DataNode5", "DataNode6" };
    final int nDataNodes = hosts.length;
    LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks) + ", hosts=" + Arrays.asList(hosts));
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final NameNode namenode = cluster.getNameNode();
        final DatanodeManager dm = namenode.getNamesystem().getBlockManager().getDatanodeManager();
        LOG.info("dm=" + dm);
        final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
        final String f = "/foo";
        // Create a file with three replicas.
        final Path p = new Path(f);
        final FSDataOutputStream out = dfs.create(p, (short) 3);
        out.write(1);
        out.close();
        // Get the replica locations.
        final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(namenode, f, 0, 1);
        final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
        Assert.assertEquals(1, lb.size());
        final DatanodeInfo[] locations = lb.get(0).getLocations();
        Assert.assertEquals(3, locations.length);
        // For GETFILECHECKSUM, OPEN and APPEND, the chosen datanode must be
        // different from the excluded nodes. sb accumulates the transfer
        // addresses of the datanodes to exclude.
        StringBuffer sb = new StringBuffer();
        for (int i = 0; i < 2; i++) {
            sb.append(locations[i].getXferAddr());
            {
                // test GETFILECHECKSUM
                final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, sb.toString(), LOCALHOST);
                for (int j = 0; j <= i; j++) {
                    Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
                }
            }
            {
                // test OPEN
                final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString(), LOCALHOST);
                for (int j = 0; j <= i; j++) {
                    Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
                }
            }
            {
                // test APPEND
                final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, sb.toString(), LOCALHOST);
                for (int j = 0; j <= i; j++) {
                    Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
                }
            }
            sb.append(",");
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
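
The block-location lookup above goes through NameNodeAdapter, an internal test helper. A roughly equivalent check through the public FileSystem API could look like the sketch below; it is not taken from the Hadoop sources and assumes the same MiniDFSCluster as in the test:

import java.util.Arrays;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public static void printReplicaHosts(MiniDFSCluster cluster) throws Exception {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path p = new Path("/foo");
    try (FSDataOutputStream out = dfs.create(p, (short) 3)) {
        out.write(1);
    }
    // Each BlockLocation lists the hosts holding a replica of one block.
    for (BlockLocation loc : dfs.getFileBlockLocations(dfs.getFileStatus(p), 0, 1)) {
        System.out.println(Arrays.toString(loc.getHosts()));
    }
}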

Example 59 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestShortCircuitCache, method testDataXceiverHandlesRequestShortCircuitShmFailure: if the datanode fails while sending the shared-memory response for a short-circuit read, no segments or slots may be leaked, and a later read must succeed once the failure is cleared.

// Regression test for HADOOP-11802
@Test(timeout = 60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure() throws Exception {
    BlockReaderTestUtil.enableShortCircuitShmTracing();
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
    conf.setLong(HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY, 1000000000L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    final Path TEST_PATH1 = new Path("/test_file1");
    DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short) 1, 0xFADE1);
    LOG.info("Setting failure injector and performing a read which " + "should fail...");
    DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
    Mockito.doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            throw new IOException("injected error into sendShmResponse");
        }
    }).when(failureInjector).sendShortCircuitShmResponse();
    DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
    DataNodeFaultInjector.instance = failureInjector;
    try {
        // The first read will try to allocate a shared memory segment and slot.
        // The shared memory segment allocation will fail because of the failure
        // injector.
        DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
        Assert.fail("expected readFileBuffer to fail, but it succeeded.");
    } catch (Throwable t) {
        GenericTestUtils.assertExceptionContains("TCP reads were disabled for " + "testing, but we failed to do a non-TCP read.", t);
    }
    checkNumberOfSegmentsAndSlots(0, 0, cluster.getDataNodes().get(0).getShortCircuitRegistry());
    LOG.info("Clearing failure injector and performing another read...");
    DataNodeFaultInjector.instance = prevInjector;
    fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();
    // The second read should succeed.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    // We should have added a new short-circuit shared memory segment and slot.
    checkNumberOfSegmentsAndSlots(1, 1, cluster.getDataNodes().get(0).getShortCircuitRegistry());
    cluster.shutdown();
    sockDir.close();
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DataNodeFaultInjector(org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Test(org.junit.Test)
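
createShortCircuitConf is a helper inside TestShortCircuitCache. For orientation, client-side short-circuit reads are governed by two standard configuration keys, shown in the sketch below; this is an assumption about what the helper sets up, not a copy of it, and the socket path is a placeholder:

import org.apache.hadoop.conf.Configuration;

public static Configuration shortCircuitConf(String socketPath) {
    Configuration conf = new Configuration();
    // Enable short-circuit (local) reads and point client and datanode at a
    // shared UNIX domain socket path.
    conf.setBoolean("dfs.client.read.shortcircuit", true);
    conf.set("dfs.domain.socket.path", socketPath);
    return conf;
}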

Example 60 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestShortCircuitCache, method testDataXceiverCleansUpSlotsOnFailure: when a short-circuit read fails partway through, the datanode must clean up the slot allocated for that read, leaving only the segment and slot from the earlier successful read.

// Regression test for HDFS-7915
@Test(timeout = 60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
    BlockReaderTestUtil.enableShortCircuitShmTracing();
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testDataXceiverCleansUpSlotsOnFailure", sockDir);
    conf.setLong(HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY, 1000000000L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    final Path TEST_PATH1 = new Path("/test_file1");
    final Path TEST_PATH2 = new Path("/test_file2");
    final int TEST_FILE_LEN = 4096;
    final int SEED = 0xFADE1;
    DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN, (short) 1, SEED);
    DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN, (short) 1, SEED);
    // The first read should allocate one shared memory segment and slot.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    // The second read should fail, and we should only have 1 segment and 1 slot left.
    BlockReaderFactory.setFailureInjectorForTesting(new TestCleanupFailureInjector());
    try {
        DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
    } catch (Throwable t) {
        GenericTestUtils.assertExceptionContains("TCP reads were disabled for " + "testing, but we failed to do a non-TCP read.", t);
    }
    checkNumberOfSegmentsAndSlots(1, 1, cluster.getDataNodes().get(0).getShortCircuitRegistry());
    cluster.shutdown();
    sockDir.close();
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) Test(org.junit.Test)
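
checkNumberOfSegmentsAndSlots inspects the datanode's ShortCircuitRegistry, which is internal to HDFS. From the client side, a common way to confirm that a read was actually served through the short-circuit path is the per-stream read statistics; a minimal sketch, not from the test, with the method name and path handling as placeholders:

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public static long shortCircuitBytesRead(DistributedFileSystem dfs, Path p) throws Exception {
    try (FSDataInputStream in = dfs.open(p)) {
        in.readByte();
        // HdfsDataInputStream exposes per-stream read statistics, including the
        // number of bytes served via the short-circuit (local) path.
        return ((HdfsDataInputStream) in).getReadStatistics()
            .getTotalShortCircuitBytesRead();
    }
}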

Aggregations (classes co-used with DistributedFileSystem, with usage counts)

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252
Test (org.junit.Test): 175
Path (org.apache.hadoop.fs.Path): 169
Configuration (org.apache.hadoop.conf.Configuration): 126
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86
IOException (java.io.IOException): 63
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36
FileSystem (org.apache.hadoop.fs.FileSystem): 31
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26
URI (java.net.URI): 24
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
AccessControlException (org.apache.hadoop.security.AccessControlException): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18
Matchers.anyString (org.mockito.Matchers.anyString): 18
FileStatus (org.apache.hadoop.fs.FileStatus): 16
ArrayList (java.util.ArrayList): 14
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14