Example 21 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From class TestWebHDFS, method testWebHdfsAppend.

@Test
public void testWebHdfsAppend() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    final int dnNumber = 3;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNumber).build();
        final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        final DistributedFileSystem fs = cluster.getFileSystem();
        final Path appendFile = new Path("/testAppend.txt");
        final String content = "hello world";
        DFSTestUtil.writeFile(fs, appendFile, content);
        for (int index = 0; index < dnNumber - 1; index++) {
            cluster.shutdownDataNode(index);
        }
        cluster.restartNameNodes();
        cluster.waitActive();
        try {
            DFSTestUtil.appendFile(webFS, appendFile, content);
            fail("Should fail to append file since " + "datanode number is 1 and replication is 3");
        } catch (IOException ignored) {
            String resultContent = DFSTestUtil.readFile(fs, appendFile);
            assertTrue(resultContent.equals(content));
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown(true);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
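
The failure mode here comes from the write pipeline: /testAppend.txt was written with the default replication factor of 3, so once two of the three datanodes are down the append cannot build a full pipeline and throws IOException. Outside the MiniDFSCluster harness, the same append path can be driven through a plain FileSystem handle bound to the webhdfs scheme. A minimal sketch, assuming a reachable cluster; the namenode address below is a placeholder:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsAppendSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The webhdfs:// scheme routes through the namenode's HTTP REST API
        // instead of the native RPC client used by DistributedFileSystem.
        FileSystem webFs = FileSystem.get(
                URI.create("webhdfs://namenode.example.com:9870"), conf);
        try (FSDataOutputStream out = webFs.append(new Path("/testAppend.txt"))) {
            out.writeBytes("more data");
        }
    }
}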

Example 22 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From class TestDelegationToken, method testAddDelegationTokensDFSApi.

@Test
public void testAddDelegationTokensDFSApi() throws Exception {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
    DistributedFileSystem dfs = cluster.getFileSystem();
    Credentials creds = new Credentials();
    final Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?>[] tokens2 = dfs.addDelegationTokens("JobTracker", creds);
    // creds already holds a token for this service, so none is issued
    Assert.assertEquals(0, tokens2.length);
    Assert.assertEquals(1, creds.numberOfTokens());
}
Also used: InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken), Token (org.apache.hadoop.security.token.Token), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Credentials (org.apache.hadoop.security.Credentials), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
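
The second addDelegationTokens call comes back empty because Credentials caches tokens by service name and the filesystem's service already has an entry. A small sketch of inspecting that cache; it assumes dfs is obtained from a running secured cluster, as in the test fixture:

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

static void showTokenCaching(DistributedFileSystem dfs) throws Exception {
    Credentials creds = new Credentials();
    // The first call fetches a fresh delegation token from the namenode.
    Token<?>[] fresh = dfs.addDelegationTokens("JobTracker", creds);
    System.out.println("new tokens: " + fresh.length); // 1
    for (Token<?> t : creds.getAllTokens()) {
        System.out.println(t.getService() + " -> " + t.getKind());
    }
    // Tokens are keyed by service, so a repeat call finds this cached
    // entry and returns an empty array instead of minting a new token.
    Token<?> cached = creds.getToken(new Text(dfs.getCanonicalServiceName()));
    System.out.println("cached: " + (cached != null)); // true
}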

Example 23 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From class TestDelegationToken, method testDelegationTokenWithDoAs.

@Test
public void testDelegationTokenWithDoAs() throws Exception {
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Credentials creds = new Credentials();
    final Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    @SuppressWarnings("unchecked") final Token<DelegationTokenIdentifier> token = (Token<DelegationTokenIdentifier>) tokens[0];
    final UserGroupInformation longUgi = UserGroupInformation.createRemoteUser("JobTracker/foo.com@FOO.COM");
    final UserGroupInformation shortUgi = UserGroupInformation.createRemoteUser("JobTracker");
    longUgi.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws IOException {
            try {
                token.renew(config);
            } catch (Exception e) {
                Assert.fail("Could not renew delegation token for user " + longUgi);
            }
            return null;
        }
    });
    shortUgi.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            token.renew(config);
            return null;
        }
    });
    longUgi.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws IOException {
            try {
                token.cancel(config);
            } catch (Exception e) {
                Assert.fail("Could not cancel delegation token for user " + longUgi);
            }
            return null;
        }
    });
}
Also used: DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken), Token (org.apache.hadoop.security.token.Token), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), AccessControlException (org.apache.hadoop.security.AccessControlException), Credentials (org.apache.hadoop.security.Credentials), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
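
On Java 8 and later, the anonymous PrivilegedExceptionAction classes can be collapsed into lambdas. A sketch of the same renew/cancel flow, reusing token, config, and longUgi from the test above:

longUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
    // Renewal and cancellation both run under the long-form principal.
    token.renew(config);
    return null;
});
longUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
    token.cancel(config);
    return null;
});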

Example 24 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From class TestMover, method testWithinSameNode.

private void testWithinSameNode(Configuration conf) throws Exception {
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testScheduleWithinSameNode/file";
        Path dir = new Path("/testScheduleWithinSameNode");
        dfs.mkdirs(dir);
        // write to DISK
        dfs.setStoragePolicy(dir, "HOT");
        final FSDataOutputStream out = dfs.create(new Path(file));
        out.writeChars("testScheduleWithinSameNode");
        out.close();
        // verify before movement
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        StorageType[] storageTypes = lb.getStorageTypes();
        for (StorageType storageType : storageTypes) {
            Assert.assertTrue(StorageType.DISK == storageType);
        }
        // move to ARCHIVE
        dfs.setStoragePolicy(dir, "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", dir.toString() });
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        // Wait until the namenode has been notified of the new block locations
        waitForLocatedBlockWithArchiveStorageType(dfs, file, 3);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)
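
waitForLocatedBlockWithArchiveStorageType is a private TestMover helper that this excerpt does not include. A hypothetical sketch of the polling it implies, built only from the LocatedBlock calls already used above (the method name and the 30-second budget are assumptions):

static void waitForArchiveReplicas(DistributedFileSystem dfs, String file,
        int expectedReplicas) throws Exception {
    for (int i = 0; i < 30; i++) {
        // Re-fetch block locations; the namenode updates storage types
        // as datanodes report the moved replicas.
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        int archived = 0;
        for (StorageType t : lb.getStorageTypes()) {
            if (t == StorageType.ARCHIVE) {
                archived++;
            }
        }
        if (archived == expectedReplicas) {
            return; // every replica now sits on ARCHIVE storage
        }
        Thread.sleep(1000);
    }
    throw new AssertionError("replicas never reached ARCHIVE storage");
}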

Example 25 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From class TestMover, method testMoverFailedRetryWithPinnedBlocks.

/**
   * Test to verify that the Mover handles pinned blocks as well as failed
   * blocks, and keeps retrying only the failed blocks.
   */
@Test(timeout = 90000)
public void testMoverFailedRetryWithPinnedBlocks() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).storageTypes(new StorageType[][] { { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE } }).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String parenDir = "/parent";
        dfs.mkdirs(new Path(parenDir));
        final String file1 = "/parent/testMoverFailedRetryWithPinnedBlocks1";
        // write to DISK
        final FSDataOutputStream out = dfs.create(new Path(file1), (short) 2);
        byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
        out.write(fileData);
        out.close();
        // Adding pinned blocks.
        createFileWithFavoredDatanodes(conf, cluster, dfs);
        // Delete the block file so the block move fails with FileNotFoundException
        LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
        Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
        LocatedBlock lb = locatedBlocks.get(0);
        cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(parenDir), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", parenDir });
        Assert.assertEquals("Movement should fail after some retry", ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
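
createFileWithFavoredDatanodes is another TestMover helper not shown in this excerpt. Block pinning takes effect when a file is written with favored datanodes while dfs.datanode.block-pinning.enabled is true (set before the datanodes start). A hedged sketch of that idea; the method name and target path are illustrative, and DEFAULT_BLOCK_SIZE and StripedFileTestUtil come from the test class as above:

static void writePinnedFile(MiniDFSCluster cluster, DistributedFileSystem dfs)
        throws Exception {
    // Favor the first datanode's transfer address so its replica is pinned.
    InetSocketAddress[] favored = {
        cluster.getDataNodes().get(0).getXferAddress()
    };
    try (FSDataOutputStream out = dfs.create(new Path("/parent/pinned"),
            FsPermission.getDefault(), true, 4096, (short) 1,
            DEFAULT_BLOCK_SIZE, null, favored)) {
        out.write(StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE));
    }
    // The Mover refuses to relocate pinned replicas, so only the block
    // whose file was deleted keeps failing until the retry limit is hit.
}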

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252 usages
Test (org.junit.Test): 175
Path (org.apache.hadoop.fs.Path): 169
Configuration (org.apache.hadoop.conf.Configuration): 126
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86
IOException (java.io.IOException): 63
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36
FileSystem (org.apache.hadoop.fs.FileSystem): 31
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26
URI (java.net.URI): 24
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
AccessControlException (org.apache.hadoop.security.AccessControlException): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18
Matchers.anyString (org.mockito.Matchers.anyString): 18
FileStatus (org.apache.hadoop.fs.FileStatus): 16
ArrayList (java.util.ArrayList): 14
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14