
Example 31 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestSaslDataTransfer method testClientAndServerDoNotHaveCommonQop.

@Test
public void testClientAndServerDoNotHaveCommonQop() throws Exception {
    HdfsConfiguration clusterConf = createSecureConfig("privacy");
    startCluster(clusterConf);
    HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
    clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
    exception.expect(IOException.class);
    exception.expectMessage("could only be replicated to 0 nodes");
    doTest(clientConf);
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Test(org.junit.Test)
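A minimal sketch of the knob this test exercises, for reference. dfs.data.transfer.protection (the value of DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY) accepts the SASL QoP levels "authentication", "integrity", and "privacy"; when the client and the DataNode share no common level, SASL negotiation fails and writes surface as "could only be replicated to 0 nodes". The helper name withQop below is hypothetical, not part of the Hadoop API.

import org.apache.hadoop.hdfs.HdfsConfiguration;

public class QopConfigSketch {
    // Hypothetical helper: copy a base config and set the data transfer QoP.
    // Valid values are "authentication", "integrity", and "privacy".
    static HdfsConfiguration withQop(HdfsConfiguration base, String qop) {
        HdfsConfiguration conf = new HdfsConfiguration(base);
        conf.set("dfs.data.transfer.protection", qop);
        return conf;
    }
}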

Example 32 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestBlockToken method testBlockTokenInLastLocatedBlock.

/**
   * This test writes a file, gets its block locations without closing the
   * file, and checks the block token in the last block. The token is
   * verified by ensuring it is of the correct kind.
   *
   * @throws IOException
   * @throws InterruptedException
   */
private void testBlockTokenInLastLocatedBlock(boolean enableProtobuf) throws IOException, InterruptedException {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE, enableProtobuf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    try {
        FileSystem fs = cluster.getFileSystem();
        String fileName = "/testBlockTokenInLastLocatedBlock";
        Path filePath = new Path(fileName);
        FSDataOutputStream out = fs.create(filePath, (short) 1);
        out.write(new byte[1000]);
        // ensure that the first block is written out (see FSOutputSummer#flush)
        out.flush();
        LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
        // the last block may not be registered with the NameNode yet; poll
        while (locatedBlocks.getLastLocatedBlock() == null) {
            Thread.sleep(100);
            locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
        }
        Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock().getBlockToken();
        Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
        out.close();
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
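A hedged sketch of the same verification as a standalone helper, assuming a Token<BlockTokenIdentifier> obtained from LocatedBlocks as above. It uses only what the example already relies on: Token.getKind() and the BlockTokenIdentifier.KIND_NAME constant the test asserts against.

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class BlockTokenKindCheck {
    // Fail fast if a block token is not an HDFS block token.
    static void assertBlockTokenKind(Token<BlockTokenIdentifier> token) {
        if (!BlockTokenIdentifier.KIND_NAME.equals(token.getKind())) {
            throw new IllegalStateException(
                "unexpected token kind: " + token.getKind());
        }
    }
}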

Example 33 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestBalancer method testBalancerCliWithIncludeList.

/**
   * Test a cluster with even distribution;
   * a new node is then added to the cluster,
   * and the balancer CLI is run with only one of the nodes in the include list.
   */
@Test(timeout = 100000)
public void testBalancerCliWithIncludeList() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    Set<String> includeHosts = new HashSet<String>();
    includeHosts.add("datanodeY");
    doTest(conf, new long[] { CAPACITY, CAPACITY }, new String[] { RACK0, RACK1 }, CAPACITY, RACK2, new HostNameBasedNodes(new String[] { "datanodeX", "datanodeY", "datanodeZ" }, BalancerParameters.DEFAULT.getExcludedNodes(), includeHosts), true, false);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HashSet(java.util.HashSet) Test(org.junit.Test)
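For comparison, a hedged sketch of building the same include list programmatically rather than through the test's doTest() harness. This assumes the BalancerParameters.Builder API with setIncludedNodes(Set<String>); the CLI equivalent would be: hdfs balancer -include datanodeY

import java.util.Collections;
import java.util.Set;
import org.apache.hadoop.hdfs.server.balancer.BalancerParameters;

public class IncludeListSketch {
    // Restrict balancing to datanodeY, as the test's includeHosts set does.
    static BalancerParameters includeOnlyDatanodeY() {
        Set<String> include = Collections.singleton("datanodeY");
        return new BalancerParameters.Builder()
            .setIncludedNodes(include)
            .build();
    }
}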

Example 34 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestDelegationToken method setUp.

@Before
public void setUp() throws Exception {
    config = new HdfsConfiguration();
    config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    config.set("hadoop.security.auth_to_local", "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
    // port 0 lets the NameNode bind to any free port
    FileSystem.setDefaultUri(config, "hdfs://localhost:0");
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
    cluster.waitActive();
    dtSecretManager = NameNodeAdapter.getDtSecretManager(cluster.getNamesystem());
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Before(org.junit.Before)
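Given the short lifetimes configured above (10 s max lifetime, 5 s renew interval), a minimal sketch of the token round trip a test against this cluster might perform. It uses only the generic FileSystem and Token APIs; the renewer name "JobTracker" matches the auth_to_local rule set in setUp().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenRoundTrip {
    // Obtain, renew, and cancel a delegation token against the cluster.
    static void roundTrip(MiniDFSCluster cluster, Configuration config)
            throws Exception {
        FileSystem fs = cluster.getFileSystem();
        Token<?> token = fs.getDelegationToken("JobTracker");
        token.renew(config);   // extends the expiry, up to the max lifetime
        token.cancel(config);  // invalidates the token at the NameNode
    }
}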

Example 35 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestDelegationTokenForProxyUser method setUp.

@BeforeClass
public static void setUp() throws Exception {
    config = new HdfsConfiguration();
    config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    config.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER), "group1");
    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    configureSuperUserIPAddresses(config, REAL_USER);
    FileSystem.setDefaultUri(config, "hdfs://localhost:0");
    cluster = new MiniDFSCluster.Builder(config).build();
    cluster.waitActive();
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    ugi = UserGroupInformation.createRemoteUser(REAL_USER);
    proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi, GROUP_NAMES);
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) BeforeClass(org.junit.BeforeClass)
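A minimal sketch of what tests in this class do with proxyUgi after setUp(): operations inside doAs() execute as PROXY_USER, which the superuser group and IP settings above authorize REAL_USER to impersonate.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserSketch {
    // Run a filesystem operation as the proxy user; the NameNode sees
    // PROXY_USER as the caller, acting via the real user's credentials.
    static FileSystem asProxyUser(UserGroupInformation proxyUgi,
            MiniDFSCluster cluster) throws Exception {
        return proxyUgi.doAs(
            (PrivilegedExceptionAction<FileSystem>) cluster::getFileSystem);
    }
}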

Aggregations

HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 454
Configuration (org.apache.hadoop.conf.Configuration): 311
Test (org.junit.Test): 311
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 267
Path (org.apache.hadoop.fs.Path): 152
FileSystem (org.apache.hadoop.fs.FileSystem): 94
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 92
File (java.io.File): 72
IOException (java.io.IOException): 69
Before (org.junit.Before): 56
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 40
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 35
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 30
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 27
RandomAccessFile (java.io.RandomAccessFile): 22
ArrayList (java.util.ArrayList): 20
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 20
URI (java.net.URI): 19
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 19