
Example 6 with MiniDFSNNTopology

Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.

From the class TestStandbyCheckpoints, method setupCluster:

@SuppressWarnings("rawtypes")
@Before
public void setupCluster() throws Exception {
    Configuration conf = setupCommonConfig();
    // Dial down the retention of extra edits and checkpoints. This is to
    // help catch regressions of HDFS-4238 (SBN should not purge shared edits)
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
    int retryCount = 0;
    while (true) {
        try {
            int basePort = 10060 + random.nextInt(100) * 2;
            MiniDFSNNTopology topology = new MiniDFSNNTopology()
                .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
                    .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
                    .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1))
                    .addNN(new MiniDFSNNTopology.NNConf("nn3").setHttpPort(basePort + 2)));
            cluster = new MiniDFSCluster.Builder(conf)
                .nnTopology(topology)
                .numDataNodes(1)
                .build();
            cluster.waitActive();
            setNNs();
            fs = HATestUtil.configureFailoverFs(cluster, conf);
            cluster.transitionToActive(0);
            ++retryCount;
            break;
        } catch (BindException e) {
            LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry " + retryCount + " times");
        }
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology), BindException (java.net.BindException), Before (org.junit.Before)
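
The retry loop above exists only to dodge BindExceptions when several test JVMs race for the same HTTP ports; the topology itself is just three NNConf entries on consecutive ports. A minimal sketch of that topology-building step in isolation is shown below (buildThreeNNTopology is a hypothetical helper name, not part of Hadoop):

// Sketch: build the same three-NameNode HA topology on consecutive HTTP ports.
// buildThreeNNTopology is a hypothetical helper, not a Hadoop API.
private static MiniDFSNNTopology buildThreeNNTopology(int basePort) {
    MiniDFSNNTopology.NSConf ns = new MiniDFSNNTopology.NSConf("ns1");
    for (int i = 0; i < 3; i++) {
        // nn1..nn3, each on its own HTTP port starting at basePort
        ns.addNN(new MiniDFSNNTopology.NNConf("nn" + (i + 1)).setHttpPort(basePort + i));
    }
    return new MiniDFSNNTopology().addNameservice(ns);
}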

Example 7 with MiniDFSNNTopology

Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.

From the class TestDataNodeMultipleRegistrations, method testClusterIdMismatchAtStartupWithHA:

@Test(timeout = 20000)
public void testClusterIdMismatchAtStartupWithHA() throws Exception {
    MiniDFSNNTopology top = new MiniDFSNNTopology()
        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
            .addNN(new MiniDFSNNTopology.NNConf("nn0"))
            .addNN(new MiniDFSNNTopology.NNConf("nn1")))
        .addNameservice(new MiniDFSNNTopology.NSConf("ns2")
            .addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid"))
            .addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid")));
    top.setFederation(true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top).numDataNodes(0).build();
    try {
        cluster.startDataNodes(conf, 1, true, null, null);
        // let the initialization be complete
        Thread.sleep(10000);
        DataNode dn = cluster.getDataNodes().get(0);
        assertTrue("Datanode should be running", dn.isDatanodeUp());
        assertEquals("Only one BPOfferService should be running", 1, dn.getAllBpOs().size());
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology), Test (org.junit.Test)
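
The topology above is written out by hand so that ns2 can be given a deliberately mismatched cluster id. When a test only needs the common two-NameNode HA layout with default settings, MiniDFSNNTopology also offers a convenience factory; a minimal sketch, assuming conf is the shared test Configuration used in these examples and ports are left for the mini cluster to assign:

// Sketch: the two-NN HA shorthand instead of spelling out NSConf/NNConf by hand.
MiniDFSNNTopology haTopology = MiniDFSNNTopology.simpleHATopology();
MiniDFSCluster haCluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(haTopology)
        .numDataNodes(0)
        .build();
try {
    haCluster.waitActive();
    haCluster.transitionToActive(0); // make the first NN active, the second stays standby
} finally {
    haCluster.shutdown();
}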

Example 8 with MiniDFSNNTopology

Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.

From the class TestDataNodeMultipleRegistrations, method testDNWithInvalidStorageWithHA:

@Test
public void testDNWithInvalidStorageWithHA() throws Exception {
    MiniDFSNNTopology top = new MiniDFSNNTopology()
        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
            .addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1"))
            .addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));
    top.setFederation(true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top).numDataNodes(0).build();
    try {
        cluster.startDataNodes(conf, 1, true, null, null);
        // let the initialization be complete
        Thread.sleep(10000);
        DataNode dn = cluster.getDataNodes().get(0);
        assertTrue("Datanode should be running", dn.isDatanodeUp());
        assertEquals("BPOfferService should be running", 1, dn.getAllBpOs().size());
        DataNodeProperties dnProp = cluster.stopDataNode(0);
        cluster.getNameNode(0).stop();
        cluster.getNameNode(1).stop();
        Configuration nn1 = cluster.getConfiguration(0);
        Configuration nn2 = cluster.getConfiguration(1);
        // setting up invalid cluster
        StartupOption.FORMAT.setClusterId("cluster-2");
        DFSTestUtil.formatNameNode(nn1);
        MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1), FSNamesystem.getNamespaceDirs(nn2), nn2);
        cluster.restartNameNode(0, false);
        cluster.restartNameNode(1, false);
        cluster.restartDataNode(dnProp);
        // let the initialization be complete
        Thread.sleep(10000);
        dn = cluster.getDataNodes().get(0);
        assertFalse("Datanode should have shutdown as only service failed", dn.isDatanodeUp());
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology), Test (org.junit.Test)
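
Both tests above wait a fixed Thread.sleep(10000) for the DataNode to finish registering with its NameNodes, which is the main source of their run time and flakiness. A minimal sketch of a polling alternative for that registration wait (waitForDatanodeUp is a hypothetical helper, not part of MiniDFSCluster):

// Sketch: poll for DataNode registration instead of a fixed 10-second sleep.
// waitForDatanodeUp is a hypothetical helper name.
private static void waitForDatanodeUp(MiniDFSCluster cluster, long timeoutMs)
        throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        DataNode dn = cluster.getDataNodes().get(0);
        if (dn.isDatanodeUp() && dn.getAllBpOs().size() == 1) {
            return; // up and registered with exactly one block pool
        }
        Thread.sleep(200);
    }
    throw new TimeoutException("DataNode did not register within " + timeoutMs + " ms");
}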

Example 9 with MiniDFSNNTopology

Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.

From the class TestBootstrapStandby, method setupCluster:

@Before
public void setupCluster() throws IOException {
    Configuration conf = new Configuration();
    // duplicate code with MiniQJMHACluster#createDefaultTopology, but don't want to cross
    // dependencies or munge too much code to support it all correctly
    MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf("ns1");
    for (int i = 0; i < maxNNCount; i++) {
        nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i).setHttpPort(STARTING_PORT + i + 1));
    }
    MiniDFSNNTopology topology = new MiniDFSNNTopology().addNameservice(nameservice);
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
    cluster.waitActive();
    nn0 = cluster.getNameNode(0);
    cluster.transitionToActive(0);
    // shutdown the other NNs
    for (int i = 1; i < maxNNCount; i++) {
        cluster.shutdownNameNode(i);
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology), Before (org.junit.Before)
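
setupCluster() above leaves only nn0 running and active; the other NameNodes are shut down precisely so the individual test cases can try bootstrapping them. A hedged sketch of what a test built on this setup typically does next, assuming org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby is available as in the Hadoop test suite:

// Sketch: bootstrap the first shut-down standby from the active nn0, then restart it.
int rc = BootstrapStandby.run(new String[] { "-force" }, cluster.getConfiguration(1));
assertEquals("bootstrapStandby should exit cleanly", 0, rc);
cluster.restartNameNode(1, false); // bring nn1 back as a standby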

Example 10 with MiniDFSNNTopology

Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hive by apache.

From the class Hadoop23Shims, method getMiniDfs:

// Don't move this code to the parent class. There's a binary
// incompatibility between hadoop 1 and 2 wrt MiniDFSCluster and we
// need to have two different shim classes even though they are
// exactly the same.
@Override
public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf, int numDataNodes, boolean format, String[] racks, boolean isHA) throws IOException {
    configureImpersonation(conf);
    MiniDFSCluster miniDFSCluster;
    if (isHA) {
        MiniDFSNNTopology topo = new MiniDFSNNTopology()
            .addNameservice(new MiniDFSNNTopology.NSConf("minidfs")
                .addNN(new MiniDFSNNTopology.NNConf("nn1"))
                .addNN(new MiniDFSNNTopology.NNConf("nn2")));
        miniDFSCluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(numDataNodes)
            .format(format)
            .racks(racks)
            .nnTopology(topo)
            .build();
        miniDFSCluster.waitActive();
        miniDFSCluster.transitionToActive(0);
    } else {
        miniDFSCluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(numDataNodes)
            .format(format)
            .racks(racks)
            .build();
    }
    // Need to set the client's KeyProvider to the NN's for JKS,
    // else the updates do not get flushed properly
    KeyProviderCryptoExtension keyProvider = miniDFSCluster.getNameNode(0).getNamesystem().getProvider();
    if (keyProvider != null) {
        try {
            setKeyProvider(miniDFSCluster.getFileSystem(0).getClient(), keyProvider);
        } catch (Exception err) {
            throw new IOException(err);
        }
    }
    cluster = new MiniDFSShim(miniDFSCluster);
    return cluster;
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology), KeyProviderCryptoExtension (org.apache.hadoop.crypto.key.KeyProviderCryptoExtension), IOException (java.io.IOException), FileNotFoundException (java.io.FileNotFoundException), InvocationTargetException (java.lang.reflect.InvocationTargetException), NoSuchAlgorithmException (java.security.NoSuchAlgorithmException), AccessControlException (java.security.AccessControlException), MalformedURLException (java.net.MalformedURLException)
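
Callers of getMiniDfs() interact with the cluster through the returned shim rather than the MiniDFSCluster directly. A minimal usage sketch, assuming shims is a Hadoop23Shims instance and that HadoopShims.MiniDFSShim exposes getFileSystem() and shutdown() as it does in the Hive shim layer this example comes from:

// Sketch: start an HA mini DFS through the shim, touch the filesystem, then tear it down.
HadoopShims.MiniDFSShim dfs = shims.getMiniDfs(conf, 1, true, null, true);
try {
    FileSystem fs = dfs.getFileSystem();
    fs.mkdirs(new Path("/user/hive/warehouse")); // exercise the active NameNode
} finally {
    dfs.shutdown();
}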

Aggregations

MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology): 18 usages
Configuration (org.apache.hadoop.conf.Configuration): 15 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11 usages
Test (org.junit.Test): 8 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 usages
Before (org.junit.Before): 6 usages
BindException (java.net.BindException): 5 usages
Path (org.apache.hadoop.fs.Path): 3 usages
IOException (java.io.IOException): 2 usages
NoSuchAlgorithmException (java.security.NoSuchAlgorithmException): 2 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 2 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 usages
FileNotFoundException (java.io.FileNotFoundException): 1 usage
InvocationTargetException (java.lang.reflect.InvocationTargetException): 1 usage
InetSocketAddress (java.net.InetSocketAddress): 1 usage
MalformedURLException (java.net.MalformedURLException): 1 usage
AccessControlException (java.security.AccessControlException): 1 usage
Random (java.util.Random): 1 usage
TimeoutException (java.util.concurrent.TimeoutException): 1 usage
KeyProviderCryptoExtension (org.apache.hadoop.crypto.key.KeyProviderCryptoExtension): 1 usage