
Example 76 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestBalancer, method testUpgradeDomainPolicyAfterBalance.

/**
   * Verify balancer won't violate upgrade domain block placement policy.
   * @throws Exception
   */
@Test(timeout = 100000)
public void testUpgradeDomainPolicyAfterBalance() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, BlockPlacementPolicyWithUpgradeDomain.class, BlockPlacementPolicy.class);
    long[] capacities = new long[] { CAPACITY, CAPACITY, CAPACITY };
    String[] hosts = { "host0", "host1", "host2" };
    String[] racks = { RACK0, RACK1, RACK1 };
    String[] UDs = { "ud0", "ud1", "ud2" };
    runBalancerAndVerifyBlockPlacmentPolicy(conf, capacities, hosts, racks, UDs, CAPACITY, "host3", RACK2, "ud2");
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), Test(org.junit.Test)
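
As a side note, here is a minimal sketch (not part of the Hadoop test; the class name and main method are illustrative) of setting and reading back the placement-policy class on an HdfsConfiguration, using the same key the test overrides:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithUpgradeDomain;

public class PlacementPolicyConfigSketch {
    public static void main(String[] args) {
        // Configure the upgrade-domain-aware policy, as the test does.
        Configuration conf = new HdfsConfiguration();
        conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
            BlockPlacementPolicyWithUpgradeDomain.class, BlockPlacementPolicy.class);
        // Read the configured class back; BlockPlacementPolicyDefault is only the fallback value.
        Class<? extends BlockPlacementPolicy> policy = conf.getClass(
            DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
            BlockPlacementPolicyDefault.class, BlockPlacementPolicy.class);
        System.out.println("Configured placement policy: " + policy.getName());
    }
}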

Example 77 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestBalancer, method testMinBlockSizeAndSourceNodes.

/** Balancer should not move blocks with size < minBlockSize. */
@Test(timeout = 60000)
public void testMinBlockSizeAndSourceNodes() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    final short replication = 3;
    final long[] lengths = { 10, 10, 10, 10 };
    final long[] capacities = new long[replication];
    final long totalUsed = capacities.length * sum(lengths);
    Arrays.fill(capacities, 1000);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).simulatedCapacities(capacities).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, dfs.getUri(), ClientProtocol.class).getProxy();
    // fill up the cluster to be 80% full
    for (int i = 0; i < lengths.length; i++) {
        final long size = lengths[i];
        final Path p = new Path("/file" + i + "_size" + size);
        try (final OutputStream out = dfs.create(p)) {
            for (int j = 0; j < size; j++) {
                out.write(j);
            }
        }
    }
    // start up an empty node with the same capacity
    cluster.startDataNodes(conf, capacities.length, true, null, null, capacities);
    LOG.info("capacities    = " + Arrays.toString(capacities));
    LOG.info("totalUsedSpace= " + totalUsed);
    LOG.info("lengths       = " + Arrays.toString(lengths) + ", #=" + lengths.length);
    waitForHeartBeat(totalUsed, 2 * capacities[0] * capacities.length, client, cluster);
    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    {
        // run Balancer with min-block-size=50
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1" });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    }
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
    {
        // run Balancer with empty nodes as source nodes
        final Set<String> sourceNodes = new HashSet<>();
        final List<DataNode> datanodes = cluster.getDataNodes();
        for (int i = capacities.length; i < datanodes.size(); i++) {
            sourceNodes.add(datanodes.get(i).getDisplayName());
        }
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1", "-source", StringUtils.join(sourceNodes, ',') });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        assertEquals(p.getSourceNodes(), sourceNodes);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
    }
    {
        // run Balancer with a filled node as a source node
        final Set<String> sourceNodes = new HashSet<>();
        final List<DataNode> datanodes = cluster.getDataNodes();
        sourceNodes.add(datanodes.get(0).getDisplayName());
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1", "-source", StringUtils.join(sourceNodes, ',') });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        assertEquals(p.getSourceNodes(), sourceNodes);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
    }
    {
        // run Balancer with all filled node as source nodes
        final Set<String> sourceNodes = new HashSet<>();
        final List<DataNode> datanodes = cluster.getDataNodes();
        for (int i = 0; i < capacities.length; i++) {
            sourceNodes.add(datanodes.get(i).getDisplayName());
        }
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1", "-source", StringUtils.join(sourceNodes, ',') });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        assertEquals(p.getSourceNodes(), sourceNodes);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    }
}
Also used: Path(org.apache.hadoop.fs.Path), Set(java.util.Set), HashSet(java.util.HashSet), Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), OutputStream(java.io.OutputStream), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), URI(java.net.URI), ArrayList(java.util.ArrayList), List(java.util.List), Test(org.junit.Test)
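
The parameters parsed from CLI strings above can also be built programmatically. A minimal sketch, assuming BalancerParameters.Builder exposes setBalancingPolicy, setThreshold, and setSourceNodes setters (only setRunDuringUpgrade appears in these excerpts, so treat the other setter names as assumptions):

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.server.balancer.BalancerParameters;
import org.apache.hadoop.hdfs.server.balancer.BalancingPolicy;

public class BalancerParametersSketch {
    public static void main(String[] args) {
        // Hypothetical datanode display name used as a balancing source.
        Set<String> sourceNodes = new HashSet<>();
        sourceNodes.add("host0:9866");
        BalancerParameters.Builder b = new BalancerParameters.Builder();
        b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE); // equivalent of "-policy datanode"
        b.setThreshold(1.0);                                 // equivalent of "-threshold 1"
        b.setSourceNodes(sourceNodes);                       // equivalent of "-source host0:9866"
        BalancerParameters p = b.build();
        System.out.println(p);
    }
}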

Example 78 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestBalancer, method testBalancerDuringUpgrade.

/**
   * Check that the balancer exits when there is an unfinalized upgrade.
   */
@Test(timeout = 300000)
public void testBalancerDuringUpgrade() throws Exception {
    final int SEED = 0xFADED;
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
    final int BLOCK_SIZE = 1024 * 1024;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).storageCapacities(new long[] { BLOCK_SIZE * 10 }).storageTypes(new StorageType[] { DEFAULT }).storagesPerDatanode(1).build();
    cluster.waitActive();
    // Create a file on the single DN
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, path1, BLOCK_SIZE, BLOCK_SIZE * 2, BLOCK_SIZE, (short) 1, SEED);
    // Add another DN with the same capacity, cluster is now unbalanced
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.triggerHeartbeats();
    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    // Run balancer
    final BalancerParameters p = BalancerParameters.DEFAULT;
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    // Rolling upgrade should abort the balancer
    assertEquals(ExitStatus.UNFINALIZED_UPGRADE.getExitCode(), Balancer.run(namenodes, p, conf));
    // Should work with the -runDuringUpgrade flag.
    BalancerParameters.Builder b = new BalancerParameters.Builder();
    b.setRunDuringUpgrade(true);
    final BalancerParameters runDuringUpgrade = b.build();
    assertEquals(ExitStatus.SUCCESS.getExitCode(), Balancer.run(namenodes, runDuringUpgrade, conf));
    // Finalize the rolling upgrade
    fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.FINALIZE);
    // Should also work after finalization.
    assertEquals(ExitStatus.SUCCESS.getExitCode(), Balancer.run(namenodes, p, conf));
}
Also used: Path(org.apache.hadoop.fs.Path), StorageType(org.apache.hadoop.fs.StorageType), Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), URI(java.net.URI), Test(org.junit.Test)
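
A related usage sketch (hypothetical helper, not part of the test) that queries rolling-upgrade status through the same DistributedFileSystem API before deciding whether the -runDuringUpgrade flag is needed:

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

public class RollingUpgradeCheckSketch {
    // Returns true while a rolling upgrade has been prepared but not yet finalized.
    static boolean isUpgradeUnfinalized(DistributedFileSystem fs) throws IOException {
        RollingUpgradeInfo info = fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.QUERY);
        return info != null && !info.isFinalized();
    }
}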

Example 79 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestBlocksWithNotEnoughRacks, method getConf.

/*
   * Return a configuration object with low timeouts for testing and 
   * a topology script set (which enables rack awareness).  
   */
private Configuration getConf() {
    Configuration conf = new HdfsConfiguration();
    // Lower the heart beat interval so the NN quickly learns of dead
    // or decommissioned DNs and the NN issues replication and invalidation
    // commands quickly (as replies to heartbeats)
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    // Have the NN RedundancyMonitor compute the reconstruction and
    // invalidation commands to send DNs every second.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    // Have the NN check for pending replications every second so it
    // quickly schedules additional replicas as they are identified.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
    // The DNs report blocks every second.
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
    // Indicates we have multiple racks
    conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
    return conf;
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration)
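
A minimal sketch (a hypothetical test in the same class, so getConf() resolves; the rack paths and test name are made up) of pairing a configuration like this with a two-rack MiniDFSCluster so the topology actually spans racks:

@Test(timeout = 60000)
public void testWithTwoRacks() throws Exception {
    Configuration conf = getConf();
    // Bring up two datanodes placed on two different racks.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .racks(new String[] { "/rack1", "/rack2" })
        .build();
    try {
        cluster.waitActive();
        // ... create files and assert on cross-rack replication here ...
    } finally {
        cluster.shutdown();
    }
}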

Example 80 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestBlockStatsMXBean, method setup.

@Before
public void setup() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, 0, TimeUnit.MILLISECONDS);
    cluster = null;
    StorageType[][] types = new StorageType[6][];
    for (int i = 0; i < 3; i++) {
        types[i] = new StorageType[] { StorageType.RAM_DISK, StorageType.DISK };
    }
    for (int i = 3; i < 5; i++) {
        types[i] = new StorageType[] { StorageType.RAM_DISK, StorageType.ARCHIVE };
    }
    types[5] = new StorageType[] { StorageType.RAM_DISK, StorageType.ARCHIVE, StorageType.ARCHIVE };
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6).storageTypes(types).storagesPerDatanode(3).build();
    cluster.waitActive();
}
Also used: StorageType(org.apache.hadoop.fs.StorageType), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), Before(org.junit.Before)
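
Only the @Before half is shown on this page; a matching @After sketch (a hypothetical counterpart in the same class, using org.junit.After) that releases the MiniDFSCluster would look like:

@After
public void tearDown() {
    // Shut the cluster down after each test so ports and storage dirs are released.
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}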

Aggregations

HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 454 usages
Configuration (org.apache.hadoop.conf.Configuration): 311 usages
Test (org.junit.Test): 311 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 267 usages
Path (org.apache.hadoop.fs.Path): 152 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 94 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 92 usages
File (java.io.File): 72 usages
IOException (java.io.IOException): 69 usages
Before (org.junit.Before): 56 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 40 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 35 usages
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 30 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 27 usages
RandomAccessFile (java.io.RandomAccessFile): 22 usages
ArrayList (java.util.ArrayList): 20 usages
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 20 usages
URI (java.net.URI): 19 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 19 usages