Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestBalancer, method testUpgradeDomainPolicyAfterBalance.
/**
 * Verify balancer won't violate upgrade domain block placement policy.
 * @throws Exception
 */
@Test(timeout = 100000)
public void testUpgradeDomainPolicyAfterBalance() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      BlockPlacementPolicyWithUpgradeDomain.class, BlockPlacementPolicy.class);
  long[] capacities = new long[] { CAPACITY, CAPACITY, CAPACITY };
  String[] hosts = { "host0", "host1", "host2" };
  String[] racks = { RACK0, RACK1, RACK1 };
  String[] UDs = { "ud0", "ud1", "ud2" };
  runBalancerAndVerifyBlockPlacmentPolicy(conf, capacities, hosts, racks, UDs,
      CAPACITY, "host3", RACK2, "ud2");
}
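The initConf helper is not shown in this excerpt. A minimal sketch of the kind of tuning such a helper typically applies is below; the keys and values are assumptions for illustration, not the actual TestBalancer code.

// Hypothetical sketch of an initConf-style helper; the real
// TestBalancer#initConf may set different keys and values.
static void initConf(Configuration conf) {
  // Tiny blocks (illustrative value) so small files yield movable blocks.
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100L);
  // Fast heartbeats so the NameNode sees utilization changes quickly.
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  // Recompute redundancy work every second.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1L);
}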
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestBalancer, method testMinBlockSizeAndSourceNodes.
/** Balancer should not move blocks with size < minBlockSize. */
@Test(timeout = 60000)
public void testMinBlockSizeAndSourceNodes() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  final short replication = 3;
  final long[] lengths = { 10, 10, 10, 10 };
  final long[] capacities = new long[replication];
  final long totalUsed = capacities.length * sum(lengths);
  Arrays.fill(capacities, 1000);
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(capacities.length)
      .simulatedCapacities(capacities)
      .build();
  final DistributedFileSystem dfs = cluster.getFileSystem();
  cluster.waitActive();
  client = NameNodeProxies.createProxy(conf, dfs.getUri(),
      ClientProtocol.class).getProxy();
  // write a handful of tiny files (10 bytes each)
  for (int i = 0; i < lengths.length; i++) {
    final long size = lengths[i];
    final Path p = new Path("/file" + i + "_size" + size);
    try (final OutputStream out = dfs.create(p)) {
      for (int j = 0; j < size; j++) {
        out.write(j);
      }
    }
  }
  // start up the same number of empty nodes with the same capacities
  cluster.startDataNodes(conf, capacities.length, true, null, null, capacities);
  LOG.info("capacities = " + Arrays.toString(capacities));
  LOG.info("totalUsedSpace = " + totalUsed);
  LOG.info("lengths = " + Arrays.toString(lengths) + ", #=" + lengths.length);
  waitForHeartBeat(totalUsed, 2 * capacities[0] * capacities.length, client,
      cluster);
  final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
  {
    // run Balancer with min-block-size=50
    final BalancerParameters p = Balancer.Cli.parse(new String[] {
        "-policy", BalancingPolicy.Node.INSTANCE.getName(),
        "-threshold", "1" });
    assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
    assertEquals(p.getThreshold(), 1.0, 0.001);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
    final int r = Balancer.run(namenodes, p, conf);
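    // every block written above is only 10 bytes, below the 50-byte
    // minimum, so the balancer finds nothing it may move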
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
  }
  conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
  {
    // run Balancer with empty nodes as source nodes
    final Set<String> sourceNodes = new HashSet<>();
    final List<DataNode> datanodes = cluster.getDataNodes();
    for (int i = capacities.length; i < datanodes.size(); i++) {
      sourceNodes.add(datanodes.get(i).getDisplayName());
    }
    final BalancerParameters p = Balancer.Cli.parse(new String[] {
        "-policy", BalancingPolicy.Node.INSTANCE.getName(),
        "-threshold", "1",
        "-source", StringUtils.join(sourceNodes, ',') });
    assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
    assertEquals(p.getThreshold(), 1.0, 0.001);
    assertEquals(p.getSourceNodes(), sourceNodes);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
    final int r = Balancer.run(namenodes, p, conf);
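    // the chosen source nodes are the empty ones, so there is no block
    // to move off them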
    assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
  }
  {
    // run Balancer with a filled node as a source node
    final Set<String> sourceNodes = new HashSet<>();
    final List<DataNode> datanodes = cluster.getDataNodes();
    sourceNodes.add(datanodes.get(0).getDisplayName());
    final BalancerParameters p = Balancer.Cli.parse(new String[] {
        "-policy", BalancingPolicy.Node.INSTANCE.getName(),
        "-threshold", "1",
        "-source", StringUtils.join(sourceNodes, ',') });
    assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
    assertEquals(p.getThreshold(), 1.0, 0.001);
    assertEquals(p.getSourceNodes(), sourceNodes);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
    final int r = Balancer.run(namenodes, p, conf);
    assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
  }
  {
    // run Balancer with all filled nodes as source nodes
    final Set<String> sourceNodes = new HashSet<>();
    final List<DataNode> datanodes = cluster.getDataNodes();
    for (int i = 0; i < capacities.length; i++) {
      sourceNodes.add(datanodes.get(i).getDisplayName());
    }
    final BalancerParameters p = Balancer.Cli.parse(new String[] {
        "-policy", BalancingPolicy.Node.INSTANCE.getName(),
        "-threshold", "1",
        "-source", StringUtils.join(sourceNodes, ',') });
    assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
    assertEquals(p.getThreshold(), 1.0, 0.001);
    assertEquals(p.getSourceNodes(), sourceNodes);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
    final int r = Balancer.run(namenodes, p, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
  }
}
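Each block above builds its BalancerParameters through Balancer.Cli.parse. For comparison, here is a hedged sketch of the programmatic equivalent; setBalancingPolicy, setThreshold, and setSourceNodes are assumed builder methods, inferred from the setRunDuringUpgrade call shown in testBalancerDuringUpgrade below.

// Hedged sketch: programmatic equivalent of the Cli.parse calls above.
// The setter names are assumptions inferred from the Builder pattern.
BalancerParameters.Builder b = new BalancerParameters.Builder();
b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
b.setThreshold(1.0);
b.setSourceNodes(sourceNodes);  // same Set<String> as in the test above
final BalancerParameters params = b.build();
final int exitCode = Balancer.run(namenodes, params, conf);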
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestBalancer, method testBalancerDuringUpgrade.
/**
 * Check that the balancer exits when there is an unfinalized upgrade.
 */
@Test(timeout = 300000)
public void testBalancerDuringUpgrade() throws Exception {
  final int SEED = 0xFADED;
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
  final int BLOCK_SIZE = 1024 * 1024;
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .storageCapacities(new long[] { BLOCK_SIZE * 10 })
      .storageTypes(new StorageType[] { DEFAULT })
      .storagesPerDatanode(1)
      .build();
  cluster.waitActive();
  // Create a file on the single DN
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  DistributedFileSystem fs = cluster.getFileSystem();
  DFSTestUtil.createFile(fs, path1, BLOCK_SIZE, BLOCK_SIZE * 2, BLOCK_SIZE,
      (short) 1, SEED);
  // Add another DN with the same capacity; the cluster is now unbalanced
  cluster.startDataNodes(conf, 1, true, null, null);
  cluster.triggerHeartbeats();
  Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
  // Run balancer
  final BalancerParameters p = BalancerParameters.DEFAULT;
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  // Rolling upgrade should abort the balancer
  assertEquals(ExitStatus.UNFINALIZED_UPGRADE.getExitCode(),
      Balancer.run(namenodes, p, conf));
  // Should work with the -runDuringUpgrade flag.
  BalancerParameters.Builder b = new BalancerParameters.Builder();
  b.setRunDuringUpgrade(true);
  final BalancerParameters runDuringUpgrade = b.build();
  assertEquals(ExitStatus.SUCCESS.getExitCode(),
      Balancer.run(namenodes, runDuringUpgrade, conf));
  // Finalize the rolling upgrade
  fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.FINALIZE);
  // Should also work after finalization.
  assertEquals(ExitStatus.SUCCESS.getExitCode(),
      Balancer.run(namenodes, p, conf));
}
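The same override is exposed on the balancer command line. Assuming the flag spelling matches the builder setter and that a matching getter exists (both are assumptions, not confirmed by this excerpt), parsing it would look like this sketch:

// Hedged sketch: CLI counterpart of setRunDuringUpgrade(true).
// Flag spelling and the getRunDuringUpgrade getter are assumptions.
final BalancerParameters viaCli =
    Balancer.Cli.parse(new String[] { "-runDuringUpgrade" });
assertTrue(viaCli.getRunDuringUpgrade());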
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestBlocksWithNotEnoughRacks, method getConf.
/*
 * Return a configuration object with low timeouts for testing and
 * a topology script set (which enables rack awareness).
 */
private Configuration getConf() {
  Configuration conf = new HdfsConfiguration();
  // Lower the heartbeat interval so the NN quickly learns of dead or
  // decommissioned DNs and quickly issues replication and invalidation
  // commands (as replies to heartbeats).
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  // Have the NN RedundancyMonitor compute the reconstruction and
  // invalidation commands to send DNs every second.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
  // Have the NN check for pending replications every second so it
  // quickly schedules additional replicas as they are identified.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
      1);
  // Have the DNs report blocks every second.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  // Setting a topology script indicates that the cluster has multiple racks.
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
  return conf;
}
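A hypothetical caller, sketched here for context; the rack names and node count are illustrative, not taken from the original test.

// Hypothetical usage sketch: start a two-rack mini cluster with the
// tuned conf; rack paths and node count are illustrative.
Configuration conf = getConf();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(2)
    .racks(new String[] { "/rack1", "/rack2" })
    .build();
try {
  cluster.waitActive();
  // ... create files and assert on their rack placement ...
} finally {
  cluster.shutdown();
}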
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestBlockStatsMXBean, method setup.
@Before
public void setup() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
      0, TimeUnit.MILLISECONDS);
  cluster = null;
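  // Storage layout per DN: DNs 0-2 get RAM_DISK + DISK, DNs 3-4 get
  // RAM_DISK + ARCHIVE, and DN 5 gets RAM_DISK plus two ARCHIVE volumes.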
  StorageType[][] types = new StorageType[6][];
  for (int i = 0; i < 3; i++) {
    types[i] = new StorageType[] { StorageType.RAM_DISK, StorageType.DISK };
  }
  for (int i = 3; i < 5; i++) {
    types[i] = new StorageType[] { StorageType.RAM_DISK, StorageType.ARCHIVE };
  }
  types[5] = new StorageType[] { StorageType.RAM_DISK, StorageType.ARCHIVE,
      StorageType.ARCHIVE };
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(6)
      .storageTypes(types)
      .storagesPerDatanode(3)
      .build();
  cluster.waitActive();
}
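The excerpt omits the matching teardown; a typical counterpart, written here as a hedged sketch, would shut down the cluster after each test.

// Hedged sketch of the matching teardown (not part of the excerpt).
@After
public void tearDown() {
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}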