Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
From the class TestDiskBalancer, method testBalanceDataBetweenMultiplePairsOfVolumes.
@Test
public void testBalanceDataBetweenMultiplePairsOfVolumes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  final int blockCount = 1000;
  final int blockSize = 1024;
  // Create three disks; data moves from disk0 -> disk1 and disk0 -> disk2,
  // so the generated plan will contain two steps.
  final int diskCount = 3;
  final int dataNodeCount = 1;
  final int dataNodeIndex = 0;
  final int sourceDiskIndex = 0;
  final long cap = blockSize * 2L * blockCount;
  MiniDFSCluster cluster = new ClusterBuilder()
      .setBlockCount(blockCount)
      .setBlockSize(blockSize)
      .setDiskCount(diskCount)
      .setNumDatanodes(dataNodeCount)
      .setConf(conf)
      .setCapacities(new long[] { cap, cap, cap })
      .build();
  try {
    DataMover dataMover = new DataMover(cluster, dataNodeIndex,
        sourceDiskIndex, conf, blockSize, blockCount);
    dataMover.moveDataToSourceDisk();
    NodePlan plan = dataMover.generatePlan();
    // With three disks, the plan should move data to both destination
    // disks, so we must have two plan steps.
    assertEquals(2, plan.getVolumeSetPlans().size());
    dataMover.executePlan(plan);
    dataMover.verifyPlanExectionDone();
    dataMover.verifyAllVolumesHaveData();
    dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
  } finally {
    cluster.shutdown();
  }
}
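A NodePlan is essentially a list of Step objects, each describing one volume-to-volume move. A minimal sketch of inspecting the generated steps, assuming the planner's standard Step accessors getBytesToMove(), getSourceVolume(), and getDestinationVolume():

// Illustrative inspection snippet; the Step and DiskBalancerVolume
// accessors are assumed from the diskbalancer planner API.
for (Step step : plan.getVolumeSetPlans()) {
  System.out.printf("move %d bytes: %s -> %s%n",
      step.getBytesToMove(),
      step.getSourceVolume().getPath(),
      step.getDestinationVolume().getPath());
}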
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
From the class TestDiskBalancerRPC, method setUp.
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitActive();
}
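The matching teardown is not shown in this excerpt; a minimal sketch, assuming the cluster field is the only resource the test class needs to release:

@After
public void tearDown() throws Exception {
  // Shut the mini cluster down if setUp managed to start it.
  if (cluster != null) {
    cluster.shutdown();
  }
}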
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
From the class TestDiskBalancerRPC, method testMoveBlockAcrossVolume.
@Test
public void testMoveBlockAcrossVolume() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int defaultBlockSize = 100;
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
  String fileName = "/tmp.txt";
  Path filePath = new Path(fileName);
  final int numDatanodes = 1;
  final int dnIndex = 0;
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .build();
  FsVolumeImpl source = null;
  FsVolumeImpl dest = null;
  try {
    cluster.waitActive();
    Random r = new Random();
    FileSystem fs = cluster.getFileSystem(dnIndex);
    DFSTestUtil.createFile(fs, filePath, 10 * 1024, (short) 1, r.nextLong());
    DataNode dnNode = cluster.getDataNodes().get(dnIndex);
    FsDatasetSpi.FsVolumeReferences refs =
        dnNode.getFSDataset().getFsVolumeReferences();
    try {
      source = (FsVolumeImpl) refs.get(0);
      dest = (FsVolumeImpl) refs.get(1);
      DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
          source, dest);
      // After the move, the source volume should hold no blocks.
      assertEquals(0, DiskBalancerTestUtil.getBlockCount(source));
    } finally {
      refs.close();
    }
  } finally {
    cluster.shutdown();
  }
}
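FsVolumeReferences implements Closeable, so the inner try/finally can also be written with try-with-resources; a sketch of the same acquisition in that style:

// Equivalent resource handling via try-with-resources; the volume
// references are released automatically when the block exits.
try (FsDatasetSpi.FsVolumeReferences refs =
         dnNode.getFSDataset().getFsVolumeReferences()) {
  FsVolumeImpl source = (FsVolumeImpl) refs.get(0);
  FsVolumeImpl dest = (FsVolumeImpl) refs.get(1);
  DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
      source, dest);
}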
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
From the class TestDiskBalancerWithMockMover, method testDiskBalancerEnabled.
/**
 * Checks that the enable flag works correctly.
 *
 * @throws DiskBalancerException
 */
@Test
public void testDiskBalancerEnabled() throws DiskBalancerException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  TestMover blockMover =
      new TestMover(cluster.getDataNodes().get(0).getFSDataset());
  DiskBalancer balancer =
      new DiskBalancerBuilder(conf).setMover(blockMover).build();
  DiskBalancerWorkStatus status = balancer.queryWorkStatus();
  // With the balancer enabled but no plan submitted, the status is NO_PLAN.
  assertEquals(NO_PLAN, status.getResult());
}
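For context, queryWorkStatus() reports progress through the DiskBalancerWorkStatus.Result enum; a minimal sketch of branching on it, assuming the NO_PLAN, PLAN_UNDER_PROGRESS, PLAN_DONE, and PLAN_CANCELLED constants of that enum:

// Illustrative status handling; enum constants assumed from
// DiskBalancerWorkStatus.Result.
switch (status.getResult()) {
  case NO_PLAN:
    System.out.println("No plan has been submitted to this datanode.");
    break;
  case PLAN_UNDER_PROGRESS:
    System.out.println("A plan is currently executing.");
    break;
  case PLAN_DONE:
    System.out.println("The last submitted plan has completed.");
    break;
  case PLAN_CANCELLED:
    System.out.println("The last submitted plan was cancelled.");
    break;
  default:
    break;
}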
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
From the class TestDiskBalancerCommand, method testDiskBalancerQueryWithoutSubmit.
/**
 * Makes sure that we can query the node without having submitted a plan.
 *
 * @throws Exception
 */
@Test
public void testDiskBalancerQueryWithoutSubmit() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  final int numDatanodes = 2;
  MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .build();
  try {
    miniDFSCluster.waitActive();
    DataNode dataNode = miniDFSCluster.getDataNodes().get(0);
    final String queryArg =
        String.format("-query localhost:%d", dataNode.getIpcPort());
    final String cmdLine = String.format("hdfs diskbalancer %s", queryArg);
    runCommand(cmdLine);
  } finally {
    miniDFSCluster.shutdown();
  }
}
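runCommand() is a helper defined elsewhere in TestDiskBalancerCommand and is not shown in this excerpt. A hypothetical minimal version, assuming the org.apache.hadoop.hdfs.tools.DiskBalancerCLI tool and ToolRunner; this is a sketch of one way such a helper could work, not the test's actual implementation:

// Hypothetical helper: strip the leading "hdfs diskbalancer" words and
// hand the remaining arguments to the tool via ToolRunner.
private void runCommand(String cmdLine) throws Exception {
  String[] words = cmdLine.split("\\s+");
  String[] args = Arrays.copyOfRange(words, 2, words.length);
  Configuration conf = new HdfsConfiguration();
  ToolRunner.run(conf, new DiskBalancerCLI(conf), args);
}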