Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestBalancer, method testBalancerWithKeytabs.
/**
 * Test Balancer runs fine when logging in with a keytab in kerberized env.
 * Reusing testUnknownDatanode here for basic functionality testing.
 */
@Test(timeout = 300000)
public void testBalancerWithKeytabs() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  try {
    initSecureConf(conf);
    final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
        principal, keytabFile.getAbsolutePath());
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

      @Override
      public Void run() throws Exception {
        // verify that balancer runs Ok.
        testUnknownDatanode(conf);
        // verify that UGI was logged in using keytab.
        assertTrue(UserGroupInformation.isLoginKeytabBased());
        return null;
      }
    });
  } finally {
    // Reset UGI so that other tests are not affected.
    UserGroupInformation.reset();
    UserGroupInformation.setConfiguration(new Configuration());
  }
}
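Note: initSecureConf is a TestBalancer helper that is not part of this excerpt; it stands up a MiniKdc, writes the keytab, and switches the configuration into secure mode. As a hedged sketch only (the real helper differs, and the principal/keytab values here are placeholders), the HdfsConfiguration side of such a setup typically uses the standard Hadoop security keys shown below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecureHdfsConfSketch {
  // Hypothetical stand-in: the real initSecureConf also starts a MiniKdc and
  // generates the keytab file, which is omitted here.
  static Configuration buildSecureConf(String principal, String keytabPath) {
    Configuration conf = new HdfsConfiguration();
    // Switch Hadoop authentication from "simple" to Kerberos.
    conf.set("hadoop.security.authentication", "kerberos");
    conf.setBoolean("hadoop.security.authorization", true);
    // Require block access tokens between clients and datanodes.
    conf.setBoolean("dfs.block.access.token.enable", true);
    // Login identities for the HDFS daemons (placeholder values).
    conf.set("dfs.namenode.kerberos.principal", principal);
    conf.set("dfs.namenode.keytab.file", keytabPath);
    conf.set("dfs.datanode.kerberos.principal", principal);
    conf.set("dfs.datanode.keytab.file", keytabPath);
    // Make UGI pick up the secure settings before any login call.
    UserGroupInformation.setConfiguration(conf);
    return conf;
  }
}

With such a configuration in place, UserGroupInformation.loginUserFromKeytabAndReturnUGI can authenticate against the KDC, which is what the test's doAs block relies on.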
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestBalancer, method testUnknownDatanodeSimple.
@Test(timeout = 100000)
public void testUnknownDatanodeSimple() throws Exception {
  Configuration conf = new HdfsConfiguration();
  initConf(conf);
  testUnknownDatanode(conf);
}
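initConf is TestBalancer's shared helper for building a balancer-friendly HdfsConfiguration; its exact values live in TestBalancer and are not shown here. As a hedged sketch, the pattern is simply an HdfsConfiguration with a few standard HDFS keys overridden so the mini cluster iterates quickly (the key names below are standard HDFS keys; the values are illustrative, not the ones the real helper uses):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class BalancerConfSketch {
  // Illustrative stand-in for TestBalancer.initConf; values are examples only.
  static Configuration buildTestConf() {
    Configuration conf = new HdfsConfiguration();
    // Small blocks so a tiny test file still spreads across datanodes.
    conf.setLong("dfs.blocksize", 1024L);
    conf.setInt("dfs.bytes-per-checksum", 512);
    // Fast heartbeats so the balancer sees utilization changes quickly.
    conf.setLong("dfs.heartbeat.interval", 1L);
    conf.setInt("dfs.replication", 1);
    return conf;
  }
}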
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestBalancerWithHANameNodes, method testBalancerWithHANameNodes.
/**
 * Test a cluster with even distribution to which a new empty node is added.
 * The test starts a cluster with the specified number of nodes and fills it
 * to 30% full (with a single file replicated identically to all datanodes);
 * it then adds one new empty node and starts balancing.
 */
@Test(timeout = 60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  // new node's capacity
  long newNodeCapacity = TestBalancer.CAPACITY;
  // new node's rack
  String newNodeRack = TestBalancer.RACK2;
  // array of racks for original nodes in cluster
  String[] racks = new String[] { TestBalancer.RACK0, TestBalancer.RACK1 };
  // array of capacities of original nodes in cluster
  long[] capacities = new long[] { TestBalancer.CAPACITY, TestBalancer.CAPACITY };
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath,
        totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 1);
    // start up an empty node with the same capacity, on a new rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
        new long[] { newNodeCapacity });
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, cluster,
        BalancerParameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
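HATestUtil.setFailoverConfigurations rewrites conf so that clients (including the balancer) address the cluster through a logical nameservice rather than a single NameNode address. As a hedged sketch (the nameservice name and host/port values below are placeholders, not what the test helper produces), the resulting client-side HdfsConfiguration carries roughly these standard HA keys:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HaClientConfSketch {
  // Placeholder nameservice and addresses; a real setup uses its own values.
  static Configuration buildHaClientConf() {
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.nameservices", "minidfs-ns");
    conf.set("dfs.ha.namenodes.minidfs-ns", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.minidfs-ns.nn1", "localhost:8020");
    conf.set("dfs.namenode.rpc-address.minidfs-ns.nn2", "localhost:8021");
    // Client-side proxy provider that fails over between nn1 and nn2.
    conf.set("dfs.client.failover.proxy.provider.minidfs-ns",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
    // The logical URI that the balancer and other clients resolve.
    conf.set("fs.defaultFS", "hdfs://minidfs-ns");
    return conf;
  }
}

This is why the test expects DFSUtil.getInternalNsRpcUris(conf) to return exactly one URI: the two NameNodes collapse into a single logical nameservice URI.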
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestNameNodePrunesMissingStorages, method testNameNodePrunesUnreportedStorages.
@Test(timeout = 300000)
public void testNameNodePrunesUnreportedStorages() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Create a cluster with one datanode with two storages
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .storagesPerDatanode(2)
      .build();
  // Create two files to ensure each storage has a block
  DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file1"),
      102400, 102400, 102400, (short) 1, 0x1BAD5EE);
  DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file2"),
      102400, 102400, 102400, (short) 1, 0x1BAD5EED);
  // Get the datanode storages and data directories
  DataNode dn = cluster.getDataNodes().get(0);
  BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
  DatanodeDescriptor dnDescriptor = bm.getDatanodeManager()
      .getDatanode(cluster.getDataNodes().get(0).getDatanodeUuid());
  DatanodeStorageInfo[] dnStoragesInfosBeforeRestart = dnDescriptor.getStorageInfos();
  Collection<String> oldDirs = new ArrayList<String>(dn.getConf()
      .getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
  // Keep the first data directory and remove the second.
  String newDirs = oldDirs.iterator().next();
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
  // Restart the datanode with the new conf
  cluster.stopDataNode(0);
  cluster.startDataNodes(conf, 1, false, null, null);
  dn = cluster.getDataNodes().get(0);
  cluster.waitActive();
  // Assert that the dnDescriptor has both the storages after restart
  assertArrayEquals(dnStoragesInfosBeforeRestart, dnDescriptor.getStorageInfos());
  // Assert that the removed storage is marked as FAILED
  // when DN heartbeats to the NN
  int numFailedStoragesWithBlocks = 0;
  DatanodeStorageInfo failedStorageInfo = null;
  for (DatanodeStorageInfo dnStorageInfo : dnDescriptor.getStorageInfos()) {
    if (dnStorageInfo.areBlocksOnFailedStorage()) {
      numFailedStoragesWithBlocks++;
      failedStorageInfo = dnStorageInfo;
    }
  }
  assertEquals(1, numFailedStoragesWithBlocks);
  // Heartbeat manager removes the blocks associated with this failed storage
  bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
  assertTrue(!failedStorageInfo.areBlocksOnFailedStorage());
  // pruneStorageMap removes the unreported storage
  cluster.triggerHeartbeats();
  // Assert that the unreported storage is pruned
  assertEquals(DataNode.getStorageLocations(dn.getConf()).size(),
      dnDescriptor.getStorageInfos().length);
}
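The pruning behaviour hinges on dfs.datanode.data.dir (DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY), which holds the datanode's storage directories as a comma-separated list; dropping one entry from it and restarting is exactly what the test does above. A minimal hedged sketch of that configuration manipulation on its own, outside the mini cluster (the paths are placeholders):

import java.util.ArrayList;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DataDirConfSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Two storage directories (placeholder paths).
    conf.set("dfs.datanode.data.dir", "/data/dn/disk1,/data/dn/disk2");
    // Read them back the same way the test does.
    Collection<String> dirs = new ArrayList<>(
        conf.getTrimmedStringCollection("dfs.datanode.data.dir"));
    // Keep only the first directory, mirroring the test's "remove the second".
    String first = dirs.iterator().next();
    conf.set("dfs.datanode.data.dir", first);
    System.out.println("remaining storage dir: " + first);
  }
}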
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
Class TestOverReplicatedBlocks, method testInvalidateOverReplicatedBlock.
/**
 * Test that an over-replicated block gets invalidated when decreasing the
 * replication factor for a partial block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1,
        bm.countNodes(bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
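The same replication change can be made by any HDFS client built from an HdfsConfiguration; only the test's assertion against the BlockManager requires mini-cluster internals. A hedged sketch of the client-side pattern against a running cluster (the path is a placeholder and fs.defaultFS is assumed to point at the cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ReplicationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/tmp/foo1");
    // Write with replication 2, then lower it to 1 while the last block
    // is still open, mirroring the test above.
    FSDataOutputStream out = fs.create(p, (short) 2);
    try {
      out.writeBytes("sample data");
      out.hsync();
      fs.setReplication(p, (short) 1);
    } finally {
      out.close();
    }
    // The NameNode schedules the extra replica for invalidation asynchronously.
    System.out.println("replication now: " + fs.getFileStatus(p).getReplication());
  }
}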