Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
Class TestBalancerBandwidth, method testBalancerBandwidth.
@Test
public void testBalancerBandwidth() throws Exception {
  /* Set bandwidthPerSec to a low value of 1M bps. */
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, DEFAULT_BANDWIDTH);
  /* Create and start cluster */
  try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build()) {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    // Ensure value from the configuration is reflected in the datanodes.
    assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(0).getBalancerBandwidth());
    assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(1).getBalancerBandwidth());
    DFSAdmin admin = new DFSAdmin(conf);
    String dn1Address = datanodes.get(0).ipcServer.getListenerAddress().getHostName() + ":" + datanodes.get(0).getIpcPort();
    String dn2Address = datanodes.get(1).ipcServer.getListenerAddress().getHostName() + ":" + datanodes.get(1).getIpcPort();
    // verifies the dfsadmin command execution
    String[] args = new String[] { "-getBalancerBandwidth", dn1Address };
    runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
    args = new String[] { "-getBalancerBandwidth", dn2Address };
    runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
    // Dynamically change balancer bandwidth and ensure the updated value
    // is reflected on the datanodes.
    // 12M bps
    long newBandwidth = 12 * DEFAULT_BANDWIDTH;
    fs.setBalancerBandwidth(newBandwidth);
    verifyBalancerBandwidth(datanodes, newBandwidth);
    // verifies the dfsadmin command execution
    args = new String[] { "-getBalancerBandwidth", dn1Address };
    runGetBalancerBandwidthCmd(admin, args, newBandwidth);
    args = new String[] { "-getBalancerBandwidth", dn2Address };
    runGetBalancerBandwidthCmd(admin, args, newBandwidth);
    // Dynamically change balancer bandwidth to 0. Balancer bandwidth on the
    // datanodes should remain as it was.
    fs.setBalancerBandwidth(0);
    verifyBalancerBandwidth(datanodes, newBandwidth);
    // verifies the dfsadmin command execution
    args = new String[] { "-getBalancerBandwidth", dn1Address };
    runGetBalancerBandwidthCmd(admin, args, newBandwidth);
    args = new String[] { "-getBalancerBandwidth", dn2Address };
    runGetBalancerBandwidthCmd(admin, args, newBandwidth);
  }
}
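The helpers verifyBalancerBandwidth and runGetBalancerBandwidthCmd are not shown above. A minimal sketch of what helpers with those names might do, inferred only from how they are called (the real implementations in TestBalancerBandwidth may differ), assuming the Guava Supplier and GenericTestUtils classes already used elsewhere in these examples plus java.io.ByteArrayOutputStream and java.io.PrintStream:

// Hypothetical sketch: poll each DataNode until it reports the expected balancer bandwidth.
private void verifyBalancerBandwidth(final List<DataNode> datanodes,
    final long expectedBandwidth) throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      for (DataNode dn : datanodes) {
        if (dn.getBalancerBandwidth() != expectedBandwidth) {
          return false;
        }
      }
      return true;
    }
  }, 100, 60 * 1000);
}

// Hypothetical sketch: run "dfsadmin -getBalancerBandwidth <dn>" and check that the
// reported value matches, capturing the command's stdout for inspection.
private void runGetBalancerBandwidthCmd(DFSAdmin admin, String[] args,
    long expectedBandwidth) throws Exception {
  ByteArrayOutputStream outStream = new ByteArrayOutputStream();
  PrintStream original = System.out;
  System.setOut(new PrintStream(outStream));
  try {
    assertEquals("dfsadmin should exit successfully", 0, admin.run(args));
    assertTrue("Output should contain the expected bandwidth",
        outStream.toString().contains(Long.toString(expectedBandwidth)));
  } finally {
    System.setOut(original);
  }
}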
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
Class TestBlockStoragePolicy, method testChangeFileRep.
private void testChangeFileRep(String policyName, byte policyId, StorageType[] before, StorageType[] after) throws Exception {
  final int numDataNodes = 5;
  final StorageType[][] types = genStorageTypes(numDataNodes);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).storageTypes(types).build();
  cluster.waitActive();
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    final Path dir = new Path("/test");
    fs.mkdirs(dir);
    fs.setStoragePolicy(dir, policyName);
    final Path foo = new Path(dir, "foo");
    DFSTestUtil.createFile(fs, foo, FILE_LEN, REPLICATION, 0L);
    HdfsFileStatus[] status = fs.getClient().listPaths(foo.toString(), HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(status, policyId);
    HdfsLocatedFileStatus fooStatus = (HdfsLocatedFileStatus) status[0];
    checkLocatedBlocks(fooStatus, 1, 3, before);
    // change the replication factor to 5
    fs.setReplication(foo, (short) numDataNodes);
    Thread.sleep(1000);
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(dn);
    }
    Thread.sleep(1000);
    status = fs.getClient().listPaths(foo.toString(), HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(status, policyId);
    fooStatus = (HdfsLocatedFileStatus) status[0];
    checkLocatedBlocks(fooStatus, 1, numDataNodes, after);
    // change the replication factor back to 3
    fs.setReplication(foo, REPLICATION);
    Thread.sleep(1000);
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(dn);
    }
    Thread.sleep(1000);
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerBlockReport(dn);
    }
    Thread.sleep(1000);
    status = fs.getClient().listPaths(foo.toString(), HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(status, policyId);
    fooStatus = (HdfsLocatedFileStatus) status[0];
    checkLocatedBlocks(fooStatus, 1, REPLICATION, before);
  } finally {
    cluster.shutdown();
  }
}
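The helpers genStorageTypes, checkDirectoryListing and checkLocatedBlocks belong to TestBlockStoragePolicy and are not shown here. To illustrate the storage-type setup only, a plausible genStorageTypes sketch (hypothetical, the real helper may differ) could give every DataNode one DISK and one ARCHIVE volume so that HOT, WARM and COLD policies all have somewhere to place replicas:

// Hypothetical sketch: one DISK and one ARCHIVE volume per DataNode.
private static StorageType[][] genStorageTypes(int numDataNodes) {
  StorageType[][] types = new StorageType[numDataNodes][];
  for (int i = 0; i < numDataNodes; i++) {
    types[i] = new StorageType[] { StorageType.DISK, StorageType.ARCHIVE };
  }
  return types;
}

A test would then call testChangeFileRep with a policy name such as "HOT" or "COLD", its policy id, and the StorageType arrays it expects before and after the replication change.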
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
Class TestCrcCorruption, method thistest.
/**
 * check if DFS can handle corrupted CRC blocks
 */
private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  short replFactor = 2;
  Random random = new Random();
  // Set short retry timeouts so this test runs faster
  conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat", replFactor);
    util.waitReplication(fs, "/srcdat", (short) 2);
    // Now deliberately remove/truncate meta blocks from the first
    // directory of the first datanode. The complete absence of a meta
    // file prevents this DataNode from sending data to another datanode.
    // However, a client is still allowed to access this block.
    //
    final int dnIdx = 0;
    final DataNode dn = cluster.getDataNodes().get(dnIdx);
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    List<ReplicaInfo> replicas = dn.getFSDataset().getFinalizedBlocks(bpid);
    assertTrue("Replicas do not exist", !replicas.isEmpty());
    for (int idx = 0; idx < replicas.size(); idx++) {
      ReplicaInfo replica = replicas.get(idx);
      ExtendedBlock eb = new ExtendedBlock(bpid, replica);
      if (idx % 3 == 0) {
        LOG.info("Deliberately removing meta for block " + eb);
        cluster.deleteMeta(dnIdx, eb);
      } else if (idx % 3 == 1) {
        final int newSize = 2; // bytes
        LOG.info("Deliberately truncating meta file for block " + eb + " to size " + newSize + " bytes.");
        cluster.truncateMeta(dnIdx, eb, newSize);
      } else {
        cluster.corruptMeta(dnIdx, eb);
      }
    }
    //
    // Only one replica is possibly corrupted. The other replica should still
    // be good. Verify.
    //
    assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
    LOG.info("All files still have a valid replica");
    //
    // Set the replication factor back to 1. This causes only one replica
    // of each block to remain in HDFS. The check is to make sure that
    // the corrupted replica generated above is the one that gets deleted.
    // This test is currently disabled until HADOOP-1557 is solved.
    //
    util.setReplication(fs, "/srcdat", (short) 1);
    //util.waitReplication(fs, "/srcdat", (short)1);
    //System.out.println("All Files done with removing replicas");
    //assertTrue("Excess replicas deleted. Corrupted replicas found.",
    //           util.checkFiles(fs, "/srcdat"));
    LOG.info("The excess-corrupted-replica test is disabled pending HADOOP-1557");
    util.cleanup(fs, "/srcdat");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
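The DFSTestUtil passed in controls how many files are created under /srcdat. A hypothetical caller (the method name and file count are illustrative, not taken from the actual test class), assuming the DFSTestUtil.Builder API available in recent Hadoop versions, might look like:

// Hypothetical caller: build a utility that writes a batch of small files,
// then run the CRC-corruption scenario against a fresh configuration.
@Test
public void testCrcCorruptionOnSmallFiles() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder()
      .setName("TestCrcCorruption")
      .setNumFiles(40)
      .build();
  thistest(new HdfsConfiguration(), util);
}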
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
Class TestDFSAddressConfig, method testDFSAddressConfig.
@Test
public void testDFSAddressConfig() throws IOException {
  Configuration conf = new HdfsConfiguration();
  /*-------------------------------------------------------------------------
   * By default, the DataNode socket address should be localhost (127.0.0.1).
   *------------------------------------------------------------------------*/
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  ArrayList<DataNode> dns = cluster.getDataNodes();
  DataNode dn = dns.get(0);
  String selfSocketAddr = dn.getXferAddress().toString();
  System.out.println("DN Self Socket Addr == " + selfSocketAddr);
  assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
  /*-------------------------------------------------------------------------
   * Shut down the datanodes, reconfigure, and bring them back up.
   * Even if told to use the configuration properties for dfs.datanode,
   * MiniDFSCluster.startDataNodes() should use localhost as the default if
   * the dfs.datanode properties are not set.
   *------------------------------------------------------------------------*/
  for (int i = 0; i < dns.size(); i++) {
    DataNodeProperties dnp = cluster.stopDataNode(i);
    assertNotNull("Should have been able to stop simulated datanode", dnp);
  }
  conf.unset(DFS_DATANODE_ADDRESS_KEY);
  conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
  conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);
  cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null, null, null, false, true);
  dns = cluster.getDataNodes();
  dn = dns.get(0);
  selfSocketAddr = dn.getXferAddress().toString();
  System.out.println("DN Self Socket Addr == " + selfSocketAddr);
  // assert that the default self socket address is 127.0.0.1
  assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
  /*-------------------------------------------------------------------------
   * Shut down the datanodes, reconfigure, and bring them back up.
   * This time, modify the dfs.datanode properties and make sure that they
   * are used to configure sockets by MiniDFSCluster.startDataNodes().
   *------------------------------------------------------------------------*/
  for (int i = 0; i < dns.size(); i++) {
    DataNodeProperties dnp = cluster.stopDataNode(i);
    assertNotNull("Should have been able to stop simulated datanode", dnp);
  }
  conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null, null, null, false, true);
  dns = cluster.getDataNodes();
  dn = dns.get(0);
  selfSocketAddr = dn.getXferAddress().toString();
  System.out.println("DN Self Socket Addr == " + selfSocketAddr);
  // assert that the configured self socket address is now 0.0.0.0
  assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
  cluster.shutdown();
}
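DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_HTTP_ADDRESS_KEY and DFS_DATANODE_IPC_ADDRESS_KEY are the DFSConfigKeys constants for the dfs.datanode.address, dfs.datanode.http.address and dfs.datanode.ipc.address properties. A short sketch, assuming those property names, of pinning the DataNode sockets to the loopback interface with OS-assigned ports instead of the wildcard address used above:

// Sketch: bind the DataNode transfer, HTTP and IPC sockets to loopback with
// ephemeral ports before starting new DataNodes via MiniDFSCluster.
Configuration conf = new HdfsConfiguration();
conf.set("dfs.datanode.address", "127.0.0.1:0");
conf.set("dfs.datanode.http.address", "127.0.0.1:0");
conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
// With these settings, dn.getXferAddress() should report a /127.0.0.1:<port> address.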
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
Class TestEncryptedTransfer, method testLongLivedClientPipelineRecovery.
@Test
public void testLongLivedClientPipelineRecovery() throws IOException, InterruptedException, TimeoutException {
  if (resolverClazz != null) {
    // TestTrustedChannelResolver does not use encryption keys.
    return;
  }
  // use 4 datanodes to make sure that after 1 data node is stopped,
  // client only retries establishing pipeline with the 4th node.
  int numDataNodes = 4;
  // do not consider load factor when selecting a data node
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
  setEncryptionConfigKeys();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
  fs = getFileSystem(conf);
  DFSClient client = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
  DFSClient spyClient = Mockito.spy(client);
  DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
  writeTestDataToFile(fs);
  BlockTokenSecretManager btsm = cluster.getNamesystem().getBlockManager().getBlockTokenSecretManager();
  // Reduce key update interval and token life for testing.
  btsm.setKeyUpdateIntervalForTesting(2 * 1000);
  btsm.setTokenLifetime(2 * 1000);
  btsm.clearAllKeysForTesting();
  // Wait until the encryption key becomes invalid.
  LOG.info("Wait until encryption keys become invalid...");
  DataEncryptionKey encryptionKey = spyClient.getEncryptionKey();
  List<DataNode> dataNodes = cluster.getDataNodes();
  for (DataNode dn : dataNodes) {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return !dn.getBlockPoolTokenSecretManager().get(encryptionKey.blockPoolId).hasKey(encryptionKey.keyId);
      }
    }, 100, 30 * 1000);
  }
  LOG.info("The encryption key is invalid on all nodes now.");
  try (FSDataOutputStream out = fs.append(TEST_PATH)) {
    DFSOutputStream dfstream = (DFSOutputStream) out.getWrappedStream();
    // shut down the first datanode in the pipeline.
    DatanodeInfo[] targets = dfstream.getPipeline();
    cluster.stopDataNode(targets[0].getXferAddr());
    // write data to induce pipeline recovery
    out.write(PLAIN_TEXT.getBytes());
    out.hflush();
    assertFalse("The first datanode in the pipeline was not replaced.", Arrays.asList(dfstream.getPipeline()).contains(targets[0]));
  }
  // verify that InvalidEncryptionKeyException is handled properly
  Mockito.verify(spyClient, times(1)).clearDataEncryptionKey();
}
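The fixtures writeTestDataToFile, setEncryptionConfigKeys, TEST_PATH and PLAIN_TEXT belong to TestEncryptedTransfer and are not shown here. A minimal sketch of what a writeTestDataToFile helper might do (hypothetical, the real helper may differ), so that the later fs.append(TEST_PATH) has an existing file to extend, assuming java.io.OutputStream is imported:

// Hypothetical sketch: create TEST_PATH on first use, append on later calls,
// and write PLAIN_TEXT so the encrypted-transfer assertions have known content.
private void writeTestDataToFile(FileSystem fs) throws IOException {
  try (OutputStream out = fs.exists(TEST_PATH)
      ? fs.append(TEST_PATH)
      : fs.create(TEST_PATH)) {
    out.write(PLAIN_TEXT.getBytes());
  }
}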