Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestDatanodeRestart, method testRbwReplicas.
// test rbw replicas persist across DataNode restarts
public void testRbwReplicas() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitActive();
  try {
    testRbwReplicas(cluster, false);
    testRbwReplicas(cluster, true);
  } finally {
    cluster.shutdown();
  }
}
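The two calls above delegate to a private two-argument testRbwReplicas(MiniDFSCluster, boolean) overload that this excerpt does not show. As a rough illustration of the idea only, the hypothetical helper below (writeAndRestartDataNode is not part of TestDatanodeRestart) writes into an open stream, hflushes so the last block stays in the RBW (replica-being-written) state, and then restarts a DataNode:

// Hypothetical sketch, not the helper from TestDatanodeRestart itself: leave a
// block in the RBW state, restart a DataNode, and complete the write afterwards.
private void writeAndRestartDataNode(MiniDFSCluster cluster) throws IOException {
  FileSystem fs = cluster.getFileSystem();
  Path file = new Path("/rbwCheck");
  FSDataOutputStream out = fs.create(file, (short) 2);
  out.write(new byte[512]);
  out.hflush();                      // data reaches the DataNodes, block is still RBW
  cluster.restartDataNode(0, true);  // restart DN 0, keeping the same port
  cluster.waitActive();
  out.close();                       // pipeline recovery finalizes the block
}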
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestHSync, method testHSyncWithReplication.
/** Test that syncBlock is correctly performed at replicas */
@Test
public void testHSyncWithReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  final FileSystem fs = cluster.getFileSystem();
  final Path p = new Path("/testHSyncWithReplication/foo");
  final int len = 1 << 16;
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 3, len, null);
  out.write(1);
  out.hflush();
  checkSyncMetric(cluster, 0, 0);
  checkSyncMetric(cluster, 1, 0);
  checkSyncMetric(cluster, 2, 0);
  out.hsync();
  checkSyncMetric(cluster, 0, 1);
  checkSyncMetric(cluster, 1, 1);
  checkSyncMetric(cluster, 2, 1);
  out.hsync();
  checkSyncMetric(cluster, 0, 2);
  checkSyncMetric(cluster, 1, 2);
  checkSyncMetric(cluster, 2, 2);
  cluster.shutdown();
}
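checkSyncMetric(cluster, dnIndex, expected) is a private helper of TestHSync that is not shown in this excerpt. A plausible sketch, assuming it asserts the DataNode FsyncCount counter through MetricsAsserts, might look like this:

// Sketch of the three-argument helper used above (an assumption, not the
// verbatim Hadoop code): assert the FsyncCount metric of DataNode 'dn'.
private static void checkSyncMetric(MiniDFSCluster cluster, int dn, long value) {
  DataNode datanode = cluster.getDataNodes().get(dn);
  MetricsAsserts.assertCounter("FsyncCount", value,
      MetricsAsserts.getMetrics(datanode.getMetrics().name()));
}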
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestHSync, method testHSyncOperation.
private void testHSyncOperation(boolean testWithAppend) throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final DistributedFileSystem fs = cluster.getFileSystem();
  final Path p = new Path("/testHSync/foo");
  final int len = 1 << 16;
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  if (testWithAppend) {
    // re-open the file with append call
    out.close();
    out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.SYNC_BLOCK), 4096, null);
  }
  out.hflush();
  // hflush does not sync
  checkSyncMetric(cluster, 0);
  out.hsync();
  // hsync on empty file does nothing
  checkSyncMetric(cluster, 0);
  out.write(1);
  checkSyncMetric(cluster, 0);
  out.hsync();
  checkSyncMetric(cluster, 1);
  // avoiding repeated hsyncs is a potential future optimization
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.hflush();
  // hflush still does not sync
  checkSyncMetric(cluster, 2);
  out.close();
  // close is sync'ing
  checkSyncMetric(cluster, 3);
  // same with a file created without SYNC_BLOCK
  out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      4096, (short) 1, len, null);
  out.hsync();
  checkSyncMetric(cluster, 3);
  out.write(1);
  checkSyncMetric(cluster, 3);
  out.hsync();
  checkSyncMetric(cluster, 4);
  // repeated hsyncs
  out.hsync();
  checkSyncMetric(cluster, 5);
  out.close();
  // close does not sync (not opened with SYNC_BLOCK)
  checkSyncMetric(cluster, 5);
  cluster.shutdown();
}
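Because testHSyncOperation is private, it is presumably driven by two public JUnit methods, one for the plain create path and one for the append path. A sketch of what those entry points would look like (the method names here are presumed, not quoted from TestHSync):

// Presumed public entry points for the private helper above.
@Test
public void testHSync() throws IOException {
  testHSyncOperation(false);
}

@Test
public void testHSyncWithAppend() throws IOException {
  testHSyncOperation(true);
}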
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestStartSecureDataNode, method testSecureNameNode.
@Test
public void testSecureNameNode() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    String nnPrincipal = System.getProperty("dfs.namenode.kerberos.principal");
    String nnSpnegoPrincipal =
        System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
    String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
    assertNotNull("NameNode principal was not specified", nnPrincipal);
    assertNotNull("NameNode SPNEGO principal was not specified", nnSpnegoPrincipal);
    assertNotNull("NameNode keytab was not specified", nnKeyTab);
    String dnPrincipal = System.getProperty("dfs.datanode.kerberos.principal");
    String dnKeyTab = System.getProperty("dfs.datanode.keytab.file");
    assertNotNull("DataNode principal was not specified", dnPrincipal);
    assertNotNull("DataNode keytab was not specified", dnKeyTab);
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, nnPrincipal);
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
        nnSpnegoPrincipal);
    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab);
    conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, dnPrincipal);
    conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, dnKeyTab);
    // Secure DataNode requires using ports lower than 1024.
    conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:1004");
    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:1006");
    // Data directories of a secure DataNode must be accessible by the owner only.
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "700");
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_OF_DATANODES)
        .checkDataNodeAddrConfig(true)
        .build();
    cluster.waitActive();
    assertTrue(cluster.isDataNodeUp());
  } catch (Exception ex) {
    ex.printStackTrace();
    throw ex;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
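The privileged ports 1004 and 1006 mean this test only works when the DataNode can bind low ports (root or jsvc). When that is not possible, SASL data-transfer protection is the usual alternative to a privileged-port secure DataNode. The fragment below is a sketch of that alternative configuration, not part of TestStartSecureDataNode, and it assumes HTTPS with working SSL keystores is already set up:

// Alternative sketch: SASL-protected data transfer instead of privileged ports.
// Requires dfs.http.policy=HTTPS_ONLY and valid SSL keystores on the DataNode.
private static void configureSaslDataTransfer(Configuration conf) {
  conf.set(DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
  conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "127.0.0.1:0");
}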
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestTriggerBlockReport, method testTriggerBlockReport.
private void testTriggerBlockReport(boolean incremental) throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Set a really long value for dfs.blockreport.intervalMsec and
  // dfs.heartbeat.interval, so that incremental block reports and heartbeats
  // won't be sent during this test unless they're triggered
  // manually.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  DatanodeProtocolClientSideTranslatorPB spy = InternalDataNodeTestUtils.spyOnBposToNN(
      cluster.getDataNodes().get(0), cluster.getNameNode());
  DFSTestUtil.createFile(fs, new Path("/abc"), 16, (short) 1, 1L);
  // We should get 1 incremental block report.
  Mockito.verify(spy, timeout(60000).times(1)).blockReceivedAndDeleted(
      any(DatanodeRegistration.class), anyString(),
      any(StorageReceivedDeletedBlocks[].class));
  // We should not get any further incremental or full block reports,
  // since the interval we configured is so long.
  for (int i = 0; i < 3; i++) {
    Thread.sleep(10);
    Mockito.verify(spy, times(0)).blockReport(
        any(DatanodeRegistration.class), anyString(),
        any(StorageBlockReport[].class), Mockito.<BlockReportContext>anyObject());
    Mockito.verify(spy, times(1)).blockReceivedAndDeleted(
        any(DatanodeRegistration.class), anyString(),
        any(StorageReceivedDeletedBlocks[].class));
  }
  // Create a fake block deletion notification on the DataNode.
  // This will be sent with the next incremental block report.
  ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
      new Block(5678, 512, 1000), BlockStatus.DELETED_BLOCK, null);
  DataNode datanode = cluster.getDataNodes().get(0);
  BPServiceActor actor =
      datanode.getAllBpOs().get(0).getBPServiceActors().get(0);
  final FsDatasetSpi<?> dataset = datanode.getFSDataset();
  final DatanodeStorage storage;
  try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
    storage = dataset.getStorage(volumes.get(0).getStorageID());
  }
  actor.getIbrManager().addRDBI(rdbi, storage);
  // Manually trigger a block report.
  datanode.triggerBlockReport(
      new BlockReportOptions.Factory().setIncremental(incremental).build());
  // triggerBlockReport returns before the block report is
  // actually sent. Wait for it to be sent here.
  if (incremental) {
    Mockito.verify(spy, timeout(60000).times(2)).blockReceivedAndDeleted(
        any(DatanodeRegistration.class), anyString(),
        any(StorageReceivedDeletedBlocks[].class));
  } else {
    Mockito.verify(spy, timeout(60000)).blockReport(
        any(DatanodeRegistration.class), anyString(),
        any(StorageBlockReport[].class), Mockito.<BlockReportContext>anyObject());
  }
  cluster.shutdown();
}
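As with the other private helpers in this listing, the incremental flag is presumably exercised by two public JUnit methods. A sketch of those drivers (the method names here are presumed, not quoted from TestTriggerBlockReport):

// Presumed public entry points for the private helper above.
@Test
public void testTriggerFullBlockReport() throws Exception {
  testTriggerBlockReport(false);
}

@Test
public void testTriggerIncrementalBlockReport() throws Exception {
  testTriggerBlockReport(true);
}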