Example 11 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

The class TestHSync, method testHSyncWithReplication.

/** Test that syncBlock is correctly performed at replicas */
@Test
public void testHSyncWithReplication() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    final FileSystem fs = cluster.getFileSystem();
    final Path p = new Path("/testHSyncWithReplication/foo");
    final int len = 1 << 16;
    FSDataOutputStream out = fs.create(p, FsPermission.getDefault(), EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK), 4096, (short) 3, len, null);
    out.write(1);
    out.hflush();
    checkSyncMetric(cluster, 0, 0);
    checkSyncMetric(cluster, 1, 0);
    checkSyncMetric(cluster, 2, 0);
    out.hsync();
    checkSyncMetric(cluster, 0, 1);
    checkSyncMetric(cluster, 1, 1);
    checkSyncMetric(cluster, 2, 1);
    out.hsync();
    checkSyncMetric(cluster, 0, 2);
    checkSyncMetric(cluster, 1, 2);
    checkSyncMetric(cluster, 2, 2);
    cluster.shutdown();
}
Also used : Path(org.apache.hadoop.fs.Path), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), Configuration(org.apache.hadoop.conf.Configuration), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem(org.apache.hadoop.fs.FileSystem), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), Test(org.junit.Test)
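The checkSyncMetric helper used above is not part of this snippet. A minimal sketch of what it presumably looks like, based on the DataNode metrics API; the counter name "FsyncCount" and the exact overloads are assumptions, not taken from the snippet:

// Assumed static imports: org.apache.hadoop.test.MetricsAsserts.assertCounter and getMetrics.
private void checkSyncMetric(MiniDFSCluster cluster, int dn, long expectedSyncs) throws Exception {
    // Read the metrics record of the dn-th DataNode and assert its fsync counter.
    DataNode datanode = cluster.getDataNodes().get(dn);
    assertCounter("FsyncCount", expectedSyncs, getMetrics(datanode.getMetrics().name()));
}

// Convenience overload for single-DataNode clusters (the form used in Example 12 below).
private void checkSyncMetric(MiniDFSCluster cluster, long expectedSyncs) throws Exception {
    checkSyncMetric(cluster, 0, expectedSyncs);
}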

Example 12 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

The class TestHSync, method testHSyncOperation.

private void testHSyncOperation(boolean testWithAppend) throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path p = new Path("/testHSync/foo");
    final int len = 1 << 16;
    FSDataOutputStream out = fs.create(p, FsPermission.getDefault(), EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK), 4096, (short) 1, len, null);
    if (testWithAppend) {
        // re-open the file with append call
        out.close();
        out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.SYNC_BLOCK), 4096, null);
    }
    out.hflush();
    // hflush does not sync
    checkSyncMetric(cluster, 0);
    out.hsync();
    // hsync on empty file does nothing
    checkSyncMetric(cluster, 0);
    out.write(1);
    checkSyncMetric(cluster, 0);
    out.hsync();
    checkSyncMetric(cluster, 1);
    // avoiding repeated hsyncs is a potential future optimization
    out.hsync();
    checkSyncMetric(cluster, 2);
    out.hflush();
    // hflush still does not sync
    checkSyncMetric(cluster, 2);
    out.close();
    // close is sync'ing
    checkSyncMetric(cluster, 3);
    // same with a file created without SYNC_BLOCK
    out = fs.create(p, FsPermission.getDefault(), EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 4096, (short) 1, len, null);
    out.hsync();
    checkSyncMetric(cluster, 3);
    out.write(1);
    checkSyncMetric(cluster, 3);
    out.hsync();
    checkSyncMetric(cluster, 4);
    // repeated hsyncs
    out.hsync();
    checkSyncMetric(cluster, 5);
    out.close();
    // close does not sync (not opened with SYNC_BLOCK)
    checkSyncMetric(cluster, 5);
    cluster.shutdown();
}
Also used : Path(org.apache.hadoop.fs.Path), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), Configuration(org.apache.hadoop.conf.Configuration), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem)
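Because testHSyncOperation is a private helper, it is presumably driven by public @Test methods elsewhere in TestHSync; a sketch under that assumption:

@Test
public void testHSync() throws Exception {
    // Plain create/hflush/hsync sequence.
    testHSyncOperation(false);
}

@Test
public void testHSyncWithAppend() throws Exception {
    // Same sequence, but the file is re-opened for append before syncing.
    testHSyncOperation(true);
}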

Example 13 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

The class TestStartSecureDataNode, method testSecureNameNode.

@Test
public void testSecureNameNode() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        String nnPrincipal = System.getProperty("dfs.namenode.kerberos.principal");
        String nnSpnegoPrincipal = System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
        String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
        assertNotNull("NameNode principal was not specified", nnPrincipal);
        assertNotNull("NameNode SPNEGO principal was not specified", nnSpnegoPrincipal);
        assertNotNull("NameNode keytab was not specified", nnKeyTab);
        String dnPrincipal = System.getProperty("dfs.datanode.kerberos.principal");
        String dnKeyTab = System.getProperty("dfs.datanode.keytab.file");
        assertNotNull("DataNode principal was not specified", dnPrincipal);
        assertNotNull("DataNode keytab was not specified", dnKeyTab);
        Configuration conf = new HdfsConfiguration();
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, nnPrincipal);
        conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, nnSpnegoPrincipal);
        conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab);
        conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, dnPrincipal);
        conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, dnKeyTab);
        // Secure DataNode requires using ports lower than 1024.
        conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:1004");
        conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:1006");
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "700");
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).checkDataNodeAddrConfig(true).build();
        cluster.waitActive();
        assertTrue(cluster.isDataNodeUp());
    } catch (Exception ex) {
        ex.printStackTrace();
        throw ex;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), Configuration(org.apache.hadoop.conf.Configuration), Test(org.junit.Test)
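The Kerberos principals and keytabs are read from JVM system properties, so this test can only pass when a real KDC and keytab files are available and those properties are supplied to the test JVM. A hypothetical harness showing the expected property names; all values below are placeholders, not taken from the source:

// Hypothetical values for illustration only; in practice these are passed to the
// test JVM, for example as -D flags.
System.setProperty("dfs.namenode.kerberos.principal", "nn/_HOST@EXAMPLE.COM");
System.setProperty("dfs.namenode.kerberos.internal.spnego.principal", "HTTP/_HOST@EXAMPLE.COM");
System.setProperty("dfs.namenode.keytab.file", "/etc/security/keytabs/nn.service.keytab");
System.setProperty("dfs.datanode.kerberos.principal", "dn/_HOST@EXAMPLE.COM");
System.setProperty("dfs.datanode.keytab.file", "/etc/security/keytabs/dn.service.keytab");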

Example 14 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

The class TestTriggerBlockReport, method testTriggerBlockReport.

private void testTriggerBlockReport(boolean incremental) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Set a really long value for dfs.blockreport.intervalMsec and
    // dfs.heartbeat.interval, so that incremental block reports and heartbeats
    // won't be sent during this test unless they're triggered
    // manually.
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    DatanodeProtocolClientSideTranslatorPB spy = InternalDataNodeTestUtils.spyOnBposToNN(cluster.getDataNodes().get(0), cluster.getNameNode());
    DFSTestUtil.createFile(fs, new Path("/abc"), 16, (short) 1, 1L);
    // We should get 1 incremental block report.
    Mockito.verify(spy, timeout(60000).times(1)).blockReceivedAndDeleted(any(DatanodeRegistration.class), anyString(), any(StorageReceivedDeletedBlocks[].class));
    // No further full or incremental block reports should arrive on their own,
    // since the interval we configured is so long.
    for (int i = 0; i < 3; i++) {
        Thread.sleep(10);
        Mockito.verify(spy, times(0)).blockReport(any(DatanodeRegistration.class), anyString(), any(StorageBlockReport[].class), Mockito.<BlockReportContext>anyObject());
        Mockito.verify(spy, times(1)).blockReceivedAndDeleted(any(DatanodeRegistration.class), anyString(), any(StorageReceivedDeletedBlocks[].class));
    }
    // Create a fake block deletion notification on the DataNode.
    // This will be sent with the next incremental block report.
    ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(new Block(5678, 512, 1000), BlockStatus.DELETED_BLOCK, null);
    DataNode datanode = cluster.getDataNodes().get(0);
    BPServiceActor actor = datanode.getAllBpOs().get(0).getBPServiceActors().get(0);
    final FsDatasetSpi<?> dataset = datanode.getFSDataset();
    final DatanodeStorage storage;
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        storage = dataset.getStorage(volumes.get(0).getStorageID());
    }
    actor.getIbrManager().addRDBI(rdbi, storage);
    // Manually trigger a block report.
    datanode.triggerBlockReport(new BlockReportOptions.Factory().setIncremental(incremental).build());
    // triggerBlockReport returns before the block report is actually sent.
    // Wait for it to be sent here.
    if (incremental) {
        Mockito.verify(spy, timeout(60000).times(2)).blockReceivedAndDeleted(any(DatanodeRegistration.class), anyString(), any(StorageReceivedDeletedBlocks[].class));
    } else {
        Mockito.verify(spy, timeout(60000)).blockReport(any(DatanodeRegistration.class), anyString(), any(StorageBlockReport[].class), Mockito.<BlockReportContext>anyObject());
    }
    cluster.shutdown();
}
Also used : Path(org.apache.hadoop.fs.Path), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), FileSystem(org.apache.hadoop.fs.FileSystem), DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), Block(org.apache.hadoop.hdfs.protocol.Block), BlockReportOptions(org.apache.hadoop.hdfs.client.BlockReportOptions), ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
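As with the TestHSync helper in Example 12, this private method takes a boolean flag and is presumably invoked from two JUnit entry points, one per report type; a sketch under that assumption:

@Test
public void testTriggerFullBlockReport() throws Exception {
    testTriggerBlockReport(false);
}

@Test
public void testTriggerIncrementalBlockReport() throws Exception {
    testTriggerBlockReport(true);
}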

Example 15 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

The class TestDatasetVolumeChecker, method testCheckAllVolumes.

/**
   * Test {@link DatasetVolumeChecker#checkAllVolumes} propagates
   * checks for all volumes to the delegate checker.
   *
   * @throws Exception
   */
@Test(timeout = 10000)
public void testCheckAllVolumes() throws Exception {
    LOG.info("Executing {}", testName.getMethodName());
    final List<FsVolumeSpi> volumes = makeVolumes(NUM_VOLUMES, expectedVolumeHealth);
    final FsDatasetSpi<FsVolumeSpi> dataset = makeDataset(volumes);
    final DatasetVolumeChecker checker = new DatasetVolumeChecker(new HdfsConfiguration(), new FakeTimer());
    checker.setDelegateChecker(new DummyChecker());
    Set<FsVolumeSpi> failedVolumes = checker.checkAllVolumes(dataset);
    LOG.info("Got back {} failed volumes", failedVolumes.size());
    if (expectedVolumeHealth == null || expectedVolumeHealth == FAILED) {
        assertThat(failedVolumes.size(), is(NUM_VOLUMES));
    } else {
        assertTrue(failedVolumes.isEmpty());
    }
    // Ensure each volume's check() method was called exactly once.
    for (FsVolumeSpi volume : volumes) {
        verify(volume, times(1)).check(anyObject());
    }
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), FakeTimer(org.apache.hadoop.util.FakeTimer), Test(org.junit.Test)
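makeVolumes, makeDataset and DummyChecker are helpers of TestDatasetVolumeChecker that are not shown here. A minimal Mockito sketch of what makeVolumes might look like, assuming each volume is a mock whose check() either returns the expected health or throws a disk error when the expected health is null; the real helper may differ:

// Assumed imports: java.util.ArrayList, java.util.List, static org.mockito.Mockito.*,
// plus FsVolumeReference, VolumeCheckResult and DiskChecker from their usual Hadoop packages.
static List<FsVolumeSpi> makeVolumes(int numVolumes, VolumeCheckResult health) throws Exception {
    final List<FsVolumeSpi> volumes = new ArrayList<>(numVolumes);
    for (int i = 0; i < numVolumes; i++) {
        final FsVolumeSpi volume = mock(FsVolumeSpi.class);
        final FsVolumeReference reference = mock(FsVolumeReference.class);
        when(reference.getVolume()).thenReturn(volume);
        when(volume.obtainReference()).thenReturn(reference);
        if (health != null) {
            // The volume reports the requested result from its check() method.
            when(volume.check(anyObject())).thenReturn(health);
        } else {
            // A null expected health models a volume whose check() fails outright.
            when(volume.check(anyObject())).thenThrow(new DiskChecker.DiskErrorException("fake disk error"));
        }
        volumes.add(volume);
    }
    return volumes;
}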

Aggregations

HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 454
Configuration (org.apache.hadoop.conf.Configuration): 311
Test (org.junit.Test): 311
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 267
Path (org.apache.hadoop.fs.Path): 152
FileSystem (org.apache.hadoop.fs.FileSystem): 94
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 92
File (java.io.File): 72
IOException (java.io.IOException): 69
Before (org.junit.Before): 56
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 40
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 35
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 30
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 27
RandomAccessFile (java.io.RandomAccessFile): 22
ArrayList (java.util.ArrayList): 20
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 20
URI (java.net.URI): 19
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 19