
Example 11 with StorageInfo

use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.

the class TestComputeInvalidateWork method testDatanodeReRegistration.

@Test(timeout = 12000)
public void testDatanodeReRegistration() throws Exception {
    // Create a test file
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Path path = new Path("/testRR");
    // Create a file and shutdown the DNs, which populates InvalidateBlocks
    DFSTestUtil.createFile(dfs, path, dfs.getDefaultBlockSize(), (short) NUM_OF_DATANODES, 0xED0ED0);
    DFSTestUtil.waitForReplication(dfs, path, (short) NUM_OF_DATANODES, 12000);
    for (DataNode dn : cluster.getDataNodes()) {
        dn.shutdown();
    }
    dfs.delete(path, false);
    namesystem.writeLock();
    InvalidateBlocks invalidateBlocks;
    int expected = NUM_OF_DATANODES;
    try {
        invalidateBlocks = (InvalidateBlocks) Whitebox.getInternalState(
                cluster.getNamesystem().getBlockManager(), "invalidateBlocks");
        assertEquals("Expected invalidate blocks to be the number of DNs", (long) expected, invalidateBlocks.numBlocks());
    } finally {
        namesystem.writeUnlock();
    }
    // Re-register each DN and see that it wipes the invalidation work
    for (DataNode dn : cluster.getDataNodes()) {
        DatanodeID did = dn.getDatanodeId();
        DatanodeRegistration reg = new DatanodeRegistration(
                new DatanodeID(UUID.randomUUID().toString(), did),
                new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
                new ExportedBlockKeys(), VersionInfo.getVersion());
        namesystem.writeLock();
        try {
            bm.getDatanodeManager().registerDatanode(reg);
            expected--;
            assertEquals("Expected number of invalidate blocks to decrease", (long) expected, invalidateBlocks.numBlocks());
        } finally {
            namesystem.writeUnlock();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys), Test (org.junit.Test)
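
The registration above carries a StorageInfo built with the single-argument constructor, i.e. one that records only the node type and leaves the storage fields at their defaults; that is enough for the NameNode to accept the re-registration in this test. A minimal, hedged sketch of that object, assuming the default field values are zero and an empty cluster ID (not verified against a specific Hadoop release):

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.StorageInfo;

public class StorageInfoDefaultsSketch {
    public static void main(String[] args) {
        // Single-argument constructor: only the node type is recorded;
        // layoutVersion, namespaceID and cTime are assumed to default to 0
        // and clusterID to an empty string.
        StorageInfo si = new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE);
        System.out.println(si.getLayoutVersion()); // assumed 0
        System.out.println(si.getNamespaceID());   // assumed 0
        System.out.println(si.getCTime());         // assumed 0
        System.out.println(si.getClusterID());     // assumed ""
    }
}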

Example 12 with StorageInfo

use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.

the class TestCheckpoint method testNamespaceVerifiedOnFileTransfer.

/**
   * Test that the primary NN will not serve any files to a 2NN who doesn't
   * share its namespace ID, and also will not accept any files from one.
   */
@Test
public void testNamespaceVerifiedOnFileTransfer() throws IOException {
    MiniDFSCluster cluster = null;
    Configuration conf = new HdfsConfiguration();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
        NamenodeProtocols nn = cluster.getNameNodeRpc();
        URL fsName = DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(),
                conf, DFSUtil.getHttpClientScheme(conf)).toURL();
        // Make a finalized log on the server side. 
        nn.rollEditLog();
        RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
        RemoteEditLog log = manifest.getLogs().get(0);
        NNStorage dstImage = Mockito.mock(NNStorage.class);
        Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written")))
                .when(dstImage)
                .getFiles(Mockito.<NameNodeDirType>anyObject(), Mockito.anyString());
        File mockImageFile = File.createTempFile("image", "");
        FileOutputStream imageFile = new FileOutputStream(mockImageFile);
        imageFile.write("data".getBytes());
        imageFile.close();
        Mockito.doReturn(mockImageFile).when(dstImage).findImageFile(Mockito.any(NameNodeFile.class), Mockito.anyLong());
        Mockito.doReturn(new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE).toColonSeparatedString())
                .when(dstImage).toColonSeparatedString();
        try {
            TransferFsImage.downloadImageToStorage(fsName, 0, dstImage, false, false);
            fail("Storage info was not verified");
        } catch (IOException ioe) {
            String msg = StringUtils.stringifyException(ioe);
            assertTrue(msg, msg.contains("but the secondary expected"));
        }
        try {
            TransferFsImage.downloadEditsToStorage(fsName, log, dstImage);
            fail("Storage info was not verified");
        } catch (IOException ioe) {
            String msg = StringUtils.stringifyException(ioe);
            assertTrue(msg, msg.contains("but the secondary expected"));
        }
        try {
            TransferFsImage.uploadImageFromStorage(fsName, conf, dstImage, NameNodeFile.IMAGE, 0);
            fail("Storage info was not verified");
        } catch (IOException ioe) {
            String msg = StringUtils.stringifyException(ioe);
            assertTrue(msg, msg.contains("but the secondary expected"));
        }
    } finally {
        cleanup(cluster);
        cluster = null;
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), IOException (java.io.IOException), URL (java.net.URL), RemoteEditLogManifest (org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest), FileOutputStream (java.io.FileOutputStream), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), RemoteEditLog (org.apache.hadoop.hdfs.server.protocol.RemoteEditLog), RandomAccessFile (java.io.RandomAccessFile), EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile), File (java.io.File), Test (org.junit.Test)
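
The three failed transfers above are all caught by the same storage-signature check: the transfer code compares the colon-separated storage string reported by the destination (the mocked value above) against the primary NameNode's own and aborts on a mismatch with the "but the secondary expected" message. A hedged illustration of the strings being compared; the field order produced by toColonSeparatedString() and the primary's values are assumptions:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;

public class StorageSignatureSketch {
    public static void main(String[] args) {
        // The bogus signature the mocked 2NN storage reports in the test above.
        StorageInfo bogus = new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE);
        // A hypothetical primary-NameNode signature for comparison.
        StorageInfo primary =
                new StorageInfo(-63, 463031076, "CID-example", 0L, NodeType.NAME_NODE);
        // Assumed output shape: layoutVersion:namespaceID:cTime:clusterID
        System.out.println(bogus.toColonSeparatedString());
        System.out.println(primary.toColonSeparatedString());
        // TransferFsImage refuses the download/upload when the two strings differ.
    }
}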

Example 13 with StorageInfo

use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.

the class PBHelper method convert.

public static NamespaceInfoProto convert(NamespaceInfo info) {
    NamespaceInfoProto.Builder builder = NamespaceInfoProto.newBuilder();
    builder.setBlockPoolID(info.getBlockPoolID())
            .setBuildVersion(info.getBuildVersion())
            .setUnused(0)
            .setStorageInfo(PBHelper.convert((StorageInfo) info))
            .setSoftwareVersion(info.getSoftwareVersion())
            .setCapabilities(info.getCapabilities());
    HAServiceState state = info.getState();
    if (state != null) {
        builder.setState(convert(info.getState()));
    }
    return builder.build();
}
Also used: StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), HAServiceState (org.apache.hadoop.ha.HAServiceProtocol.HAServiceState), NamespaceInfoProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto)
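
The nested PBHelper.convert((StorageInfo) info) call above is what carries the common storage fields (layout version, namespace ID, cluster ID, cTime) into the protobuf message. A hedged sketch of that field mapping on the Java side; the concrete values are illustrative only and the corresponding proto field names are assumed, not quoted from the real .proto files:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;

public class StorageInfoConversionSketch {
    // Each getter below feeds one field of the protobuf storage sub-message.
    static String describe(StorageInfo info) {
        return "layoutVersion=" + info.getLayoutVersion()
                + ", namespaceID=" + info.getNamespaceID()
                + ", clusterID=" + info.getClusterID()
                + ", cTime=" + info.getCTime();
    }

    public static void main(String[] args) {
        System.out.println(describe(
                new StorageInfo(-63, 463031076, "CID-example", 0L, NodeType.NAME_NODE)));
    }
}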

Example 14 with StorageInfo

use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.

the class FileJournalManager method getJournalCTime.

@Override
public long getJournalCTime() throws IOException {
    StorageInfo sInfo = new StorageInfo((NodeType) null);
    sInfo.readProperties(sd);
    return sInfo.getCTime();
}
Also used: StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo)
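
The same pattern works outside FileJournalManager: construct a typeless StorageInfo, point readProperties at a storage directory, and pull individual fields out of its VERSION file. A hedged, self-contained sketch; the StorageDirectory construction and the on-disk path are assumptions for illustration:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;

public class JournalCTimeSketch {
    // Read the cTime recorded in <storageRoot>/current/VERSION (path assumed).
    static long readCTime(File storageRoot) throws IOException {
        Storage.StorageDirectory sd = new Storage.StorageDirectory(storageRoot);
        StorageInfo info = new StorageInfo((NodeType) null); // node type left unset, as above
        info.readProperties(sd);
        return info.getCTime();
    }

    public static void main(String[] args) throws IOException {
        // Hypothetical journal storage directory, for illustration only.
        System.out.println(readCTime(new File("/tmp/hadoop/dfs/journalnode/myjournal")));
    }
}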

Example 15 with StorageInfo

use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.

the class TestDFSUpgrade method testUpgrade.

/**
   * This test attempts to upgrade the NameNode and DataNode under
   * a number of valid and invalid conditions.
   */
@Test(timeout = 60000)
public void testUpgrade() throws Exception {
    File[] baseDirs;
    StorageInfo storageInfo = null;
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
        conf = new HdfsConfiguration();
        conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
        String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
        String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
        conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
        log("Normal NameNode upgrade", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        // make sure that rolling upgrade cannot be started
        try {
            final DistributedFileSystem dfs = cluster.getFileSystem();
            dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
            dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
            fail();
        } catch (RemoteException re) {
            assertEquals(InconsistentFSStateException.class.getName(), re.getClassName());
            LOG.info("The exception is expected.", re);
        }
        checkNameNode(nameNodeDirs, EXPECTED_TXID);
        if (numDirs > 1)
            TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("Normal DataNode upgrade", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
        checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        log("NameNode upgrade with existing previous dir", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("DataNode upgrade with existing previous dir", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
        checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        log("DataNode upgrade with future stored layout version in current", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        storageInfo = new StorageInfo(Integer.MIN_VALUE,
                UpgradeUtilities.getCurrentNamespaceID(cluster),
                UpgradeUtilities.getCurrentClusterID(cluster),
                UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
        UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
        startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities.getCurrentBlockPoolID(null));
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        log("DataNode upgrade with newer fsscTime in current", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION,
                UpgradeUtilities.getCurrentNamespaceID(cluster),
                UpgradeUtilities.getCurrentClusterID(cluster),
                Long.MAX_VALUE, NodeType.DATA_NODE);
        UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
        // Ensure the corresponding block pool fails to initialize
        startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities.getCurrentBlockPoolID(null));
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        log("NameNode upgrade with no edits file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        deleteStorageFilesWithPrefix(nameNodeDirs, "edits_");
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("NameNode upgrade with no image file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        deleteStorageFilesWithPrefix(nameNodeDirs, "fsimage_");
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("NameNode upgrade with corrupt version file", numDirs);
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        for (File f : baseDirs) {
            UpgradeUtilities.corruptFile(new File(f, "VERSION"),
                    "layoutVersion".getBytes(Charsets.UTF_8),
                    "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
        }
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("NameNode upgrade with old layout version in current", numDirs);
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
                UpgradeUtilities.getCurrentNamespaceID(null),
                UpgradeUtilities.getCurrentClusterID(null),
                UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
        UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("NameNode upgrade with future layout version in current", numDirs);
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        storageInfo = new StorageInfo(Integer.MIN_VALUE,
                UpgradeUtilities.getCurrentNamespaceID(null),
                UpgradeUtilities.getCurrentClusterID(null),
                UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
        UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    }
    // end numDir loop
    // One more check: normal NN upgrade with 4 directories, concurrent write
    int numDirs = 4;
    {
        conf = new HdfsConfiguration();
        conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
        conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
        conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
        String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
        log("Normal NameNode upgrade", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        // make sure that rolling upgrade cannot be started
        try {
            final DistributedFileSystem dfs = cluster.getFileSystem();
            dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
            dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
            fail();
        } catch (RemoteException re) {
            assertEquals(InconsistentFSStateException.class.getName(), re.getClassName());
            LOG.info("The exception is expected.", re);
        }
        checkNameNode(nameNodeDirs, EXPECTED_TXID);
        TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    }
}
Also used: StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), RemoteException (org.apache.hadoop.ipc.RemoteException), File (java.io.File), InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException), Test (org.junit.Test)
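
Most of the invalid-condition cases above work by fabricating a VERSION file from a StorageInfo whose layout version the running software cannot accept. Layout versions in HDFS are negative and grow more negative with newer software, so Integer.MIN_VALUE acts as a "from the future" version, while Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1 is one step too old to upgrade from. A hedged sketch of the constructor as the test uses it; the namespace ID, cluster ID and cTime values are placeholders, whereas the test reads the real ones through UpgradeUtilities:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;

public class FabricatedVersionSketch {
    public static void main(String[] args) {
        // Placeholder values for illustration only.
        int namespaceID = 463031076;
        String clusterID = "CID-example";
        long fsscTime = 0L;

        // "Future" layout version: more negative than anything the DataNode knows.
        StorageInfo future = new StorageInfo(Integer.MIN_VALUE, namespaceID,
                clusterID, fsscTime, NodeType.DATA_NODE);

        // "Too old" layout version for the NameNode: one past the last upgradable one.
        StorageInfo tooOld = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
                namespaceID, clusterID, fsscTime, NodeType.NAME_NODE);

        System.out.println(future.getLayoutVersion() + " / " + tooOld.getLayoutVersion());
    }
}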

Aggregations

StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 17
Test (org.junit.Test): 11
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 7
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 6
Configuration (org.apache.hadoop.conf.Configuration): 5
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 5
ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys): 4
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 4
File (java.io.File): 3
IOException (java.io.IOException): 3
Path (org.apache.hadoop.fs.Path): 2
IncorrectVersionException (org.apache.hadoop.hdfs.server.common.IncorrectVersionException): 2
ServiceException (com.google.protobuf.ServiceException): 1
FileOutputStream (java.io.FileOutputStream): 1
RandomAccessFile (java.io.RandomAccessFile): 1
InetSocketAddress (java.net.InetSocketAddress): 1
URL (java.net.URL): 1
HAServiceState (org.apache.hadoop.ha.HAServiceProtocol.HAServiceState): 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1