Example 6 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache: class TestDatanodeManager, method testRemoveIncludedNode.

/**
   * Test whether removing a host from the includes list without adding it to
   * the excludes list will exclude it from data node reports.
   */
@Test
public void testRemoveIncludedNode() throws IOException {
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    // Set the write lock so that the DatanodeManager can start
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
    HostFileManager hm = new HostFileManager();
    HostSet noNodes = new HostSet();
    HostSet oneNode = new HostSet();
    HostSet twoNodes = new HostSet();
    DatanodeRegistration dr1 = new DatanodeRegistration(new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123", 12345, 12345, 12345, 12345), new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE), new ExportedBlockKeys(), "test");
    DatanodeRegistration dr2 = new DatanodeRegistration(new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-234", 23456, 23456, 23456, 23456), new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE), new ExportedBlockKeys(), "test");
    twoNodes.add(entry("127.0.0.1:12345"));
    twoNodes.add(entry("127.0.0.1:23456"));
    oneNode.add(entry("127.0.0.1:23456"));
    hm.refresh(twoNodes, noNodes);
    Whitebox.setInternalState(dm, "hostConfigManager", hm);
    // Register two data nodes to simulate them coming up.
    // We need to add two nodes, because if we have only one node, removing it
    // will cause the includes list to be empty, which means all hosts will be
    // allowed.
    dm.registerDatanode(dr1);
    dm.registerDatanode(dr2);
    // Make sure that both nodes are reported
    List<DatanodeDescriptor> both = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    // Sort the list so that we know which one is which
    Collections.sort(both);
    Assert.assertEquals("Incorrect number of hosts reported", 2, both.size());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:12345", both.get(0).getInfoAddr());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:23456", both.get(1).getInfoAddr());
    // Remove one node from includes, but do not add it to excludes.
    hm.refresh(oneNode, noNodes);
    // Make sure that only one node is still reported
    List<DatanodeDescriptor> onlyOne = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    Assert.assertEquals("Incorrect number of hosts reported", 1, onlyOne.size());
    Assert.assertEquals("Unexpected host reported", "127.0.0.1:23456", onlyOne.get(0).getInfoAddr());
    // Remove all nodes from includes
    hm.refresh(noNodes, noNodes);
    // Check that both nodes are reported again
    List<DatanodeDescriptor> bothAgain = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    // Sort the list so that we know which one is which
    Collections.sort(bothAgain);
    Assert.assertEquals("Incorrect number of hosts reported", 2, bothAgain.size());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:12345", bothAgain.get(0).getInfoAddr());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
}
Also used: DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), Configuration (org.apache.hadoop.conf.Configuration), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
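The entry(...) calls in this test come from a small helper that turns a "host:port" string into an address the HostSet accepts. A minimal hedged sketch of such a helper, assuming HostSet stores java.net.InetSocketAddress entries; the parsing shown here is illustrative, not the exact Hadoop test utility:

// Illustrative helper (assumption): convert "host:port" into an unresolved
// InetSocketAddress so it can be added to a HostSet includes/excludes set.
private static InetSocketAddress entry(String hostPort) {
    int colon = hostPort.indexOf(':');
    String host = hostPort.substring(0, colon);
    int port = Integer.parseInt(hostPort.substring(colon + 1));
    return InetSocketAddress.createUnresolved(host, port);
}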

Example 7 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache: class QJournalProtocolServerSideTranslatorPB, method canRollBack.

@Override
public CanRollBackResponseProto canRollBack(RpcController controller, CanRollBackRequestProto request) throws ServiceException {
    try {
        StorageInfo si = PBHelper.convert(request.getStorage(), NodeType.JOURNAL_NODE);
        Boolean result = impl.canRollBack(convert(request.getJid()), si, PBHelper.convert(request.getPrevStorage(), NodeType.JOURNAL_NODE), request.getTargetLayoutVersion());
        return CanRollBackResponseProto.newBuilder().setCanRollBack(result).build();
    } catch (IOException e) {
        throw new ServiceException(e);
    }
}
Also used: ServiceException (com.google.protobuf.ServiceException), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), IOException (java.io.IOException)
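The translator above only converts the protobuf payload into server-side StorageInfo objects (with NodeType.JOURNAL_NODE) before delegating to the journal implementation. For orientation, here is a hedged sketch of the two StorageInfo values such a rollback check ends up comparing; the field values are made up for illustration, while the constructor and getters are the ones visible in Examples 9 and 10:

// Hedged sketch: the StorageInfo fields a canRollBack check compares.
// All values below are illustrative, not taken from a real cluster.
StorageInfo current = new StorageInfo(-63, 12345, "CID-example", 1000L,
    HdfsServerConstants.NodeType.JOURNAL_NODE);
StorageInfo previous = new StorageInfo(-60, 12345, "CID-example", 900L,
    HdfsServerConstants.NodeType.JOURNAL_NODE);
// The decision is based on the fields exposed by the getters used in Example 10:
// getLayoutVersion(), getNamespaceID(), getClusterID() and getCTime().
boolean sameNamespace = current.getNamespaceID() == previous.getNamespaceID()
    && current.getClusterID().equals(previous.getClusterID());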

Example 8 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache: class TestDatanodeRegistration, method testRegistrationWithDifferentSoftwareVersions.

@Test
public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0");
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
        long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
        StorageInfo mockStorageInfo = mock(StorageInfo.class);
        doReturn(nnCTime).when(mockStorageInfo).getCTime();
        DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
        doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
        doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
        doReturn(123).when(mockDnReg).getXferPort();
        doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
        doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
        // Should succeed when software versions are the same.
        doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
        rpcServer.registerDatanode(mockDnReg);
        // Should succeed when software version of DN is above minimum required by NN.
        doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
        rpcServer.registerDatanode(mockDnReg);
        // Should fail when software version of DN is below minimum required by NN.
        doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
        try {
            rpcServer.registerDatanode(mockDnReg);
            fail("Should not have been able to register DN with too-low version.");
        } catch (IncorrectVersionException ive) {
            GenericTestUtils.assertExceptionContains("The reported DataNode version is too low", ive);
            LOG.info("Got expected exception", ive);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), IncorrectVersionException (org.apache.hadoop.hdfs.server.common.IncorrectVersionException), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), Configuration (org.apache.hadoop.conf.Configuration), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), Test (org.junit.Test)
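The three registration attempts in this test exercise a simple string-version gate on the NameNode side. A hedged sketch of that check, assuming Hadoop's VersionUtil.compareVersions helper; the variable names and the exact exception constructor and message are illustrative:

// Hedged sketch of the minimum-version gate the test exercises.
// The minimum comes from DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY ("3.0.0" in the test).
String minimumDnVersion = "3.0.0";
String reportedDnVersion = mockDnReg.getSoftwareVersion();
if (VersionUtil.compareVersions(reportedDnVersion, minimumDnVersion) < 0) {
    // Registration is rejected, which is the IncorrectVersionException the test expects.
    throw new IncorrectVersionException("The reported DataNode version is too low: "
        + reportedDnVersion + ", minimum supported is " + minimumDnVersion);
}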

Example 9 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache: class TestDFSRollback, method testRollback.

/**
   * This test attempts to rollback the NameNode and DataNode under
   * a number of valid and invalid conditions.
   */
@Test
public void testRollback() throws Exception {
    File[] baseDirs;
    UpgradeUtilities.initialize();
    StorageInfo storageInfo = null;
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
        conf = new HdfsConfiguration();
        conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
        conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
        String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
        String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
        log("Normal NameNode rollback", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        checkResult(NAME_NODE, nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("Normal DataNode rollback", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
        cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
        checkResult(DATA_NODE, dataNodeDirs);
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        log("Normal BlockPool rollback", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current", UpgradeUtilities.getCurrentBlockPoolID(cluster));
        // Create a previous snapshot for the blockpool
        UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous", UpgradeUtilities.getCurrentBlockPoolID(cluster));
        // Put newer layout version in current.
        storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION - 1, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
        // Overwrite VERSION file in the current directory of
        // volume directories and block pool slice directories
        // with a layout version from future.
        File[] dataCurrentDirs = new File[dataNodeDirs.length];
        for (int i = 0; i < dataNodeDirs.length; i++) {
            dataCurrentDirs[i] = new File((new Path(dataNodeDirs[i] + "/current")).toString());
        }
        UpgradeUtilities.createDataNodeVersionFile(dataCurrentDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
        cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
        assertTrue(cluster.isDataNodeUp());
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        log("NameNode rollback without existing previous dir", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        startNameNodeShouldFail("None of the storage directories contain previous fs state");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("DataNode rollback without existing previous dir", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.UPGRADE).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        log("DataNode rollback with future stored layout version in previous", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
        storageInfo = new StorageInfo(Integer.MIN_VALUE, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
        UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
        startBlockPoolShouldFail(StartupOption.ROLLBACK, cluster.getNamesystem().getBlockPoolId());
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        log("DataNode rollback with newer fsscTime in previous", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
        storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE, NodeType.DATA_NODE);
        UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
        startBlockPoolShouldFail(StartupOption.ROLLBACK, cluster.getNamesystem().getBlockPoolId());
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        log("NameNode rollback with no edits file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        deleteMatchingFiles(baseDirs, "edits.*");
        startNameNodeShouldFail("Gap in transactions");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("NameNode rollback with no image file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        deleteMatchingFiles(baseDirs, "fsimage_.*");
        startNameNodeShouldFail("No valid image files found");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("NameNode rollback with corrupt version file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        for (File f : baseDirs) {
            UpgradeUtilities.corruptFile(new File(f, "VERSION"), "layoutVersion".getBytes(Charsets.UTF_8), "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
        }
        startNameNodeShouldFail("file VERSION has layoutVersion missing");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("NameNode rollback with old layout version in previous", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        storageInfo = new StorageInfo(1, UpgradeUtilities.getCurrentNamespaceID(null), UpgradeUtilities.getCurrentClusterID(null), UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
        UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
        startNameNodeShouldFail("Cannot rollback to storage version 1 using this version");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    }
// end numDir loop
}
Also used: Path (org.apache.hadoop.fs.Path), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), File (java.io.File), Test (org.junit.Test)
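In this test, UpgradeUtilities.createDataNodeVersionFile persists the forged StorageInfo as the VERSION file the DataNode later reads back. A hedged sketch of what that persistence amounts to; the property keys follow the usual HDFS VERSION-file fields, but the helper itself and the versionDir variable (standing for a storage directory's current/ directory) are illustrative, not the real UpgradeUtilities code:

// Illustrative only: write StorageInfo fields as VERSION-file style properties.
Properties props = new Properties();
props.setProperty("layoutVersion", String.valueOf(storageInfo.getLayoutVersion()));
props.setProperty("namespaceID", String.valueOf(storageInfo.getNamespaceID()));
props.setProperty("clusterID", storageInfo.getClusterID());
props.setProperty("cTime", String.valueOf(storageInfo.getCTime()));
props.setProperty("storageType", "DATA_NODE");
try (FileOutputStream out = new FileOutputStream(new File(versionDir, "VERSION"))) {
    props.store(out, "illustrative VERSION file");
}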

Example 10 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache: class TestDFSStartupVersions, method isVersionCompatible.

/**
   * Determines if the given Namenode version and Datanode version
   * are compatible with each other. Compatibility in this case means
   * that the Namenode and Datanode will successfully start up and
   * will work together. The rules for compatibility,
   * taken from the DFS Upgrade Design, are as follows:
   * <pre>
   * <ol>
   * <li>Check 0: if Datanode namespaceID != Namenode namespaceID, the startup fails
   * </li>
   * <li>Check 1: if Datanode clusterID != Namenode clusterID, the startup fails
   * </li>
   * <li>Check 2: if Datanode blockPoolID != Namenode blockPoolID, the startup fails
   * </li>
   * <li>Check 3: The data-node does regular startup (no matter which options 
   *    it is started with) if
   *       softwareLV == storedLV AND 
   *       DataNode.FSSCTime == NameNode.FSSCTime
   * </li>
   * <li>Check 4: The data-node performs an upgrade if it is started without any 
   *    options and
   *       |softwareLV| > |storedLV| OR 
   *       (softwareLV == storedLV AND
   *        DataNode.FSSCTime < NameNode.FSSCTime)
   * </li>
   * <li>NOT TESTED: The data-node rolls back if it is started with
   *    the -rollback option and
   *       |softwareLV| >= |previous.storedLV| AND 
   *       DataNode.previous.FSSCTime <= NameNode.FSSCTime
   * </li>
   * <li>Check 5: In all other cases the startup fails.</li>
   * </ol>
   * </pre>
   */
boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) {
    final StorageInfo namenodeVer = namenodeSd.storageInfo;
    final StorageInfo datanodeVer = datanodeSd.storageInfo;
    // check #0
    if (namenodeVer.getNamespaceID() != datanodeVer.getNamespaceID()) {
        LOG.info("namespaceIDs are not equal: isVersionCompatible=false");
        return false;
    }
    // check #1
    if (!namenodeVer.getClusterID().equals(datanodeVer.getClusterID())) {
        LOG.info("clusterIDs are not equal: isVersionCompatible=false");
        return false;
    }
    // check #2
    if (!namenodeSd.blockPoolId.equals(datanodeSd.blockPoolId)) {
        LOG.info("blockPoolIDs are not equal: isVersionCompatible=false");
        return false;
    }
    // check #3
    int softwareLV = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
    int storedLV = datanodeVer.getLayoutVersion();
    if (softwareLV == storedLV && datanodeVer.getCTime() == namenodeVer.getCTime()) {
        LOG.info("layoutVersions and cTimes are equal: isVersionCompatible=true");
        return true;
    }
    // check #4
    long absSoftwareLV = Math.abs((long) softwareLV);
    long absStoredLV = Math.abs((long) storedLV);
    if (absSoftwareLV > absStoredLV || (softwareLV == storedLV && datanodeVer.getCTime() < namenodeVer.getCTime())) {
        LOG.info("softwareLayoutVersion is newer OR namenode cTime is newer: isVersionCompatible=true");
        return true;
    }
    // check #5
    LOG.info("default case: isVersionCompatible=false");
    return false;
}
Also used: StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo)
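One subtlety behind check #4 above: HDFS layout versions are negative integers that decrease as the on-disk layout evolves, which is why the comparison is done on absolute values. A small hedged worked example with made-up version numbers:

// Illustrative values only: layout versions in HDFS are negative and decrease over time.
int softwareLV = -57;  // layout version compiled into the DataNode software
int storedLV = -55;    // layout version recorded in the storage directory
// |-57| > |-55|, so the software layout is newer than the stored layout and,
// per check #4, the data-node performs an upgrade instead of a regular startup.
boolean upgrades = Math.abs((long) softwareLV) > Math.abs((long) storedLV);  // true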

Aggregations

StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 17
Test (org.junit.Test): 11
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 7
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 6
Configuration (org.apache.hadoop.conf.Configuration): 5
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 5
ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys): 4
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 4
File (java.io.File): 3
IOException (java.io.IOException): 3
Path (org.apache.hadoop.fs.Path): 2
IncorrectVersionException (org.apache.hadoop.hdfs.server.common.IncorrectVersionException): 2
ServiceException (com.google.protobuf.ServiceException): 1
FileOutputStream (java.io.FileOutputStream): 1
RandomAccessFile (java.io.RandomAccessFile): 1
InetSocketAddress (java.net.InetSocketAddress): 1
URL (java.net.URL): 1
HAServiceState (org.apache.hadoop.ha.HAServiceProtocol.HAServiceState): 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1