Search in sources:

Example 16 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in the Apache Hadoop project.

From the class TestDatanodeRegistration, method testChangeStorageID.

/**
 * Registering the same datanode (same host/ports) twice under two different
 * storage IDs must not produce two datanode entries: the NameNode should
 * recognize it as the same node both times.
 */
@Test
public void testChangeStorageID() throws Exception {
    // Identity of the fake datanode registered below.
    final String dnIp = "127.0.0.1";
    final String dnHost = "localhost";
    final int xferPort = 12345;
    final int infoPort = 12346;
    final int infoSecurePort = 12347;
    final int ipcPort = 12348;
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        // Bring up a NameNode with zero real datanodes; registrations are
        // faked directly over the NN RPC interface.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        InetSocketAddress nnAddr = new InetSocketAddress("localhost", cluster.getNameNodePort());
        DFSClient client = new DFSClient(nnAddr, conf);
        NamenodeProtocols namenode = cluster.getNameNodeRpc();

        // The storage info must echo the NN's cTime and the DN layout
        // version, otherwise registration would be rejected outright.
        long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
        StorageInfo storage = mock(StorageInfo.class);
        doReturn(nnCTime).when(storage).getCTime();
        doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(storage).getLayoutVersion();

        // First registration, under the original storage ID.
        DatanodeID id = new DatanodeID(dnIp, dnHost, "fake-datanode-id", xferPort, infoPort, infoSecurePort, ipcPort);
        DatanodeRegistration registration = new DatanodeRegistration(id, storage, null, VersionInfo.getVersion());
        namenode.registerDatanode(registration);
        DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals("Expected a registered datanode", 1, report.length);

        // Re-register the identical node under a changed storage ID; the
        // report must still show exactly one datanode, not two.
        id = new DatanodeID(dnIp, dnHost, "changed-fake-datanode-id", xferPort, infoPort, infoSecurePort, ipcPort);
        registration = new DatanodeRegistration(id, storage, null, VersionInfo.getVersion());
        namenode.registerDatanode(registration);
        report = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals("Datanode with changed storage ID not recognized", 1, report.length);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) Test(org.junit.Test)

Example 17 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in the Apache Hadoop project.

From the class TestDatanodeRegistration, method testRegistrationWithDifferentSoftwareVersionsDuringUpgrade.

/**
 * During a rolling upgrade a DN whose software version matches the NN may
 * register even if its storage cTime differs; a DN with BOTH a different
 * software version and a different cTime must be rejected with
 * {@link IncorrectVersionException}.
 */
@Test
public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Lower the supported-NN-version floor so the final rejection below is
    // driven by the cTime/version combination, not the version floor.
    conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0");
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        NamenodeProtocols namenode = cluster.getNameNodeRpc();
        long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();

        StorageInfo storage = mock(StorageInfo.class);
        doReturn(nnCTime).when(storage).getCTime();

        // A fully mocked registration so each field can be varied per case.
        DatanodeRegistration registration = mock(DatanodeRegistration.class);
        doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(registration).getVersion();
        doReturn("fake-storage-id").when(registration).getDatanodeUuid();
        doReturn(storage).when(registration).getStorageInfo();
        doReturn("127.0.0.1").when(registration).getIpAddr();
        doReturn(123).when(registration).getXferPort();

        // Case 1: same software version, same cTime -> accepted.
        doReturn(VersionInfo.getVersion()).when(registration).getSoftwareVersion();
        namenode.registerDatanode(registration);

        // Case 2: same software version, different cTime -> still accepted.
        doReturn(nnCTime + 1).when(storage).getCTime();
        namenode.registerDatanode(registration);

        // Case 3: different software version AND different cTime -> refused.
        doReturn(VersionInfo.getVersion() + ".1").when(registration).getSoftwareVersion();
        try {
            namenode.registerDatanode(registration);
            fail("Should not have been able to register DN with different software" + " versions and CTimes");
        } catch (IncorrectVersionException ive) {
            GenericTestUtils.assertExceptionContains("does not match CTime of NN", ive);
            LOG.info("Got expected exception", ive);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) IncorrectVersionException(org.apache.hadoop.hdfs.server.common.IncorrectVersionException) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) Configuration(org.apache.hadoop.conf.Configuration) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) Test(org.junit.Test)

Aggregations

StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo)17 Test (org.junit.Test)11 DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration)7 DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo)6 Configuration (org.apache.hadoop.conf.Configuration)5 DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID)5 ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys)4 NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols)4 File (java.io.File)3 IOException (java.io.IOException)3 Path (org.apache.hadoop.fs.Path)2 IncorrectVersionException (org.apache.hadoop.hdfs.server.common.IncorrectVersionException)2 ServiceException (com.google.protobuf.ServiceException)1 FileOutputStream (java.io.FileOutputStream)1 RandomAccessFile (java.io.RandomAccessFile)1 InetSocketAddress (java.net.InetSocketAddress)1 URL (java.net.URL)1 HAServiceState (org.apache.hadoop.ha.HAServiceProtocol.HAServiceState)1 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)1 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)1