
Example 31 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestPBHelper, method testConvertDatanodeID:

@Test
public void testConvertDatanodeID() {
    DatanodeID dn = DFSTestUtil.getLocalDatanodeID();
    DatanodeIDProto dnProto = PBHelperClient.convert(dn);
    DatanodeID dn2 = PBHelperClient.convert(dnProto);
    compare(dn, dn2);
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeIDProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto), Test (org.junit.Test)
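
The compare helper referenced above is not included in this excerpt. A minimal sketch of such a field-by-field check, assuming the standard DatanodeID getters and a static import of org.junit.Assert.assertEquals (an illustration, not the actual TestPBHelper code):

private void compare(DatanodeID dn1, DatanodeID dn2) {
    // Compare the identifying fields that should survive the protobuf round trip.
    assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
    assertEquals(dn1.getHostName(), dn2.getHostName());
    assertEquals(dn1.getDatanodeUuid(), dn2.getDatanodeUuid());
    assertEquals(dn1.getXferPort(), dn2.getXferPort());
    assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
    assertEquals(dn1.getIpcPort(), dn2.getIpcPort());
}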

Example 32 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestFsck, method testFsckMissingECFile:

@Test(timeout = 300000)
public void testFsckMissingECFile() throws Exception {
    DistributedFileSystem fs = null;
    int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
    int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
    int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
    int totalSize = dataBlocks + parityBlocks;
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
    fs = cluster.getFileSystem();
    // create file
    Path ecDirPath = new Path("/striped");
    fs.mkdir(ecDirPath, FsPermission.getDirDefault());
    fs.getClient().setErasureCodingPolicy(ecDirPath.toString(), StripedFileTestUtil.getDefaultECPolicy().getName());
    Path file = new Path(ecDirPath, "missing");
    final int length = cellSize * dataBlocks;
    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
    DFSTestUtil.writeFile(fs, file, bytes);
    // make the EC file unrecoverable: stop parityBlocks + 1 datanodes, one more than the policy tolerates
    ArrayList<DataNode> dns = cluster.getDataNodes();
    DatanodeID dnId;
    for (int i = 0; i < parityBlocks + 1; i++) {
        dnId = dns.get(i).getDatanodeId();
        cluster.stopDataNode(dnId.getXferAddr());
        cluster.setDataNodeDead(dnId);
    }
    waitForUnrecoverableBlockGroup(conf);
    String outStr = runFsck(conf, 1, true, "/", "-files", "-blocks", "-locations");
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
    assertTrue(outStr.contains("Live_repl=" + (dataBlocks - 1)));
    assertTrue(outStr.contains("Under-erasure-coded block groups:\t0"));
    outStr = runFsck(conf, -1, true, "/", "-list-corruptfileblocks");
    assertTrue(outStr.contains("has 1 CORRUPT files"));
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
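
The private helpers runFsck and waitForUnrecoverableBlockGroup are not reproduced in this excerpt. As a rough sketch only, assuming org.apache.hadoop.test.GenericTestUtils and com.google.common.base.Supplier, the waiting step could poll fsck output until the corruption is reported (the real TestFsck helper may be implemented quite differently):

private void waitForUnrecoverableBlockGroup(final Configuration configuration)
        throws TimeoutException, InterruptedException {
    // Poll until fsck reports the striped file as corrupt, for up to one minute.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            try {
                String output = runFsck(configuration, 1, false, "/");
                return output.contains("has 1 CORRUPT files");
            } catch (Exception e) {
                return false;
            }
        }
    }, 1000, 60000);
}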

Example 33 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestFsck, method testUpgradeDomain:

private void testUpgradeDomain(boolean defineUpgradeDomain, boolean displayUpgradeDomain) throws Exception {
    final short replFactor = 1;
    final short numDN = 1;
    final long blockSize = 512;
    final long fileSize = 1024;
    final String upgradeDomain = "ud1";
    final String[] racks = { "/rack1" };
    final String[] hosts = { "127.0.0.1" };
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
    if (defineUpgradeDomain) {
        conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
        hostsFileWriter.initialize(conf, "temp/fsckupgradedomain");
    }
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDN).hosts(hosts).racks(racks).build();
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    // Configure the upgrade domain on the datanode
    if (defineUpgradeDomain) {
        DatanodeAdminProperties dnProp = new DatanodeAdminProperties();
        DatanodeID datanodeID = cluster.getDataNodes().get(0).getDatanodeId();
        dnProp.setHostName(datanodeID.getHostName());
        dnProp.setPort(datanodeID.getXferPort());
        dnProp.setUpgradeDomain(upgradeDomain);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { dnProp });
        cluster.getFileSystem().refreshNodes();
    }
    // create a test file
    final String testFile = new String("/testfile");
    final Path path = new Path(testFile);
    DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L);
    DFSTestUtil.waitReplication(dfs, path, replFactor);
    try {
        String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", displayUpgradeDomain ? "-upgradedomains" : "-locations");
        assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
        String udValue = defineUpgradeDomain ? upgradeDomain : NamenodeFsck.UNDEFINED;
        assertEquals(displayUpgradeDomain, fsckOut.contains("(ud=" + udValue + ")"));
    } finally {
        if (defineUpgradeDomain) {
            hostsFileWriter.cleanup();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)
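
As a side note, if one also wanted to confirm that the upgrade domain reached the NameNode after refreshNodes(), a hypothetical extra check (not part of the original test, assuming DatanodeDescriptor.getUpgradeDomain() and the DatanodeManager lookup shown in the next example) could be added inside the defineUpgradeDomain branch:

// Hypothetical verification, placed right after cluster.getFileSystem().refreshNodes():
DatanodeDescriptor descriptor = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getDatanode(datanodeID);
assertEquals(upgradeDomain, descriptor.getUpgradeDomain());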

Example 34 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestNameNodePrunesMissingStorages, method runTest:

private static void runTest(final String testCaseName, final boolean createFiles, final int numInitialStorages, final int expectedStoragesAfterTest) throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).storagesPerDatanode(numInitialStorages).build();
        cluster.waitActive();
        final DataNode dn0 = cluster.getDataNodes().get(0);
        // Ensure NN knows about the storage.
        final DatanodeID dnId = dn0.getDatanodeId();
        final DatanodeDescriptor dnDescriptor = cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dnId);
        assertThat(dnDescriptor.getStorageInfos().length, is(numInitialStorages));
        final String bpid = cluster.getNamesystem().getBlockPoolId();
        final DatanodeRegistration dnReg = dn0.getDNRegistrationForBP(bpid);
        DataNodeTestUtils.triggerBlockReport(dn0);
        if (createFiles) {
            final Path path = new Path("/", testCaseName);
            DFSTestUtil.createFile(cluster.getFileSystem(), path, 1024, (short) 1, 0x1BAD5EED);
            DataNodeTestUtils.triggerBlockReport(dn0);
        }
        // Generate a fake StorageReport that is missing one storage.
        final StorageReport[] reports = dn0.getFSDataset().getStorageReports(bpid);
        final StorageReport[] prunedReports = new StorageReport[numInitialStorages - 1];
        System.arraycopy(reports, 0, prunedReports, 0, prunedReports.length);
        // Stop the DataNode and send fake heartbeat with missing storage.
        cluster.stopDataNode(0);
        cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0, 0, null, true, SlowPeerReports.EMPTY_REPORT);
        // Check that the missing storage was pruned.
        assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport)
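
For reference, a small hypothetical helper (not in the original test, assuming DatanodeStorageInfo.getStorageID() plus java.util.HashSet and java.util.Set) could make the pruning visible by listing the storage IDs the NameNode still tracks for the descriptor:

private static Set<String> storageIdsOf(DatanodeDescriptor descriptor) {
    // Collect the storage IDs currently attached to the descriptor on the NameNode side.
    Set<String> ids = new HashSet<>();
    for (DatanodeStorageInfo info : descriptor.getStorageInfos()) {
        ids.add(info.getStorageID());
    }
    return ids;
}

Calling it once before and once after the fake heartbeat would show which storage disappears.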

Example 35 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestOverReplicatedBlocks, method testProcesOverReplicateBlock:

/** Test processOverReplicatedBlock can handle corrupt replicas fine.
   * It make sure that it won't treat corrupt replicas as valid ones 
   * thus prevents NN deleting valid replicas but keeping
   * corrupt ones.
   */
@Test
public void testProcesOverReplicateBlock() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
    conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    try {
        final Path fileName = new Path("/foo1");
        DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
        DFSTestUtil.waitReplication(fs, fileName, (short) 3);
        // corrupt the block on datanode 0
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
        cluster.corruptReplica(0, block);
        DataNodeProperties dnProps = cluster.stopDataNode(0);
        // remove block scanner log to trigger block scanning
        File scanCursor = new File(new File(MiniDFSCluster.getFinalizedDir(cluster.getInstanceStorageDir(0, 0), cluster.getNamesystem().getBlockPoolId()).getParent()).getParent(), "scanner.cursor");
        // wait up to one minute for the deletion to succeed
        for (int i = 0; !scanCursor.delete(); i++) {
            assertTrue("Could not delete " + scanCursor.getAbsolutePath() + " in one minute", i < 60);
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ignored) {
            }
        }
        // restart the datanode so the corrupt replica will be detected
        cluster.restartDataNode(dnProps);
        DFSTestUtil.waitReplication(fs, fileName, (short) 2);
        String blockPoolId = cluster.getNamesystem().getBlockPoolId();
        final DatanodeID corruptDataNode = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), blockPoolId);
        final FSNamesystem namesystem = cluster.getNamesystem();
        final BlockManager bm = namesystem.getBlockManager();
        final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
        try {
            namesystem.writeLock();
            synchronized (hm) {
                // set each live datanode's remaining space to 0
                // so they will be chosen for deletion when over-replication occurs
                String corruptMachineName = corruptDataNode.getXferAddr();
                for (DatanodeDescriptor datanode : hm.getDatanodes()) {
                    if (!corruptMachineName.equals(datanode.getXferAddr())) {
                        datanode.getStorageInfos()[0].setUtilizationForTesting(100L, 100L, 0, 100L);
                        datanode.updateHeartbeat(BlockManagerTestUtil.getStorageReportsForDatanode(datanode), 0L, 0L, 0, 0, null);
                    }
                }
                // decrease the replication factor to 1; 
                NameNodeAdapter.setReplication(namesystem, fileName.toString(), (short) 1);
                // the corrupt replica won't be chosen as the excess one;
                // without HDFS-4910 the number of live replicas would be 0 and the block would be lost
                assertEquals(1, bm.countNodes(bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
            }
        } finally {
            namesystem.writeUnlock();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), FileSystem (org.apache.hadoop.fs.FileSystem), File (java.io.File), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
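
For context, bm.countNodes returns a NumberReplicas breakdown; a hypothetical follow-up (not in the original test, assuming the corruptReplicas() and excessReplicas() accessors) could log how the remaining replicas are classified alongside the existing assertion:

NumberReplicas counts = bm.countNodes(bm.getStoredBlock(block.getLocalBlock()));
// Same live-replica check as above, plus a look at the other replica states.
assertEquals(1, counts.liveReplicas());
System.out.println("corrupt=" + counts.corruptReplicas()
        + ", excess=" + counts.excessReplicas());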

Aggregations (types used together with DatanodeID across the collected examples, with usage counts):

DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 51
Test (org.junit.Test): 36
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 18
Configuration (org.apache.hadoop.conf.Configuration): 13
Path (org.apache.hadoop.fs.Path): 12
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 10
IOException (java.io.IOException): 8
InetSocketAddress (java.net.InetSocketAddress): 8
Peer (org.apache.hadoop.hdfs.net.Peer): 8
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7
Block (org.apache.hadoop.hdfs.protocol.Block): 7
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 6
Socket (java.net.Socket): 5
FileSystem (org.apache.hadoop.fs.FileSystem): 5
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 5