Example 1 with DataNodeProperties

use of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties in project hadoop by apache.

the class TestRollingUpgrade method rollbackRollingUpgrade.

private static void rollbackRollingUpgrade(Path foo, Path bar, Path file, byte[] data, MiniDFSCluster cluster) throws IOException {
    // Restart the NameNode with the rolling-upgrade rollback option while the
    // DataNode is stopped, then bring the DataNode back on the same port.
    final DataNodeProperties dnprop = cluster.stopDataNode(0);
    cluster.restartNameNode("-rollingUpgrade", "rollback");
    cluster.restartDataNode(dnprop, true);
    final DistributedFileSystem dfs = cluster.getFileSystem();
    // State from before the upgrade (foo, the file contents) must survive the
    // rollback; state created during the upgrade (bar) must be gone.
    Assert.assertTrue(dfs.exists(foo));
    Assert.assertFalse(dfs.exists(bar));
    AppendTestUtil.checkFullFile(dfs, file, data.length, data);
}
Also used : DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties)
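
For context, here is a sketch of the rolling-upgrade "prepare" side that this rollback undoes. The directory setup mirrors the helper's parameters and is an assumption, not the exact code from TestRollingUpgrade:

DistributedFileSystem dfs = cluster.getFileSystem();
// foo exists before the upgrade is prepared, so it must survive the rollback
dfs.mkdirs(foo);
dfs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);
// bar is created during the upgrade window, so the rollback removes it
dfs.mkdirs(bar);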

Example 2 with DataNodeProperties

use of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties in project hadoop by apache.

the class TestPendingCorruptDnMessages method wipeAndRestartDn.

private static boolean wipeAndRestartDn(MiniDFSCluster cluster, int dnIndex) throws IOException {
    // stop the DN, reformat it, then start it again with the same xfer port.
    DataNodeProperties dnProps = cluster.stopDataNode(dnIndex);
    cluster.formatDataNodeDirs();
    return cluster.restartDataNode(dnProps, true);
}
Also used : DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties)
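
A minimal, hypothetical call site for this helper (the single-DataNode cluster setup is an assumption):

MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .numDataNodes(1).build();
try {
    cluster.waitActive();
    // the helper returns restartDataNode's result: true if the DataNode
    // came back up on its old xfer port
    Assert.assertTrue(wipeAndRestartDn(cluster, 0));
} finally {
    cluster.shutdown();
}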

Example 3 with DataNodeProperties

use of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties in project hadoop by apache.

the class TestNameNodeMetadataConsistency method testGenerationStampInFuture.

/**
   * This test creates a file and modifies the block's generation stamp to a
   * number the name node has not seen yet. It then asserts that the name node
   * moves into safe mode while it is in startup mode.
   */
@Test
public void testGenerationStampInFuture() throws Exception {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    OutputStream ostream = fs.create(filePath1);
    ostream.write(TEST_DATA_IN_FUTURE.getBytes());
    ostream.close();
    // Rewrite the generation stamp to a generation stamp in the future.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath1);
    final long genStamp = block.getGenerationStamp();
    final int datanodeIndex = 0;
    cluster.changeGenStampOfBlock(datanodeIndex, block, genStamp + 1);
    // stop the data node so that it won't remove the block
    final DataNodeProperties dnProps = cluster.stopDataNode(datanodeIndex);
    // Simulate the NameNode forgetting the block
    cluster.restartNameNode(true);
    cluster.getNameNode().getNamesystem().writeLock();
    BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager().getStoredBlock(block.getLocalBlock());
    bInfo.delete();
    cluster.getNameNode().getNamesystem().getBlockManager().removeBlock(bInfo);
    cluster.getNameNode().getNamesystem().writeUnlock();
    // we also need to tell the block manager that we are in the startup path
    BlockManagerTestUtil.setStartupSafeModeForTest(cluster.getNameNode().getNamesystem().getBlockManager());
    cluster.restartDataNode(dnProps);
    waitForNumBytes(TEST_DATA_IN_FUTURE.length());
    // Make sure that we find all written bytes in the future block
    assertEquals(TEST_DATA_IN_FUTURE.length(), cluster.getNameNode().getBytesWithFutureGenerationStamps());
    // Assert safemode reason
    assertTrue(cluster.getNameNode().getNamesystem().getSafeModeTip().contains("Name node detected blocks with generation stamps in future"));
}
Also used : DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FileSystem(org.apache.hadoop.fs.FileSystem) OutputStream(java.io.OutputStream) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Test(org.junit.Test)
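
The helper waitForNumBytes is private to the test class and not shown above; a plausible sketch (a simple polling loop under that assumption, not the actual implementation) is:

private void waitForNumBytes(int expectedBytes) throws Exception {
    // poll the NameNode until it reports the expected number of bytes carried
    // by blocks with future generation stamps, failing after a bounded wait
    final long deadline = System.currentTimeMillis() + 60_000;
    while (cluster.getNameNode().getBytesWithFutureGenerationStamps() != expectedBytes) {
        if (System.currentTimeMillis() > deadline) {
            Assert.fail("timed out waiting for " + expectedBytes
                + " bytes with future generation stamps");
        }
        Thread.sleep(100);
    }
}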

Example 4 with DataNodeProperties

use of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties in project hadoop by apache.

the class TestLeaseRecovery method testBlockRecoveryWithLessMetafile.

/**
   * Block recovery when the meta file does not have CRCs for all chunks in the
   * block file.
   */
@Test
public void testBlockRecoveryWithLessMetafile() throws Exception {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, UserGroupInformation.getCurrentUser().getShortUserName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    Path file = new Path("/testRecoveryFile");
    DistributedFileSystem dfs = cluster.getFileSystem();
    FSDataOutputStream out = dfs.create(file);
    final int FILE_SIZE = 2 * 1024 * 1024;
    int count = 0;
    while (count < FILE_SIZE) {
        out.writeBytes("Data");
        count += 4;
    }
    out.hsync();
    // abort the original stream
    ((DFSOutputStream) out.getWrappedStream()).abort();
    LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(file.toString(), 0, count);
    ExtendedBlock block = locations.get(0).getBlock();
    // Calculate the meta file size.
    // From DataNode.java, the checksum section size is given by:
    //   (length of data + BYTES_PER_CHECKSUM - 1) / BYTES_PER_CHECKSUM * CHECKSUM_SIZE
    // CHECKSUM_SIZE is 4 bytes for both CRC32 and CRC32C.
    final int CHECKSUM_SIZE = 4;
    final int bytesPerChecksum = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    // The meta file header is 8 bytes.
    final int metaFileSize = (FILE_SIZE + bytesPerChecksum - 1) / bytesPerChecksum * CHECKSUM_SIZE + 8;
    final int newMetaFileSize = metaFileSize - CHECKSUM_SIZE;
    // Corrupt the block meta file by dropping the checksum for the last
    // bytesPerChecksum bytes. Lease recovery is expected to recover only the
    // file length covered by valid checksums.
    cluster.truncateMeta(0, block, newMetaFileSize);
    // restart the DN so the replica transitions to the RWR
    // (ReplicaWaitingToBeRecovered) state
    DataNodeProperties dnProp = cluster.stopDataNode(0);
    cluster.restartDataNode(dnProp, true);
    // try to recover the lease
    DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem.newInstance(cluster.getConfiguration(0));
    count = 0;
    while (++count < 10 && !newdfs.recoverLease(file)) {
        Thread.sleep(1000);
    }
    assertTrue("File should be closed", newdfs.recoverLease(file));
    // Verify file length after lease recovery. The new file length should not
    // include the bytes with corrupted checksum.
    final long expectedNewFileLen = FILE_SIZE - bytesPerChecksum;
    final long newFileLen = newdfs.getFileStatus(file).getLen();
    assertEquals(expectedNewFileLen, newFileLen);
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
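
Plugging in the defaults makes the arithmetic concrete; this sketch assumes the default bytesPerChecksum of 512 (DFS_BYTES_PER_CHECKSUM_DEFAULT):

int fileSize = 2 * 1024 * 1024;             // 2,097,152 bytes
int chunks = (fileSize + 512 - 1) / 512;    // 4096 checksum chunks
int metaFileSize = chunks * 4 + 8;          // 16,384 + 8-byte header = 16,392 bytes
int newMetaFileSize = metaFileSize - 4;     // 16,388 after the last CRC is dropped
long expectedNewFileLen = fileSize - 512;   // 2,096,640 recoverable bytes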

Example 5 with DataNodeProperties

use of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties in project hadoop by apache.

the class TestPendingInvalidateBlock method testPendingDeleteUnknownBlocks.

/**
   * Test whether we can delay the deletion of unknown blocks in the DataNode's
   * first several block reports.
   */
@Test
public void testPendingDeleteUnknownBlocks() throws Exception {
    // 5 files
    final int fileNum = 5;
    final Path[] files = new Path[fileNum];
    final DataNodeProperties[] dnprops = new DataNodeProperties[REPLICATION];
    // create a group of files, each file contains 1 block
    for (int i = 0; i < fileNum; i++) {
        files[i] = new Path("/file" + i);
        DFSTestUtil.createFile(dfs, files[i], BLOCKSIZE, REPLICATION, i);
    }
    // wait until all DataNodes have replicas
    waitForReplication();
    for (int i = REPLICATION - 1; i >= 0; i--) {
        dnprops[i] = cluster.stopDataNode(i);
    }
    Thread.sleep(2000);
    // delete 2 files; we still have 3 files remaining so that we can cover
    // every DN storage
    for (int i = 0; i < 2; i++) {
        dfs.delete(files[i], true);
    }
    // restart NameNode
    cluster.restartNameNode(false);
    InvalidateBlocks invalidateBlocks = (InvalidateBlocks) Whitebox.getInternalState(cluster.getNamesystem().getBlockManager(), "invalidateBlocks");
    InvalidateBlocks mockIb = Mockito.spy(invalidateBlocks);
    Mockito.doReturn(1L).when(mockIb).getInvalidationDelay();
    Whitebox.setInternalState(cluster.getNamesystem().getBlockManager(), "invalidateBlocks", mockIb);
    Assert.assertEquals(0L, cluster.getNamesystem().getPendingDeletionBlocks());
    // restart DataNodes
    for (int i = 0; i < REPLICATION; i++) {
        cluster.restartDataNode(dnprops[i], true);
    }
    cluster.waitActive();
    for (int i = 0; i < REPLICATION; i++) {
        DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(i));
    }
    Thread.sleep(2000);
    // make sure we have received block reports by checking the total block #
    Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
    Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
    cluster.restartNameNode(true);
    waitForNumPendingDeletionBlocks(0);
    Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
    Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
}
Also used : Path(org.apache.hadoop.fs.Path) DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) Test(org.junit.Test)

Aggregations

DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties): 24 uses
Test (org.junit.Test): 21 uses
Path (org.apache.hadoop.fs.Path): 12 uses
Configuration (org.apache.hadoop.conf.Configuration): 11 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 7 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 6 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 5 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 3 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3 uses
File (java.io.File): 2 uses
IOException (java.io.IOException): 2 uses
OutputStream (java.io.OutputStream): 2 uses
ArrayList (java.util.ArrayList): 2 uses
BlockLocation (org.apache.hadoop.fs.BlockLocation): 2 uses
AdminStatesBaseTest (org.apache.hadoop.hdfs.AdminStatesBaseTest): 2 uses
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2 uses