
Example 21 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

From class DFSTestUtil, method createDatanodeStorageInfos.

public static DatanodeStorageInfo[] createDatanodeStorageInfos(int n, String[] racks,
        String[] hostnames, StorageType[] types) {
    DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
    // Iterate from n down to 1: storage IDs and IP addresses are 1-based,
    // while the array index (after the decrement below) is 0-based.
    for (int i = storages.length; i > 0; ) {
        final String storageID = "s" + i;
        final String ip = i + "." + i + "." + i + "." + i;
        i--;
        // Fall back to defaults when the racks/hostnames/types arrays are
        // missing or shorter than n.
        final String rack = (racks != null && i < racks.length) ? racks[i] : "defaultRack";
        final String hostname = (hostnames != null && i < hostnames.length) ? hostnames[i] : "host";
        final StorageType type = (types != null && i < types.length) ? types[i] : StorageType.DEFAULT;
        storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname, type, null);
    }
    return storages;
}
Also used : DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) StorageType(org.apache.hadoop.fs.StorageType)
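
A minimal usage sketch (not taken from the Hadoop sources; the rack, host, and type values are illustrative) of how a block-management test might call this helper:

StorageType[] types = { StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD };
String[] racks = { "/rack1", "/rack1", "/rack2" };
String[] hosts = { "host1", "host2", "host3" };
// Builds three DatanodeStorageInfo entries, one per rack/host/type triple.
DatanodeStorageInfo[] storages =
    DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, types);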

Example 22 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

From class TestMiniDFSCluster, method testClusterSetDatanodeDifferentStorageType.

@Test
public void testClusterSetDatanodeDifferentStorageType() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    StorageType[][] storageType = new StorageType[][] {
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK },
            { StorageType.ARCHIVE } };
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storageTypes(storageType).build()) {
        cluster.waitActive();
        ArrayList<DataNode> dataNodes = cluster.getDataNodes();
        // Check the number of storage directories configured on each DataNode
        for (int i = 0; i < storageType.length; i++) {
            assertEquals(DataNode.getStorageLocations(dataNodes.get(i).getConf()).size(), storageType[i].length);
        }
    }
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Test(org.junit.Test)
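
For a per-volume check, a hedged sketch (not part of the original test) that assumes org.apache.hadoop.hdfs.server.datanode.StorageLocation exposes getStorageType() and that the parsed locations preserve the order of the configured directories:

for (int i = 0; i < storageType.length; i++) {
    List<StorageLocation> locations =
        DataNode.getStorageLocations(dataNodes.get(i).getConf());
    for (int j = 0; j < storageType[i].length; j++) {
        // Each configured directory should carry the storage type requested
        // for that DataNode in the storageType matrix.
        assertEquals(storageType[i][j], locations.get(j).getStorageType());
    }
}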

Example 23 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

From class TestBalancer, method testBalancerDuringUpgrade.

/**
   * Check that the balancer exits when there is an unfinalized upgrade.
   */
@Test(timeout = 300000)
public void testBalancerDuringUpgrade() throws Exception {
    final int SEED = 0xFADED;
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
    final int BLOCK_SIZE = 1024 * 1024;
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .storageCapacities(new long[] { BLOCK_SIZE * 10 })
        .storageTypes(new StorageType[] { DEFAULT })
        .storagesPerDatanode(1)
        .build();
    cluster.waitActive();
    // Create a file on the single DN
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, path1, BLOCK_SIZE, BLOCK_SIZE * 2, BLOCK_SIZE, (short) 1, SEED);
    // Add another DN with the same capacity, cluster is now unbalanced
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.triggerHeartbeats();
    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    // Run balancer
    final BalancerParameters p = BalancerParameters.DEFAULT;
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    // Rolling upgrade should abort the balancer
    assertEquals(ExitStatus.UNFINALIZED_UPGRADE.getExitCode(), Balancer.run(namenodes, p, conf));
    // Should work with the -runDuringUpgrade flag.
    BalancerParameters.Builder b = new BalancerParameters.Builder();
    b.setRunDuringUpgrade(true);
    final BalancerParameters runDuringUpgrade = b.build();
    assertEquals(ExitStatus.SUCCESS.getExitCode(), Balancer.run(namenodes, runDuringUpgrade, conf));
    // Finalize the rolling upgrade
    fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.FINALIZE);
    // Should also work after finalization.
    assertEquals(ExitStatus.SUCCESS.getExitCode(), Balancer.run(namenodes, p, conf));
}
Also used : Path(org.apache.hadoop.fs.Path) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI) Test(org.junit.Test)
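
For reference, the programmatic setRunDuringUpgrade(true) above corresponds to the balancer's -runDuringUpgrade command-line option, i.e. running hdfs balancer -runDuringUpgrade lets the balancer proceed while a rolling upgrade is still unfinalized (hedged: the option name is taken from the parameter exercised here; the exact CLI usage output may differ between releases).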

Example 24 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

From class TestBalancer, method testBalancerWithRamDisk.

/**
   * Test the Balancer with RAM_DISK configured.
   * One DN has two files on RAM_DISK; the other DN has no files on RAM_DISK.
   * Verify that the balancer does not migrate RAM_DISK replicas across DNs.
   */
@Test(timeout = 300000)
public void testBalancerWithRamDisk() throws Exception {
    final int SEED = 0xFADED;
    final short REPL_FACT = 1;
    Configuration conf = new Configuration();
    final int defaultRamDiskCapacity = 10;
    final long ramDiskStorageLimit = ((long) defaultRamDiskCapacity * DEFAULT_RAM_DISK_BLOCK_SIZE) + (DEFAULT_RAM_DISK_BLOCK_SIZE - 1);
    final long diskStorageLimit = ((long) defaultRamDiskCapacity * DEFAULT_RAM_DISK_BLOCK_SIZE) + (DEFAULT_RAM_DISK_BLOCK_SIZE - 1);
    initConfWithRamDisk(conf, ramDiskStorageLimit);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .storageCapacities(new long[] { ramDiskStorageLimit, diskStorageLimit })
        .storageTypes(new StorageType[] { RAM_DISK, DEFAULT })
        .build();
    cluster.waitActive();
    // Create two files on RAM_DISK
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
    final Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSClient client = fs.getClient();
    DFSTestUtil.createFile(fs, path1, true, DEFAULT_RAM_DISK_BLOCK_SIZE, 4 * DEFAULT_RAM_DISK_BLOCK_SIZE, DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true);
    DFSTestUtil.createFile(fs, path2, true, DEFAULT_RAM_DISK_BLOCK_SIZE, 1 * DEFAULT_RAM_DISK_BLOCK_SIZE, DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true);
    // Sleep for a short time to allow the lazy writer thread to do its job
    Thread.sleep(6 * 1000);
    // Add another fresh DN with the same type/capacity without files on RAM_DISK
    StorageType[][] storageTypes = new StorageType[][] { { RAM_DISK, DEFAULT } };
    long[][] storageCapacities = new long[][] { { ramDiskStorageLimit, diskStorageLimit } };
    cluster.startDataNodes(conf, REPL_FACT, storageTypes, true, null, null, null, storageCapacities, null, false, false, false, null);
    cluster.triggerHeartbeats();
    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    // Run Balancer
    final BalancerParameters p = BalancerParameters.DEFAULT;
    final int r = Balancer.run(namenodes, p, conf);
    // Verify that no RAM_DISK block was moved
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    // Verify files are still on RAM_DISK
    DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path1, RAM_DISK);
    DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path2, RAM_DISK);
}
Also used : Path(org.apache.hadoop.fs.Path) DFSClient(org.apache.hadoop.hdfs.DFSClient) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI) Test(org.junit.Test)
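
The RAM_DISK replicas in this test come from lazy-persist writes. As a hedged sketch (not from the test above; the file name and sizes are illustrative), a client can request the same placement through the public CreateFlag API instead of the DFSTestUtil helper:

// LAZY_PERSIST asks for the initial replica in memory (RAM_DISK); the lazy
// writer later persists the block to DISK in the background.
FSDataOutputStream out = fs.create(new Path("/lazyPersistFile"),
    FsPermission.getFileDefault(),
    EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST),
    4096,                            // buffer size
    (short) 1,                       // replication
    DEFAULT_RAM_DISK_BLOCK_SIZE,     // block size
    null);                           // no progress callback
out.write(new byte[(int) DEFAULT_RAM_DISK_BLOCK_SIZE]);
out.close();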

Example 25 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

From class TestBlockStatsMXBean, method testStorageTypeStatsWhenStorageFailed.

@Test
public void testStorageTypeStatsWhenStorageFailed() throws Exception {
    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/blockStatsFile1"), 1024, (short) 1, 0L);
    Map<StorageType, StorageTypeStats> storageTypeStatsMap = cluster.getNamesystem().getBlockManager().getStorageTypeStats();
    StorageTypeStats storageTypeStats = storageTypeStatsMap.get(StorageType.RAM_DISK);
    assertEquals(6, storageTypeStats.getNodesInService());
    storageTypeStats = storageTypeStatsMap.get(StorageType.DISK);
    assertEquals(3, storageTypeStats.getNodesInService());
    storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
    assertEquals(3, storageTypeStats.getNodesInService());
    String dataDir = cluster.getDataDirectory();
    File dn1ArcVol1 = new File(dataDir, "data" + (3 * 0 + 2));
    File dn2ArcVol1 = new File(dataDir, "data" + (3 * 1 + 2));
    File dn3ArcVol1 = new File(dataDir, "data" + (3 * 2 + 2));
    DataNodeTestUtils.injectDataDirFailure(dn1ArcVol1);
    DataNodeTestUtils.injectDataDirFailure(dn2ArcVol1);
    DataNodeTestUtils.injectDataDirFailure(dn3ArcVol1);
    try {
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/blockStatsFile2"), 1024, (short) 1, 0L);
        fail("Should throw exception, becuase no DISK storage available");
    } catch (Exception e) {
        assertTrue(e.getMessage().contains("could only be replicated to 0 nodes instead"));
    }
    // wait for heartbeat
    Thread.sleep(6000);
    storageTypeStatsMap = cluster.getNamesystem().getBlockManager().getStorageTypeStats();
    assertFalse("StorageTypeStatsMap should not contain DISK Storage type", storageTypeStatsMap.containsKey(StorageType.DISK));
    DataNodeTestUtils.restoreDataDirFromFailure(dn1ArcVol1);
    DataNodeTestUtils.restoreDataDirFromFailure(dn2ArcVol1);
    DataNodeTestUtils.restoreDataDirFromFailure(dn3ArcVol1);
    for (int i = 0; i < 3; i++) {
        cluster.restartDataNode(0, true);
    }
    // wait for heartbeat
    Thread.sleep(6000);
    storageTypeStatsMap = cluster.getNamesystem().getBlockManager().getStorageTypeStats();
    storageTypeStats = storageTypeStatsMap.get(StorageType.RAM_DISK);
    assertEquals(6, storageTypeStats.getNodesInService());
    storageTypeStats = storageTypeStatsMap.get(StorageType.DISK);
    assertEquals(3, storageTypeStats.getNodesInService());
    storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
    assertEquals(3, storageTypeStats.getNodesInService());
}
Also used : Path(org.apache.hadoop.fs.Path) StorageType(org.apache.hadoop.fs.StorageType) File(java.io.File) IOException(java.io.IOException) Test(org.junit.Test)
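
For completeness, a hedged sketch (not part of the original test) that reads more of the per-type figures from the same map; the getter names getCapacityTotal/getCapacityUsed/getCapacityRemaining are assumed from StorageTypeStats' public accessors:

for (Map.Entry<StorageType, StorageTypeStats> entry : storageTypeStatsMap.entrySet()) {
    StorageTypeStats stats = entry.getValue();
    // Per storage type: number of DataNodes serving it plus its capacity figures.
    System.out.println(entry.getKey()
        + " nodesInService=" + stats.getNodesInService()
        + " capacityTotal=" + stats.getCapacityTotal()
        + " capacityUsed=" + stats.getCapacityUsed()
        + " capacityRemaining=" + stats.getCapacityRemaining());
}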

Aggregations

StorageType (org.apache.hadoop.fs.StorageType) 94
Test (org.junit.Test) 31
Path (org.apache.hadoop.fs.Path) 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 25
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 19
Configuration (org.apache.hadoop.conf.Configuration) 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 11
IOException (java.io.IOException) 10
ArrayList (java.util.ArrayList) 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 6
ByteString (com.google.protobuf.ByteString) 5
InterruptedIOException (java.io.InterruptedIOException) 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks) 5
File (java.io.File) 4
InetSocketAddress (java.net.InetSocketAddress) 4