Example 81 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class DiskBalancerTestUtil method createRandomDataNode.

/**
   * Creates a DiskBalancerDataNode populated with randomly generated volumes.
   *
   * @param diskTypes - storage types needed on the node
   * @param diskCount - number of disks to create for each storage type
   * @return DiskBalancerDataNode
   * @throws Exception
   */
public DiskBalancerDataNode createRandomDataNode(StorageType[] diskTypes, int diskCount) throws Exception {
    Preconditions.checkState(diskTypes.length > 0);
    Preconditions.checkState(diskCount > 0);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    for (StorageType t : diskTypes) {
        DiskBalancerVolumeSet vSet = createRandomVolumeSet(t, diskCount);
        for (DiskBalancerVolume v : vSet.getVolumes()) {
            node.addVolume(v);
        }
    }
    return node;
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), StorageType (org.apache.hadoop.fs.StorageType), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode)
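
A hedged usage sketch, not taken from the Hadoop sources: a caller can seed a synthetic node for disk-balancer tests and sanity-check the volume count. The assertion assumes DiskBalancerDataNode exposes getVolumeCount(), as the diskbalancer data model does.

// Hypothetical caller: two storage tiers, four disks of each.
StorageType[] tiers = { StorageType.DISK, StorageType.ARCHIVE };
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node = util.createRandomDataNode(tiers, 4);
// 2 storage types x 4 disks each = 8 volumes attached to the node.
Assert.assertEquals(8, node.getVolumeCount());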

Example 82 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestMover method testMoverFailedRetry.

@Test
public void testMoverFailedRetry() throws Exception {
    // HDFS-8147
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .storageTypes(new StorageType[][] {
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE } })
        .build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testMoverFailedRetry";
        // write to DISK
        final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
        out.writeChars("testMoverFailedRetry");
        out.close();
        // Delete the block file so that the block move fails with FileNotFoundException
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(file), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", file });
        Assert.assertEquals("Movement should fail after some retry", ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
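
The essential knob above is the retry cap. A minimal sketch of that configuration in isolation (the path and the error handling are assumptions, not part of the test):

Configuration conf = new HdfsConfiguration();
// Give up after two consecutive iterations in which no scheduled move succeeds.
conf.setInt(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, 2);
int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", "/some/file" });
if (rc == ExitStatus.NO_MOVE_PROGRESS.getExitCode()) {
    // The Mover exhausted its retry budget without moving any block.
}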

Example 83 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestMover method testMoverWithStripedFile.

@Test(timeout = 300000)
public void testMoverWithStripedFile() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConfWithStripe(conf);
    // start 10 datanodes
    int numOfDatanodes = 10;
    int storagesPerDatanode = 2;
    long capacity = 10 * defaultBlockSize;
    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
    for (int i = 0; i < numOfDatanodes; i++) {
        for (int j = 0; j < storagesPerDatanode; j++) {
            capacities[i][j] = capacity;
        }
    }
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numOfDatanodes)
        .storagesPerDatanode(storagesPerDatanode)
        .storageTypes(new StorageType[][] {
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE } })
        .storageCapacities(capacities)
        .build();
    try {
        cluster.waitActive();
        // set "/bar" directory with HOT storage policy.
        ClientProtocol client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        String barDir = "/bar";
        client.mkdirs(barDir, new FsPermission((short) 0777), true); // octal 0777, i.e. rwxrwxrwx
        client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
        // set an EC policy on "/bar" directory
        client.setErasureCodingPolicy(barDir, StripedFileTestUtil.getDefaultECPolicy().getName());
        // write file to barDir
        final String fooFile = "/bar/foo";
        long fileLen = 20 * defaultBlockSize;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile), fileLen, (short) 3, 0);
        // verify storage types and locations
        LocatedBlocks locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.DISK, type);
            }
        }
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // start 5 more datanodes
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5, new StorageType[][] {
            { StorageType.ARCHIVE, StorageType.ARCHIVE },
            { StorageType.ARCHIVE, StorageType.ARCHIVE },
            { StorageType.ARCHIVE, StorageType.ARCHIVE },
            { StorageType.ARCHIVE, StorageType.ARCHIVE },
            { StorageType.ARCHIVE, StorageType.ARCHIVE } },
            true, null, null, null, capacities, null, false, false, false, null);
        cluster.triggerHeartbeats();
        // move file to ARCHIVE
        client.setStoragePolicy(barDir, "COLD");
        // run Mover
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        // verify storage types and locations
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.ARCHIVE, type);
            }
        }
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // start 5 more datanodes
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5, new StorageType[][] {
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK } },
            true, null, null, null, capacities, null, false, false, false, null);
        cluster.triggerHeartbeats();
        // move file blocks to ONE_SSD policy
        client.setStoragePolicy(barDir, "ONE_SSD");
        // run Mover
        rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        // verify storage types and locations
        // Movements should have been ignored for the unsupported policy on
        // striped file
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.ARCHIVE, type);
            }
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FsPermission (org.apache.hadoop.fs.permission.FsPermission), ClientProtocol (org.apache.hadoop.hdfs.protocol.ClientProtocol), Test (org.junit.Test)
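
The inline StorageType[][] literals above get unwieldy. A small hypothetical helper (not part of TestMover) builds the same per-datanode matrices more compactly:

// Every datanode receives an identical row of storage types.
static StorageType[][] storageMatrix(int nodes, StorageType... typesPerNode) {
    StorageType[][] matrix = new StorageType[nodes][];
    for (int i = 0; i < nodes; i++) {
        matrix[i] = typesPerNode.clone();
    }
    return matrix;
}

With it, the five ARCHIVE-only rows passed to startDataNodes above collapse to storageMatrix(5, StorageType.ARCHIVE, StorageType.ARCHIVE).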

Example 84 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestMover method testMoveWhenStoragePolicyNotSatisfying.

@Test(timeout = 300000)
public void testMoveWhenStoragePolicyNotSatisfying() throws Exception {
    // HDFS-8147
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .storageTypes(new StorageType[][] {
            { StorageType.DISK }, { StorageType.DISK }, { StorageType.DISK } })
        .build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testMoveWhenStoragePolicyNotSatisfying";
        // write to DISK
        final FSDataOutputStream out = dfs.create(new Path(file));
        out.writeChars("testMoveWhenStoragePolicyNotSatisfying");
        out.close();
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(file), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", file });
        int exitcode = ExitStatus.NO_MOVE_BLOCK.getExitCode();
        Assert.assertEquals("Exit code should be " + exitcode, exitcode, rc);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
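
Examples 82 and 84 exercise two distinct Mover exit codes. A hedged sketch of telling them apart (the path is a placeholder; ExitStatus is org.apache.hadoop.hdfs.server.balancer.ExitStatus):

int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", "/some/file" });
if (rc == ExitStatus.SUCCESS.getExitCode()) {
    // All blocks satisfy the storage policy, possibly after moves.
} else if (rc == ExitStatus.NO_MOVE_BLOCK.getExitCode()) {
    // No move is possible at all, e.g. no ARCHIVE storage exists for COLD.
} else if (rc == ExitStatus.NO_MOVE_PROGRESS.getExitCode()) {
    // Moves were scheduled but kept failing within the retry budget.
}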

Example 85 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestLazyPersistReplicaPlacement method testRamDiskNotChosenByDefault.

/**
   * If the only available storage is RAM_DISK and the LAZY_PERSIST flag is not
   * specified, then block placement should fail.
   *
   * @throws IOException
   */
@Test
public void testRamDiskNotChosenByDefault() throws IOException {
    getClusterBuilder().setStorageTypes(new StorageType[] { RAM_DISK, RAM_DISK }).build();
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path path = new Path("/" + METHOD_NAME + ".dat");
    try {
        makeTestFile(path, BLOCK_SIZE, false);
        fail("Block placement to RAM_DISK should have failed without lazyPersist flag");
    } catch (Throwable t) {
        LOG.info("Got expected exception ", t);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), StorageType (org.apache.hadoop.fs.StorageType), Test (org.junit.Test)
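
For contrast, a sketch of the case that should succeed: passing CreateFlag.LAZY_PERSIST lets block placement choose RAM_DISK. This uses the generic FileSystem.create overload; fs, path, BLOCK_SIZE, and data are assumed to be in scope, much as in the test's makeTestFile helper.

// Request lazy persistence so RAM_DISK becomes an eligible target.
EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST);
FSDataOutputStream os = fs.create(path, FsPermission.getFileDefault(),
        flags, 4096, (short) 1, BLOCK_SIZE, null);
os.write(data);
os.close();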

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 91
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 24
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 18
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
IOException (java.io.IOException): 9
ArrayList (java.util.ArrayList): 8
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4
HashSet (java.util.HashSet): 4