Example 46 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class DiskBalancerTestUtil, method newImbalancedCluster.

public static MiniDFSCluster newImbalancedCluster(
        final Configuration conf, final int numDatanodes,
        final long[] storageCapacities, final int defaultBlockSize,
        final int fileLen, final StartupOption dnOption)
        throws IOException, InterruptedException, TimeoutException {
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    final String fileName = "/" + UUID.randomUUID().toString();
    final Path filePath = new Path(fileName);
    Preconditions.checkNotNull(storageCapacities);
    Preconditions.checkArgument(storageCapacities.length == 2, "need to specify capacities for two storages.");
    // Write a file and restart the cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(numDatanodes)
            .storageCapacities(storageCapacities)
            .storageTypes(new StorageType[] { StorageType.DISK, StorageType.DISK })
            .storagesPerDatanode(2)
            .dnStartupOption(dnOption)
            .build();
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    cluster.waitActive();
    Random r = new Random();
    FileSystem fs = cluster.getFileSystem(0);
    TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, 0);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
    cluster.restartDataNodes();
    cluster.waitActive();
    // Get the data node and move all data to one disk.
    for (int i = 0; i < numDatanodes; i++) {
        DataNode dnNode = cluster.getDataNodes().get(i);
        try (FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences()) {
            source = (FsVolumeImpl) refs.get(0);
            dest = (FsVolumeImpl) refs.get(1);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
            DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
        }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    return cluster;
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Random (java.util.Random), FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), FileSystem (org.apache.hadoop.fs.FileSystem), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi)
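
For context, a caller might use this helper as in the sketch below. This is illustrative only: the class name, capacities, and sizes are assumptions, and it presumes DiskBalancerTestUtil and the MiniDFSCluster test jars are on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;

public class ImbalancedClusterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Two storages per datanode, ~300 MB each (illustrative values).
        long[] capacities = { 300L << 20, 300L << 20 };
        MiniDFSCluster cluster = DiskBalancerTestUtil.newImbalancedCluster(
                conf,
                // one datanode
                1,
                capacities,
                // 1 MB blocks and a 30 MB file: ~30 blocks to pile onto one volume
                1024 * 1024,
                30 * 1024 * 1024,
                StartupOption.REGULAR);
        try {
            // The cluster is now imbalanced: all replicas sit on one volume
            // of each datanode, the precondition a DiskBalancer run needs.
        } finally {
            cluster.shutdown();
        }
    }
}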

Example 47 with StorageType

Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.

The class MoverProcessor, method chooseTarget.

boolean chooseTarget(Dispatcher.DBlock db, Dispatcher.Source source, List<StorageType> targetTypes, Matcher matcher) {
    final NetworkTopology cluster = dispatcher.getCluster();
    for (StorageType t : targetTypes) {
        final List<Dispatcher.DDatanode.StorageGroup> targets = storages.getTargetStorages(t);
        Collections.shuffle(targets);
        for (Dispatcher.DDatanode.StorageGroup target : targets) {
            if (matcher.match(cluster, source.getDatanodeInfo(), target.getDatanodeInfo())) {
                final Dispatcher.PendingMove pm = source.addPendingMove(db, target);
                if (pm != null) {
                    dispatcher.executePendingMove(pm);
                    return true;
                }
            }
        }
    }
    return false;
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), NetworkTopology (org.apache.hadoop.net.NetworkTopology), Dispatcher (org.apache.hadoop.hdfs.server.balancer.Dispatcher)
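
The targetTypes argument is typically derived from the block's storage policy. A minimal sketch, assuming the stock HDFS BlockStoragePolicySuite API (the policy name and replication factor here are illustrative choices, not taken from the example above):

import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class TargetTypesSketch {
    public static void main(String[] args) {
        // Look up a policy in the default HDFS policy suite.
        BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
        BlockStoragePolicy allSsd = suite.getPolicy("ALL_SSD");
        // For replication 3, ALL_SSD expects [SSD, SSD, SSD]; chooseTarget
        // then tries each type in order against the shuffled storage groups.
        List<StorageType> targetTypes = allSsd.chooseStorageTypes((short) 3);
        System.out.println(targetTypes);
    }
}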

Example 48 with StorageType

Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.

The class TestAllSsdFileAction, method testAllSsd.

@Test
public void testAllSsd() throws Exception {
    final String file = "/testAllSsd/file";
    Path dir = new Path("/testAllSsd");
    dfs.mkdirs(dir);
    // write to DISK
    dfs.setStoragePolicy(dir, "HOT");
    final FSDataOutputStream out = dfs.create(new Path(file));
    out.writeChars("testAllSSD");
    out.close();
    // schedule move to SSD
    AllSsdFileAction action = new AllSsdFileAction();
    action.setDfsClient(dfsClient);
    action.setContext(smartContext);
    action.init(file);
    ActionStatus status = action.getActionStatus();
    action.run();
    while (!status.isFinished()) {
        System.out.println("Mover running time : " + StringUtils.formatTime(status.getRunningTime()));
        Thread.sleep(1000);
    }
    // verify after movement
    Assert.assertTrue(status.isSuccessful());
    LocatedBlock lb = dfsClient.getLocatedBlocks(file, 0).get(0);
    StorageType[] storageTypes = lb.getStorageTypes();
    for (StorageType storageType : storageTypes) {
        Assert.assertEquals(StorageType.SSD, storageType);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), StorageType (org.apache.hadoop.fs.StorageType), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), ActionStatus (org.smartdata.actions.ActionStatus), Test (org.junit.Test)
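
One caveat with this test: the polling loop has no upper bound, so a stuck mover hangs the test run. A bounded variant is sketched below; waitForFinish is a hypothetical helper, not part of the SSM code.

import org.smartdata.actions.ActionStatus;

// Hypothetical helper (not in SSM): poll an ActionStatus but fail past a deadline.
static void waitForFinish(ActionStatus status, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!status.isFinished()) {
        if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("mover did not finish within " + timeoutMs + " ms");
        }
        Thread.sleep(1000);
    }
}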

Example 49 with StorageType

Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.

The class TestOneSsdFileAction, method testAllSsd.

@Test
public void testAllSsd() throws Exception {
    final String file = "/testArchive/file";
    Path dir = new Path("/testArchive");
    dfs.mkdirs(dir);
    // write to DISK
    dfs.setStoragePolicy(dir, "HOT");
    final FSDataOutputStream out = dfs.create(new Path(file));
    out.writeChars("testArchive");
    out.close();
    // schedule move to One-SSD (one replica on SSD, the rest on DISK)
    OneSsdFileAction action = new OneSsdFileAction();
    action.setDfsClient(dfsClient);
    action.setContext(smartContext);
    action.init(file);
    ActionStatus status = action.getActionStatus();
    action.run();
    while (!status.isFinished()) {
        System.out.println("Mover running time : " + StringUtils.formatTime(status.getRunningTime()));
        Thread.sleep(1000);
    }
    // verify after movement
    Assert.assertTrue(status.isSuccessful());
    LocatedBlock lb = dfsClient.getLocatedBlocks(file, 0).get(0);
    StorageType[] storageTypes = lb.getStorageTypes();
    int ssdCount = 0;
    int hddCount = 0;
    for (StorageType storageType : storageTypes) {
        if (storageType == StorageType.SSD) {
            ssdCount++;
        } else if (storageType == StorageType.DISK) {
            hddCount++;
        }
    }
    Assert.assertEquals(1, ssdCount);
    Assert.assertEquals(2, hddCount);
}
Also used: Path (org.apache.hadoop.fs.Path), StorageType (org.apache.hadoop.fs.StorageType), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), ActionStatus (org.smartdata.actions.ActionStatus), Test (org.junit.Test)
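
The two assertions encode the ONE_SSD contract: with the default replication factor of 3, exactly one replica lands on SSD and the remaining two stay on DISK. A small counting helper (illustrative, not part of the SSM code) makes that kind of check reusable:

import java.util.EnumMap;
import java.util.Map;
import org.apache.hadoop.fs.StorageType;

// Hypothetical helper: tally a block's replicas per StorageType.
static Map<StorageType, Integer> countByType(StorageType[] types) {
    Map<StorageType, Integer> counts = new EnumMap<>(StorageType.class);
    for (StorageType t : types) {
        counts.merge(t, 1, Integer::sum);
    }
    return counts;
}

With it, the verification step reduces to two assertEquals calls against countByType(lb.getStorageTypes()).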

Example 50 with StorageType

Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.

The class MoverProcessor, method scheduleMoveBlock.

boolean scheduleMoveBlock(StorageTypeDiff diff, LocatedBlock lb) {
    final List<MLocation> locations = MLocation.toLocations(lb);
    Collections.shuffle(locations);
    final Dispatcher.DBlock db = newDBlock(lb, locations);
    for (final StorageType t : diff.existing) {
        for (final MLocation ml : locations) {
            final Dispatcher.Source source = storages.getSource(ml);
            if (ml.storageType == t && source != null) {
                // try to schedule one replica move.
                if (scheduleMoveReplica(db, source, diff.expected)) {
                    return true;
                }
            }
        }
    }
    return false;
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), Dispatcher (org.apache.hadoop.hdfs.server.balancer.Dispatcher)
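
The StorageTypeDiff consumed here pairs the storage types a block's replicas currently occupy (existing) with the types its policy expects (expected); once the overlap between the two is removed, whatever remains on either side is what scheduleMoveBlock has to reconcile. Below is a simplified model of that structure, loosely based on the HDFS Mover's inner class; the field and method names follow that class, but this sketch is not the real implementation.

import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.fs.StorageType;

// Simplified model of the Mover's StorageTypeDiff.
class StorageTypeDiffSketch {
    final List<StorageType> expected; // types the storage policy wants
    final List<StorageType> existing; // types the replicas currently occupy

    StorageTypeDiffSketch(List<StorageType> expected, List<StorageType> existing) {
        this.expected = new LinkedList<>(expected);
        this.existing = new LinkedList<>(existing);
    }

    /** Drop types present on both sides; true means nothing is left to move. */
    boolean removeOverlap() {
        for (Iterator<StorageType> it = existing.iterator(); it.hasNext(); ) {
            if (expected.remove(it.next())) {
                it.remove();
            }
        }
        return expected.isEmpty() && existing.isEmpty();
    }
}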

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 91
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 24
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 18
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
IOException (java.io.IOException): 9
ArrayList (java.util.ArrayList): 8
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4
HashSet (java.util.HashSet): 4