
Example 51 with StorageType

Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.

From the class MoverProcessor, method chooseTargetInSameNode.

/**
   * Choose the target storage within the same Datanode if possible.
   */
boolean chooseTargetInSameNode(Dispatcher.DBlock db, Dispatcher.Source source, List<StorageType> targetTypes) {
    for (StorageType t : targetTypes) {
        // Look for a storage group of the wanted type on the same datanode as
        // the source replica.
        Dispatcher.DDatanode.StorageGroup target = storages.getTarget(source.getDatanodeInfo().getDatanodeUuid(), t);
        if (target == null) {
            continue;
        }
        // Schedule and execute the move immediately if the dispatcher accepts it.
        final Dispatcher.PendingMove pm = source.addPendingMove(db, target);
        if (pm != null) {
            dispatcher.executePendingMove(pm);
            return true;
        }
    }
    }
    return false;
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) Dispatcher(org.apache.hadoop.hdfs.server.balancer.Dispatcher)
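
A minimal sketch of how this helper might be called (hypothetical; the scheduleMoveToOtherNode fallback is an assumed name, not part of the example above): try the cheap same-node move first and only fall back to a remote target if it fails.

// Hypothetical caller: prefer an in-place (same datanode) move before trying
// remote targets. scheduleMoveToOtherNode is an assumed fallback helper.
boolean scheduleMove(Dispatcher.DBlock db, Dispatcher.Source source, List<StorageType> targetTypes) {
    if (chooseTargetInSameNode(db, source, targetTypes)) {
        // Moving between storages of the same datanode avoids network transfer.
        return true;
    }
    return scheduleMoveToOtherNode(db, source, targetTypes);
}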

Example 52 with StorageType

Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.

From the class Mover, method init.

@VisibleForTesting
void init() throws IOException {
    final List<DatanodeStorageReport> reports = dispatcher.init();
    for (DatanodeStorageReport r : reports) {
        final Dispatcher.DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo());
        // For every movable storage type, register the datanode as a source
        // and, if it still has remaining space of that type, as a target.
        for (StorageType t : StorageType.getMovableTypes()) {
            final Dispatcher.Source source = dn.addSource(t, Long.MAX_VALUE, dispatcher);
            final long maxRemaining = getMaxRemaining(r, t);
            final Dispatcher.DDatanode.StorageGroup target = maxRemaining > 0L ? dn.addTarget(t, maxRemaining) : null;
            storages.add(source, target);
        }
    }
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) DatanodeStorageReport(org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport) Dispatcher(org.apache.hadoop.hdfs.server.balancer.Dispatcher) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
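
The init() above relies on a getMaxRemaining helper that is not shown. A plausible sketch, assuming the standard DatanodeStorageReport and StorageReport accessors from org.apache.hadoop.hdfs.server.protocol, is:

// Assumed implementation sketch: return the largest remaining capacity among
// this datanode's storages of the given type; 0 means it cannot be a target.
private static long getMaxRemaining(DatanodeStorageReport report, StorageType t) {
    long max = 0L;
    for (StorageReport r : report.getStorageReports()) {
        if (r.getStorage().getStorageType() == t && r.getRemaining() > max) {
            max = r.getRemaining();
        }
    }
    return max;
}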

Example 53 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

From the class TestPBHelper, method getBlockWithLocations.

private static BlockWithLocations getBlockWithLocations(int bid, boolean isStriped) {
    final String[] datanodeUuids = { "dn1", "dn2", "dn3" };
    final String[] storageIDs = { "s1", "s2", "s3" };
    final StorageType[] storageTypes = { StorageType.DISK, StorageType.DISK, StorageType.DISK };
    final byte[] indices = { 0, 1, 2 };
    final short dataBlkNum = 6;
    BlockWithLocations blkLocs = new BlockWithLocations(new Block(bid, 0, 1), datanodeUuids, storageIDs, storageTypes);
    // Optionally wrap the block in the striped variant, which also carries the
    // block indices and the data-block count.
    if (isStriped) {
        blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlkNum, StripedFileTestUtil.getDefaultECPolicy().getCellSize());
    }
    return blkLocs;
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) StripedBlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations) BlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ByteString(com.google.protobuf.ByteString)
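
A hypothetical JUnit check built on this factory method; the accessors used below (getDatanodeUuids, getStorageTypes, getDataBlockNum) are assumptions based on BlockWithLocations and StripedBlockWithLocations.

// Hypothetical usage of the factory above; accessor names are assumptions.
@Test
public void testGetBlockWithLocationsStriped() {
    BlockWithLocations blkLocs = getBlockWithLocations(1, true);
    assertTrue(blkLocs instanceof StripedBlockWithLocations);
    StripedBlockWithLocations striped = (StripedBlockWithLocations) blkLocs;
    assertEquals(3, striped.getDatanodeUuids().length);
    assertEquals(StorageType.DISK, striped.getStorageTypes()[0]);
    assertEquals(6, striped.getDataBlockNum());
}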

Example 54 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

From the class TestBalancer, method testTwoReplicaShouldNotInSameDN.

/**
   * Test a special case: two replicas of the same block should not end up on the same node.
   * We have 2 nodes.
   * We have a block in (DN0,SSD) and (DN1,DISK).
   * The replica in (DN0,SSD) should not be moved to (DN1,SSD); otherwise DN1
   * would hold 2 replicas of the block.
   */
@Test(timeout = 100000)
public void testTwoReplicaShouldNotInSameDN() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    int blockSize = 5 * 1024 * 1024;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1L);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
    int numOfDatanodes = 2;
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .racks(new String[] { "/default/rack0", "/default/rack0" })
        .storagesPerDatanode(2)
        .storageTypes(new StorageType[][] {
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK } })
        .storageCapacities(new long[][] {
            { 100 * blockSize, 20 * blockSize },
            { 20 * blockSize, 100 * blockSize } })
        .build();
    cluster.waitActive();
    //set "/bar" directory with ONE_SSD storage policy.
    DistributedFileSystem fs = cluster.getFileSystem();
    Path barDir = new Path("/bar");
    fs.mkdir(barDir, new FsPermission((short) 777));
    fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
    // and (DN0,DISK) and (DN1,SSD) are about 15% full.
    long fileLen = 30 * blockSize;
    // fooFile has the ONE_SSD policy, so for each block the 2 replicas land
    // either on (DN0,SSD) and (DN1,DISK), or on (DN0,DISK) and (DN1,SSD).
    Path fooFile = new Path(barDir, "foo");
    createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
    // update space info
    cluster.triggerHeartbeats();
    BalancerParameters p = BalancerParameters.DEFAULT;
    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    final int r = Balancer.run(namenodes, p, conf);
    // The replica in (DN0,SSD) was not moved to (DN1,SSD), because (DN1,DISK)
    // already holds a replica of the same block; otherwise DN1 would have 2
    // replicas. For the same reason, no other replicas can be moved either.
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
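
As an illustrative follow-up (an assumption, not part of the original test), one could also assert that each block's two replicas still sit on different datanodes after the balancer run, using the DFSClient exposed by the DistributedFileSystem:

// Illustrative extra check: every block keeps its 2 replicas on distinct nodes.
for (LocatedBlock lb : fs.getClient()
        .getLocatedBlocks(fooFile.toString(), 0, fileLen).getLocatedBlocks()) {
    DatanodeInfo[] locations = lb.getLocations();
    assertEquals(2, locations.length);
    assertNotEquals(locations[0].getDatanodeUuid(), locations[1].getDatanodeUuid());
}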

Example 55 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

From the class TestSortLocatedStripedBlock, method createEachLocatedBlock.

private LocatedStripedBlock createEachLocatedBlock(int numDataBlk, int numParityBlk, List<Integer> decommnNodeIndices, List<Integer> targetNodeIndices, ArrayList<String> decommNodeInfo) {
    final long blockGroupID = Long.MIN_VALUE;
    int totalDns = numDataBlk + numParityBlk + targetNodeIndices.size();
    DatanodeInfo[] locs = new DatanodeInfo[totalDns];
    String[] storageIDs = new String[totalDns];
    StorageType[] storageTypes = new StorageType[totalDns];
    byte[] blkIndices = new byte[totalDns];
    // Adding data blocks
    int index = 0;
    for (; index < numDataBlk; index++) {
        blkIndices[index] = (byte) index;
        // The location port is always equal to the block's logical index,
        // for easier verification.
        locs[index] = DFSTestUtil.getLocalDatanodeInfo(blkIndices[index]);
        locs[index].setLastUpdateMonotonic(Time.monotonicNow());
        storageIDs[index] = locs[index].getDatanodeUuid();
        storageTypes[index] = StorageType.DISK;
        // set decommissioned state
        if (decommnNodeIndices.contains(index)) {
            locs[index].setDecommissioned();
            decommNodeInfo.add(locs[index].toString());
            // Removing it from the list to ensure that all the given nodes are
            // successfully marked as decommissioned.
            decommnNodeIndices.remove(new Integer(index));
        }
    }
    // Adding parity blocks after data blocks
    index = dataBlocks;
    for (int j = numDataBlk; j < numDataBlk + numParityBlk; j++, index++) {
        blkIndices[j] = (byte) index;
        // The location port is always equal to the block's logical index,
        // for easier verification.
        locs[j] = DFSTestUtil.getLocalDatanodeInfo(blkIndices[j]);
        locs[j].setLastUpdateMonotonic(Time.monotonicNow());
        storageIDs[j] = locs[j].getDatanodeUuid();
        storageTypes[j] = StorageType.DISK;
        // set decommissioned state
        if (decommnNodeIndices.contains(index)) {
            locs[j].setDecommissioned();
            decommNodeInfo.add(locs[j].toString());
            // Removing it from the list to ensure that all the given nodes are
            // successfully marked as decommissioned.
            decommnNodeIndices.remove(new Integer(index));
        }
    }
    // Add extra target nodes to storage list after the parity blocks
    int basePortValue = dataBlocks + parityBlocks;
    index = numDataBlk + numParityBlk;
    for (int i = 0; i < targetNodeIndices.size(); i++, index++) {
        int blkIndexPos = targetNodeIndices.get(i);
        blkIndices[index] = (byte) blkIndexPos;
        // The location port is always equal to the block's logical index,
        // for easier verification.
        locs[index] = DFSTestUtil.getLocalDatanodeInfo(basePortValue++);
        locs[index].setLastUpdateMonotonic(Time.monotonicNow());
        storageIDs[index] = locs[index].getDatanodeUuid();
        storageTypes[index] = StorageType.DISK;
        // decommissioned by administrator
        if (decommnNodeIndices.contains(blkIndexPos)) {
            locs[index].setDecommissioned();
            decommNodeInfo.add(locs[index].toString());
            // Removing it from the list to ensure that all the given nodes are
            // successfully marked as decommissioned.
            decommnNodeIndices.remove(new Integer(blkIndexPos));
        }
    }
    return new LocatedStripedBlock(new ExtendedBlock("pool", blockGroupID, cellSize, 1001), locs, storageIDs, storageTypes, blkIndices, 0, false, null);
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock)
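
A hypothetical call site for the method above (values chosen for illustration; java.util.Arrays is assumed to be imported). The index lists must be mutable copies because the method removes entries as it marks nodes decommissioned.

// Hypothetical invocation: 6 data + 3 parity blocks, data block 0
// decommissioned, and one extra target node reported for block index 0.
List<Integer> decommnNodeIndices = new ArrayList<>(Arrays.asList(0));
List<Integer> targetNodeIndices = new ArrayList<>(Arrays.asList(0));
ArrayList<String> decommNodeInfo = new ArrayList<>();
LocatedStripedBlock lsb = createEachLocatedBlock(
    6, 3, decommnNodeIndices, targetNodeIndices, decommNodeInfo);
// 6 data + 3 parity + 1 extra target location
assertEquals(10, lsb.getLocations().length);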

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 91
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 24
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 18
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
IOException (java.io.IOException): 9
ArrayList (java.util.ArrayList): 8
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4
HashSet (java.util.HashSet): 4