
Example 11 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class TestMover, method testMoverWithPinnedBlocks.

/**
   * Test to verify that mover can't move pinned blocks.
   */
@Test(timeout = 90000)
public void testMoverWithPinnedBlocks() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    // Set a larger retry max-attempts value so that the test case will time out if
    // block pinning errors are not handled properly during block movement.
    conf.setInt(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, 10000);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testMoverWithPinnedBlocks/file";
        Path dir = new Path("/testMoverWithPinnedBlocks");
        dfs.mkdirs(dir);
        // write to DISK
        dfs.setStoragePolicy(dir, "HOT");
        final FSDataOutputStream out = dfs.create(new Path(file));
        byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 3);
        out.write(fileData);
        out.close();
        // verify before movement
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        StorageType[] storageTypes = lb.getStorageTypes();
        for (StorageType storageType : storageTypes) {
            Assert.assertTrue(StorageType.DISK == storageType);
        }
        // Adding one SSD based data node to the cluster.
        StorageType[][] newtypes = new StorageType[][] { { StorageType.SSD } };
        startAdditionalDNs(conf, 1, newtypes, cluster);
        // Mock FsDatasetSpi#getPinning to show that the block is pinned.
        for (int i = 0; i < cluster.getDataNodes().size(); i++) {
            DataNode dn = cluster.getDataNodes().get(i);
            LOG.info("Simulate block pinning in datanode {}", dn);
            DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
        }
        // move file blocks to ONE_SSD policy
        dfs.setStoragePolicy(dir, "ONE_SSD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", dir.toString() });
        int exitcode = ExitStatus.NO_MOVE_BLOCK.getExitCode();
        Assert.assertEquals("Movement should fail", exitcode, rc);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
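The test above exercises storage policies, but everything hinges on the StorageType enum itself. As a quick orientation, here is a minimal, self-contained sketch (not part of the Hadoop sources above) that only assumes the stock StorageType enum with its DISK, SSD, ARCHIVE and RAM_DISK values, the DEFAULT alias, and the isTransient()/supportTypeQuota() accessors.

import org.apache.hadoop.fs.StorageType;

public class StorageTypeTour {
    public static void main(String[] args) {
        // Walk the storage media HDFS distinguishes (DISK, SSD, ARCHIVE, RAM_DISK)
        // and print the two properties the examples on this page rely on.
        for (StorageType t : StorageType.values()) {
            System.out.println(t + " transient=" + t.isTransient()
                    + " supportsQuota=" + t.supportTypeQuota());
        }
        // DEFAULT is an alias for DISK, which is why a file written under the
        // HOT policy shows DISK replicas before the mover relocates anything.
        System.out.println("default=" + StorageType.DEFAULT);
    }
}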

Example 12 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class TestMover, method testScheduleSameBlock.

@Test
public void testScheduleSameBlock() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testScheduleSameBlock/file";
        {
            final FSDataOutputStream out = dfs.create(new Path(file));
            out.writeChars("testScheduleSameBlock");
            out.close();
        }
        final Mover mover = newMover(conf);
        mover.init();
        final Mover.Processor processor = mover.new Processor();
        final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        final List<MLocation> locations = MLocation.toLocations(lb);
        final MLocation ml = locations.get(0);
        final DBlock db = mover.newDBlock(lb, locations, null);
        final List<StorageType> storageTypes = new ArrayList<StorageType>(Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
        Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
        Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), ArrayList (java.util.ArrayList), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DBlock (org.apache.hadoop.hdfs.server.balancer.Dispatcher.DBlock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), MLocation (org.apache.hadoop.hdfs.server.mover.Mover.MLocation), Test (org.junit.Test)
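The test hands scheduleMoveReplica a hand-built list of two StorageType.DEFAULT targets. In a real Mover pass the target list is derived from the file's storage policy; the sketch below illustrates that derivation under the assumption that BlockStoragePolicySuite.createDefaultSuite(), its getPolicy(String) lookup, and BlockStoragePolicy.chooseStorageTypes(short) behave as in stock HDFS.

import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class PolicyTargets {
    // Sketch: ask a named storage policy where each replica of a block with the
    // given replication factor should live; "ONE_SSD" with replication 3 is
    // expected to yield [SSD, DISK, DISK].
    static List<StorageType> targetsFor(String policyName, short replication) {
        BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
        BlockStoragePolicy policy = suite.getPolicy(policyName);
        return policy.chooseStorageTypes(replication);
    }
}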

Example 13 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class JsonUtilClient, method toContentSummary.

/** Convert a Json map to a ContentSummary. */
static ContentSummary toContentSummary(final Map<?, ?> json) {
    if (json == null) {
        return null;
    }
    final Map<?, ?> m = (Map<?, ?>) json.get(ContentSummary.class.getSimpleName());
    final long length = ((Number) m.get("length")).longValue();
    final long fileCount = ((Number) m.get("fileCount")).longValue();
    final long directoryCount = ((Number) m.get("directoryCount")).longValue();
    final long quota = ((Number) m.get("quota")).longValue();
    final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
    final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
    final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
    Builder contentSummaryBuilder = new ContentSummary.Builder()
            .length(length)
            .fileCount(fileCount)
            .directoryCount(directoryCount)
            .quota(quota)
            .spaceConsumed(spaceConsumed)
            .spaceQuota(spaceQuota);
    if (typem != null) {
        for (StorageType t : StorageType.getTypesSupportingQuota()) {
            Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());
            if (type != null) {
                contentSummaryBuilder = contentSummaryBuilder
                        .typeQuota(t, ((Number) type.get("quota")).longValue())
                        .typeConsumed(t, ((Number) type.get("consumed")).longValue());
            }
        }
    }
    return contentSummaryBuilder.build();
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), Builder (org.apache.hadoop.fs.ContentSummary.Builder), Map (java.util.Map)
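toContentSummary only builds the summary; reading the per-type values back out is the mirror image. The sketch below assumes ContentSummary exposes getTypeQuota(StorageType) and getTypeConsumed(StorageType) as counterparts of the builder's typeQuota()/typeConsumed() calls.

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.StorageType;

public class TypeQuotaReport {
    // Print the per-storage-type quota and usage carried by a ContentSummary,
    // visiting only the types that can hold a quota (transient media such as
    // RAM_DISK are excluded by getTypesSupportingQuota()).
    static void print(ContentSummary summary) {
        for (StorageType t : StorageType.getTypesSupportingQuota()) {
            System.out.println(t + " quota=" + summary.getTypeQuota(t)
                    + " consumed=" + summary.getTypeConsumed(t));
        }
    }
}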

Example 14 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class JsonUtilClient, method toLocatedBlock.

/** Convert a Json map to LocatedBlock. */
static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
    if (m == null) {
        return null;
    }
    final ExtendedBlock b = toExtendedBlock((Map<?, ?>) m.get("block"));
    final DatanodeInfo[] locations = toDatanodeInfoArray(getList(m, "locations"));
    final long startOffset = ((Number) m.get("startOffset")).longValue();
    final boolean isCorrupt = (Boolean) m.get("isCorrupt");
    final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(getList(m, "cachedLocations"));
    final StorageType[] storageTypes = toStorageTypeArray(getList(m, "storageTypes"));
    final LocatedBlock locatedblock = new LocatedBlock(b, locations, null, storageTypes, startOffset, isCorrupt, cachedLocations);
    locatedblock.setBlockToken(toBlockToken((Map<?, ?>) m.get("blockToken")));
    return locatedblock;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Map (java.util.Map)
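toLocatedBlock delegates the storage-type list to toStorageTypeArray. A minimal sketch of that kind of conversion is shown below; the helper name jsonNamesToStorageTypes is hypothetical, and it assumes the JSON carries the plain enum names that StorageType.parseStorageType(String) accepts.

import java.util.List;
import org.apache.hadoop.fs.StorageType;

public class StorageTypeJson {
    // Hypothetical helper: map a JSON list of storage-type names such as
    // ["DISK", "SSD"] onto the corresponding StorageType values.
    static StorageType[] jsonNamesToStorageTypes(List<?> names) {
        if (names == null) {
            return null;
        }
        StorageType[] types = new StorageType[names.size()];
        for (int i = 0; i < names.size(); i++) {
            types[i] = StorageType.parseStorageType(names.get(i).toString());
        }
        return types;
    }
}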

Example 15 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class PBHelperClient, method convertLocatedBlockProto.

public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
    if (proto == null)
        return null;
    List<DatanodeInfoProto> locs = proto.getLocsList();
    DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
    for (int i = 0; i < locs.size(); i++) {
        targets[i] = convert(locs.get(i));
    }
    final StorageType[] storageTypes = convertStorageTypes(proto.getStorageTypesList(), locs.size());
    final int storageIDsCount = proto.getStorageIDsCount();
    final String[] storageIDs;
    if (storageIDsCount == 0) {
        storageIDs = null;
    } else {
        Preconditions.checkState(storageIDsCount == locs.size());
        storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
    }
    byte[] indices = null;
    if (proto.hasBlockIndices()) {
        indices = proto.getBlockIndices().toByteArray();
    }
    // Set values from the isCached list, re-using references from loc
    List<DatanodeInfo> cachedLocs = new ArrayList<>(locs.size());
    List<Boolean> isCachedList = proto.getIsCachedList();
    for (int i = 0; i < isCachedList.size(); i++) {
        if (isCachedList.get(i)) {
            cachedLocs.add(targets[i]);
        }
    }
    final LocatedBlock lb;
    if (indices == null) {
        lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets,
                storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
                cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
    } else {
        lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets,
                storageIDs, storageTypes, indices, proto.getOffset(), proto.getCorrupt(),
                cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
        List<TokenProto> tokenProtos = proto.getBlockTokensList();
        Token<BlockTokenIdentifier>[] blockTokens = convertTokens(tokenProtos);
        ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
    }
    lb.setBlockToken(convert(proto.getBlockToken()));
    return lb;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), TokenProto (org.apache.hadoop.security.proto.SecurityProtos.TokenProto), ArrayList (java.util.ArrayList), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Token (org.apache.hadoop.security.token.Token), ByteString (com.google.protobuf.ByteString), DatanodeInfoProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)
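convertLocatedBlockProto expects convertStorageTypes to hand back exactly one StorageType per replica location even if the proto carried fewer entries. The sketch below shows one plausible fallback, padding with StorageType.DEFAULT; typesOrDefault is a hypothetical helper, not Hadoop's actual implementation.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.StorageType;

public class StorageTypeDefaults {
    // Hypothetical helper: guarantee one StorageType per replica location,
    // falling back to DEFAULT (DISK) when the decoded list carried no storage
    // types at all.
    static StorageType[] typesOrDefault(List<StorageType> decoded, int expectedSize) {
        StorageType[] types = new StorageType[expectedSize];
        if (decoded == null || decoded.isEmpty()) {
            Arrays.fill(types, StorageType.DEFAULT);
        } else {
            for (int i = 0; i < expectedSize; i++) {
                types[i] = decoded.get(i);
            }
        }
        return types;
    }
}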

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 94
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 25
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 19
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 11
IOException (java.io.IOException): 10
ArrayList (java.util.ArrayList): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
InterruptedIOException (java.io.InterruptedIOException): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4