
Example 6 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From class BlockManager, method chooseExcessRedundancies.

private void chooseExcessRedundancies(final Collection<DatanodeStorageInfo> nonExcess, BlockInfo storedBlock, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
    assert namesystem.hasWriteLock();
    // Pick excess replicas according to the block's layout (striped vs. contiguous) and its storage policy.
    BlockCollection bc = getBlockCollection(storedBlock);
    if (storedBlock.isStriped()) {
        chooseExcessRedundancyStriped(bc, nonExcess, storedBlock, delNodeHint);
    } else {
        final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(bc.getStoragePolicyID());
        final List<StorageType> excessTypes = storagePolicy.chooseExcess(replication, DatanodeStorageInfo.toStorageTypes(nonExcess));
        chooseExcessRedundancyContiguous(nonExcess, storedBlock, replication, addedNode, delNodeHint, excessTypes);
    }
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
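
The heavy lifting here is done by BlockStoragePolicy#chooseExcess, which diffs the storage types the policy prescribes for the given replication factor against the types currently holding replicas. A minimal standalone sketch of that idea (illustrative only, not the upstream implementation; the class and method below are made up for the example):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.StorageType;

// Illustrative sketch: cancel one in-use replica per prescribed slot;
// whatever remains is over-represented and a candidate for removal.
public class ExcessTypesSketch {
    static List<StorageType> chooseExcess(List<StorageType> prescribed,
                                          List<StorageType> inUse) {
        List<StorageType> excess = new ArrayList<>(inUse);
        for (StorageType t : prescribed) {
            excess.remove(t); // drop one matching entry per prescribed slot
        }
        return excess;
    }

    public static void main(String[] args) {
        // A HOT-like policy wants both replicas on DISK, so the extra
        // ARCHIVE replica comes back as excess.
        System.out.println(chooseExcess(
            List.of(StorageType.DISK, StorageType.DISK),
            List.of(StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE)));
        // prints [ARCHIVE]
    }
}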

Example 7 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From class BlockPlacementPolicyDefault, method getRequiredStorageTypes.

private EnumMap<StorageType, Integer> getRequiredStorageTypes(List<StorageType> types) {
    EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
    for (StorageType type : types) {
        if (!map.containsKey(type)) {
            map.put(type, 1);
        } else {
            int num = map.get(type);
            map.put(type, num + 1);
        }
    }
    return map;
}
Also used : StorageType(org.apache.hadoop.fs.StorageType)
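
The containsKey/get/put sequence is a plain frequency count. On Java 8 and later the same behavior can be written with Map.merge; a behavior-equivalent sketch (same imports as the original):

private EnumMap<StorageType, Integer> getRequiredStorageTypes(List<StorageType> types) {
    EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
    for (StorageType type : types) {
        // Insert 1 for a new type, otherwise add 1 to the existing count.
        map.merge(type, 1, Integer::sum);
    }
    return map;
}

EnumMap is a good fit here: StorageType is a small fixed enum, so the map stays compact and iteration follows the enum declaration order.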

Example 8 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From class DatanodeStats, method subtract.

synchronized void subtract(final DatanodeDescriptor node) {
    xceiverCount -= node.getXceiverCount();
    if (node.isInService()) {
        capacityUsed -= node.getDfsUsed();
        capacityUsedNonDfs -= node.getNonDfsUsed();
        blockPoolUsed -= node.getBlockPoolUsed();
        nodesInService--;
        nodesInServiceXceiverCount -= node.getXceiverCount();
        capacityTotal -= node.getCapacity();
        capacityRemaining -= node.getRemaining();
        cacheCapacity -= node.getCacheCapacity();
        cacheUsed -= node.getCacheUsed();
    } else if (node.isDecommissionInProgress() || node.isEnteringMaintenance()) {
        cacheCapacity -= node.getCacheCapacity();
        cacheUsed -= node.getCacheUsed();
    }
    Set<StorageType> storageTypes = new HashSet<>();
    for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
        if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
            statsMap.subtractStorage(storageInfo, node);
            storageTypes.add(storageInfo.getStorageType());
        }
    }
    for (StorageType storageType : storageTypes) {
        statsMap.subtractNode(storageType, node);
    }
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) HashSet(java.util.HashSet)
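
The Set matters because a datanode commonly exposes several volumes of the same StorageType: per-storage stats are subtracted once per volume, but the per-type node counter must be decremented only once per type. Since StorageType is an enum, an EnumSet performs the same deduplication; a small sketch (the class and method names are made up for the example):

import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.fs.StorageType;

// Illustrative: collapse per-volume storage types to the distinct set, so
// node-level per-type counters are adjusted once per type, not per volume.
class DistinctTypesSketch {
    static Set<StorageType> distinctTypes(List<StorageType> volumeTypes) {
        Set<StorageType> types = EnumSet.noneOf(StorageType.class);
        types.addAll(volumeTypes);
        return types;
    }

    public static void main(String[] args) {
        // Two DISK volumes and one ARCHIVE volume yield two distinct types.
        System.out.println(distinctTypes(List.of(
            StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE)));
    }
}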

Example 9 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From class TestMover, method createFileWithFavoredDatanodes.

private void createFileWithFavoredDatanodes(final Configuration conf, final MiniDFSCluster cluster, final DistributedFileSystem dfs) throws IOException {
    // Add two DISK-based datanodes to the cluster and ensure that
    // blocks are pinned on these new datanodes.
    StorageType[][] newtypes = new StorageType[][] { { StorageType.DISK }, { StorageType.DISK } };
    startAdditionalDNs(conf, 2, newtypes, cluster);
    ArrayList<DataNode> dataNodes = cluster.getDataNodes();
    InetSocketAddress[] favoredNodes = new InetSocketAddress[2];
    int j = 0;
    for (int i = dataNodes.size() - 1; i >= 2; i--) {
        favoredNodes[j++] = dataNodes.get(i).getXferAddress();
    }
    final String file = "/parent/testMoverFailedRetryWithPinnedBlocks2";
    final FSDataOutputStream out = dfs.create(new Path(file), FsPermission.getDefault(), true, DEFAULT_BLOCK_SIZE, (short) 2, DEFAULT_BLOCK_SIZE, null, favoredNodes);
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
    out.write(fileData);
    out.close();
    // Mock FsDatasetSpi#getPinning to show that the block is pinned.
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file, 0);
    Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
    LocatedBlock lb = locatedBlocks.get(0);
    DatanodeInfo datanodeInfo = lb.getLocations()[0];
    for (DataNode dn : cluster.getDataNodes()) {
        if (dn.getDatanodeId().getDatanodeUuid().equals(datanodeInfo.getDatanodeUuid())) {
            LOG.info("Simulate block pinning in datanode {}", datanodeInfo);
            DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
            break;
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) InetSocketAddress(java.net.InetSocketAddress) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
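
For reference, the favored-nodes create() overload the test exercises looks like this in isolation. A hedged sketch: the helper name, hostnames, and port are placeholders, and dfs is assumed to be an already-open DistributedFileSystem:

import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative helper: write a file whose replicas are preferentially
// placed on the given datanode transfer addresses.
static void writeToFavoredNodes(DistributedFileSystem dfs, byte[] payload)
        throws IOException {
    InetSocketAddress[] favored = {
        new InetSocketAddress("dn1.example.com", 9866), // placeholder addresses
        new InetSocketAddress("dn2.example.com", 9866)
    };
    try (FSDataOutputStream out = dfs.create(new Path("/data/pinned"),
            FsPermission.getFileDefault(), true, 4096, (short) 2,
            128L * 1024 * 1024, null, favored)) {
        out.write(payload);
    }
}

Favored nodes are a placement hint rather than a guarantee; with dfs.datanode.block-pinning.enabled set to true, replicas written this way are pinned to their datanodes, which is the behavior the test simulates with mockDatanodeBlkPinning.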

Example 10 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From class TestMover, method testTwoReplicaSameStorageTypeShouldNotSelect.

@Test(timeout = 300000)
public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception {
    // HDFS-8147
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storageTypes(new StorageType[][] { { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.ARCHIVE } }).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
        // write to DISK
        final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
        out.writeChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
        out.close();
        // verify before movement
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        StorageType[] storageTypes = lb.getStorageTypes();
        for (StorageType storageType : storageTypes) {
            Assert.assertEquals(StorageType.DISK, storageType);
        }
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(file), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", file });
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        // Wait till the namenode is notified of the new block locations
        waitForLocatedBlockWithArchiveStorageType(dfs, file, 2);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
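
Stripped of the test scaffolding, the movement step is two calls: retag the path with a policy that stores replicas on ARCHIVE, then run the Mover over it. A minimal sketch as a reusable helper (the helper name is made up; the APIs are the ones used in the test above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.server.mover.Mover;
import org.apache.hadoop.util.ToolRunner;

// Illustrative helper: mark a file COLD (all replicas on ARCHIVE) and ask
// the Mover to migrate its existing blocks; returns the Mover exit code.
static int moveToArchive(Configuration conf, DistributedFileSystem dfs,
        String file) throws Exception {
    dfs.setStoragePolicy(new Path(file), "COLD");
    return ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", file });
}

A zero return value means the Mover finished scheduling all moves; as the test shows, the namenode may still need a moment to report the new ARCHIVE locations, hence the wait that follows.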

Aggregations

Each entry lists the type, its fully qualified name, and its usage count across the indexed examples.

StorageType (org.apache.hadoop.fs.StorageType): 94
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 25
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 19
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 11
IOException (java.io.IOException): 10
ArrayList (java.util.ArrayList): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
InterruptedIOException (java.io.InterruptedIOException): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4