Example 86 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From the class FileWithSnapshotFeature, method updateQuotaAndCollectBlocks.

public void updateQuotaAndCollectBlocks(INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
    byte storagePolicyID = file.getStoragePolicyID();
    BlockStoragePolicy bsp = null;
    if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
        bsp = reclaimContext.storagePolicySuite().getPolicy(storagePolicyID);
    }
    QuotaCounts oldCounts;
    if (removed.snapshotINode != null) {
        // The removed diff holds a snapshot copy of the inode, so rebuild the
        // old usage block by block from the file's state at snapshot time.
        oldCounts = new QuotaCounts.Builder().build();
        BlockInfo[] blocks = file.getBlocks() == null ? new BlockInfo[0] : file.getBlocks();
        for (BlockInfo b : blocks) {
            short replication = b.getReplication();
            // Blocks still under construction are charged at the preferred block size.
            long blockSize = b.isComplete() ? b.getNumBytes() : file.getPreferredBlockSize();
            oldCounts.addStorageSpace(blockSize * replication);
            if (bsp != null) {
                List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
                for (StorageType t : oldTypeChosen) {
                    if (t.supportTypeQuota()) {
                        oldCounts.addTypeSpace(t, blockSize);
                    }
                }
            }
        }
        // Drop the reference the snapshot copy holds on its ACL feature.
        AclFeature aclFeature = removed.getSnapshotINode().getAclFeature();
        if (aclFeature != null) {
            AclStorage.removeAclFeature(aclFeature);
        }
    } else {
        oldCounts = file.storagespaceConsumed(null);
    }
    getDiffs().combineAndCollectSnapshotBlocks(reclaimContext, file, removed);
    if (file.getBlocks() != null) {
        // Removing the diff may lower the effective replication; queue updates
        // for any block whose replication factor no longer matches.
        short replInDiff = getMaxBlockRepInDiffs(removed);
        short repl = (short) Math.max(file.getPreferredBlockReplication(), replInDiff);
        for (BlockInfo b : file.getBlocks()) {
            if (repl != b.getReplication()) {
                reclaimContext.collectedBlocks().addUpdateReplicationFactor(b, repl);
            }
        }
    }
    // The quota delta is the old usage minus the usage that remains afterwards.
    QuotaCounts current = file.storagespaceConsumed(bsp);
    reclaimContext.quotaDelta().add(oldCounts.subtract(current));
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) AclFeature(org.apache.hadoop.hdfs.server.namenode.AclFeature)
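
The per-type charge above mirrors how a storage policy spreads a block's replicas across media tiers. The following standalone sketch is not from the Hadoop source; it assumes the default policy suite is on the classpath and uses the built-in ONE_SSD policy, which places one replica on SSD and the rest on DISK:

import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class TypeQuotaSketch {
    public static void main(String[] args) {
        BlockStoragePolicy oneSsd = BlockStoragePolicySuite.createDefaultSuite().getPolicy("ONE_SSD");
        // Assume a complete 128 MB block, as in the quota loop above.
        long blockSize = 128L * 1024 * 1024;
        // For replication 3 this is expected to yield [SSD, DISK, DISK].
        List<StorageType> chosen = oneSsd.chooseStorageTypes((short) 3);
        for (StorageType t : chosen) {
            // Transient types are excluded from per-type quota accounting.
            if (t.supportTypeQuota()) {
                System.out.println(t + " charged " + blockSize + " bytes");
            }
        }
    }
}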

Example 87 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From the class TestBlockStoragePolicy, method testChooseSsdOverDisk.

@Test
public void testChooseSsdOverDisk() throws Exception {
    // A test policy that prefers SSD, then DISK, then ARCHIVE, with no fallbacks.
    BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1", new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE }, new StorageType[] {}, new StorageType[] {});
    // Three datanodes on one rack, each starting with a single DISK storage.
    final String[] racks = { "/d1/r1", "/d1/r1", "/d1/r1" };
    final String[] hosts = { "host1", "host2", "host3" };
    final StorageType[] disks = { StorageType.DISK, StorageType.DISK, StorageType.DISK };
    final DatanodeStorageInfo[] diskStorages = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
    final DatanodeDescriptor[] dataNodes = DFSTestUtil.toDatanodeDescriptor(diskStorages);
    // Add an SSD storage to every datanode alongside its DISK.
    for (int i = 0; i < dataNodes.length; i++) {
        BlockManagerTestUtil.updateStorage(dataNodes[i], new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL, StorageType.SSD));
    }
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
    DFSTestUtil.formatNameNode(conf);
    NameNode namenode = new NameNode(conf);
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    for (DatanodeDescriptor datanode : dataNodes) {
        cluster.add(datanode);
    }
    // Ask for three replicas under the SSD-first policy.
    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3, dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false, new HashSet<Node>(), 0, policy, null);
    System.out.println(policy.getName() + ": " + Arrays.asList(targets));
    // The chooser returns an SSD target first and a DISK target second.
    Assert.assertEquals(2, targets.length);
    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) StorageType(org.apache.hadoop.fs.StorageType) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Node(org.apache.hadoop.net.Node) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) NetworkTopology(org.apache.hadoop.net.NetworkTopology) File(java.io.File) Test(org.junit.Test)
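
For readers new to the five-argument BlockStoragePolicy constructor used above, the following sketch labels each parameter; the policy id 10 and the name "EXAMPLE" are made-up values, and the fallback arrays are consulted when a preferred type has no available storage:

// A minimal sketch, not a policy defined by Hadoop itself.
BlockStoragePolicy example = new BlockStoragePolicy(
    (byte) 10,                               // policy id, unique within the suite
    "EXAMPLE",                               // policy name
    new StorageType[] { StorageType.SSD },   // preferred types for new block replicas
    new StorageType[] { StorageType.DISK },  // fallback types at file creation time
    new StorageType[] { StorageType.DISK }); // fallback types during re-replication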

Example 88 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From the class TestBlockStoragePolicy, method checkLocatedBlocks.

/**
 * Asserts that a file's located blocks have the expected block count,
 * per-block replica count, and overall multiset of storage types.
 */
private void checkLocatedBlocks(HdfsLocatedFileStatus status, int blockNum, int replicaNum, StorageType... types) {
    List<StorageType> typeList = Lists.newArrayList();
    Collections.addAll(typeList, types);
    LocatedBlocks lbs = status.getBlockLocations();
    Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
        Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
        // Each observed storage type must consume one expected entry...
        for (StorageType type : lb.getStorageTypes()) {
            Assert.assertTrue(typeList.remove(type));
        }
    }
    // ...and every expected entry must be consumed exactly once.
    Assert.assertTrue(typeList.isEmpty());
}
Also used : StorageType(org.apache.hadoop.fs.StorageType)
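
A hypothetical call site, to show that the varargs list one expected storage type per replica across all blocks; here status would be the HdfsLocatedFileStatus of a one-block file with replication 3 written under an SSD-plus-DISK layout:

// One block, three replicas: one on SSD and two on DISK, in any order.
checkLocatedBlocks(status, 1, 3, StorageType.SSD, StorageType.DISK, StorageType.DISK);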

Example 89 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From the class TestBlockStoragePolicy, method assertStorageType.

/**
 * Asserts that a computed storage-type list matches the expected answers;
 * the last answer is repeated for any replicas beyond answers.length.
 */
static void assertStorageType(List<StorageType> computed, short replication, StorageType... answers) {
    Assert.assertEquals(replication, computed.size());
    final StorageType last = answers[answers.length - 1];
    for (int i = 0; i < computed.size(); i++) {
        final StorageType expected = i < answers.length ? answers[i] : last;
        Assert.assertEquals(expected, computed.get(i));
    }
}
Also used : StorageType(org.apache.hadoop.fs.StorageType)
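
Because the last answer is repeated, callers can assert long layouts compactly. A hypothetical call, assuming policy is the built-in ONE_SSD policy, whose chooseStorageTypes((short) 5) yields one SSD followed by four DISKs:

// The two answers expand to the expected list [SSD, DISK, DISK, DISK, DISK].
assertStorageType(policy.chooseStorageTypes((short) 5), (short) 5, StorageType.SSD, StorageType.DISK);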

Example 90 with StorageType

Use of org.apache.hadoop.fs.StorageType in the Apache Hadoop project.

From the class TestBlockStoragePolicy, method testGetAllStoragePoliciesFromFs.

/**
   * Verify that {@link FileSystem#getAllStoragePolicies} returns all
   * known storage policies for DFS.
   *
   * @throws IOException
   */
@Test
public void testGetAllStoragePoliciesFromFs() throws IOException {
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
    try {
        cluster.waitActive();
        // Get policies via {@link FileSystem#getAllStoragePolicies}
        Set<String> policyNamesSet1 = new HashSet<>();
        for (BlockStoragePolicySpi policy : cluster.getFileSystem().getAllStoragePolicies()) {
            policyNamesSet1.add(policy.getName());
        }
        // Get policies from the default BlockStoragePolicySuite.
        BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
        Set<String> policyNamesSet2 = new HashSet<>();
        for (BlockStoragePolicy policy : suite.getAllPolicies()) {
            policyNamesSet2.add(policy.getName());
        }
        // Ensure that we got the same set of policies in both cases.
        Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
        Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
    } finally {
        cluster.shutdown();
    }
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockStoragePolicySpi(org.apache.hadoop.fs.BlockStoragePolicySpi) Test(org.junit.Test)
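
The same listing is available to any FileSystem client, not just to tests holding a MiniDFSCluster. A minimal client-side sketch, assuming a reachable HDFS is configured in the default Configuration:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;

public class ListPoliciesSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        for (BlockStoragePolicySpi p : fs.getAllStoragePolicies()) {
            // Each policy reports its preferred media in order of preference.
            System.out.println(p.getName() + " -> " + Arrays.toString(p.getStorageTypes()));
        }
        fs.close();
    }
}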

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 91 usages
Test (org.junit.Test): 31 usages
Path (org.apache.hadoop.fs.Path): 27 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 24 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 18 usages
Configuration (org.apache.hadoop.conf.Configuration): 17 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13 usages
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10 usages
IOException (java.io.IOException): 9 usages
ArrayList (java.util.ArrayList): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6 usages
ByteString (com.google.protobuf.ByteString): 5 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5 usages
File (java.io.File): 4 usages
InetSocketAddress (java.net.InetSocketAddress): 4 usages
HashSet (java.util.HashSet): 4 usages