
Example 1 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From class TestDFSIO, method checkStoragePolicy.

private boolean checkStoragePolicy(String storagePolicy, FileSystem fs) throws IOException {
    boolean isValid = false;
    Collection<BlockStoragePolicy> storagePolicies;
    try {
        // The RPC to the NameNode is what can actually fail, so the
        // getAllStoragePolicies() call belongs inside the try block.
        storagePolicies = ((DistributedFileSystem) fs).getAllStoragePolicies();
    } catch (Exception e) {
        throw new IOException("Get block storage policies error: ", e);
    }
    for (BlockStoragePolicy policy : storagePolicies) {
        if (policy.getName().equals(storagePolicy)) {
            isValid = true;
            break;
        }
    }
    if (!isValid) {
        System.out.println("Invalid block storage policy: " + storagePolicy);
        System.out.println("Current supported storage policy list: ");
        for (BlockStoragePolicy policy : storagePolicies) {
            System.out.println(policy.getName());
        }
        return false;
    }
    config.set(STORAGE_POLICY_NAME_KEY, storagePolicy);
    LOG.info("storagePolicy = " + storagePolicy);
    return true;
}
Also used: BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), IOException (java.io.IOException)
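
A minimal standalone sketch of the same policy-name lookup, assuming a running HDFS cluster reachable through the default Configuration (the fs.defaultFS value comes from your environment; the class name is illustrative):

import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class ListStoragePolicies {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // getAllStoragePolicies() is specific to DistributedFileSystem,
        // so guard the cast instead of risking a ClassCastException.
        if (!(fs instanceof DistributedFileSystem)) {
            System.err.println("Not an HDFS file system: " + fs.getUri());
            return;
        }
        Collection<BlockStoragePolicy> policies =
            ((DistributedFileSystem) fs).getAllStoragePolicies();
        for (BlockStoragePolicy policy : policies) {
            // On the default suite this prints ids and names such as
            // HOT, WARM, COLD, ONE_SSD, ALL_SSD.
            System.out.println(policy.getId() + "\t" + policy.getName());
        }
    }
}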

Example 2 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From class TestReplicationPolicy, method testChooseReplicasToDelete.

@Test
public void testChooseReplicasToDelete() throws Exception {
    Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
    nonExcess.add(storages[0]);
    nonExcess.add(storages[1]);
    nonExcess.add(storages[2]);
    nonExcess.add(storages[3]);
    List<DatanodeStorageInfo> excessReplicas;
    BlockStoragePolicySuite POLICY_SUITE = BlockStoragePolicySuite.createDefaultSuite();
    BlockStoragePolicy storagePolicy = POLICY_SUITE.getDefaultPolicy();
    DatanodeStorageInfo excessSSD = DFSTestUtil.createDatanodeStorageInfo("Storage-excess-SSD-ID", "localhost", storages[0].getDatanodeDescriptor().getNetworkLocation(), "foo.com", StorageType.SSD, null);
    updateHeartbeatWithUsage(excessSSD.getDatanodeDescriptor(), 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
    // use delete hint case.
    DatanodeDescriptor delHintNode = storages[0].getDatanodeDescriptor();
    List<StorageType> excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[3].getDatanodeDescriptor(), delHintNode);
    assertEquals(1, excessReplicas.size());
    assertTrue(excessReplicas.contains(storages[0]));
    // Excess type deletion
    DatanodeStorageInfo excessStorage = DFSTestUtil.createDatanodeStorageInfo("Storage-excess-ID", "localhost", delHintNode.getNetworkLocation(), "foo.com", StorageType.ARCHIVE, null);
    nonExcess.add(excessStorage);
    excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[3].getDatanodeDescriptor(), null);
    assertTrue(excessReplicas.contains(excessStorage));
    // The block was initially created on excessSSD(rack r1),
    // storages[4](rack r3) and storages[5](rack r3) with
    // ONESSD_STORAGE_POLICY_NAME storage policy. Replication factor = 3.
    // Right after balancer moves the block from storages[5] to
    // storages[3](rack r2), the application changes the storage policy from
    // ONESSD_STORAGE_POLICY_NAME to HOT_STORAGE_POLICY_NAME. In this case,
    // we should be able to delete excessSSD since the remaining
    // storages ({storages[3]}, {storages[4], storages[5]})
    // are on different racks (r2, r3).
    nonExcess.clear();
    nonExcess.add(excessSSD);
    nonExcess.add(storages[3]);
    nonExcess.add(storages[4]);
    nonExcess.add(storages[5]);
    excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[3].getDatanodeDescriptor(), storages[5].getDatanodeDescriptor());
    assertEquals(1, excessReplicas.size());
    assertTrue(excessReplicas.contains(excessSSD));
    // Similar to above, but after policy change and before deletion,
    // the replicas are located on excessSSD(rack r1), storages[1](rack r1),
    // storages[2](rack r2) and storages[3](rack r2). Replication factor = 3.
    // In this case, we should be able to delete excessSSD since the remaining
    // storages ({storages[1]}, {storages[2], storages[3]})
    // are on different racks (r1, r2).
    nonExcess.clear();
    nonExcess.add(excessSSD);
    nonExcess.add(storages[1]);
    nonExcess.add(storages[2]);
    nonExcess.add(storages[3]);
    excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[1].getDatanodeDescriptor(), storages[3].getDatanodeDescriptor());
    assertEquals(1, excessReplicas.size());
    assertTrue(excessReplicas.contains(excessSSD));
    // Similar to above, but after policy change and before deletion,
    // the replicas are located on excessSSD(rack r1), storages[2](rack r2)
    // Replication factor = 1. We should be able to delete excessSSD.
    nonExcess.clear();
    nonExcess.add(excessSSD);
    nonExcess.add(storages[2]);
    excessTypes = storagePolicy.chooseExcess((short) 1, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 1, excessTypes, storages[2].getDatanodeDescriptor(), null);
    assertEquals(1, excessReplicas.size());
    assertTrue(excessReplicas.contains(excessSSD));
    // The block was initially created on excessSSD(rack r1),
    // storages[4](rack r3) and storages[5](rack r3) with
    // ONESSD_STORAGE_POLICY_NAME storage policy. Replication factor = 2.
    // In this case, no replica can be chosen as the excessive replica by
    // chooseReplicasToDelete because if the SSD storage is deleted,
    // the remaining storages[4] and storages[5] are the same rack (r3),
    // violating block placement policy (i.e. the number of racks >= 2).
    // TODO BlockPlacementPolicyDefault should be able to rebalance the replicas
    // and then delete excessSSD.
    nonExcess.clear();
    nonExcess.add(excessSSD);
    nonExcess.add(storages[4]);
    nonExcess.add(storages[5]);
    excessTypes = storagePolicy.chooseExcess((short) 2, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 2, excessTypes, null, null);
    assertEquals(0, excessReplicas.size());
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), ArrayList (java.util.ArrayList), TestBlockStoragePolicy (org.apache.hadoop.hdfs.TestBlockStoragePolicy), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), Test (org.junit.Test)
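
The pivotal call in this test is BlockStoragePolicy.chooseExcess, which diffs the storage types that actually hold replicas against the types the policy would have chosen for the target replication. A minimal sketch of that call in isolation, assuming the default suite (whose default policy, HOT, places all replicas on DISK); the class name is illustrative:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class ChooseExcessDemo {
    public static void main(String[] args) {
        BlockStoragePolicy hot =
            BlockStoragePolicySuite.createDefaultSuite().getDefaultPolicy();
        // Four replicas exist but the replication factor is 3; the SSD
        // replica is not among the types HOT would have chosen.
        List<StorageType> chosen = Arrays.asList(
            StorageType.DISK, StorageType.DISK,
            StorageType.DISK, StorageType.SSD);
        List<StorageType> excess = hot.chooseExcess((short) 3, chosen);
        // Expected to print [SSD]: the SSD replica is the excess one.
        System.out.println(excess);
    }
}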

Example 3 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From class PBHelperClient, method convertStoragePolicies.

public static BlockStoragePolicy[] convertStoragePolicies(List<BlockStoragePolicyProto> policyProtos) {
    if (policyProtos == null || policyProtos.isEmpty()) {
        return new BlockStoragePolicy[0];
    }
    BlockStoragePolicy[] policies = new BlockStoragePolicy[policyProtos.size()];
    int i = 0;
    for (BlockStoragePolicyProto proto : policyProtos) {
        policies[i++] = convert(proto);
    }
    return policies;
}
Also used: BlockStoragePolicyProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
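
A round-trip sketch of the single-policy converters this loop relies on. It assumes PBHelperClient also exposes convert(BlockStoragePolicy) and convert(BlockStoragePolicyProto) in both directions, as recent Hadoop versions do in this same class; treat that and the class name as assumptions:

import java.util.Arrays;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class PolicyProtoRoundTrip {
    public static void main(String[] args) {
        BlockStoragePolicy hot =
            BlockStoragePolicySuite.createDefaultSuite().getDefaultPolicy();
        // Serialize one policy to its wire representation, then convert
        // a singleton list back through convertStoragePolicies.
        BlockStoragePolicyProto proto = PBHelperClient.convert(hot);
        BlockStoragePolicy[] policies =
            PBHelperClient.convertStoragePolicies(Arrays.asList(proto));
        // The round-tripped policy keeps its id and name.
        System.out.println(policies[0].getId() + " " + policies[0].getName());
    }
}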

Example 4 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From class BlockManager, method chooseExcessRedundancies.

private void chooseExcessRedundancies(final Collection<DatanodeStorageInfo> nonExcess, BlockInfo storedBlock, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
    assert namesystem.hasWriteLock();
    // First look up the block collection for the stored block.
    BlockCollection bc = getBlockCollection(storedBlock);
    if (storedBlock.isStriped()) {
        chooseExcessRedundancyStriped(bc, nonExcess, storedBlock, delNodeHint);
    } else {
        final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(bc.getStoragePolicyID());
        final List<StorageType> excessTypes = storagePolicy.chooseExcess(replication, DatanodeStorageInfo.toStorageTypes(nonExcess));
        chooseExcessRedundancyContiguous(nonExcess, storedBlock, replication, addedNode, delNodeHint, excessTypes);
    }
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
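
The indirection worth noting here is storagePolicySuite.getPolicy(bc.getStoragePolicyID()): a file stores only a one-byte policy id, and the suite maps it back to the full BlockStoragePolicy. A minimal sketch of that id-based lookup against the default suite, assuming the standard policy-id constants in HdfsConstants; the class name is illustrative:

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class PolicyByIdDemo {
    public static void main(String[] args) {
        BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
        // Files carry only the byte id; the suite resolves it to a policy.
        BlockStoragePolicy cold = suite.getPolicy(HdfsConstants.COLD_STORAGE_POLICY_ID);
        BlockStoragePolicy hot = suite.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_ID);
        System.out.println(cold.getName()); // COLD
        System.out.println(hot.getName());  // HOT
    }
}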

Example 5 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From class BlockManager, method chooseTarget4NewBlock.

/**
   * Choose target datanodes for creating a new block.
   * 
   * @throws IOException
   *           if the number of targets < minimum replication.
   * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
   *      Set, long, List, BlockStoragePolicy, EnumSet)
   */
public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src, final int numOfReplicas, final Node client, final Set<Node> excludedNodes, final long blocksize, final List<String> favoredNodes, final byte storagePolicyID, final BlockType blockType, final EnumSet<AddBlockFlag> flags) throws IOException {
    List<DatanodeDescriptor> favoredDatanodeDescriptors = getDatanodeDescriptors(favoredNodes);
    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
    final BlockPlacementPolicy blockplacement = placementPolicies.getPolicy(blockType);
    final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src, numOfReplicas, client, excludedNodes, blocksize, favoredDatanodeDescriptors, storagePolicy, flags);
    if (targets.length < minReplication) {
        throw new IOException("File " + src + " could only be replicated to " + targets.length + " nodes instead of minReplication (=" + minReplication + ").  There are " + getDatanodeManager().getNetworkTopology().getNumOfLeaves() + " datanode(s) running and " + (excludedNodes == null ? "no" : excludedNodes.size()) + " node(s) are excluded in this operation.");
    }
    return targets;
}
Also used: BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), IOException (java.io.IOException)
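
On the client side, the storagePolicyID that eventually reaches chooseTarget4NewBlock is derived from the policy set on the file's path. A hedged end-to-end sketch, assuming default-Configuration access to an HDFS cluster; the /data path and class name are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class StoragePolicyWrite {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/data");
        fs.mkdirs(dir);
        // New blocks under /data get ONE_SSD placement: one replica on
        // SSD and the rest on DISK, subject to available storage types.
        fs.setStoragePolicy(dir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
        try (FSDataOutputStream out = fs.create(new Path(dir, "file1"))) {
            out.writeBytes("hello");
        }
    }
}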

Aggregations

BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 48 usages
Test (org.junit.Test): 19 usages
BlockStoragePolicySuite (org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite): 15 usages
Path (org.apache.hadoop.fs.Path): 12 usages
StorageType (org.apache.hadoop.fs.StorageType): 12 usages
IOException (java.io.IOException): 8 usages
FileNotFoundException (java.io.FileNotFoundException): 6 usages
ArrayList (java.util.ArrayList): 6 usages
HashMap (java.util.HashMap): 5 usages
LinkedHashMap (java.util.LinkedHashMap): 5 usages
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 5 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 4 usages
HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException): 3 usages
BlockStoragePolicySpi (org.apache.hadoop.fs.BlockStoragePolicySpi): 3 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3 usages
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 3 usages
Field (java.lang.reflect.Field): 2 usages
List (java.util.List): 2 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 2 usages