Search in sources :

Example 46 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in the Apache Hadoop project.

From the class TestApplyingStoragePolicy, method testSetAndGetStoragePolicy.

/**
 * Verifies that storage policies can be set and read back on existing paths,
 * that new files inherit the default (HOT) policy, and that both get and set
 * fail with {@link FileNotFoundException} on a non-existent path.
 */
@Test
public void testSetAndGetStoragePolicy() throws IOException {
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    // fooz is deliberately never created; used to exercise the error path.
    final Path fooz = new Path("/fooz");
    DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);
    final BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    final BlockStoragePolicy warm = suite.getPolicy("WARM");
    final BlockStoragePolicy cold = suite.getPolicy("COLD");
    final BlockStoragePolicy hot = suite.getPolicy("HOT");
    // Newly created paths report the default (HOT) policy.
    // Note: assertEquals takes (expected, actual) — expected value first.
    assertEquals(hot, fs.getStoragePolicy(foo));
    assertEquals(hot, fs.getStoragePolicy(bar));
    try {
        fs.getStoragePolicy(fooz);
        // Without this the test would pass even if no exception were thrown.
        fail("getStoragePolicy on a non-existent path should throw FileNotFoundException");
    } catch (Exception e) {
        assertTrue(e instanceof FileNotFoundException);
    }
    /*
     * test: set storage policy
     */
    fs.setStoragePolicy(foo, warm.getName());
    fs.setStoragePolicy(bar, cold.getName());
    try {
        fs.setStoragePolicy(fooz, warm.getName());
        fail("setStoragePolicy on a non-existent path should throw FileNotFoundException");
    } catch (Exception e) {
        assertTrue(e instanceof FileNotFoundException);
    }
    /*
     * test: get storage policy after set
     */
    assertEquals(warm, fs.getStoragePolicy(foo));
    assertEquals(cold, fs.getStoragePolicy(bar));
    try {
        fs.getStoragePolicy(fooz);
        fail("getStoragePolicy on a non-existent path should throw FileNotFoundException");
    } catch (Exception e) {
        assertTrue(e instanceof FileNotFoundException);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) FileNotFoundException(java.io.FileNotFoundException) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) Test(org.junit.Test)

Example 47 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in the Apache Hadoop project.

From the class TestReplicationPolicyWithUpgradeDomain, method testChooseReplicasToDelete.

/**
   * Verify the correct replica is chosen to satisfy both rack and upgrade
   * domain policy.
   * @throws Exception
   */
@Test
public void testChooseReplicasToDelete() throws Exception {
    Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
    nonExcess.add(storages[0]);
    nonExcess.add(storages[1]);
    nonExcess.add(storages[2]);
    nonExcess.add(storages[3]);
    List<DatanodeStorageInfo> excessReplicas;
    BlockStoragePolicySuite POLICY_SUITE = BlockStoragePolicySuite.createDefaultSuite();
    BlockStoragePolicy storagePolicy = POLICY_SUITE.getDefaultPolicy();
    // delete hint accepted.
    DatanodeDescriptor delHintNode = storages[0].getDatanodeDescriptor();
    List<StorageType> excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[3].getDatanodeDescriptor(), delHintNode);
    // assertEquals reports the actual size on failure, unlike assertTrue(size == 1).
    assertEquals(1, excessReplicas.size());
    assertTrue(excessReplicas.contains(storages[0]));
    // delete hint rejected because deleting storages[1] would have
    // cause only two upgrade domains left.
    delHintNode = storages[1].getDatanodeDescriptor();
    excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[3].getDatanodeDescriptor(), delHintNode);
    assertEquals(1, excessReplicas.size());
    assertTrue(excessReplicas.contains(storages[0]));
    // no delete hint, case 1
    nonExcess.clear();
    nonExcess.add(storages[0]);
    nonExcess.add(storages[1]);
    nonExcess.add(storages[4]);
    nonExcess.add(storages[8]);
    excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[8].getDatanodeDescriptor(), null);
    assertEquals(1, excessReplicas.size());
    assertTrue(excessReplicas.contains(storages[1]));
    // no delete hint, case 2
    nonExcess.clear();
    nonExcess.add(storages[0]);
    nonExcess.add(storages[1]);
    nonExcess.add(storages[4]);
    nonExcess.add(storages[5]);
    excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[8].getDatanodeDescriptor(), null);
    assertEquals(1, excessReplicas.size());
    assertTrue(excessReplicas.contains(storages[1]) || excessReplicas.contains(storages[4]));
    // No delete hint, different excess type deletion
    nonExcess.clear();
    nonExcess.add(storages[0]);
    nonExcess.add(storages[1]);
    nonExcess.add(storages[2]);
    nonExcess.add(storages[3]);
    DatanodeStorageInfo excessStorage = DFSTestUtil.createDatanodeStorageInfo("Storage-excess-ID", "localhost", delHintNode.getNetworkLocation(), "foo.com", StorageType.ARCHIVE, delHintNode.getUpgradeDomain());
    nonExcess.add(excessStorage);
    excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[3].getDatanodeDescriptor(), null);
    assertEquals(2, excessReplicas.size());
    assertTrue(excessReplicas.contains(storages[0]));
    assertTrue(excessReplicas.contains(excessStorage));
    // Test SSD related deletion. With different rack settings here, but
    // similar to {@link TestReplicationPolicy#testChooseReplicasToDelete}.
    // The block was initially created on excessSSD(rack r1, UD 4),
    // storages[7](rack r3, UD 2) and storages[8](rack r3, UD 3) with
    // ONESSD_STORAGE_POLICY_NAME storage policy. Replication factor = 3.
    // Right after balancer moves the block from storages[7] to
    // storages[3](rack r2, UD 1), the application changes the storage policy
    // from ONESSD_STORAGE_POLICY_NAME to HOT_STORAGE_POLICY_ID. In this case,
    // we should be able to delete excessSSD since the remaining
    // storages ({storages[3]}, {storages[7], storages[8]})
    // are on different racks (r2, r3) and different UDs (1, 2, 3).
    DatanodeStorageInfo excessSSD = DFSTestUtil.createDatanodeStorageInfo("Storage-excess-SSD-ID", "localhost", storages[0].getDatanodeDescriptor().getNetworkLocation(), "foo.com", StorageType.SSD, null);
    DatanodeStorageInfo[] ssds = { excessSSD };
    DatanodeDescriptor[] ssdNodes = DFSTestUtil.toDatanodeDescriptor(ssds);
    ssdNodes[0].setUpgradeDomain(Integer.toString(4));
    nonExcess.clear();
    nonExcess.add(excessSSD);
    nonExcess.add(storages[3]);
    nonExcess.add(storages[7]);
    nonExcess.add(storages[8]);
    excessTypes = storagePolicy.chooseExcess((short) 3, DatanodeStorageInfo.toStorageTypes(nonExcess));
    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3, excessTypes, storages[3].getDatanodeDescriptor(), storages[7].getDatanodeDescriptor());
    assertEquals(1, excessReplicas.size());
    assertTrue(excessReplicas.contains(excessSSD));
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) ArrayList(java.util.ArrayList) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) TestBlockStoragePolicy(org.apache.hadoop.hdfs.TestBlockStoragePolicy) Test(org.junit.Test)

Example 48 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in the Apache HBase project.

From the class HFileSystem, method getStoragePolicyForOldHDFSVersion.

/**
 * Before Hadoop 2.8.0, there's no getStoragePolicy method for FileSystem interface, and we need
 * to keep compatible with it. See HADOOP-12161 for more details.
 * @param path Path to get storage policy against
 * @return the storage policy name, or {@code null} if it cannot be determined
 */
private String getStoragePolicyForOldHDFSVersion(Path path) {
    try {
        // Only HDFS exposes per-file storage policies; anything else has no answer.
        if (!(this.fs instanceof DistributedFileSystem)) {
            return null;
        }
        DistributedFileSystem dfs = (DistributedFileSystem) this.fs;
        HdfsFileStatus fileStatus = dfs.getClient().getFileInfo(path.toUri().getPath());
        if (fileStatus == null) {
            return null;
        }
        if (unspecifiedStoragePolicyId < 0) {
            // Resolve the "unspecified" sentinel reflectively to avoid a compile-time
            // dependency: older releases define BlockStoragePolicySuite#ID_UNSPECIFIED,
            // later ones moved it to HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED.
            Field unspecifiedField = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
            unspecifiedStoragePolicyId = unspecifiedField.getByte(BlockStoragePolicySuite.class);
        }
        byte policyId = fileStatus.getStoragePolicy();
        if (policyId != unspecifiedStoragePolicyId) {
            // Map the numeric policy id back to its human-readable name.
            for (BlockStoragePolicy candidate : dfs.getStoragePolicies()) {
                if (candidate.getId() == policyId) {
                    return candidate.getName();
                }
            }
        }
    } catch (Throwable e) {
        // Deliberately broad: reflection can surface Errors (e.g. NoSuchFieldError) on
        // some Hadoop versions, and this lookup is best-effort only.
        LOG.warn("failed to get block storage policy of [" + path + "]", e);
    }
    return null;
}
Also used : Field(java.lang.reflect.Field) BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem)

Aggregations

BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)48 Test (org.junit.Test)19 BlockStoragePolicySuite (org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite)15 Path (org.apache.hadoop.fs.Path)12 StorageType (org.apache.hadoop.fs.StorageType)12 IOException (java.io.IOException)8 FileNotFoundException (java.io.FileNotFoundException)6 ArrayList (java.util.ArrayList)6 HashMap (java.util.HashMap)5 LinkedHashMap (java.util.LinkedHashMap)5 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)5 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)4 HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException)3 BlockStoragePolicySpi (org.apache.hadoop.fs.BlockStoragePolicySpi)3 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)3 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)3 QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts)3 Field (java.lang.reflect.Field)2 List (java.util.List)2 FileSystem (org.apache.hadoop.fs.FileSystem)2