Search in sources :

Example 11 with BlockStoragePolicy

use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

The method setNewINodeStoragePolicy of the class FSDirWriteFileOp.

/**
 * Applies the initial storage policy to a newly created file.
 *
 * Lazy-persist files get the LAZY_PERSIST policy explicitly; for all other
 * files, the policy inherited from an ancestor directory is stamped onto the
 * file only when that policy is flagged copy-on-create.
 *
 * @param bm            block manager used to resolve storage policies
 * @param iip           resolved path whose last INode is the new file
 * @param isLazyPersist whether the file was created with the lazy-persist flag
 * @throws IOException if the LAZY_PERSIST policy is requested but disabled
 */
private static void setNewINodeStoragePolicy(BlockManager bm, INodesInPath iip, boolean isLazyPersist) throws IOException {
    final INodeFile newFile = iip.getLastINode().asFile();
    final int latestSnapshot = iip.getLatestSnapshotId();
    if (!isLazyPersist) {
        // Copy the effective ancestor policy onto the file, but only when the
        // policy explicitly asks to be materialized at create time.
        final BlockStoragePolicy inherited = bm.getStoragePolicy(newFile.getStoragePolicyID());
        if (inherited != null && inherited.isCopyOnCreateFile()) {
            newFile.setStoragePolicyID(inherited.getId(), latestSnapshot);
        }
        return;
    }
    final BlockStoragePolicy lazyPersist = bm.getStoragePolicy("LAZY_PERSIST");
    // CreateFile.
    if (lazyPersist == null) {
        throw new HadoopIllegalArgumentException("The LAZY_PERSIST storage policy has been disabled " + "by the administrator.");
    }
    newFile.setStoragePolicyID(lazyPersist.getId(), latestSnapshot);
}
Also used : HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)

Example 12 with BlockStoragePolicy

use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

The method getStoragePolicy of the class StoragePolicySummary.

/**
 * Finds the storage policy whose storage types match the given combination.
 *
 * A policy matches when its (sorted) storage types form a prefix of
 * {@code storageTypes} and every surplus entry in {@code storageTypes}
 * equals the policy's last storage type.
 *
 * @param storageTypes sorted array of storageTypes
 * @return Storage Policy which matches the specific storage Combination,
 *         or {@code null} when no policy matches
 */
private BlockStoragePolicy getStoragePolicy(StorageType[] storageTypes) {
    for (BlockStoragePolicy candidate : storagePolicies) {
        // Sort a private copy so the candidate policy is left untouched.
        StorageType[] wanted = candidate.getStorageTypes();
        wanted = Arrays.copyOf(wanted, wanted.length);
        Arrays.sort(wanted);
        if (wanted.length > storageTypes.length) {
            continue;
        }
        // The policy's types must line up one-for-one with the leading
        // entries of storageTypes.
        boolean prefixMatches = true;
        int idx = 0;
        while (idx < wanted.length) {
            if (wanted[idx] != storageTypes[idx]) {
                prefixMatches = false;
                break;
            }
            idx++;
        }
        if (!prefixMatches) {
            continue;
        }
        // Any remaining entries must all equal the policy's final type.
        boolean tailMatches = true;
        for (int k = wanted.length; k < storageTypes.length; k++) {
            if (storageTypes[k] != wanted[idx - 1]) {
                tailMatches = false;
                break;
            }
        }
        if (tailMatches) {
            return candidate;
        }
    }
    return null;
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)

Example 13 with BlockStoragePolicy

use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

The method cleanFile of the class FileWithSnapshotFeature.

/**
 * Cleans up either the current file or one of its snapshots.
 *
 * When {@code snapshotId} is {@link Snapshot#CURRENT_STATE_ID} the live file
 * is deleted (its blocks collected and the quota delta recorded); otherwise
 * only the named snapshot diff is removed.
 *
 * @param reclaimContext  collects blocks/quota to reclaim
 * @param file            the file being cleaned
 * @param snapshotId      snapshot to delete, or CURRENT_STATE_ID for the file
 * @param priorSnapshotId the snapshot preceding the one being deleted
 * @param storagePolicyId storage policy used to compute space consumption
 */
public void cleanFile(INode.ReclaimContext reclaimContext, final INodeFile file, final int snapshotId, int priorSnapshotId, byte storagePolicyId) {
    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
        // Removing a specific snapshot: fold its diff into the prior one.
        final int prior = getDiffs().updatePrior(snapshotId, priorSnapshotId);
        diffs.deleteSnapshotDiff(reclaimContext, snapshotId, prior, file);
        return;
    }
    // Deleting the current file while it still carries the snapshot feature.
    if (!isCurrentFileDeleted()) {
        file.recordModification(priorSnapshotId);
        deleteCurrentFile();
    }
    final BlockStoragePolicy policy = reclaimContext.storagePolicySuite().getPolicy(storagePolicyId);
    // Record the storagespace freed by collecting the file's blocks.
    final QuotaCounts before = file.storagespaceConsumed(policy);
    collectBlocksAndClear(reclaimContext, file);
    final QuotaCounts after = file.storagespaceConsumed(policy);
    reclaimContext.quotaDelta().add(before.subtract(after));
}
Also used : BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts)

Example 14 with BlockStoragePolicy

use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

The method testStoragePolicy of the class BaseTestHttpFSWith.

private void testStoragePolicy() throws Exception {
    Assume.assumeFalse("Assume its not a local FS", isLocalFS());
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path path = new Path(getProxiedFSTestDir(), "policy.txt");
    FileSystem httpfs = getHttpFSFileSystem();
    // test getAllStoragePolicies
    BlockStoragePolicy[] dfsPolicies = (BlockStoragePolicy[]) fs.getAllStoragePolicies().toArray();
    BlockStoragePolicy[] httpPolicies = (BlockStoragePolicy[]) httpfs.getAllStoragePolicies().toArray();
    Assert.assertArrayEquals("Policy array returned from the DFS and HttpFS should be equals", dfsPolicies, httpPolicies);
    // test get/set/unset policies
    DFSTestUtil.createFile(fs, path, 0, (short) 1, 0L);
    // get defaultPolicy
    BlockStoragePolicySpi defaultdfsPolicy = fs.getStoragePolicy(path);
    // set policy through webhdfs
    httpfs.setStoragePolicy(path, HdfsConstants.COLD_STORAGE_POLICY_NAME);
    // get policy from dfs
    BlockStoragePolicySpi dfsPolicy = fs.getStoragePolicy(path);
    // get policy from webhdfs
    BlockStoragePolicySpi httpFsPolicy = httpfs.getStoragePolicy(path);
    Assert.assertEquals("Storage policy returned from the get API should" + " be same as set policy", HdfsConstants.COLD_STORAGE_POLICY_NAME.toString(), httpFsPolicy.getName());
    Assert.assertEquals("Storage policy returned from the DFS and HttpFS should be equals", httpFsPolicy, dfsPolicy);
    // unset policy
    httpfs.unsetStoragePolicy(path);
    Assert.assertEquals("After unset storage policy, the get API shoudld" + " return the default policy", defaultdfsPolicy, httpfs.getStoragePolicy(path));
    fs.close();
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) BlockStoragePolicySpi(org.apache.hadoop.fs.BlockStoragePolicySpi)

Example 15 with BlockStoragePolicy

use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

The method testHdfsAdminStoragePolicies of the class TestHdfsAdmin.

/**
 * Test that we can set, get, unset storage policies via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminStoragePolicies() throws Exception {
    final HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    final FileSystem fs = FileSystem.get(conf);
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    final Path wow = new Path(bar, "wow");
    DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
    final BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    final BlockStoragePolicy warm = suite.getPolicy("WARM");
    final BlockStoragePolicy cold = suite.getPolicy("COLD");
    final BlockStoragePolicy hot = suite.getPolicy("HOT");
    // Set a distinct policy on each level of the /foo/bar/wow tree, then
    // confirm each one reads back as set.
    hdfsAdmin.setStoragePolicy(foo, warm.getName());
    hdfsAdmin.setStoragePolicy(bar, cold.getName());
    hdfsAdmin.setStoragePolicy(wow, hot.getName());
    assertEquals(hdfsAdmin.getStoragePolicy(foo), warm);
    assertEquals(hdfsAdmin.getStoragePolicy(bar), cold);
    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
    // After unsetting, every path should fall back to the default (HOT).
    hdfsAdmin.unsetStoragePolicy(foo);
    hdfsAdmin.unsetStoragePolicy(bar);
    hdfsAdmin.unsetStoragePolicy(wow);
    assertEquals(hdfsAdmin.getStoragePolicy(foo), hot);
    assertEquals(hdfsAdmin.getStoragePolicy(bar), hot);
    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
    // The policy names listed via HdfsAdmin and via the default suite must
    // be the same set.
    final Set<String> adminPolicyNames = new HashSet<>();
    for (BlockStoragePolicySpi policy : hdfsAdmin.getAllStoragePolicies()) {
        adminPolicyNames.add(policy.getName());
    }
    final Set<String> suitePolicyNames = new HashSet<>();
    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
        suitePolicyNames.add(policy.getName());
    }
    Assert.assertTrue(Sets.difference(adminPolicyNames, suitePolicyNames).isEmpty());
    Assert.assertTrue(Sets.difference(suitePolicyNames, adminPolicyNames).isEmpty());
}
Also used : Path(org.apache.hadoop.fs.Path) BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) FileSystem(org.apache.hadoop.fs.FileSystem) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) BlockStoragePolicySpi(org.apache.hadoop.fs.BlockStoragePolicySpi) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)48 Test (org.junit.Test)19 BlockStoragePolicySuite (org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite)15 Path (org.apache.hadoop.fs.Path)12 StorageType (org.apache.hadoop.fs.StorageType)12 IOException (java.io.IOException)8 FileNotFoundException (java.io.FileNotFoundException)6 ArrayList (java.util.ArrayList)6 HashMap (java.util.HashMap)5 LinkedHashMap (java.util.LinkedHashMap)5 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)5 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)4 HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException)3 BlockStoragePolicySpi (org.apache.hadoop.fs.BlockStoragePolicySpi)3 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)3 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)3 QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts)3 Field (java.lang.reflect.Field)2 List (java.util.List)2 FileSystem (org.apache.hadoop.fs.FileSystem)2