Search in sources :

Example 36 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

The class FSDirErasureCodingOp, method setErasureCodingPolicy.

/**
   * Set an erasure coding policy on the given path.
   *
   * @param fsn The namespace
   * @param srcArg The path of the target directory.
   * @param ecPolicyName The erasure coding policy name to set on the target
   *                    directory.
   * @param pc The permission checker used to verify write access to the path.
   * @param logRetryCache whether to record RPC ids in editlog for retry
   *          cache rebuilding
   * @return {@link HdfsFileStatus} of the target directory, for audit logging
   * @throws IOException if path resolution or the xattr update fails
   * @throws HadoopIllegalArgumentException if the policy is not enabled
   * @throws AccessControlException if the user does not have write access
   */
static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn, final String srcArg, final String ecPolicyName, final FSPermissionChecker pc, final boolean logRetryCache) throws IOException, AccessControlException {
    assert fsn.hasWriteLock();
    String src = srcArg;
    FSDirectory fsd = fsn.getFSDirectory();
    final INodesInPath iip;
    List<XAttr> xAttrs;
    fsd.writeLock();
    try {
        // Only a currently-enabled policy may be set; an unknown/disabled name
        // is rejected with the list of valid alternatives.
        ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager().getEnabledPolicyByName(ecPolicyName);
        if (ecPolicy == null) {
            // Arrays.stream(...) streams the array directly, avoiding the
            // intermediate List that Arrays.asList(...).stream() allocated.
            final String sysPolicies = Arrays.stream(fsn.getErasureCodingPolicyManager().getEnabledPolicies()).map(ErasureCodingPolicy::getName).collect(Collectors.joining(", "));
            final String message = String.format("Policy '%s' does not match any " + "enabled erasure" + " coding policies: [%s]. The set of enabled erasure coding " + "policies can be configured at '%s'.", ecPolicyName, sysPolicies, DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY);
            throw new HadoopIllegalArgumentException(message);
        }
        iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
        // Write access is required to set erasure coding policy
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.WRITE);
        }
        src = iip.getPath();
        xAttrs = setErasureCodingPolicyXAttr(fsn, iip, ecPolicy);
    } finally {
        fsd.writeUnlock();
    }
    // Record the xattr change in the edit log; logRetryCache controls whether
    // RPC ids are kept for retry-cache rebuilding (see Javadoc above).
    fsn.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
    return fsd.getAuditFileInfo(iip);
}
Also used : HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) XAttr(org.apache.hadoop.fs.XAttr)

Example 37 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

The class INodeFile, method getPreferredBlockReplication.

/**
 * Returns the preferred block replication for this file: the maximum of the
 * current replication and any replication recorded in snapshot diffs. For
 * striped (erasure coded) files, returns the full EC group width
 * (data units + parity units) instead.
 */
public short getPreferredBlockReplication() {
    short highest = getFileReplication(CURRENT_STATE_ID);
    FileWithSnapshotFeature snapshotFeature = this.getFileWithSnapshotFeature();
    if (snapshotFeature != null) {
        final short snapshotMax = snapshotFeature.getMaxBlockRepInDiffs(null);
        // A deleted current file is governed solely by its snapshots.
        if (snapshotFeature.isCurrentFileDeleted()) {
            return snapshotMax;
        }
        if (snapshotMax > highest) {
            highest = snapshotMax;
        }
    }
    if (!isStriped()) {
        return highest;
    }
    final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getPolicyByID(getErasureCodingPolicyID());
    Preconditions.checkNotNull(ecPolicy, "Could not find EC policy with ID 0x" + StringUtils.byteToHexString(getErasureCodingPolicyID()));
    return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
}
Also used : FileWithSnapshotFeature(org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)

Example 38 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

The class StripedFileTestUtil, method waitBlockGroupsReported.

/**
   * Wait for all the internalBlocks of the blockGroups of the given file to be
   * reported.
   *
   * @param fs the file system holding the striped file
   * @param src path of the file whose block groups are checked
   * @param numDeadDNs number of known-dead datanodes, subtracted from the
   *          expected count of reported internal blocks
   * @throws TimeoutException if the blocks are not fully reported within the
   *          retry budget
   */
public static void waitBlockGroupsReported(DistributedFileSystem fs, String src, int numDeadDNs) throws Exception {
    boolean success;
    final int ATTEMPTS = 40;
    int count = 0;
    final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(new Path(src));
    do {
        success = true;
        count++;
        LocatedBlocks lbs = fs.getClient().getLocatedBlocks(src, 0);
        for (LocatedBlock lb : lbs.getLocatedBlocks()) {
            short expected = (short) (getRealTotalBlockNum((int) lb.getBlockSize(), ecPolicy) - numDeadDNs);
            int reported = lb.getLocations().length;
            if (reported < expected) {
                success = false;
                LOG.info("blockGroup " + lb.getBlock() + " of file " + src + " has reported internalBlocks " + reported + " (desired " + expected + "); locations " + Joiner.on(' ').join(lb.getLocations()));
                Thread.sleep(1000);
                break;
            }
        }
        if (success) {
            LOG.info("All blockGroups of file " + src + " verified to have all internalBlocks.");
        }
    } while (!success && count < ATTEMPTS);
    // BUGFIX: the original tested `count == ATTEMPTS`, which threw a
    // TimeoutException even when the final (40th) attempt succeeded.
    // Time out only when the last check actually failed.
    if (!success) {
        throw new TimeoutException("Timed out waiting for " + src + " to have all the internalBlocks");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) TimeoutException(java.util.concurrent.TimeoutException)

Example 39 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

The class TestErasureCodingPolicies, method testSetInvalidPolicy.

@Test
public void testSetInvalidPolicy() throws IOException {
    // Build a policy locally that is not among the cluster's enabled
    // policies, so setting it must be rejected.
    ECSchema rsSchema = new ECSchema("rs", 4, 2);
    String policyName = "RS-4-2-128k";
    int cellSize = 128 * 1024;
    ErasureCodingPolicy ecPolicy = new ErasureCodingPolicy(policyName, rsSchema, cellSize, (byte) -1);
    String src = "/ecDir4-2";
    final Path ecDir = new Path(src);
    try {
        fs.mkdir(ecDir, FsPermission.getDirDefault());
        fs.getClient().setErasureCodingPolicy(src, ecPolicy.getName());
        // BUGFIX: the original concatenation "thrown for" + "setting..."
        // produced the message "...thrown forsetting..." (missing space).
        fail("HadoopIllegalArgumentException should be thrown for " + "setting an invalid erasure coding policy");
    } catch (Exception e) {
        // The message must name the rejected policy and mention the set of
        // enabled policies (see FSDirErasureCodingOp's error format).
        assertExceptionContains("Policy 'RS-4-2-128k' does not match " + "any enabled erasure coding policies", e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ECSchema(org.apache.hadoop.io.erasurecode.ECSchema) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) Test(org.junit.Test)

Example 40 with ErasureCodingPolicy

use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

The class TestErasureCodingPolicies, method testReplication.

/**
 * Verifies that setReplication on an erasure coded file is a no-op and
 * leaves the file's EC policy unchanged.
 */
@Test
public void testReplication() throws IOException {
    final Path ecTestDir = new Path("/ec");
    fs.mkdir(ecTestDir, FsPermission.getDirDefault());
    fs.setErasureCodingPolicy(ecTestDir, StripedFileTestUtil.getDefaultECPolicy().getName());
    final Path fooFile = new Path(ecTestDir, "foo");
    final int bufferSize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
    // create ec file with replication=0
    fs.create(fooFile, FsPermission.getFileDefault(), true, bufferSize, (short) 0, fs.getDefaultBlockSize(fooFile), null);
    final ErasureCodingPolicy policyBefore = fs.getErasureCodingPolicy(fooFile);
    // set replication should be a no-op
    fs.setReplication(fooFile, (short) 3);
    // should preserve the policy after set replication
    assertEquals(policyBefore, fs.getErasureCodingPolicy(fooFile));
}
Also used : Path(org.apache.hadoop.fs.Path) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Test(org.junit.Test)

Aggregations

ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)46 Path (org.apache.hadoop.fs.Path)18 Test (org.junit.Test)16 IOException (java.io.IOException)9 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)5 LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)5 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)4 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)4 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)4 ServiceException (com.google.protobuf.ServiceException)3 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)3 BlockType (org.apache.hadoop.hdfs.protocol.BlockType)3 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)3 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)3 BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped)3 ActionException (org.smartdata.action.ActionException)3 ByteString (com.google.protobuf.ByteString)2 HashSet (java.util.HashSet)2 List (java.util.List)2 Random (java.util.Random)2