Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class FSDirErasureCodingOp, method setErasureCodingPolicy.
/**
 * Set an erasure coding policy on the given path.
 *
 * @param fsn The namespace
 * @param srcArg The path of the target directory.
 * @param ecPolicyName The erasure coding policy name to set on the target
 *          directory.
 * @param pc The permission checker for the requesting user.
 * @param logRetryCache whether to record RPC ids in editlog for retry
 *          cache rebuilding
 * @return {@link HdfsFileStatus}
 * @throws IOException
 * @throws HadoopIllegalArgumentException if the policy is not enabled
 * @throws AccessControlException if the user does not have write access
 */
static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn,
    final String srcArg, final String ecPolicyName,
    final FSPermissionChecker pc, final boolean logRetryCache)
    throws IOException, AccessControlException {
  assert fsn.hasWriteLock();
  String src = srcArg;
  FSDirectory fsd = fsn.getFSDirectory();
  final INodesInPath iip;
  List<XAttr> xAttrs;
  fsd.writeLock();
  try {
    ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
        .getEnabledPolicyByName(ecPolicyName);
    if (ecPolicy == null) {
      final String sysPolicies = Arrays
          .asList(fsn.getErasureCodingPolicyManager().getEnabledPolicies())
          .stream()
          .map(ErasureCodingPolicy::getName)
          .collect(Collectors.joining(", "));
      final String message = String.format("Policy '%s' does not match any "
          + "enabled erasure coding policies: [%s]. The set of enabled "
          + "erasure coding policies can be configured at '%s'.",
          ecPolicyName, sysPolicies,
          DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY);
      throw new HadoopIllegalArgumentException(message);
    }
    iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
    // Write access is required to set erasure coding policy
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }
    src = iip.getPath();
    xAttrs = setErasureCodingPolicyXAttr(fsn, iip, ecPolicy);
  } finally {
    fsd.writeUnlock();
  }
  fsn.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
  return fsd.getAuditFileInfo(iip);
}
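For context, this NameNode-side operation is what a client reaches through DistributedFileSystem.setErasureCodingPolicy. The sketch below is illustrative only, assuming fs is an already-initialized DistributedFileSystem and that a policy named RS-6-3-64k is in the enabled set; it shows a policy being set on a directory and inherited by a file created beneath it.

Path dir = new Path("/striped");
fs.mkdirs(dir);
// Throws HadoopIllegalArgumentException if the named policy is not enabled.
fs.setErasureCodingPolicy(dir, "RS-6-3-64k");
Path file = new Path(dir, "data");
try (FSDataOutputStream out = fs.create(file)) {
  out.writeBytes("hello");
}
// The file inherits the policy set on its parent directory.
ErasureCodingPolicy applied = fs.getErasureCodingPolicy(file);
System.out.println("Policy on " + file + ": " + applied.getName());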
Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class INodeFile, method getPreferredBlockReplication.
public short getPreferredBlockReplication() {
  short max = getFileReplication(CURRENT_STATE_ID);
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf != null) {
    // Snapshots may pin a higher replication than the current file state.
    short maxInSnapshot = sf.getMaxBlockRepInDiffs(null);
    if (sf.isCurrentFileDeleted()) {
      return maxInSnapshot;
    }
    max = maxInSnapshot > max ? maxInSnapshot : max;
  }
  if (!isStriped()) {
    return max;
  }
  // For a striped file, the preferred "replication" is the total number of
  // internal blocks in a block group: data units plus parity units.
  ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getPolicyByID(getErasureCodingPolicyID());
  Preconditions.checkNotNull(ecPolicy, "Could not find EC policy with ID 0x"
      + StringUtils.byteToHexString(getErasureCodingPolicyID()));
  return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
}
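The arithmetic in the striped branch is worth spelling out: the "preferred replication" of a striped file is the width of a full block group. A worked example, assuming a hypothetical RS(6,3) layout:

// Illustrative arithmetic only, assuming an RS(6,3) policy: each full
// block group holds 6 data blocks plus 3 parity blocks, so block
// management treats the file as if each group had 9 "replicas".
int numDataUnits = 6;
int numParityUnits = 3;
short preferred = (short) (numDataUnits + numParityUnits); // 9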
Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class StripedFileTestUtil, method waitBlockGroupsReported.
/**
* Wait for all the internalBlocks of the blockGroups of the given file to be
* reported.
*/
public static void waitBlockGroupsReported(DistributedFileSystem fs,
    String src, int numDeadDNs) throws Exception {
  boolean success;
  final int ATTEMPTS = 40;
  int count = 0;
  final ErasureCodingPolicy ecPolicy =
      fs.getErasureCodingPolicy(new Path(src));
  do {
    success = true;
    count++;
    LocatedBlocks lbs = fs.getClient().getLocatedBlocks(src, 0);
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      short expected = (short) (getRealTotalBlockNum(
          (int) lb.getBlockSize(), ecPolicy) - numDeadDNs);
      int reported = lb.getLocations().length;
      if (reported < expected) {
        success = false;
        LOG.info("blockGroup " + lb.getBlock() + " of file " + src
            + " has reported internalBlocks " + reported
            + " (desired " + expected + "); locations "
            + Joiner.on(' ').join(lb.getLocations()));
        Thread.sleep(1000);
        break;
      }
    }
    if (success) {
      LOG.info("All blockGroups of file " + src
          + " verified to have all internalBlocks.");
    }
  } while (!success && count < ATTEMPTS);
  if (count == ATTEMPTS) {
    throw new TimeoutException("Timed out waiting for " + src
        + " to have all the internalBlocks");
  }
}
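The helper getRealTotalBlockNum is not shown on this page. A plausible sketch of the arithmetic it performs, assuming cell-based striping; the actual StripedFileTestUtil helper may differ in detail:

static int getRealTotalBlockNum(int numBytes, ErasureCodingPolicy ecPolicy) {
  final int cellSize = ecPolicy.getCellSize();
  final int dataBlkNum = ecPolicy.getNumDataUnits();
  // A partial block group materializes only as many data blocks as it has
  // started cells; a full group always has dataBlkNum data blocks.
  final int realDataBlkNum = (numBytes <= (long) cellSize * dataBlkNum)
      ? (numBytes - 1) / cellSize + 1
      : dataBlkNum;
  // Parity blocks are written for every group, full or partial.
  return realDataBlkNum + ecPolicy.getNumParityUnits();
}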
Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class TestErasureCodingPolicies, method testSetInvalidPolicy.
@Test
public void testSetInvalidPolicy() throws IOException {
  ECSchema rsSchema = new ECSchema("rs", 4, 2);
  String policyName = "RS-4-2-128k";
  int cellSize = 128 * 1024;
  ErasureCodingPolicy ecPolicy =
      new ErasureCodingPolicy(policyName, rsSchema, cellSize, (byte) -1);
  String src = "/ecDir4-2";
  final Path ecDir = new Path(src);
  try {
    fs.mkdir(ecDir, FsPermission.getDirDefault());
    fs.getClient().setErasureCodingPolicy(src, ecPolicy.getName());
    fail("HadoopIllegalArgumentException should be thrown for "
        + "setting an invalid erasure coding policy");
  } catch (Exception e) {
    assertExceptionContains("Policy 'RS-4-2-128k' does not match "
        + "any enabled erasure coding policies", e);
  }
}
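The rejection above comes from the enabled-policy check in FSDirErasureCodingOp.setErasureCodingPolicy shown earlier. A minimal sketch of the corresponding configuration, assuming RS-6-3-64k is a valid system policy name in this version:

// Hypothetical setup: the enabled set is a comma-separated list of policy
// names under the key cited in the error message.
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, "RS-6-3-64k");
// A name outside that list, such as the ad-hoc RS-4-2-128k above, is
// rejected with HadoopIllegalArgumentException.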
Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class TestErasureCodingPolicies, method testReplication.
@Test
public void testReplication() throws IOException {
  final Path testDir = new Path("/ec");
  fs.mkdir(testDir, FsPermission.getDirDefault());
  fs.setErasureCodingPolicy(testDir,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  final Path fooFile = new Path(testDir, "foo");
  // Create an EC file with replication=0.
  fs.create(fooFile, FsPermission.getFileDefault(), true,
      conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) 0, fs.getDefaultBlockSize(fooFile), null);
  ErasureCodingPolicy policy = fs.getErasureCodingPolicy(fooFile);
  // setReplication should be a no-op on a striped file.
  fs.setReplication(fooFile, (short) 3);
  // The erasure coding policy should be preserved after setReplication.
  assertEquals(policy, fs.getErasureCodingPolicy(fooFile));
}
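One way to see why setReplication is a no-op here: for a striped file, each located "block" is a block group whose locations span its internal blocks rather than three replicas of a single block. A hedged follow-up sketch, reusing fooFile from the test above:

// List the block locations of the striped file; each entry corresponds
// to a block group rather than a replicated block.
BlockLocation[] locs = fs.getFileBlockLocations(fooFile, 0,
    fs.getFileStatus(fooFile).getLen());
for (BlockLocation loc : locs) {
  System.out.println("block group hosts: " + String.join(",", loc.getHosts()));
}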