Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class ClientNamenodeProtocolTranslatorPB, method getErasureCodingPolicies.
@Override
public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
  try {
    GetErasureCodingPoliciesResponseProto response =
        rpcProxy.getErasureCodingPolicies(null, VOID_GET_EC_POLICIES_REQUEST);
    ErasureCodingPolicy[] ecPolicies =
        new ErasureCodingPolicy[response.getEcPoliciesCount()];
    int i = 0;
    for (ErasureCodingPolicyProto ecPolicyProto : response.getEcPoliciesList()) {
      ecPolicies[i++] = PBHelperClient.convertErasureCodingPolicy(ecPolicyProto);
    }
    return ecPolicies;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
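The translator above is the client-side plumbing behind the public API for listing policies. A minimal sketch of listing the available policies from application code might look like the following; it assumes the same API version as the snippets on this page, where DistributedFileSystem.getAllErasureCodingPolicies() returns a Collection of ErasureCodingPolicy, and the NameNode URI is a placeholder.

import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class ListEcPolicies {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "hdfs://localhost:8020" is a placeholder NameNode address.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    if (fs instanceof DistributedFileSystem) {
      Collection<ErasureCodingPolicy> policies =
          ((DistributedFileSystem) fs).getAllErasureCodingPolicies();
      for (ErasureCodingPolicy policy : policies) {
        // Each policy reports its name and cell size.
        System.out.println(policy.getName()
            + " (cell size: " + policy.getCellSize() + " bytes)");
      }
    }
    fs.close();
  }
}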
Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class ClientNamenodeProtocolServerSideTranslatorPB, method getErasureCodingPolicy.
@Override
public GetErasureCodingPolicyResponseProto getErasureCodingPolicy(
    RpcController controller, GetErasureCodingPolicyRequestProto request)
    throws ServiceException {
  try {
    ErasureCodingPolicy ecPolicy = server.getErasureCodingPolicy(request.getSrc());
    GetErasureCodingPolicyResponseProto.Builder builder =
        GetErasureCodingPolicyResponseProto.newBuilder();
    if (ecPolicy != null) {
      builder.setEcPolicy(PBHelperClient.convertErasureCodingPolicy(ecPolicy));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
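On the client side this server translator is reached through DistributedFileSystem.getErasureCodingPolicy(Path), with the same convention as the builder above: a null policy means the path uses plain replication. A minimal sketch, assuming the configured default filesystem is HDFS; the path is a placeholder for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class ShowEcPolicyForPath {
  public static void main(String[] args) throws Exception {
    // Placeholder path; the default filesystem is assumed to be HDFS.
    Path path = new Path("/striped/data");
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(path);
    // Mirrors the null check in the translator: no policy means replication.
    if (policy == null) {
      System.out.println(path + " uses plain replication");
    } else {
      System.out.println(path + " uses EC policy " + policy.getName());
    }
    dfs.close();
  }
}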
Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class TestDFSIO, method checkErasureCodePolicy.
private boolean checkErasureCodePolicy(String erasureCodePolicyName,
    FileSystem fs, TestType testType) throws IOException {
  Collection<ErasureCodingPolicy> list =
      ((DistributedFileSystem) fs).getAllErasureCodingPolicies();
  boolean isValid = false;
  for (ErasureCodingPolicy ec : list) {
    if (erasureCodePolicyName.equals(ec.getName())) {
      isValid = true;
      break;
    }
  }
  if (!isValid) {
    System.out.println("Invalid erasure code policy: " + erasureCodePolicyName);
    System.out.println("Current supported erasure code policy list: ");
    for (ErasureCodingPolicy ec : list) {
      System.out.println(ec.getName());
    }
    return false;
  }
  if (testType == TestType.TEST_TYPE_APPEND || testType == TestType.TEST_TYPE_TRUNCATE) {
    System.out.println("So far append or truncate operation"
        + " does not support erasureCodePolicy");
    return false;
  }
  config.set(ERASURE_CODE_POLICY_NAME_KEY, erasureCodePolicyName);
  LOG.info("erasureCodePolicy = " + erasureCodePolicyName);
  return true;
}
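The lookup loop above can be factored into a small reusable helper. The EcPolicyValidator class below is a hypothetical illustration, not part of TestDFSIO; it relies only on the getAllErasureCodingPolicies() and getName() calls already used in the snippet and throws instead of printing when the requested policy is missing.

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

/** Hypothetical helper mirroring the validation in checkErasureCodePolicy. */
public final class EcPolicyValidator {

  private EcPolicyValidator() {
  }

  /** Returns the policy with the given name, or throws if the cluster lacks it. */
  public static ErasureCodingPolicy requirePolicy(DistributedFileSystem dfs,
      String policyName) throws IOException {
    Collection<ErasureCodingPolicy> policies = dfs.getAllErasureCodingPolicies();
    for (ErasureCodingPolicy policy : policies) {
      if (policy.getName().equals(policyName)) {
        return policy;
      }
    }
    throw new IOException("Unsupported erasure coding policy: " + policyName);
  }
}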
Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class TestDFSIO, method createAndEnableECOnPath.
void createAndEnableECOnPath(FileSystem fs, Path path) throws IOException {
  String erasureCodePolicyName = getConf().get(ERASURE_CODE_POLICY_NAME_KEY, null);
  fs.mkdirs(path);
  Collection<ErasureCodingPolicy> list =
      ((DistributedFileSystem) fs).getAllErasureCodingPolicies();
  for (ErasureCodingPolicy ec : list) {
    if (erasureCodePolicyName.equals(ec.getName())) {
      ((DistributedFileSystem) fs).setErasureCodingPolicy(path, ec.getName());
      LOG.info("enable erasureCodePolicy = " + erasureCodePolicyName
          + " on " + path.toString());
      break;
    }
  }
}
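Once a policy is set on a directory, files created under it inherit that policy. The end-to-end sketch below is an illustration only: it assumes the default filesystem is HDFS, and the policy name "RS-6-3-1024k" is an assumption; check getAllErasureCodingPolicies() for the names actually offered by your cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcInheritanceDemo {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // Placeholder directory; the policy name is an assumption for illustration.
    Path dir = new Path("/ec-demo");
    dfs.mkdirs(dir);
    dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");

    // A file created under the directory is written with the inherited policy.
    Path file = new Path(dir, "sample.bin");
    try (FSDataOutputStream out = dfs.create(file)) {
      out.writeBytes("hello erasure coding");
    }
    ErasureCodingPolicy inherited = dfs.getErasureCodingPolicy(file);
    System.out.println(file + " inherited policy: "
        + (inherited == null ? "none" : inherited.getName()));
    dfs.close();
  }
}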
Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.
The class FSDirectory, method updateSpaceForCompleteBlock.
/**
 * Update the cached quota space for a block that is being completed.
 * Must only be called once, as the block is being completed.
 * @param completeBlk - Completed block for which to update space
 * @param inodes - INodes in path to file containing completeBlk; if null
 *                 this will be resolved internally
 */
public void updateSpaceForCompleteBlock(BlockInfo completeBlk,
    INodesInPath inodes) throws IOException {
  assert namesystem.hasWriteLock();
  INodesInPath iip = inodes != null ? inodes :
      INodesInPath.fromINode(namesystem.getBlockCollection(completeBlk));
  INodeFile fileINode = iip.getLastINode().asFile();
  // Adjust disk space consumption if required
  final long diff;
  final short replicationFactor;
  if (fileINode.isStriped()) {
    final ErasureCodingPolicy ecPolicy =
        FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(namesystem, iip);
    final short numDataUnits = (short) ecPolicy.getNumDataUnits();
    final short numParityUnits = (short) ecPolicy.getNumParityUnits();
    final long numBlocks = numDataUnits + numParityUnits;
    final long fullBlockGroupSize = fileINode.getPreferredBlockSize() * numBlocks;
    final BlockInfoStriped striped = new BlockInfoStriped(completeBlk, ecPolicy);
    final long actualBlockGroupSize = striped.spaceConsumed();
    diff = fullBlockGroupSize - actualBlockGroupSize;
    replicationFactor = (short) 1;
  } else {
    diff = fileINode.getPreferredBlockSize() - completeBlk.getNumBytes();
    replicationFactor = fileINode.getFileReplication();
  }
  if (diff > 0) {
    try {
      updateSpaceConsumed(iip, 0, -diff, replicationFactor);
    } catch (IOException e) {
      LOG.warn("Unexpected exception while updating disk space.", e);
    }
  }
}
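The quota adjustment reduces to simple arithmetic: space is reserved for a full block (or full block group) when the block is allocated, and the unused remainder is released when the block completes. The sketch below is a simplified, self-contained illustration of that arithmetic; the real striped case delegates to BlockInfoStriped.spaceConsumed(), which also accounts for partial cells, so these figures are an approximation.

/**
 * Simplified illustration of the quota math in updateSpaceForCompleteBlock;
 * not part of FSDirectory.
 */
public final class CompletedBlockQuotaMath {

  private CompletedBlockQuotaMath() {
  }

  /** Bytes released for a replicated file when a block completes short. */
  static long replicatedExcess(long preferredBlockSize, long actualBlockBytes,
      short replication) {
    // Quota was charged for a full block per replica at allocation time.
    return (preferredBlockSize - actualBlockBytes) * replication;
  }

  /** Bytes released for a striped file when a block group completes short. */
  static long stripedExcess(long preferredBlockSize, long actualGroupBytes,
      int dataUnits, int parityUnits) {
    // A full block group reserves preferredBlockSize per data and parity unit.
    long fullGroup = preferredBlockSize * (dataUnits + parityUnits);
    return fullGroup - actualGroupBytes;
  }

  public static void main(String[] args) {
    // Example: 128 MB blocks, RS(6,3), and a group that only consumed 300 MB.
    long mb = 1024L * 1024L;
    System.out.println("replicated excess = "
        + replicatedExcess(128 * mb, 10 * mb, (short) 3) + " bytes");
    System.out.println("striped excess = "
        + stripedExcess(128 * mb, 300 * mb, 6, 3) + " bytes");
  }
}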