Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto in project hadoop by apache.
From the class PBImageXmlWriter, method dumpINodeFile:
private void dumpINodeFile(INodeSection.INodeFile f) {
  if (f.hasErasureCodingPolicyID()) {
    o(SECTION_REPLICATION, INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS);
  } else {
    o(SECTION_REPLICATION, f.getReplication());
  }
  o(INODE_SECTION_MTIME, f.getModificationTime())
      .o(INODE_SECTION_ATIME, f.getAccessTime())
      .o(INODE_SECTION_PREFERRED_BLOCK_SIZE, f.getPreferredBlockSize())
      .o(INODE_SECTION_PERMISSION, dumpPermission(f.getPermission()));
  if (f.hasXAttrs()) {
    dumpXattrs(f.getXAttrs());
  }
  dumpAcls(f.getAcl());
  if (f.getBlocksCount() > 0) {
    out.print("<" + INODE_SECTION_BLOCKS + ">");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<" + INODE_SECTION_BLOCK + ">");
      o(SECTION_ID, b.getBlockId())
          .o(INODE_SECTION_GEMSTAMP, b.getGenStamp())
          .o(INODE_SECTION_NUM_BYTES, b.getNumBytes());
      out.print("</" + INODE_SECTION_BLOCK + ">\n");
    }
    out.print("</" + INODE_SECTION_BLOCKS + ">\n");
  }
  if (f.hasStoragePolicyID()) {
    o(INODE_SECTION_STORAGE_POLICY_ID, f.getStoragePolicyID());
  }
  if (f.hasErasureCodingPolicyID()) {
    o(INODE_SECTION_BLOCK_TYPE, f.getBlockType().name());
    o(INODE_SECTION_EC_POLICY_ID, f.getErasureCodingPolicyID());
  }
  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<" + INODE_SECTION_FILE_UNDER_CONSTRUCTION + ">");
    o(INODE_SECTION_CLIENT_NAME, u.getClientName())
        .o(INODE_SECTION_CLIENT_MACHINE, u.getClientMachine());
    out.print("</" + INODE_SECTION_FILE_UNDER_CONSTRUCTION + ">\n");
  }
}
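The snippet leans on a chained element writer o(name, value) that returns the writer itself, which is what allows calls like o(...).o(...).o(...) above. A minimal sketch of such a helper, assuming it simply prints the value between opening and closing tags (the real PBImageXmlWriter may escape or format values differently):

  // Hypothetical sketch of the chainable XML element writer used above.
  private PBImageXmlWriter o(final String e, final Object v) {
    out.print("<" + e + ">" + v + "</" + e + ">");
    return this;  // returning 'this' enables the chained .o(...).o(...) style
  }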
Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto in project hadoop by apache.
From the class FileDistributionCalculator, method run:
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();
      int bucket = fileSize > maxSize ? distribution.length - 1
          : (int) Math.ceil((double) fileSize / steps);
      // The bucket index can equal distribution.length when maxSize is not
      // evenly divisible by steps, so clamp it to the last bucket.
      if (bucket >= distribution.length) {
        bucket = distribution.length - 1;
      }
      ++distribution[bucket];
    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }
    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
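To make the bucketing above concrete, here is a standalone sketch with made-up settings (steps = 2 MB per bucket, maxSize = 128 MB; these are illustrative values, not the tool's defaults). A 3 MB file lands in bucket ceil(3 / 2) = 2, and anything larger than maxSize falls into the last bucket:

  // Illustrative bucket computation; step/maxSize values are hypothetical.
  long steps = 2L * 1024 * 1024;        // 2 MB per bucket
  long maxSize = 128L * 1024 * 1024;    // files above this go to the last bucket
  int[] distribution = new int[1 + (int) Math.ceil((double) maxSize / steps)];

  long fileSize = 3L * 1024 * 1024;     // a 3 MB file
  int bucket = fileSize > maxSize ? distribution.length - 1
      : (int) Math.ceil((double) fileSize / steps);   // ceil(3/2) = 2
  if (bucket >= distribution.length) {
    bucket = distribution.length - 1;   // clamp when maxSize % steps != 0
  }
  ++distribution[bucket];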
Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto in project hadoop by apache.
From the class InterDatanodeProtocolTranslatorPB, method initReplicaRecovery:
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock) throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto.newBuilder()
      .setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. "
          + "Req: " + req + "\n" + "Resp: " + resp);
    }
  }
  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(), b.getGenStamp(),
      PBHelper.convert(resp.getState()));
}
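The response handling above reads the three fields a BlockProto carries: block id, length in bytes, and generation stamp. Constructing one directly with the protobuf-generated builder looks like the following sketch (the literal values are arbitrary placeholders):

  import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;

  // Build a BlockProto by hand; the numbers are placeholder values.
  BlockProto b = BlockProto.newBuilder()
      .setBlockId(1073741825L)   // block id
      .setGenStamp(1001L)        // generation stamp
      .setNumBytes(134217728L)   // length in bytes
      .build();
  System.out.println(b.getBlockId() + " " + b.getGenStamp() + " " + b.getNumBytes());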
Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto in project hadoop by apache.
From the class TestPBHelper, method testConvertBlock:
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelperClient.convert(b);
  Block b2 = PBHelperClient.convert(bProto);
  assertEquals(b, b2);
}
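The test checks equality of the round-tripped Block as a whole. A variant that also pins down the individual proto fields could look like this sketch, assuming the Block(1, 100, 3) constructor order of id, length, and generation stamp:

  // Hypothetical extension of the round-trip test: check each proto field.
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelperClient.convert(b);
  assertEquals(1, bProto.getBlockId());
  assertEquals(100, bProto.getNumBytes());
  assertEquals(3, bProto.getGenStamp());
  assertEquals(b, PBHelperClient.convert(bProto));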