
Example 1 with BlockProto

Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto in project hadoop by apache.

The class PBImageXmlWriter, method dumpINodeFile:

private void dumpINodeFile(INodeSection.INodeFile f) {
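    // Erasure-coded (striped) files carry no real replication factor,
    // so the writer emits the default placeholder value instead.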
    if (f.hasErasureCodingPolicyID()) {
        o(SECTION_REPLICATION, INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS);
    } else {
        o(SECTION_REPLICATION, f.getReplication());
    }
    o(INODE_SECTION_MTIME, f.getModificationTime())
        .o(INODE_SECTION_ATIME, f.getAccessTime())
        .o(INODE_SECTION_PREFERRED_BLOCK_SIZE, f.getPreferredBlockSize())
        .o(INODE_SECTION_PERMISSION, dumpPermission(f.getPermission()));
    if (f.hasXAttrs()) {
        dumpXattrs(f.getXAttrs());
    }
    dumpAcls(f.getAcl());
    if (f.getBlocksCount() > 0) {
        out.print("<" + INODE_SECTION_BLOCKS + ">");
        for (BlockProto b : f.getBlocksList()) {
            out.print("<" + INODE_SECTION_BLOCK + ">");
            o(SECTION_ID, b.getBlockId())
                .o(INODE_SECTION_GEMSTAMP, b.getGenStamp())
                .o(INODE_SECTION_NUM_BYTES, b.getNumBytes());
            out.print("</" + INODE_SECTION_BLOCK + ">\n");
        }
        out.print("</" + INODE_SECTION_BLOCKS + ">\n");
    }
    if (f.hasStoragePolicyID()) {
        o(INODE_SECTION_STORAGE_POLICY_ID, f.getStoragePolicyID());
    }
    if (f.hasErasureCodingPolicyID()) {
        o(INODE_SECTION_BLOCK_TYPE, f.getBlockType().name());
        o(INODE_SECTION_EC_POLICY_ID, f.getErasureCodingPolicyID());
    }
    if (f.hasFileUC()) {
        INodeSection.FileUnderConstructionFeature u = f.getFileUC();
        out.print("<" + INODE_SECTION_FILE_UNDER_CONSTRUCTION + ">");
        o(INODE_SECTION_CLIENT_NAME, u.getClientName())
            .o(INODE_SECTION_CLIENT_MACHINE, u.getClientMachine());
        out.print("</" + INODE_SECTION_FILE_UNDER_CONSTRUCTION + ">\n");
    }
}
Also used: INodeSection (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection), BlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)
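
BlockProto itself is an ordinary protobuf-generated message, so the three fields the writer dumps per block (block id, genstamp, numBytes) can be built and round-tripped directly with the generated builder API. A minimal sketch, with invented literal values:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;

public class BlockProtoSketch {
    public static void main(String[] args) throws Exception {
        // The three fields the XML writer emits per block.
        BlockProto b = BlockProto.newBuilder()
            .setBlockId(1073741825L) // invented block id
            .setGenStamp(1001L)
            .setNumBytes(134217728L) // 128 MB
            .build();
        // Round-trip through the wire format, as the fsimage tooling does.
        BlockProto parsed = BlockProto.parseFrom(b.toByteArray());
        System.out.println(parsed.getBlockId() + " " + parsed.getGenStamp()
            + " " + parsed.getNumBytes());
    }
}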

Example 2 with BlockProto

Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto in project hadoop by apache.

The class FileDistributionCalculator, method run:

private void run(InputStream in) throws IOException {
    INodeSection s = INodeSection.parseDelimitedFrom(in);
    for (int i = 0; i < s.getNumInodes(); ++i) {
        INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
        if (p.getType() == INodeSection.INode.Type.FILE) {
            ++totalFiles;
            INodeSection.INodeFile f = p.getFile();
            totalBlocks += f.getBlocksCount();
            long fileSize = 0;
            for (BlockProto b : f.getBlocksList()) {
                fileSize += b.getNumBytes();
            }
            maxFileSize = Math.max(fileSize, maxFileSize);
            totalSpace += fileSize * f.getReplication();
            int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math.ceil((double) fileSize / steps);
            // The bucket index can equal distribution.length when maxSize
            // can't be divided completely by steps, so clamp it to the last
            // bucket.
            if (bucket >= distribution.length) {
                bucket = distribution.length - 1;
            }
            ++distribution[bucket];
        } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
            ++totalDirectories;
        }
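        // Report progress every 2^20 (about one million) inodes.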
        if (i % (1 << 20) == 0) {
            out.println("Processed " + i + " inodes.");
        }
    }
}
Also used: INodeSection (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection), BlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)
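
The clamp at the end is the subtle part. The calculator's constructor (not shown here) sizes the array with integer division, which floors, while the bucket index is computed with Math.ceil; when maxSize is not an exact multiple of steps, a file near maxSize would land one index past the end. A standalone sketch of that arithmetic, with invented maxSize and steps values:

public class BucketOverflowSketch {
    public static void main(String[] args) {
        // Illustrative values only; Hadoop's defaults differ.
        long maxSize = 100;
        int steps = 8;
        // Integer division floors: 100 / 8 = 12, so valid indices are 0..12.
        int[] distribution = new int[1 + (int) (maxSize / steps)];
        // A file exactly at maxSize: ceil(100 / 8.0) = 13, one past the end.
        long fileSize = 100;
        int bucket = (int) Math.ceil((double) fileSize / steps);
        // Without this clamp, ++distribution[bucket] would throw
        // ArrayIndexOutOfBoundsException.
        if (bucket >= distribution.length) {
            bucket = distribution.length - 1;
        }
        ++distribution[bucket];
        System.out.println("bucket = " + bucket); // prints 12
    }
}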

Example 3 with BlockProto

Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto in project hadoop by apache.

The class InterDatanodeProtocolTranslatorPB, method initReplicaRecovery:

@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock) throws IOException {
    InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
        .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
    InitReplicaRecoveryResponseProto resp;
    try {
        resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
    } catch (ServiceException e) {
        throw ProtobufHelper.getRemoteException(e);
    }
    if (!resp.getReplicaFound()) {
        // No replica found on the remote node.
        return null;
    } else {
        if (!resp.hasBlock() || !resp.hasState()) {
            throw new IOException("Replica was found but missing fields. " + "Req: " + req + "\n" + "Resp: " + resp);
        }
    }
    BlockProto b = resp.getBlock();
    return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(), b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Also used: InitReplicaRecoveryRequestProto (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto), ServiceException (com.google.protobuf.ServiceException), ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), InitReplicaRecoveryResponseProto (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto), BlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto), IOException (java.io.IOException)
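
The hasBlock() and hasState() guards matter because proto2 getters return the declared default when an optional field is unset rather than failing, so a missing block would otherwise be read silently as id 0 with length 0. A small sketch of the same pattern on BlockProto, assuming numBytes is declared optional with a default of 0 in hdfs.proto:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;

public class HasFieldSketch {
    public static void main(String[] args) {
        // blockId and genStamp are required; numBytes is left unset here.
        BlockProto b = BlockProto.newBuilder()
            .setBlockId(42L)
            .setGenStamp(7L)
            .build();
        System.out.println(b.hasNumBytes()); // false
        System.out.println(b.getNumBytes()); // 0, the declared default
    }
}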

Example 4 with BlockProto

Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto in project hadoop by apache.

The class TestPBHelper, method testConvertBlock:

@Test
public void testConvertBlock() {
    Block b = new Block(1, 100, 3);
    BlockProto bProto = PBHelperClient.convert(b);
    Block b2 = PBHelperClient.convert(bProto);
    assertEquals(b, b2);
}
Also used: BlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto), ExtendedBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto), LocatedBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto), RecoveringBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto), Block (org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Test (org.junit.Test)
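
The same round-trip pattern extends to the other proto types in the import list. A hedged sketch for ExtendedBlock, assuming PBHelperClient exposes the analogous convert overloads for ExtendedBlock and ExtendedBlockProto:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.junit.Test;

public class TestExtendedBlockConvert {
    @Test
    public void testConvertExtendedBlock() {
        // Pool id plus the same (id, numBytes, genStamp) triple as above.
        ExtendedBlock eb = new ExtendedBlock("test-pool", 1, 100, 3);
        ExtendedBlockProto proto = PBHelperClient.convert(eb);
        ExtendedBlock eb2 = PBHelperClient.convert(proto);
        assertEquals(eb, eb2);
    }
}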

Aggregations

BlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto): 4 uses
INodeSection (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection): 2 uses
ServiceException (com.google.protobuf.ServiceException): 1 use
IOException (java.io.IOException): 1 use
Block (org.apache.hadoop.hdfs.protocol.Block): 1 use
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 1 use
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 1 use
ExtendedBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto): 1 use
LocatedBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto): 1 use
RecoveringBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto): 1 use
InitReplicaRecoveryRequestProto (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto): 1 use
InitReplicaRecoveryResponseProto (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto): 1 use
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 1 use
ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo): 1 use
Test (org.junit.Test): 1 use