Use of org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection in project hadoop by apache.
The class PBImageTextWriter, method outputINodes.
private void outputINodes(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  LOG.info("Found {} INodes in the INode section", s.getNumInodes());
  long ignored = 0;
  long ignoredSnapshots = 0;
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INode p = INode.parseDelimitedFrom(in);
    try {
      String parentPath = metadataMap.getParentPath(p.getId());
      out.println(getEntry(parentPath, p));
    } catch (IOException ioe) {
      ignored++;
      if (!(ioe instanceof IgnoreSnapshotException)) {
        LOG.warn("Exception caught, ignoring node:{}", p.getId(), ioe);
      } else {
        ignoredSnapshots++;
        if (LOG.isDebugEnabled()) {
          LOG.debug("Exception caught, ignoring node:{}.", p.getId(), ioe);
        }
      }
    }
    if (LOG.isDebugEnabled() && i % 100000 == 0) {
      LOG.debug("Outputted {} INodes.", i);
    }
  }
  if (ignored > 0) {
    LOG.warn("Ignored {} nodes, including {} in snapshots. Please turn on"
        + " debug log for details", ignored, ignoredSnapshots);
  }
  LOG.info("Outputted {} INodes.", s.getNumInodes());
}
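The traversal relies on the fsimage layout in which the INode section starts with a single INodeSection header followed by getNumInodes() length-delimited INode records, so each parseDelimitedFrom() call consumes exactly one record. A minimal sketch of that read pattern, as a hypothetical standalone helper (the class and method names are illustrative, not part of Hadoop):

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;

/**
 * Hypothetical helper, not part of PBImageTextWriter: walks an INode section
 * with the same length-delimited protobuf calls as outputINodes() above.
 */
class INodeSectionScanSketch {
  /** Returns the number of INode records actually read from the stream. */
  static long scan(InputStream in) throws IOException {
    // The section header is one delimited message carrying the record count.
    INodeSection header = INodeSection.parseDelimitedFrom(in);
    long read = 0;
    for (long i = 0; i < header.getNumInodes(); i++) {
      // Each INode is its own delimited message immediately after the header.
      INode inode = INode.parseDelimitedFrom(in);
      if (inode == null) {
        break; // stream ended earlier than the header announced
      }
      read++;
    }
    return read;
  }
}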
Use of org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection in project hadoop by apache.
The class PBImageXmlWriter, method dumpINodeSection.
private void dumpINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  out.print("<" + INODE_SECTION_NAME + ">");
  o(INODE_SECTION_LAST_INODE_ID, s.getLastInodeId());
  o(INODE_SECTION_NUM_INODES, s.getNumInodes());
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    out.print("<" + INODE_SECTION_INODE + ">");
    dumpINodeFields(p);
    out.print("</" + INODE_SECTION_INODE + ">\n");
  }
  out.print("</" + INODE_SECTION_NAME + ">\n");
}
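The o(name, value) calls above go through PBImageXmlWriter's element-writing helper, which wraps a value in a named tag inside the enclosing section element. A hedged approximation of what such a helper might do (a sketch only, not the actual implementation):

import java.io.PrintStream;

/** Hypothetical stand-in for an o(String, Object) element writer. */
class XmlElementSketch {
  private final PrintStream out;

  XmlElementSketch(PrintStream out) {
    this.out = out;
  }

  /** Emit a single <name>value</name> element on the current line. */
  void o(String name, Object value) {
    out.print("<" + name + ">" + value + "</" + name + ">");
  }
}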
Use of org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection in project hadoop by apache.
The class FileDistributionCalculator, method run.
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();
      int bucket = fileSize > maxSize ? distribution.length - 1
          : (int) Math.ceil((double) fileSize / steps);
      // The computed bucket can land one past the end of the histogram when
      // maxSize can't be divided completely by steps, so clamp it to the
      // last bucket.
      if (bucket >= distribution.length) {
        bucket = distribution.length - 1;
      }
      ++distribution[bucket];
    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }
    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
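The bucket index is ceil(fileSize / steps), with files larger than maxSize forced into the last bucket and the ceiling result clamped because it can land one past the end when maxSize is not a multiple of steps. A self-contained illustration of that arithmetic, using illustrative sizes (not the tool's defaults) and assuming the histogram has 1 + maxSize / steps buckets, as the clamp above suggests:

/**
 * Hypothetical, simplified illustration of the bucketing in run(); not part
 * of FileDistributionCalculator.
 */
class FileSizeBucketSketch {
  static int bucketFor(long fileSize, long maxSize, long steps, int numBuckets) {
    int bucket = fileSize > maxSize
        ? numBuckets - 1
        : (int) Math.ceil((double) fileSize / steps);
    // ceil() can land exactly on numBuckets when maxSize % steps != 0.
    return Math.min(bucket, numBuckets - 1);
  }

  public static void main(String[] args) {
    long steps = 30;    // illustrative interval
    long maxSize = 100; // illustrative cap; not evenly divisible by steps
    int numBuckets = 1 + (int) (maxSize / steps);
    for (long size : new long[] {0, 1, 30, 31, 100, 500}) {
      // A 100-byte file computes bucket 4 and gets clamped to the last
      // bucket (index 3); a 500-byte file exceeds maxSize and also lands there.
      System.out.println(size + " bytes -> bucket "
          + bucketFor(size, maxSize, steps, numBuckets));
    }
  }
}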
Use of org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection in project hadoop by apache.
The class PBImageTextWriter, method loadDirectoriesInINodeSection.
/**
 * Load the filenames of the directories from the INode section.
 */
private void loadDirectoriesInINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  LOG.info("Loading directories in INode section.");
  int numDirs = 0;
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INode p = INode.parseDelimitedFrom(in);
    if (LOG.isDebugEnabled() && i % 10000 == 0) {
      LOG.debug("Scanned {} inodes.", i);
    }
    if (p.hasDirectory()) {
      metadataMap.putDir(p);
      numDirs++;
    }
  }
  LOG.info("Found {} directories in INode section.", numDirs);
}
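This pass appears to run before outputINodes() so that directory names are already known when parent paths are resolved via metadataMap.getParentPath(). A hypothetical, heavily simplified stand-in for what putDir() records; the real metadata maps in PBImageTextWriter are in-memory or LevelDB backed and store more than a name, so this is only a sketch:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;

/** Hypothetical directory-name map: inode id -> directory name. */
class DirectoryNameMapSketch {
  private final Map<Long, String> dirNames = new HashMap<>();

  void putDir(INode inode) {
    if (!inode.hasDirectory()) {
      throw new IllegalArgumentException("Not a directory inode: " + inode.getId());
    }
    // INode.getName() is a protobuf ByteString holding the raw name bytes;
    // decoding it as UTF-8 is an assumption made for this sketch.
    dirNames.put(inode.getId(), inode.getName().toStringUtf8());
  }

  String nameOf(long inodeId) {
    return dirNames.get(inodeId);
  }
}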