Use of org.apache.hadoop.util.LimitInputStream in project hadoop by apache.
The class PBImageTextWriter, method loadINodeDirSection:
private void loadINodeDirSection(FileInputStream fin,
    List<FileSummary.Section> sections, FileSummary summary,
    Configuration conf, List<Long> refIdList) throws IOException {
  LOG.info("Loading INode directory section.");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName()) == SectionName.INODE_DIR) {
      fin.getChannel().position(section.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(),
          new BufferedInputStream(new LimitInputStream(fin, section.getLength())));
      buildNamespace(is, refIdList);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading INode directory section in {}ms", timeTaken);
}
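All of the fsimage readers on this page follow the same pattern: seek the underlying channel to a section's offset, then wrap the stream in a LimitInputStream so that buffering or decompression cannot read past the section boundary. Below is a minimal, self-contained sketch of that idea only; the file name, offset, and length are hypothetical and not taken from the snippet above.

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.util.LimitInputStream;

public class SectionReadSketch {
  public static void main(String[] args) throws IOException {
    long sectionOffset = 1024L;  // hypothetical section offset
    long sectionLength = 4096L;  // hypothetical section length
    try (FileInputStream fin = new FileInputStream("fsimage_example")) {  // hypothetical file
      // Seek the underlying channel to the start of the section.
      fin.getChannel().position(sectionOffset);
      // LimitInputStream caps how many bytes downstream wrappers may consume,
      // so the BufferedInputStream (or a decompressor) cannot read into the next section.
      InputStream is = new BufferedInputStream(new LimitInputStream(fin, sectionLength));
      byte[] buf = new byte[1024];
      int n;
      long total = 0;
      while ((n = is.read(buf)) != -1) {
        total += n;  // process at most sectionLength bytes of section data
      }
      System.out.println("Read " + total + " bytes from the section");
    }
  }
}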
Use of org.apache.hadoop.util.LimitInputStream in project hadoop by apache.
The class PBImageTextWriter, method output:
private void output(Configuration conf, FileSummary summary,
    FileInputStream fin, ArrayList<FileSummary.Section> sections)
    throws IOException {
  InputStream is;
  long startTime = Time.monotonicNow();
  out.println(getHeader());
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName()) == SectionName.INODE) {
      fin.getChannel().position(section.getOffset());
      is = FSImageUtil.wrapInputStreamForCompression(conf, summary.getCodec(),
          new BufferedInputStream(new LimitInputStream(fin, section.getLength())));
      outputINodes(is);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.debug("Time to output inodes: {}ms", timeTaken);
}
Use of org.apache.hadoop.util.LimitInputStream in project hadoop by apache.
The class FSImageLoader, method load:
/**
 * Load fsimage into the memory.
 * @param inputFile the filepath of the fsimage to load.
 * @return FSImageLoader
 * @throws IOException if failed to load fsimage.
 */
static FSImageLoader load(String inputFile) throws IOException {
  Configuration conf = new Configuration();
  RandomAccessFile file = new RandomAccessFile(inputFile, "r");
  if (!FSImageUtil.checkFileFormat(file)) {
    throw new IOException("Unrecognized FSImage");
  }
  FsImageProto.FileSummary summary = FSImageUtil.loadSummary(file);
  try (FileInputStream fin = new FileInputStream(file.getFD())) {
    // Map to record INodeReference to the referred id
    ImmutableList<Long> refIdList = null;
    String[] stringTable = null;
    byte[][] inodes = null;
    Map<Long, long[]> dirmap = null;
    ArrayList<FsImageProto.FileSummary.Section> sections =
        Lists.newArrayList(summary.getSectionsList());
    Collections.sort(sections,
        new Comparator<FsImageProto.FileSummary.Section>() {
          @Override
          public int compare(FsImageProto.FileSummary.Section s1,
              FsImageProto.FileSummary.Section s2) {
            FSImageFormatProtobuf.SectionName n1 =
                FSImageFormatProtobuf.SectionName.fromString(s1.getName());
            FSImageFormatProtobuf.SectionName n2 =
                FSImageFormatProtobuf.SectionName.fromString(s2.getName());
            if (n1 == null) {
              return n2 == null ? 0 : -1;
            } else if (n2 == null) {
              return -1;
            } else {
              return n1.ordinal() - n2.ordinal();
            }
          }
        });
    for (FsImageProto.FileSummary.Section s : sections) {
      fin.getChannel().position(s.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(),
          new BufferedInputStream(new LimitInputStream(fin, s.getLength())));
      if (LOG.isDebugEnabled()) {
        LOG.debug("Loading section " + s.getName() + " length: " + s.getLength());
      }
      switch (FSImageFormatProtobuf.SectionName.fromString(s.getName())) {
        case STRING_TABLE:
          stringTable = loadStringTable(is);
          break;
        case INODE:
          inodes = loadINodeSection(is);
          break;
        case INODE_REFERENCE:
          refIdList = loadINodeReferenceSection(is);
          break;
        case INODE_DIR:
          dirmap = loadINodeDirectorySection(is, refIdList);
          break;
        default:
          break;
      }
    }
    return new FSImageLoader(stringTable, inodes, dirmap);
  }
}
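On Java 8 and later, the anonymous Comparator in load could be written as a lambda. The sketch below is an editor-provided rewrite with the same ordering behavior (including the null handling shown above), intended as a drop-in replacement for the Collections.sort call inside load; it is not code from the Hadoop project.

sections.sort((s1, s2) -> {
  FSImageFormatProtobuf.SectionName n1 =
      FSImageFormatProtobuf.SectionName.fromString(s1.getName());
  FSImageFormatProtobuf.SectionName n2 =
      FSImageFormatProtobuf.SectionName.fromString(s2.getName());
  if (n1 == null) {
    return n2 == null ? 0 : -1;
  } else if (n2 == null) {
    return -1;
  }
  return n1.ordinal() - n2.ordinal();
});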
Use of org.apache.hadoop.util.LimitInputStream in project hadoop by apache.
The class CryptoUtils, method wrapIfNecessary:
/**
 * Wraps a given InputStream with a CryptoInputStream. The size of the data
 * buffer required for the stream is specified by the
 * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
 * variable.
 *
 * If the value of 'length' is > -1, the InputStream is additionally
 * wrapped in a LimitInputStream. CryptoStreams are late buffering in nature,
 * which means they will always try to read ahead if they can. The
 * LimitInputStream ensures that the CryptoStream does not read past the
 * provided length from the given InputStream.
 *
 * @param conf configuration
 * @param in given input stream
 * @param length maximum number of bytes to read from the input stream
 * @return InputStream encrypted input stream if encryption is
 *         enabled; otherwise the given input stream itself
 * @throws IOException exception in case of error
 */
public static InputStream wrapIfNecessary(Configuration conf, InputStream in,
    long length) throws IOException {
  if (isEncryptedSpillEnabled(conf)) {
    int bufferSize = getBufferSize(conf);
    if (length > -1) {
      in = new LimitInputStream(in, length);
    }
    byte[] offsetArray = new byte[8];
    IOUtils.readFully(in, offsetArray, 0, 8);
    long offset = ByteBuffer.wrap(offsetArray).getLong();
    CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
    byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
    IOUtils.readFully(in, iv, 0, cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
    if (LOG.isDebugEnabled()) {
      LOG.debug("IV read from [" + Base64.encodeBase64URLSafeString(iv) + "]");
    }
    return new CryptoInputStream(in, cryptoCodec, bufferSize, getEncryptionKey(),
        iv, offset + cryptoPadding(conf));
  } else {
    return in;
  }
}
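A hedged sketch of calling wrapIfNecessary on the read path: wrap a raw stream over one spill segment before deserializing it. The file path and segment length below are hypothetical, and the sketch assumes it runs in a task context where the spill encryption key is available; as the method above shows, when encryption is enabled the stream must begin with the 8-byte offset and the IV that wrapIfNecessary reads, and when it is disabled the original stream is returned unchanged.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.CryptoUtils;

public class SpillReadSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    long segmentLength = 8L * 1024 * 1024;  // hypothetical length of one spill segment
    // Hypothetical spill file; with encryption enabled the segment starts with
    // the 8-byte offset and the IV consumed by wrapIfNecessary.
    try (InputStream raw = new FileInputStream("/tmp/attempt_0/spill0.out")) {
      InputStream in = CryptoUtils.wrapIfNecessary(conf, raw, segmentLength);
      // read and deserialize at most segmentLength bytes from 'in'
    }
  }
}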
Use of org.apache.hadoop.util.LimitInputStream in project hadoop by apache.
The class WebHdfsHandler, method onOpen:
private void onOpen(ChannelHandlerContext ctx) throws IOException {
  final String nnId = params.namenodeId();
  final int bufferSize = params.bufferSize();
  final long offset = params.offset();
  final long length = params.length();
  resp = new DefaultHttpResponse(HTTP_1_1, OK);
  HttpHeaders headers = resp.headers();
  // Allow the UI to access the file
  headers.set(ACCESS_CONTROL_ALLOW_METHODS, GET);
  headers.set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
  headers.set(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
  headers.set(CONNECTION, CLOSE);
  final DFSClient dfsclient = newDfsClient(nnId, conf);
  HdfsDataInputStream in =
      dfsclient.createWrappedInputStream(dfsclient.open(path, bufferSize, true));
  in.seek(offset);
  long contentLength = in.getVisibleLength() - offset;
  if (length >= 0) {
    contentLength = Math.min(contentLength, length);
  }
  final InputStream data;
  if (contentLength >= 0) {
    headers.set(CONTENT_LENGTH, contentLength);
    data = new LimitInputStream(in, contentLength);
  } else {
    data = in;
  }
  ctx.write(resp);
  ctx.writeAndFlush(new ChunkedStream(data) {

    @Override
    public void close() throws Exception {
      super.close();
      dfsclient.close();
    }
  }).addListener(ChannelFutureListener.CLOSE);
}
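The content-length arithmetic above is the part most easily gotten wrong when combining the offset and length request parameters. As a minimal, hedged restatement (the helper name and variables are illustrative, not part of WebHdfsHandler): the response length is what remains after seeking to offset, optionally capped by the requested length, and the same value feeds both the CONTENT_LENGTH header and the LimitInputStream that enforces it.

// Illustrative helper, not part of WebHdfsHandler.
static long responseLength(long visibleLength, long offset, long requestedLength) {
  long remaining = visibleLength - offset;      // bytes available after the seek
  return requestedLength >= 0
      ? Math.min(remaining, requestedLength)    // cap at the client-requested length
      : remaining;                              // no explicit length: send the rest
}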