Use of org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks in project hadoop by apache.
The class DFSStripedInputStream, method readWithStrategy:
@Override
protected synchronized int readWithStrategy(ReaderStrategy strategy) throws IOException {
  dfsClient.checkOpen();
  if (closed.get()) {
    throw new IOException("Stream closed");
  }
  int len = strategy.getTargetLength();
  CorruptedBlocks corruptedBlocks = new CorruptedBlocks();
  if (pos < getFileLength()) {
    try {
      if (pos > blockEnd) {
        blockSeekTo(pos);
      }
      int realLen = (int) Math.min(len, (blockEnd - pos + 1L));
      synchronized (infoLock) {
        if (locatedBlocks.isLastBlockComplete()) {
          realLen = (int) Math.min(realLen, locatedBlocks.getFileLength() - pos);
        }
      }
      // Number of bytes already read into the buffer
      int result = 0;
      while (result < realLen) {
        if (!curStripeRange.include(getOffsetInBlockGroup())) {
          readOneStripe(corruptedBlocks);
        }
        int ret = copyToTargetBuf(strategy, realLen - result);
        result += ret;
        pos += ret;
      }
      return result;
    } finally {
      // Report block replica corruption whether the read was successful or a
      // ChecksumException occurred.
      reportCheckSumFailure(corruptedBlocks, currentLocatedBlock.getLocations().length, true);
    }
  }
  return -1;
}
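In all three callers shown on this page, the CorruptedBlocks object follows the same lifecycle: a fresh instance is created per read, threaded through the block readers so they can record bad replicas, and reportCheckSumFailure is invoked in a finally block so corruption is reported whether the read returns normally or throws. Below is a minimal, self-contained sketch of that lifecycle; CorruptedBlocksSketch and ReadReportingSketch are illustrative stand-ins written for this page, not Hadoop classes, and the stand-in only mirrors the block-to-replicas map that DFSUtilClient.CorruptedBlocks is understood to maintain.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Simplified, illustrative stand-in for DFSUtilClient.CorruptedBlocks:
// it accumulates block -> set of replica locations flagged as corrupt.
class CorruptedBlocksSketch {
  private final Map<String, Set<String>> corruptionMap = new HashMap<>();

  void addCorruptedBlock(String blockId, String datanode) {
    corruptionMap.computeIfAbsent(blockId, k -> new HashSet<>()).add(datanode);
  }

  Map<String, Set<String>> getCorruptionMap() {
    return corruptionMap;
  }
}

public class ReadReportingSketch {
  // Mirrors the create / read / always-report shape of readWithStrategy.
  static int readWithReporting() {
    CorruptedBlocksSketch corrupted = new CorruptedBlocksSketch();
    try {
      // ... perform the read; block readers would call
      // corrupted.addCorruptedBlock(...) when a checksum mismatch pins
      // corruption on a specific replica.
      return 0;
    } finally {
      // Runs whether the read returned or threw, so corruption found
      // before a failure is still reported.
      if (!corrupted.getCorruptionMap().isEmpty()) {
        System.out.println("reporting corrupt replicas: " + corrupted.getCorruptionMap());
      }
    }
  }

  public static void main(String[] args) {
    readWithReporting();
  }
}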
Use of org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks in project hadoop by apache.
The class DFSInputStream, method readWithStrategy:
protected synchronized int readWithStrategy(ReaderStrategy strategy) throws IOException {
  dfsClient.checkOpen();
  if (closed.get()) {
    throw new IOException("Stream closed");
  }
  int len = strategy.getTargetLength();
  CorruptedBlocks corruptedBlocks = new CorruptedBlocks();
  failures = 0;
  if (pos < getFileLength()) {
    int retries = 2;
    while (retries > 0) {
      try {
        // currentNode can be left as null if the previous read had a checksum
        // error on the same block. See HDFS-3067
        if (pos > blockEnd || currentNode == null) {
          currentNode = blockSeekTo(pos);
        }
        int realLen = (int) Math.min(len, (blockEnd - pos + 1L));
        synchronized (infoLock) {
          if (locatedBlocks.isLastBlockComplete()) {
            realLen = (int) Math.min(realLen, locatedBlocks.getFileLength() - pos);
          }
        }
        int result = readBuffer(strategy, realLen, corruptedBlocks);
        if (result >= 0) {
          pos += result;
        } else {
          // Got an EOS from the reader even though more data was expected.
          throw new IOException("Unexpected EOS from the reader");
        }
        return result;
      } catch (ChecksumException ce) {
        throw ce;
      } catch (IOException e) {
        checkInterrupted(e);
        if (retries == 1) {
          DFSClient.LOG.warn("DFS Read", e);
        }
        blockEnd = -1;
        if (currentNode != null) {
          addToDeadNodes(currentNode);
        }
        if (--retries == 0) {
          throw e;
        }
      } finally {
        // Report block replica corruption whether the read was successful or a
        // ChecksumException occurred.
        reportCheckSumFailure(corruptedBlocks, currentLocatedBlock.getLocations().length, false);
      }
    }
  }
  return -1;
}
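The non-striped readWithStrategy wraps the same report-in-finally pattern in a small retry loop: when an attempt fails with an IOException, the current replica's DataNode is added to the dead-node set, blockEnd is reset to force a re-seek, and the exception is rethrown only once the two-attempt budget is exhausted. The standalone sketch below mirrors just that control flow; RetryLoopSketch, readOnce, and the node names are hypothetical placeholders, and the actual read and reporting calls are reduced to comments.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

public class RetryLoopSketch {
  private final Set<String> deadNodes = new HashSet<>();

  // Placeholder for one read attempt against one replica; always fails here
  // so the retry path is exercised.
  private int readOnce(String node) throws IOException {
    throw new IOException("simulated replica failure on " + node);
  }

  // Mirrors the bounded retry loop above: on IOException, blacklist the
  // current replica's node and retry; rethrow once the budget is spent.
  // (The real code also rethrows ChecksumException immediately and calls
  // reportCheckSumFailure in a finally block on every attempt.)
  int readWithRetries(String node) throws IOException {
    int retries = 2;
    while (retries > 0) {
      try {
        return readOnce(node);
      } catch (IOException e) {
        deadNodes.add(node);        // avoid this replica on the next attempt
        if (--retries == 0) {
          throw e;                  // budget exhausted: surface the failure
        }
        node = "dn-other";          // illustrative: switch to another replica
      }
    }
    return -1;
  }

  public static void main(String[] args) {
    try {
      new RetryLoopSketch().readWithRetries("dn-1");
    } catch (IOException e) {
      System.out.println("read failed after retries: " + e.getMessage());
    }
  }
}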
Use of org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks in project hadoop by apache.
The class DFSInputStream, method pread:
private int pread(long position, ByteBuffer buffer) throws IOException {
  // sanity checks
  dfsClient.checkOpen();
  if (closed.get()) {
    throw new IOException("Stream closed");
  }
  failures = 0;
  long filelen = getFileLength();
  if ((position < 0) || (position >= filelen)) {
    return -1;
  }
  int length = buffer.remaining();
  int realLen = length;
  if ((position + length) > filelen) {
    realLen = (int) (filelen - position);
  }
  // determine the block and byte range within the block
  // corresponding to position and realLen
  List<LocatedBlock> blockRange = getBlockRange(position, realLen);
  int remaining = realLen;
  CorruptedBlocks corruptedBlocks = new CorruptedBlocks();
  for (LocatedBlock blk : blockRange) {
    long targetStart = position - blk.getStartOffset();
    int bytesToRead = (int) Math.min(remaining, blk.getBlockSize() - targetStart);
    long targetEnd = targetStart + bytesToRead - 1;
    try {
      if (dfsClient.isHedgedReadsEnabled() && !blk.isStriped()) {
        hedgedFetchBlockByteRange(blk, targetStart, targetEnd, buffer, corruptedBlocks);
      } else {
        fetchBlockByteRange(blk, targetStart, targetEnd, buffer, corruptedBlocks);
      }
    } finally {
      // Check and report if any block replicas are corrupted.
      // BlockMissingException may be caught if all block replicas are
      // corrupted.
      reportCheckSumFailure(corruptedBlocks, blk.getLocations().length, false);
    }
    remaining -= bytesToRead;
    position += bytesToRead;
  }
  assert remaining == 0 : "Wrong number of bytes read.";
  return realLen;
}
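The per-block arithmetic in pread is the part worth tracing: for each located block the absolute file position is translated to a block-relative targetStart, and bytesToRead is capped by both the caller's remaining demand and the bytes left in that block before the loop advances position. The small sketch below reproduces only that range computation with plain arrays standing in for LocatedBlock (PreadRangeSketch and its inputs are illustrative; the real getBlockRange already returns just the blocks that overlap the requested range).

public class PreadRangeSketch {
  // Translate an absolute file position and length into per-block
  // (targetStart, bytesToRead) pairs, mirroring the loop in pread.
  static void printRanges(long position, int realLen, long[] blockStarts, long[] blockSizes) {
    int remaining = realLen;
    for (int i = 0; i < blockStarts.length && remaining > 0; i++) {
      long blockEnd = blockStarts[i] + blockSizes[i];
      if (position >= blockEnd) {
        continue;                                      // read starts past this block
      }
      long targetStart = position - blockStarts[i];    // block-relative offset
      int bytesToRead = (int) Math.min(remaining, blockSizes[i] - targetStart);
      long targetEnd = targetStart + bytesToRead - 1;
      System.out.printf("block %d: start=%d end=%d (%d bytes)%n",
          i, targetStart, targetEnd, bytesToRead);
      remaining -= bytesToRead;
      position += bytesToRead;
    }
  }

  public static void main(String[] args) {
    // Two 128 MB blocks; read 1 MB straddling the block boundary.
    long blk = 128L * 1024 * 1024;
    printRanges(blk - 512 * 1024, 1024 * 1024, new long[]{0, blk}, new long[]{blk, blk});
  }
}

Run as written, the example prints two ranges: 512 KB from the tail of the first block and 512 KB from the head of the second, which is exactly how pread splits a read that crosses a block boundary.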