
Example 1 with BlockMetadataHeader

Use of org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader in the Apache Hadoop project.

From the class MappableBlock, the method verifyChecksum:

/**
   * Verifies the block's checksum. This is an I/O intensive operation.
   */
private static void verifyChecksum(long length, FileInputStream metaIn,
        FileChannel blockChannel, String blockFileName)
        throws IOException, ChecksumException {
    // Verify the checksum from the block's meta file
    // Get the DataChecksum from the meta file header
    BlockMetadataHeader header = BlockMetadataHeader.readHeader(
            new DataInputStream(new BufferedInputStream(
                    metaIn, BlockMetadataHeader.getHeaderSize())));
    FileChannel metaChannel = null;
    try {
        metaChannel = metaIn.getChannel();
        if (metaChannel == null) {
            throw new IOException("Block InputStream meta file has no FileChannel.");
        }
        DataChecksum checksum = header.getChecksum();
        final int bytesPerChecksum = checksum.getBytesPerChecksum();
        final int checksumSize = checksum.getChecksumSize();
        final int numChunks = (8 * 1024 * 1024) / bytesPerChecksum;
        ByteBuffer blockBuf = ByteBuffer.allocate(numChunks * bytesPerChecksum);
        ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks * checksumSize);
        // Verify the checksum
        int bytesVerified = 0;
        while (bytesVerified < length) {
            Preconditions.checkState(bytesVerified % bytesPerChecksum == 0,
                    "Unexpected partial chunk before EOF");
            int bytesRead = fillBuffer(blockChannel, blockBuf);
            if (bytesRead == -1) {
                throw new IOException("checksum verification failed: premature EOF");
            }
            blockBuf.flip();
            // Number of read chunks, including partial chunk at end
            int chunks = (bytesRead + bytesPerChecksum - 1) / bytesPerChecksum;
            checksumBuf.limit(chunks * checksumSize);
            fillBuffer(metaChannel, checksumBuf);
            checksumBuf.flip();
            checksum.verifyChunkedSums(blockBuf, checksumBuf, blockFileName, bytesVerified);
            // Success
            bytesVerified += bytesRead;
            blockBuf.clear();
            checksumBuf.clear();
        }
    } finally {
        IOUtils.closeQuietly(metaChannel);
    }
}
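
The loop above relies on a private helper, fillBuffer, that is not shown in this excerpt. A minimal sketch, assuming the helper simply reads from the channel until the buffer is full or EOF is hit (returning -1 on immediate EOF, which matches the caller's bytesRead check):

/**
 * Reads bytes into the buffer until EOF or the buffer's limit is reached.
 * Returns the number of bytes read, or -1 if the channel was already at EOF.
 */
private static int fillBuffer(FileChannel channel, ByteBuffer buf)
        throws IOException {
    int bytesRead = channel.read(buf);
    if (bytesRead < 0) {
        // EOF before any bytes were read
        return bytesRead;
    }
    while (buf.remaining() > 0) {
        int n = channel.read(buf);
        if (n < 0) {
            // EOF part-way through the buffer; return what was read
            return bytesRead;
        }
        bytesRead += n;
    }
    return bytesRead;
}

On buffer sizing: with HDFS's default bytesPerChecksum of 512 and a 4-byte CRC checksum, numChunks is (8 * 1024 * 1024) / 512 = 16384, so each pass of the loop verifies up to 8 MiB of block data against 16384 * 4 = 64 KiB of checksums.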
Also used : BlockMetadataHeader (org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader), BufferedInputStream (java.io.BufferedInputStream), FileChannel (java.nio.channels.FileChannel), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream), ByteBuffer (java.nio.ByteBuffer), MappedByteBuffer (java.nio.MappedByteBuffer), DataChecksum (org.apache.hadoop.util.DataChecksum)
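
For illustration, a hypothetical harness for calling verifyChecksum might look like the sketch below. The file paths are invented, and the call assumes code running inside MappableBlock (the real method is private); actual block and meta file names come from the DataNode's storage layout.

import java.io.File;
import java.io.FileInputStream;
import org.apache.commons.io.IOUtils;

public static void main(String[] args) throws Exception {
    // Hypothetical paths for illustration only
    File blockFile = new File("/data/current/blk_1073741825");
    File metaFile = new File("/data/current/blk_1073741825_1001.meta");
    FileInputStream blockIn = new FileInputStream(blockFile);
    FileInputStream metaIn = new FileInputStream(metaFile);
    try {
        verifyChecksum(blockFile.length(), metaIn, blockIn.getChannel(),
                blockFile.getName());
    } finally {
        IOUtils.closeQuietly(blockIn);
        IOUtils.closeQuietly(metaIn);
    }
}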

Aggregations

BufferedInputStream (java.io.BufferedInputStream): 1
DataInputStream (java.io.DataInputStream): 1
IOException (java.io.IOException): 1
ByteBuffer (java.nio.ByteBuffer): 1
MappedByteBuffer (java.nio.MappedByteBuffer): 1
FileChannel (java.nio.channels.FileChannel): 1
BlockMetadataHeader (org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader): 1
DataChecksum (org.apache.hadoop.util.DataChecksum): 1