Example 6 with FileChannel

Use of java.nio.channels.FileChannel in project hadoop by apache.

The class MappableBlock, method load.

/**
   * Load the block.
   *
   * mmap and mlock the block, and then verify its checksum.
   *
   * @param length         The current length of the block.
   * @param blockIn        The block input stream.  Should be positioned at the
   *                       start.  The caller must close this.
   * @param metaIn         The meta file input stream.  Should be positioned at
   *                       the start.  The caller must close this.
   * @param blockFileName  The block file name, for logging purposes.
   *
   * @return               The Mappable block.
   */
public static MappableBlock load(long length, FileInputStream blockIn, FileInputStream metaIn, String blockFileName) throws IOException {
    MappableBlock mappableBlock = null;
    MappedByteBuffer mmap = null;
    FileChannel blockChannel = null;
    try {
        blockChannel = blockIn.getChannel();
        if (blockChannel == null) {
            throw new IOException("Block InputStream has no FileChannel.");
        }
        mmap = blockChannel.map(MapMode.READ_ONLY, 0, length);
        NativeIO.POSIX.getCacheManipulator().mlock(blockFileName, mmap, length);
        verifyChecksum(length, metaIn, blockChannel, blockFileName);
        mappableBlock = new MappableBlock(mmap, length);
    } finally {
        IOUtils.closeQuietly(blockChannel);
        if (mappableBlock == null) {
            if (mmap != null) {
                // unmapping also unlocks
                NativeIO.POSIX.munmap(mmap);
            }
        }
    }
    return mappableBlock;
}
Also used : MappedByteBuffer(java.nio.MappedByteBuffer) FileChannel(java.nio.channels.FileChannel) IOException(java.io.IOException)
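
The detail worth noting above is that the MappedByteBuffer outlives the FileChannel that created it: the channel is closed in the finally block, yet the mapping stays valid until it is explicitly unmapped. Below is a minimal sketch of just the mmap step, without Hadoop's native mlock/munmap helpers; the class name and path handling are illustrative, not part of Hadoop.

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;

public class MmapSketch {
    /** Map the first {@code length} bytes of a file read-only. */
    public static MappedByteBuffer mapReadOnly(String path, long length) throws IOException {
        try (FileInputStream in = new FileInputStream(path);
             FileChannel ch = in.getChannel()) {
            // Per the FileChannel javadoc, a mapping, once established,
            // does not depend on the channel that created it.
            return ch.map(MapMode.READ_ONLY, 0, length);
        }
    }
}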

Example 7 with FileChannel

Use of java.nio.channels.FileChannel in project hadoop by apache.

The class TestFsck, method testCorruptBlock.

@Test
public void testCorruptBlock() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;
    short factor = 1;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path file1 = new Path("/testCorruptBlock");
    DFSTestUtil.createFile(fs, file1, 1024, factor, 0);
    // Wait until file replication has completed
    DFSTestUtil.waitReplication(fs, file1, factor);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
    // Make sure filesystem is in healthy state
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // corrupt replicas
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
        // Overwrite a few bytes at a random offset within the first half of
        // the block file; try-with-resources closes it even if the write fails.
        try (RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw")) {
            FileChannel channel = raFile.getChannel();
            String badString = "BADBAD";
            int rand = random.nextInt((int) channel.size() / 2);
            raFile.seek(rand);
            raFile.write(badString.getBytes());
        }
    }
    // Read the file to trigger reportBadBlocks
    try {
        IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
    } catch (IOException ie) {
        assertTrue(ie instanceof ChecksumException);
    }
    dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != factor) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
        blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
    }
    assertTrue(blocks.get(0).isCorrupt());
    // Check if fsck reports the same
    outStr = runFsck(conf, 1, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
    assertTrue(outStr.contains("testCorruptBlock"));
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileChannel(java.nio.channels.FileChannel) ChecksumException(org.apache.hadoop.fs.ChecksumException) InetSocketAddress(java.net.InetSocketAddress) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) IOUtils(org.apache.hadoop.io.IOUtils) Random(java.util.Random) RandomAccessFile(java.io.RandomAccessFile) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) File(java.io.File) Test(org.junit.Test)
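
The corruption step boils down to a positional overwrite somewhere in the first half of the block file. The same idea can be expressed directly against the channel; the sketch below is an illustration only, and the class name and helper are hypothetical, not part of the test.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.ThreadLocalRandom;

public class CorruptSketch {
    /** Overwrite a few bytes at a random offset in the first half of a file. */
    public static void corrupt(Path file) throws IOException {
        try (FileChannel ch = FileChannel.open(file,
                StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            // Assumes the file is at least two bytes long.
            long pos = ThreadLocalRandom.current().nextLong(ch.size() / 2);
            // Positional write: leaves the channel's own position untouched.
            ch.write(ByteBuffer.wrap("BADBAD".getBytes()), pos);
        }
    }
}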

Example 8 with FileChannel

Use of java.nio.channels.FileChannel in project hadoop by apache.

The class TestIOUtils, method testWriteFully.

@Test
public void testWriteFully() throws IOException {
    final int INPUT_BUFFER_LEN = 10000;
    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
    byte[] input = new byte[INPUT_BUFFER_LEN];
    for (int i = 0; i < input.length; i++) {
        input[i] = (byte) (i & 0xff);
    }
    byte[] output = new byte[input.length];
    try {
        RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
        FileChannel fc = raf.getChannel();
        ByteBuffer buf = ByteBuffer.wrap(input);
        IOUtils.writeFully(fc, buf);
        raf.seek(0);
        // readFully, unlike read, guarantees the whole array is filled
        raf.readFully(output);
        for (int i = 0; i < input.length; i++) {
            assertEquals(input[i], output[i]);
        }
        buf.rewind();
        IOUtils.writeFully(fc, buf, HALFWAY);
        for (int i = 0; i < HALFWAY; i++) {
            assertEquals(input[i], output[i]);
        }
        raf.seek(0);
        raf.readFully(output);
        for (int i = HALFWAY; i < input.length; i++) {
            assertEquals(input[i - HALFWAY], output[i]);
        }
        raf.close();
    } finally {
        File f = new File(TEST_FILE_NAME);
        if (f.exists()) {
            f.delete();
        }
    }
}
Also used : RandomAccessFile(java.io.RandomAccessFile) FileChannel(java.nio.channels.FileChannel) ByteBuffer(java.nio.ByteBuffer) File(java.io.File) Test(org.junit.Test)
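
The property IOUtils.writeFully exists to guarantee is that the entire buffer lands in the file: a single FileChannel.write call is allowed to drain only part of it. The following sketch shows plausible semantics for the two overloads exercised by the test; it is an illustration, not Hadoop's actual implementation.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class WriteFullySketch {
    /** Write until the buffer is drained, starting at the channel's position. */
    public static void writeFully(FileChannel fc, ByteBuffer buf) throws IOException {
        while (buf.hasRemaining()) {
            fc.write(buf);
        }
    }

    /** Positional variant: writes at {@code offset}, leaving the channel's position alone. */
    public static void writeFully(FileChannel fc, ByteBuffer buf, long offset) throws IOException {
        while (buf.hasRemaining()) {
            offset += fc.write(buf, offset);
        }
    }
}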

Example 9 with FileChannel

Use of java.nio.channels.FileChannel in project hadoop by apache.

The class TestFSEditLogLoader, method getNonTrailerLength.

/**
   * Return the length of bytes in the given file after subtracting
   * the trailer of 0xFF (OP_INVALID)s.
   * This seeks to the end of the file and reads chunks backwards until
   * it finds a non-0xFF byte.
   * @throws IOException if the file cannot be read
   */
private static long getNonTrailerLength(File f) throws IOException {
    final int chunkSizeToRead = 256 * 1024;
    FileInputStream fis = new FileInputStream(f);
    try {
        byte[] buf = new byte[chunkSizeToRead];
        FileChannel fc = fis.getChannel();
        long size = fc.size();
        long pos = size - (size % chunkSizeToRead);
        while (pos >= 0) {
            fc.position(pos);
            int readLen = (int) Math.min(size - pos, chunkSizeToRead);
            IOUtils.readFully(fis, buf, 0, readLen);
            for (int i = readLen - 1; i >= 0; i--) {
                if (buf[i] != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
                    // + 1 since we count this byte!
                    return pos + i + 1;
                }
            }
            pos -= chunkSizeToRead;
        }
        return 0;
    } finally {
        fis.close();
    }
}
Also used : FileChannel(java.nio.channels.FileChannel) FileInputStream(java.io.FileInputStream)
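
The same backward scan can be written against the channel alone, using positional reads instead of repositioning the stream that owns it. This is a sketch under the assumption that the trailer byte is 0xFF (OP_INVALID in the edit-log format above); the class name is hypothetical.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class TrailerScanSketch {
    private static final byte TRAILER = (byte) 0xFF;

    public static long nonTrailerLength(Path f) throws IOException {
        final int chunk = 256 * 1024;
        try (FileChannel fc = FileChannel.open(f, StandardOpenOption.READ)) {
            long size = fc.size();
            ByteBuffer buf = ByteBuffer.allocate(chunk);
            for (long pos = size - (size % chunk); pos >= 0; pos -= chunk) {
                buf.clear();
                buf.limit((int) Math.min(size - pos, chunk));
                // Positional reads; loop because a read may return a short count.
                while (buf.hasRemaining()) {
                    if (fc.read(buf, pos + buf.position()) < 0) {
                        break;
                    }
                }
                for (int i = buf.position() - 1; i >= 0; i--) {
                    if (buf.get(i) != TRAILER) {
                        return pos + i + 1; // + 1 since we count this byte
                    }
                }
            }
            return 0;
        }
    }
}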

Example 10 with FileChannel

Use of java.nio.channels.FileChannel in project hadoop by apache.

The class BlockSender, method sendPacket.

/**
   * Sends a packet with up to maxChunks chunks of data.
   * 
   * @param pkt buffer used for writing packet data
   * @param maxChunks maximum number of chunks to send
   * @param out stream to send data to
   * @param transferTo use transferTo to send data
   * @param throttler used for throttling data transfer bandwidth
   */
private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out, boolean transferTo, DataTransferThrottler throttler) throws IOException {
    int dataLen = (int) Math.min(endOffset - offset, (chunkSize * (long) maxChunks));
    // Number of chunks to be sent in the packet
    int numChunks = numberOfChunks(dataLen);
    int checksumDataLen = numChunks * checksumSize;
    int packetLen = dataLen + checksumDataLen + 4;
    boolean lastDataPacket = offset + dataLen == endOffset && dataLen > 0;
    // The packet buffer is organized as follows:
    // _______HHHHCCCCD?D?D?D?
    //        ^   ^
    //        |   \ checksumOff
    //        \ headerOff
    // _ padding, since the header is variable-length
    // H = header and length prefixes
    // C = checksums
    // D? = data, if transferTo is false.
    int headerLen = writePacketHeader(pkt, dataLen, packetLen);
    // Per above, the header doesn't start at the beginning of the
    // buffer
    int headerOff = pkt.position() - headerLen;
    int checksumOff = pkt.position();
    byte[] buf = pkt.array();
    if (checksumSize > 0 && ris.getChecksumIn() != null) {
        readChecksum(buf, checksumOff, checksumDataLen);
        // a write may still be in progress; if so, pick up the freshest checksum for the last chunk
        if (lastDataPacket && lastChunkChecksum != null) {
            int start = checksumOff + checksumDataLen - checksumSize;
            byte[] updatedChecksum = lastChunkChecksum.getChecksum();
            if (updatedChecksum != null) {
                System.arraycopy(updatedChecksum, 0, buf, start, checksumSize);
            }
        }
    }
    int dataOff = checksumOff + checksumDataLen;
    if (!transferTo) {
        // normal transfer
        ris.readDataFully(buf, dataOff, dataLen);
        if (verifyChecksum) {
            verifyChecksum(buf, dataOff, dataLen, numChunks, checksumOff);
        }
    }
    try {
        if (transferTo) {
            SocketOutputStream sockOut = (SocketOutputStream) out;
            // First write header and checksums
            sockOut.write(buf, headerOff, dataOff - headerOff);
            // no need to flush since we know out is not a buffered stream
            FileChannel fileCh = ((FileInputStream) ris.getDataIn()).getChannel();
            LongWritable waitTime = new LongWritable();
            LongWritable transferTime = new LongWritable();
            fileIoProvider.transferToSocketFully(ris.getVolumeRef().getVolume(), sockOut, fileCh, blockInPosition, dataLen, waitTime, transferTime);
            datanode.metrics.addSendDataPacketBlockedOnNetworkNanos(waitTime.get());
            datanode.metrics.addSendDataPacketTransferNanos(transferTime.get());
            blockInPosition += dataLen;
        } else {
            // normal transfer
            out.write(buf, headerOff, dataOff + dataLen - headerOff);
        }
    } catch (IOException e) {
        if (e instanceof SocketTimeoutException) {
        /*
         * writing to client timed out.  This happens if the client reads
         * part of a block and then decides not to read the rest (but leaves
         * the socket open).
         * 
         * Reporting of this case is done in DataXceiver#run
         */
        } else {
            /* Exception while writing to the client. Closure of the
             * connection by the other end is the most common cause, and we
             * do not care much about it. But other things can go wrong,
             * especially in transferTo(), which we do not want to ignore.
             *
             * The message parsing below should not be considered a good
             * coding example. NEVER do it to drive program logic. It is
             * done here only because NIO throws a plain IOException for
             * EPIPE.
             */
            String ioem = e.getMessage();
            // Guard against a null message before matching on it.
            if (ioem == null || (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset"))) {
                LOG.error("BlockSender.sendChunks() exception: ", e);
                datanode.getBlockScanner().markSuspectBlock(ris.getVolumeRef().getVolume().getStorageID(), block);
            }
        }
        throw ioeToSocketException(e);
    }
    if (throttler != null) {
        // rebalancing so throttle
        throttler.throttle(packetLen);
    }
    return dataLen;
}
Also used : SocketOutputStream(org.apache.hadoop.net.SocketOutputStream) SocketTimeoutException(java.net.SocketTimeoutException) FileChannel(java.nio.channels.FileChannel) LongWritable(org.apache.hadoop.io.LongWritable) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream)
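
The transferTo branch is the zero-copy path: the block data moves from the file to the socket without being staged through a user-space buffer, which is why only the header and checksums pass through buf. Helpers like transferToSocketFully ultimately rely on FileChannel.transferTo, and any "fully" wrapper needs a loop, since transferTo may move fewer bytes than requested. A bare sketch follows; the names are illustrative, not Hadoop's API.

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;

public class TransferToSketch {
    /** Keep calling transferTo until {@code count} bytes have been sent. */
    public static void transferFully(FileChannel fc, long pos, long count,
                                     WritableByteChannel out) throws IOException {
        long remaining = count;
        while (remaining > 0) {
            long sent = fc.transferTo(pos, remaining, out);
            pos += sent;
            remaining -= sent;
        }
    }
}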

Aggregations

FileChannel (java.nio.channels.FileChannel) 629
IOException (java.io.IOException) 227
ByteBuffer (java.nio.ByteBuffer) 205
File (java.io.File) 185
FileInputStream (java.io.FileInputStream) 164
FileOutputStream (java.io.FileOutputStream) 147
RandomAccessFile (java.io.RandomAccessFile) 144
Test (org.junit.Test) 95
MappedByteBuffer (java.nio.MappedByteBuffer) 78
Path (java.nio.file.Path) 37
FileLock (java.nio.channels.FileLock) 32
FileNotFoundException (java.io.FileNotFoundException) 29
Random (java.util.Random) 12
OutputStream (java.io.OutputStream) 11
ArrayList (java.util.ArrayList) 11
AsynchronousFileChannel (java.nio.channels.AsynchronousFileChannel) 10
OverlappingFileLockException (java.nio.channels.OverlappingFileLockException) 10
LinkedList (java.util.LinkedList) 10
ProjectWorkspace (com.facebook.buck.testutil.integration.ProjectWorkspace) 9
BufferedReader (java.io.BufferedReader) 9