Search in sources :

Example 1 with ShortCircuitShm

Example usage of org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm in the Apache Hadoop project.

From the class TestBlockReaderLocal, the method runBlockReaderLocalTest:

/**
 * Drives a {@link BlockReaderLocalTest} scenario against a real block file.
 * <p>
 * Sets up a single-datanode {@code MiniDFSCluster}, writes a test file of
 * {@code BlockReaderLocalTest.TEST_LENGTH} random bytes, captures the expected
 * contents, then shuts the cluster down and constructs a
 * {@code BlockReaderLocal} directly over the block's data and metadata files
 * (backed by a {@code ShortCircuitShm} slot). Finally it runs the supplied
 * test callback and verifies the reader did not disturb the underlying file
 * channel positions.
 *
 * @param test      scenario to configure and execute against the reader
 * @param checksum  whether checksum verification is enabled; note the config
 *                  key is the inverse ({@code SKIP_CHECKSUM_KEY = !checksum})
 * @param readahead value for {@code DFS_CLIENT_CACHE_READAHEAD}, also passed
 *                  to the reader's {@code CachingStrategy}
 * @throws IOException on any filesystem or reader failure
 */
public void runBlockReaderLocalTest(BlockReaderLocalTest test, boolean checksum, long readahead) throws IOException {
    // Short-circuit reads require domain sockets; skip (not fail) if the
    // native support could not be loaded on this platform.
    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
    MiniDFSCluster cluster = null;
    HdfsConfiguration conf = new HdfsConfiguration();
    // SKIP_CHECKSUM_KEY is the negation of the desired "verify checksum" flag.
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, !checksum);
    conf.setLong(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BlockReaderLocalTest.BYTES_PER_CHECKSUM);
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead);
    test.setConfiguration(conf);
    FileInputStream dataIn = null, metaIn = null;
    final Path TEST_PATH = new Path("/a");
    final long RANDOM_SEED = 4567L;
    BlockReaderLocal blockReaderLocal = null;
    FSDataInputStream fsIn = null;
    // Holds the expected file contents, read back through the normal client
    // path before the cluster is torn down.
    byte[] original = new byte[BlockReaderLocalTest.TEST_LENGTH];
    FileSystem fs = null;
    ShortCircuitShm shm = null;
    RandomAccessFile raf = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, BlockReaderLocalTest.TEST_LENGTH, (short) 1, RANDOM_SEED);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        // Read the whole file through the ordinary DFS client path to capture
        // the expected bytes for later comparison by test.doTest().
        fsIn = fs.open(TEST_PATH);
        IOUtils.readFully(fsIn, original, 0, BlockReaderLocalTest.TEST_LENGTH);
        fsIn.close();
        fsIn = null;
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
        // Locate the block's on-disk data and metadata files on datanode 0
        // while the cluster is still up.
        File dataFile = cluster.getBlockFile(0, block);
        File metaFile = cluster.getBlockMetadataFile(0, block);
        ShortCircuitCache shortCircuitCache = ClientContext.getFromConf(conf).getShortCircuitCache();
        // Shut the cluster down before opening the raw block files directly;
        // the reader under test operates on the files, not on a live datanode.
        cluster.shutdown();
        cluster = null;
        test.setup(dataFile, checksum);
        FileInputStream[] streams = { new FileInputStream(dataFile), new FileInputStream(metaFile) };
        dataIn = streams[0];
        metaIn = streams[1];
        ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
        // Back the shared-memory segment with a throwaway file in the test
        // socket directory; 8192 bytes is the segment size used here.
        raf = new RandomAccessFile(new File(sockDir.getDir().getAbsolutePath(), UUID.randomUUID().toString()), "rw");
        raf.setLength(8192);
        FileInputStream shmStream = new FileInputStream(raf.getFD());
        shm = new ShortCircuitShm(ShmId.createRandom(), shmStream);
        ShortCircuitReplica replica = new ShortCircuitReplica(key, dataIn, metaIn, shortCircuitCache, Time.now(), shm.allocAndRegisterSlot(ExtendedBlockId.fromExtendedBlock(block)));
        blockReaderLocal = new BlockReaderLocal.Builder(new DfsClientConf.ShortCircuitConf(conf)).setFilename(TEST_PATH.getName()).setBlock(block).setShortCircuitReplica(replica).setCachingStrategy(new CachingStrategy(false, readahead)).setVerifyChecksum(checksum).setTracer(FsTracer.get(conf)).build();
        // The replica now owns dataIn/metaIn; clear the locals so the
        // finally block below does not close them a second time.
        dataIn = null;
        metaIn = null;
        test.doTest(blockReaderLocal, original);
        // BlockReaderLocal should not alter the file position.
        Assert.assertEquals(0, streams[0].getChannel().position());
        Assert.assertEquals(0, streams[1].getChannel().position());
    } finally {
        // Best-effort cleanup in a fixed order; each resource is closed only
        // if it is still owned by this method (non-null) at this point.
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
        if (dataIn != null)
            dataIn.close();
        if (metaIn != null)
            metaIn.close();
        if (blockReaderLocal != null)
            blockReaderLocal.close();
        if (shm != null)
            shm.free();
        if (raf != null)
            raf.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ExtendedBlockId(org.apache.hadoop.hdfs.ExtendedBlockId) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) CachingStrategy(org.apache.hadoop.hdfs.server.datanode.CachingStrategy) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ShortCircuitShm(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm) ShortCircuitCache(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache) FileInputStream(java.io.FileInputStream) RandomAccessFile(java.io.RandomAccessFile) ShortCircuitReplica(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) TimeoutException(java.util.concurrent.TimeoutException)

Aggregations

File (java.io.File)1 FileInputStream (java.io.FileInputStream)1 RandomAccessFile (java.io.RandomAccessFile)1 TimeoutException (java.util.concurrent.TimeoutException)1 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 Path (org.apache.hadoop.fs.Path)1 ExtendedBlockId (org.apache.hadoop.hdfs.ExtendedBlockId)1 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)1 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)1 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)1 CachingStrategy (org.apache.hadoop.hdfs.server.datanode.CachingStrategy)1 ShortCircuitCache (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache)1 ShortCircuitReplica (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica)1 ShortCircuitShm (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm)1