
Example 6 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestBlockReaderFactory, method testMultipleWaitersOnShortCircuitCache.

/**
   * Test the case where we have multiple threads waiting on the
   * ShortCircuitCache delivering a certain ShortCircuitReplica.
   *
   * In this case, there should only be one call to
   * createShortCircuitReplicaInfo.  This one replica should be shared
   * by all threads.
   */
@Test(timeout = 60000)
public void testMultipleWaitersOnShortCircuitCache() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {

        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            Uninterruptibles.awaitUninterruptibly(latch);
            if (!creationIsBlocked.compareAndSet(true, false)) {
                Assert.fail("there were multiple calls to " + "createShortCircuitReplicaInfo.  Only one was expected.");
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testMultipleWaitersOnShortCircuitCache", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADED;
    final int NUM_THREADS = 10;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {

        @Override
        public void run() {
            try {
                byte[] contents = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
                Assert.assertFalse(creationIsBlocked.get());
                byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
                Assert.assertTrue(Arrays.equals(contents, expected));
            } catch (Throwable e) {
                LOG.error("readerRunnable error", e);
                testFailed.set(true);
            }
        }
    };
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    Thread.sleep(500);
    latch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), CountDownLatch (java.util.concurrent.CountDownLatch), ShortCircuitCache (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), ShortCircuitReplicaInfo (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo), Test (org.junit.Test)
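
The createShortCircuitConf helper used throughout these examples is not shown in the excerpts. The sketch below is a hypothetical reconstruction of what such a helper typically configures, based only on the keys that appear elsewhere on this page (DFS_CLIENT_CONTEXT, DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, HdfsClientConfigKeys.Read.ShortCircuit.KEY, DomainSocket.disableBindPathValidation); the real method in TestBlockReaderFactory may set additional options.

// Hypothetical sketch of a createShortCircuitConf-style helper; not the actual Hadoop test code.
private static Configuration createShortCircuitConf(String testName, TemporarySocketDirectory sockDir) {
    Configuration conf = new Configuration();
    // Give each test its own client context so ShortCircuitCache state is not shared across tests.
    conf.set(DFS_CLIENT_CONTEXT, testName);
    // Point the client and DataNode at a UNIX domain socket inside the temporary directory.
    conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(), testName + ".sock").getAbsolutePath());
    // Enable short-circuit local reads on the client side.
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
    // The temporary socket directory lives under /tmp, so skip the strict bind-path permission checks.
    DomainSocket.disableBindPathValidation();
    return conf;
}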

Example 7 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestBlockReaderFactory, method testShortCircuitReadFromServerWithoutShm.

/**
   * Test that a client which supports short-circuit reads using
   * shared memory can fall back to not using shared memory when
   * the server doesn't support it.
   */
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration clientConf = createShortCircuitConf("testShortCircuitReadFromServerWithoutShm", sockDir);
    Configuration serverConf = new Configuration(clientConf);
    serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
    cluster.waitActive();
    clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromServerWithoutShm_clientContext");
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
    final DatanodeInfo datanode = new DatanodeInfoBuilder().setNodeID(cluster.getDataNodes().get(0).getDatanodeId()).build();
    cache.getDfsClientShmManager().visit(new Visitor() {

        @Override
        public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info) throws IOException {
            Assert.assertEquals(1, info.size());
            PerDatanodeVisitorInfo vinfo = info.get(datanode);
            Assert.assertTrue(vinfo.disabled);
            Assert.assertEquals(0, vinfo.full.size());
            Assert.assertEquals(0, vinfo.notFull.size());
        }
    });
    cluster.shutdown();
    sockDir.close();
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), Visitor (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), ShortCircuitCache (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), PerDatanodeVisitorInfo (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo), Test (org.junit.Test)

Example 8 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestBlockReaderFactory, method testFallbackFromShortCircuitToUnixDomainTraffic.

/**
   * If we have a UNIX domain socket configured,
   * and we have dfs.client.domain.socket.data.traffic set to true,
   * and short-circuit access fails, we should still be able to pass
   * data traffic over the UNIX domain socket.  Test this.
   */
@Test(timeout = 60000)
public void testFallbackFromShortCircuitToUnixDomainTraffic() throws Exception {
    DFSInputStream.tcpReadsDisabledForTesting = true;
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    // The server is NOT configured with short-circuit local reads;
    // the client is.  Both support UNIX domain reads.
    Configuration clientConf = createShortCircuitConf("testFallbackFromShortCircuitToUnixDomainTraffic", sockDir);
    clientConf.set(DFS_CLIENT_CONTEXT, "testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
    clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
    Configuration serverConf = new Configuration(clientConf);
    serverConf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem dfs = FileSystem.get(cluster.getURI(0), clientConf);
    String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 8193;
    final int SEED = 0xFADED;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte[] contents = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    cluster.shutdown();
    sockDir.close();
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), Test (org.junit.Test)
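
Outside the test harness, the same fallback behaviour hinges on two client-side settings. The snippet below is a minimal sketch using the literal key names that the imported constants are generally understood to stand for (dfs.client.read.shortcircuit, dfs.client.domain.socket.data.traffic, dfs.domain.socket.path); the socket path is a placeholder and must match the DataNode's configuration.

// Sketch: request short-circuit reads, but allow data traffic to fall back
// to the UNIX domain socket when short-circuit access cannot be used.
Configuration clientConf = new Configuration();
// Must match the dfs.domain.socket.path configured on the DataNode (placeholder value).
clientConf.set("dfs.domain.socket.path", "/var/lib/hadoop-hdfs/dn_socket");
// Ask for short-circuit local reads.
clientConf.setBoolean("dfs.client.read.shortcircuit", true);
// Permit ordinary read traffic over the domain socket when short-circuit access fails.
clientConf.setBoolean("dfs.client.domain.socket.data.traffic", true);
FileSystem fs = FileSystem.get(clientConf);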

Example 9 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestBlockReaderFactory, method testShortCircuitCacheShutdown.

/**
   * Test shutting down the ShortCircuitCache while there are things in it.
   */
@Test
public void testShortCircuitCacheShutdown() throws Exception {
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShortCircuitCacheShutdown", sockDir);
    conf.set(DFS_CLIENT_CONTEXT, "testShortCircuitCacheShutdown");
    Configuration serverConf = new Configuration(conf);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
    cache.close();
    Assert.assertTrue(cache.getDfsClientShmManager().getDomainSocketWatcher().isClosed());
    cluster.shutdown();
    sockDir.close();
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), ShortCircuitCache (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), Test (org.junit.Test)

Example 10 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestBlockReaderLocal, method testStatistics.

private void testStatistics(boolean isShortCircuit) throws Exception {
    Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
    HdfsConfiguration conf = new HdfsConfiguration();
    TemporarySocketDirectory sockDir = null;
    if (isShortCircuit) {
        DFSInputStream.tcpReadsDisabledForTesting = true;
        sockDir = new TemporarySocketDirectory();
        conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(), "TestStatisticsForLocalRead.%d.sock").getAbsolutePath());
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
        DomainSocket.disableBindPathValidation();
    } else {
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
    }
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    final long RANDOM_SEED = 4567L;
    FSDataInputStream fsIn = null;
    byte[] original = new byte[BlockReaderLocalTest.TEST_LENGTH];
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).hosts(new String[] { NetUtils.getLocalHostname() }).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, BlockReaderLocalTest.TEST_LENGTH, (short) 1, RANDOM_SEED);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        fsIn = fs.open(TEST_PATH);
        IOUtils.readFully(fsIn, original, 0, BlockReaderLocalTest.TEST_LENGTH);
        HdfsDataInputStream dfsIn = (HdfsDataInputStream) fsIn;
        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalBytesRead());
        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalLocalBytesRead());
        if (isShortCircuit) {
            Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
        } else {
            Assert.assertEquals(0, dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
        }
        fsIn.close();
        fsIn = null;
    } finally {
        DFSInputStream.tcpReadsDisabledForTesting = false;
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
        if (sockDir != null)
            sockDir.close();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), HdfsDataInputStream (org.apache.hadoop.hdfs.client.HdfsDataInputStream), TimeoutException (java.util.concurrent.TimeoutException)
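
The statistics assertions above translate directly into application code. Below is a minimal sketch that uses only the APIs exercised in the test (HdfsDataInputStream.getReadStatistics() and org.apache.hadoop.io.IOUtils.readFully); the helper name and the assumption that the file holds at least 8192 bytes are placeholders, not part of the original test.

// Sketch: report how much of a read was served locally and via short-circuit access.
private static void printReadStatistics(FileSystem fs, Path path) throws IOException {
    byte[] buf = new byte[8192];
    try (FSDataInputStream in = fs.open(path)) {
        // Assumes the file contains at least buf.length bytes; readFully throws otherwise.
        IOUtils.readFully(in, buf, 0, buf.length);
        HdfsDataInputStream dfsIn = (HdfsDataInputStream) in;
        System.out.println("total bytes read:         " + dfsIn.getReadStatistics().getTotalBytesRead());
        System.out.println("local bytes read:         " + dfsIn.getReadStatistics().getTotalLocalBytesRead());
        System.out.println("short-circuit bytes read: " + dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
    }
}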

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 507 usages
Test (org.junit.Test): 429 usages
Configuration (org.apache.hadoop.conf.Configuration): 403 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 312 usages
Path (org.apache.hadoop.fs.Path): 290 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 211 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 183 usages
IOException (java.io.IOException): 107 usages
File (java.io.File): 83 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 64 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 53 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 35 usages
RandomAccessFile (java.io.RandomAccessFile): 33 usages
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33 usages
URI (java.net.URI): 31 usages
ArrayList (java.util.ArrayList): 29 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 28 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 26 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25 usages
HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest): 24 usages