Example 1 with TemporarySocketDirectory

Use of org.apache.hadoop.net.unix.TemporarySocketDirectory in the Apache Hadoop project.

From the class TestBlockReaderFactory, method testShortCircuitCacheTemporaryFailure.

/**
   * Test the case where a short-circuit read fails at first and a later
   * attempt succeeds.
   * Any thread waiting on a cache load should receive the failure (if it
   * occurs); however, the failure result should not be cached. We want
   * to be able to retry later and succeed.
   */
@Test(timeout = 60000)
public void testShortCircuitCacheTemporaryFailure() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicBoolean replicaCreationShouldFail = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {

        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            if (replicaCreationShouldFail.get()) {
                // Insert a short delay to increase the chance that one client
                // thread waits for the other client thread's failure via
                // a condition variable.
                Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
                return new ShortCircuitReplicaInfo();
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShortCircuitCacheTemporaryFailure", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int NUM_THREADS = 2;
    final int SEED = 0xFADED;
    final CountDownLatch gotFailureLatch = new CountDownLatch(NUM_THREADS);
    final CountDownLatch shouldRetryLatch = new CountDownLatch(1);
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {

        @Override
        public void run() {
            try {
                // First time should fail.
                List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
                // first block
                LocatedBlock lblock = locatedBlocks.get(0);
                BlockReader blockReader = null;
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
                    Assert.fail("expected getBlockReader to fail the first time.");
                } catch (Throwable t) {
                    Assert.assertTrue("expected to see 'TCP reads were disabled " + "for testing' in exception " + t, t.getMessage().contains("TCP reads were disabled for testing"));
                } finally {
                    // keep findbugs happy
                    if (blockReader != null)
                        blockReader.close();
                }
                gotFailureLatch.countDown();
                shouldRetryLatch.await();
                // Second time should succeed.
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
                } catch (Throwable t) {
                    LOG.error("error trying to retrieve a block reader " + "the second time.", t);
                    throw t;
                } finally {
                    if (blockReader != null)
                        blockReader.close();
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
            }
        }
    };
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    gotFailureLatch.await();
    replicaCreationShouldFail.set(false);
    shouldRetryLatch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) BlockReader(org.apache.hadoop.hdfs.BlockReader) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ShortCircuitCache(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) CountDownLatch(java.util.concurrent.CountDownLatch) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ShortCircuitReplicaInfo(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo) Test(org.junit.Test)
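
The two CountDownLatch objects carry the test's fail-then-retry handoff: each reader counts down gotFailureLatch once it has seen the injected failure, and the main thread then clears the failure flag and opens shouldRetryLatch. The same coordination pattern in isolation, as a minimal plain-JDK sketch (class and variable names here are illustrative, not from Hadoop):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class FailThenRetryDemo {
    public static void main(String[] args) throws InterruptedException {
        final int NUM_THREADS = 2;
        final AtomicBoolean shouldFail = new AtomicBoolean(true);
        final CountDownLatch gotFailure = new CountDownLatch(NUM_THREADS);
        final CountDownLatch shouldRetry = new CountDownLatch(1);
        Runnable reader = () -> {
            // First attempt: observe the injected failure.
            if (shouldFail.get()) {
                System.out.println(Thread.currentThread().getName() + ": first attempt failed, as expected");
            }
            gotFailure.countDown(); // report the failure to the coordinator
            try {
                shouldRetry.await(); // block until retries are allowed
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
            // Second attempt: succeeds because the failure was never cached.
            System.out.println(Thread.currentThread().getName() + ": retry succeeded: " + !shouldFail.get());
        };
        Thread[] threads = new Thread[NUM_THREADS];
        for (int i = 0; i < NUM_THREADS; i++) {
            threads[i] = new Thread(reader);
            threads[i].start();
        }
        gotFailure.await(); // every reader has seen the failure
        shouldFail.set(false); // stop injecting failures
        shouldRetry.countDown(); // release the readers for their retry
        for (Thread t : threads) {
            t.join();
        }
    }
}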

Example 2 with TemporarySocketDirectory

Use of org.apache.hadoop.net.unix.TemporarySocketDirectory in the Apache Hadoop project.

From the class TestBlockReaderFactory, method testShortCircuitReadFromClientWithoutShm.

/**
   * Test that a client which does not support short-circuit reads using
   * shared memory can talk with a server which supports it.
   */
@Test
public void testShortCircuitReadFromClientWithoutShm() throws Exception {
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration clientConf = createShortCircuitConf("testShortCircuitReadWithoutShm", sockDir);
    Configuration serverConf = new Configuration(clientConf);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
    cluster.waitActive();
    clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
    clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromClientWithoutShm_clientContext");
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
    Assert.assertEquals(null, cache.getDfsClientShmManager());
    cluster.shutdown();
    sockDir.close();
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ShortCircuitCache(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) Test(org.junit.Test)
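
All of these examples call createShortCircuitConf, a private helper of TestBlockReaderFactory that this page does not show. Below is a rough sketch of what such a helper has to set up, inferred from the configuration keys the examples themselves use; the exact body is an assumption, not the Hadoop source:

// Sketch only: an approximation of what a createShortCircuitConf-style
// helper must configure. The exact Hadoop implementation may differ.
private static Configuration createShortCircuitConf(String testName,
        TemporarySocketDirectory sockDir) {
    Configuration conf = new Configuration();
    // Give each test its own client context so client-side caches are not shared.
    conf.set(DFS_CLIENT_CONTEXT, testName);
    // Point the client and DataNode at a UNIX domain socket inside the
    // temporary directory; TemporarySocketDirectory removes it on close().
    conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
            new File(sockDir.getDir(), testName + "._PORT").getAbsolutePath());
    // Enable short-circuit local reads.
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
    return conf;
}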

Example 3 with TemporarySocketDirectory

Use of org.apache.hadoop.net.unix.TemporarySocketDirectory in the Apache Hadoop project.

From the class TestBlockReaderFactory, method testMultipleWaitersOnShortCircuitCache.

/**
   * Test the case where multiple threads are waiting on the
   * ShortCircuitCache to deliver a certain ShortCircuitReplica.
   *
   * In this case, there should be only one call to
   * createShortCircuitReplicaInfo. This one replica should be shared
   * by all threads.
   */
@Test(timeout = 60000)
public void testMultipleWaitersOnShortCircuitCache() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {

        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            Uninterruptibles.awaitUninterruptibly(latch);
            if (!creationIsBlocked.compareAndSet(true, false)) {
                Assert.fail("there were multiple calls to " + "createShortCircuitReplicaInfo.  Only one was expected.");
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testMultipleWaitersOnShortCircuitCache", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADED;
    final int NUM_THREADS = 10;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {

        @Override
        public void run() {
            try {
                byte[] contents = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
                Assert.assertFalse(creationIsBlocked.get());
                byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
                Assert.assertTrue(Arrays.equals(contents, expected));
            } catch (Throwable e) {
                LOG.error("readerRunnable error", e);
                testFailed.set(true);
            }
        }
    };
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    Thread.sleep(500);
    latch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) CountDownLatch(java.util.concurrent.CountDownLatch) ShortCircuitCache(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ShortCircuitReplicaInfo(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo) Test(org.junit.Test)
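
The single-creator invariant is enforced with compareAndSet: the first call to createShortCircuitReplicaInfo atomically flips creationIsBlocked from true to false, so any second call fails the test. The same idiom in isolation, as a minimal plain-JDK sketch (names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class SingleCallGuard {
    private final AtomicBoolean firstCall = new AtomicBoolean(true);

    // Returns true only for the very first caller; every later caller
    // observes the flipped flag, which a test can treat as a failure.
    public boolean tryFirstCall() {
        return firstCall.compareAndSet(true, false);
    }

    public static void main(String[] args) {
        SingleCallGuard guard = new SingleCallGuard();
        System.out.println(guard.tryFirstCall()); // true: first call wins
        System.out.println(guard.tryFirstCall()); // false: duplicate detected
    }
}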

Example 4 with TemporarySocketDirectory

Use of org.apache.hadoop.net.unix.TemporarySocketDirectory in the Apache Hadoop project.

From the class TestBlockReaderFactory, method testShortCircuitReadFromServerWithoutShm.

/**
   * Test that a client which supports short-circuit reads using
   * shared memory can fall back to not using shared memory when
   * the server doesn't support it.
   */
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration clientConf = createShortCircuitConf("testShortCircuitReadFromServerWithoutShm", sockDir);
    Configuration serverConf = new Configuration(clientConf);
    serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
    cluster.waitActive();
    clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromServerWithoutShm_clientContext");
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
    final DatanodeInfo datanode = new DatanodeInfoBuilder().setNodeID(cluster.getDataNodes().get(0).getDatanodeId()).build();
    cache.getDfsClientShmManager().visit(new Visitor() {

        @Override
        public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info) throws IOException {
            Assert.assertEquals(1, info.size());
            PerDatanodeVisitorInfo vinfo = info.get(datanode);
            Assert.assertTrue(vinfo.disabled);
            Assert.assertEquals(0, vinfo.full.size());
            Assert.assertEquals(0, vinfo.notFull.size());
        }
    });
    cluster.shutdown();
    sockDir.close();
}
Also used: Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) Visitor(org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ShortCircuitCache(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) PerDatanodeVisitorInfo(org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo) Test(org.junit.Test)
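
The content check in these tests works because DFSTestUtil derives a file's bytes deterministically from SEED, so a reader can recompute the expected contents without storing them anywhere. A minimal sketch of the idea, assuming a seeded java.util.Random as the generator (the real DFSTestUtil internals may differ):

import java.util.Arrays;
import java.util.Random;

public class SeededContentsDemo {
    // Deterministically derive a file's expected contents from a seed.
    static byte[] contentsFromSeed(int seed, int len) {
        byte[] buf = new byte[len];
        new Random(seed).nextBytes(buf);
        return buf;
    }

    public static void main(String[] args) {
        byte[] written = contentsFromSeed(0xFADEC, 4000);  // what a writer would produce
        byte[] expected = contentsFromSeed(0xFADEC, 4000); // what a reader recomputes
        System.out.println(Arrays.equals(written, expected)); // true: same seed, same bytes
    }
}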

Example 5 with TemporarySocketDirectory

Use of org.apache.hadoop.net.unix.TemporarySocketDirectory in the Apache Hadoop project.

From the class TestBlockReaderFactory, method testFallbackFromShortCircuitToUnixDomainTraffic.

/**
   * Test that if a UNIX domain socket is configured,
   * dfs.client.domain.socket.data.traffic is set to true,
   * and short-circuit access fails, we can still pass
   * data traffic over the UNIX domain socket.
   */
@Test(timeout = 60000)
public void testFallbackFromShortCircuitToUnixDomainTraffic() throws Exception {
    DFSInputStream.tcpReadsDisabledForTesting = true;
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    // The server is NOT configured with short-circuit local reads;
    // the client is.  Both support UNIX domain reads.
    Configuration clientConf = createShortCircuitConf("testFallbackFromShortCircuitToUnixDomainTraffic", sockDir);
    clientConf.set(DFS_CLIENT_CONTEXT, "testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
    clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
    Configuration serverConf = new Configuration(clientConf);
    serverConf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem dfs = FileSystem.get(cluster.getURI(0), clientConf);
    String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 8193;
    final int SEED = 0xFADED;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte[] contents = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    cluster.shutdown();
    sockDir.close();
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) Test(org.junit.Test)
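
One caveat that applies to every example on this page: cluster.shutdown() and sockDir.close() run only if all the assertions before them pass, so a failing test leaks the mini cluster and the socket directory. A try-with-resources arrangement is a natural hardening; the sketch below assumes TemporarySocketDirectory implements Closeable (it exposes a close() method) and reuses the createShortCircuitConf helper from these examples:

// Sketch: deterministic cleanup even when assertions throw.
try (TemporarySocketDirectory sockDir = new TemporarySocketDirectory()) {
    Configuration conf = createShortCircuitConf("myTest", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        cluster.waitActive();
        // ... test body; assertions may throw here ...
    } finally {
        // Runs even if an assertion above failed.
        cluster.shutdown();
    }
}
// sockDir.close() is invoked automatically by try-with-resources.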

Aggregations

Class (package): number of usages

TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory): 27
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 17
Configuration (org.apache.hadoop.conf.Configuration): 16
Test (org.junit.Test): 16
Path (org.apache.hadoop.fs.Path): 14
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 13
File (java.io.File): 8
BeforeClass (org.junit.BeforeClass): 8
IOException (java.io.IOException): 6
ShortCircuitCache (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache): 6
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 3
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 3
FileSystem (org.apache.hadoop.fs.FileSystem): 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 3
PerDatanodeVisitorInfo (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo): 3
Visitor (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor): 3
CacheVisitor (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor): 3
ShortCircuitReplicaInfo (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo): 3