Use of org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache in project hadoop by apache.
The class TestBlockReaderFactory, method testShortCircuitReadFromClientWithoutShm.
/**
 * Test that a client which does not support short-circuit reads using
 * shared memory can talk with a server which supports it.
 */
@Test
public void testShortCircuitReadFromClientWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf = createShortCircuitConf(
      "testShortCircuitReadWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  clientConf.setInt(
      DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testShortCircuitReadFromClientWithoutShm_clientContext");
  final DistributedFileSystem fs = (DistributedFileSystem)
      FileSystem.get(cluster.getURI(0), clientConf);
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADEC;
  DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short) 1, SEED);
  byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
  byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  final ShortCircuitCache cache =
      fs.getClient().getClientContext().getShortCircuitCache();
  Assert.assertEquals(null, cache.getDfsClientShmManager());
  cluster.shutdown();
  sockDir.close();
}
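This test, like the three that follow, builds its Configuration with a createShortCircuitConf helper that is defined elsewhere in TestBlockReaderFactory and not shown on this page. A minimal sketch of what such a helper typically sets follows; the key names are the standard HDFS ones, but the body itself is an assumption, not the real method:

// Hypothetical sketch of the helper used by these tests. The real method
// lives in TestBlockReaderFactory and may set additional keys.
public static Configuration createShortCircuitConf(String testName,
    TemporarySocketDirectory sockDir) {
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_CONTEXT, testName);
  // Give each test its own UNIX domain socket under the temp directory.
  conf.set(DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(), testName + "._PORT").getAbsolutePath());
  // Turn on short-circuit local reads; keep checksum verification on.
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
      false);
  DomainSocket.disableBindPathValidation();
  return conf;
}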
Use of org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache in project hadoop by apache.
The class TestBlockReaderFactory, method testMultipleWaitersOnShortCircuitCache.
/**
 * Test the case where we have multiple threads waiting on the
 * ShortCircuitCache delivering a certain ShortCircuitReplica.
 *
 * In this case, there should only be one call to
 * createShortCircuitReplicaInfo. This one replica should be shared
 * by all threads.
 */
@Test(timeout = 60000)
public void testMultipleWaitersOnShortCircuitCache() throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
  final AtomicBoolean testFailed = new AtomicBoolean(false);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  BlockReaderFactory.createShortCircuitReplicaInfoCallback =
      new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
          Uninterruptibles.awaitUninterruptibly(latch);
          if (!creationIsBlocked.compareAndSet(true, false)) {
            Assert.fail("there were multiple calls to "
                + "createShortCircuitReplicaInfo. Only one was expected.");
          }
          return null;
        }
      };
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf =
      createShortCircuitConf("testMultipleWaitersOnShortCircuitCache", sockDir);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  final DistributedFileSystem dfs = cluster.getFileSystem();
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADED;
  final int NUM_THREADS = 10;
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short) 1, SEED);
  Runnable readerRunnable = new Runnable() {
    @Override
    public void run() {
      try {
        byte[] contents = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
        Assert.assertFalse(creationIsBlocked.get());
        byte[] expected =
            DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
        Assert.assertTrue(Arrays.equals(contents, expected));
      } catch (Throwable e) {
        LOG.error("readerRunnable error", e);
        testFailed.set(true);
      }
    }
  };
  Thread[] threads = new Thread[NUM_THREADS];
  for (int i = 0; i < NUM_THREADS; i++) {
    threads[i] = new Thread(readerRunnable);
    threads[i].start();
  }
  Thread.sleep(500);
  latch.countDown();
  for (int i = 0; i < NUM_THREADS; i++) {
    Uninterruptibles.joinUninterruptibly(threads[i]);
  }
  cluster.shutdown();
  sockDir.close();
  Assert.assertFalse(testFailed.get());
}
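The synchronization idiom here is easy to miss: every reader thread blocks inside the cache waiting for one replica, the latch holds that single creation open until all ten threads have piled up, and the AtomicBoolean proves the creator ran exactly once. The same "one loader, many waiters" semantics can be seen in isolation with ConcurrentHashMap.computeIfAbsent; all names below are illustrative, none are from Hadoop:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class OneCreationManyWaiters {
  public static void main(String[] args) throws InterruptedException {
    ConcurrentHashMap<String, String> cache = new ConcurrentHashMap<>();
    CountDownLatch latch = new CountDownLatch(1);
    AtomicInteger creations = new AtomicInteger();

    Runnable reader = () -> {
      // computeIfAbsent runs the loader at most once per key; every other
      // thread asking for the same key blocks until the result is in.
      String replica = cache.computeIfAbsent("block-1", key -> {
        try {
          latch.await(); // keep the single creation blocked, as in the test
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        creations.incrementAndGet();
        return "replica-for-" + key;
      });
      System.out.println(Thread.currentThread().getName() + " got " + replica);
    };

    Thread[] threads = new Thread[10];
    for (int i = 0; i < threads.length; i++) {
      threads[i] = new Thread(reader);
      threads[i].start();
    }
    Thread.sleep(500); // let all threads pile up on the same key
    latch.countDown(); // release the one in-flight creation
    for (Thread t : threads) {
      t.join();
    }
    System.out.println("creations = " + creations.get()); // prints 1
  }
}

Blocking inside computeIfAbsent is normally discouraged, but it mirrors the guarantee the ShortCircuitCache provides here: threads requesting the same block wait for the single in-flight creation instead of starting their own.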
Use of org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache in project hadoop by apache.
The class TestBlockReaderFactory, method testShortCircuitReadFromServerWithoutShm.
/**
 * Test that a client which supports short-circuit reads using
 * shared memory can fall back to not using shared memory when
 * the server doesn't support it.
 */
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf = createShortCircuitConf(
      "testShortCircuitReadFromServerWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  serverConf.setInt(
      DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testShortCircuitReadFromServerWithoutShm_clientContext");
  final DistributedFileSystem fs = (DistributedFileSystem)
      FileSystem.get(cluster.getURI(0), clientConf);
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADEC;
  DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short) 1, SEED);
  byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
  byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  final ShortCircuitCache cache =
      fs.getClient().getClientContext().getShortCircuitCache();
  final DatanodeInfo datanode = new DatanodeInfoBuilder()
      .setNodeID(cluster.getDataNodes().get(0).getDatanodeId()).build();
  cache.getDfsClientShmManager().visit(new Visitor() {
    @Override
    public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
        throws IOException {
      Assert.assertEquals(1, info.size());
      PerDatanodeVisitorInfo vinfo = info.get(datanode);
      Assert.assertTrue(vinfo.disabled);
      Assert.assertEquals(0, vinfo.full.size());
      Assert.assertEquals(0, vinfo.notFull.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
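This test and the first one disable shared memory with the same knob, DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS = 0, applied to different sides. On the client, the ShortCircuitCache only constructs a DfsClientShmManager for a positive interval, which is why the first test asserts a null manager. On the DataNode, the registry comes up without shared-memory support, slot requests fail, and the client marks that datanode disabled, which is what the Visitor above verifies. The client-side guard looks roughly like the following paraphrase of the ShortCircuitCache constructor; it is from memory, not an exact quote:

// Paraphrased guard, assuming a field shmInterruptCheckMs read from the
// configuration key above.
DfsClientShmManager manager = null;
if (shmInterruptCheckMs > 0) {
  try {
    manager = new DfsClientShmManager(shmInterruptCheckMs);
  } catch (IOException e) {
    LOG.error("failed to create ShortCircuitShmManager", e);
  }
}
this.shmManager = manager;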
Use of org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache in project hadoop by apache.
The class TestBlockReaderFactory, method testShortCircuitCacheShutdown.
/**
 * Test shutting down the ShortCircuitCache while there are things in it.
 */
@Test
public void testShortCircuitCacheShutdown() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf =
      createShortCircuitConf("testShortCircuitCacheShutdown", sockDir);
  conf.set(DFS_CLIENT_CONTEXT, "testShortCircuitCacheShutdown");
  Configuration serverConf = new Configuration(conf);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  final DistributedFileSystem fs = (DistributedFileSystem)
      FileSystem.get(cluster.getURI(0), conf);
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADEC;
  DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short) 1, SEED);
  byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
  byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  final ShortCircuitCache cache =
      fs.getClient().getClientContext().getShortCircuitCache();
  cache.close();
  Assert.assertTrue(
      cache.getDfsClientShmManager().getDomainSocketWatcher().isClosed());
  cluster.shutdown();
  sockDir.close();
}
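One caveat that applies to all four tests as shown: cluster.shutdown() and sockDir.close() sit at the end of the method, so a failed assertion leaks the MiniDFSCluster and the socket directory for the rest of the JVM run. A more defensive shape, offered here as a suggested restructuring rather than how the upstream file is written, would be:

TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
MiniDFSCluster cluster = null;
try {
  Configuration conf =
      createShortCircuitConf("testShortCircuitCacheShutdown", sockDir);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  // ... body of the test ...
} finally {
  // Clean up even when an assertion fails mid-test.
  if (cluster != null) {
    cluster.shutdown();
  }
  sockDir.close();
}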
Use of org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache in project hadoop by apache.
The class BlockReaderFactory, method createShortCircuitReplicaInfo.
/**
 * Fetch a pair of short-circuit block descriptors from a local DataNode.
 *
 * @return Null if we could not communicate with the datanode,
 *         a new ShortCircuitReplicaInfo object otherwise.
 *         ShortCircuitReplicaInfo objects may contain either an
 *         InvalidToken exception, or a ShortCircuitReplica object ready to
 *         use.
 */
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
  if (createShortCircuitReplicaInfoCallback != null) {
    ShortCircuitReplicaInfo info =
        createShortCircuitReplicaInfoCallback.createShortCircuitReplicaInfo();
    if (info != null) {
      return info;
    }
  }
  LOG.trace("{}: trying to create ShortCircuitReplicaInfo.", this);
  BlockReaderPeer curPeer;
  while (true) {
    curPeer = nextDomainPeer();
    if (curPeer == null) {
      break;
    }
    if (curPeer.fromCache) {
      remainingCacheTries--;
    }
    DomainPeer peer = (DomainPeer) curPeer.peer;
    Slot slot = null;
    ShortCircuitCache cache = clientContext.getShortCircuitCache();
    try {
      MutableBoolean usedPeer = new MutableBoolean(false);
      slot = cache.allocShmSlot(datanode, peer, usedPeer,
          new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId()),
          clientName);
      if (usedPeer.booleanValue()) {
        LOG.trace("{}: allocShmSlot used up our previous socket {}. "
            + "Allocating a new one...", this, peer.getDomainSocket());
        curPeer = nextDomainPeer();
        if (curPeer == null) {
          break;
        }
        peer = (DomainPeer) curPeer.peer;
      }
      ShortCircuitReplicaInfo info = requestFileDescriptors(peer, slot);
      clientContext.getPeerCache().put(datanode, peer);
      return info;
    } catch (IOException e) {
      if (slot != null) {
        cache.freeSlot(slot);
      }
      if (curPeer.fromCache) {
        // Handle an I/O error we got when using a cached socket.
        // These are considered less serious, because the socket may be stale.
        LOG.debug("{}: closing stale domain peer {}", this, peer, e);
        IOUtilsClient.cleanup(LOG, peer);
      } else {
        // Handle an I/O error we got when using a newly created socket.
        // We temporarily disable the domain socket path for a few minutes in
        // this case, to prevent wasting more time on it.
        LOG.warn(this + ": I/O error requesting file descriptors. "
            + "Disabling domain socket " + peer.getDomainSocket(), e);
        IOUtilsClient.cleanup(LOG, peer);
        clientContext.getDomainSocketFactory()
            .disableDomainSocketPath(pathInfo.getPath());
        return null;
      }
    }
  }
  return null;
}
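For context, callers do not invoke createShortCircuitReplicaInfo() directly. BlockReaderFactory implements ShortCircuitReplicaCreator and hands itself to the cache, which calls back only when no replica for the block is already cached; this is also the hook the testMultipleWaitersOnShortCircuitCache callback exploits. The call site looks roughly like the following sketch of the fetch path in BlockReaderFactory; exact details vary across Hadoop versions:

ShortCircuitCache cache = clientContext.getShortCircuitCache();
ExtendedBlockId key =
    new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
// fetchOrCreate returns a cached replica when one exists; otherwise it
// runs createShortCircuitReplicaInfo() above on exactly one thread while
// other threads asking for the same block wait for that result.
ShortCircuitReplicaInfo info = cache.fetchOrCreate(key, this);
if (info == null) {
  return null; // could not talk to the DataNode; caller falls back to TCP
}
if (info.getInvalidTokenException() != null) {
  throw info.getInvalidTokenException(); // block token was rejected
}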