Use of java.util.concurrent.atomic.AtomicBoolean in project flink by apache.
The class KvStateClientTest, method testServerClosesChannel.
/**
 * Tests that a server channel close closes the connection and removes it
 * from the set of established connections.
 */
@Test
public void testServerClosesChannel() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    KvStateClient client = null;
    Channel serverChannel = null;
    try {
        client = new KvStateClient(1, stats);
        final AtomicBoolean received = new AtomicBoolean();
        final AtomicReference<Channel> channel = new AtomicReference<>();
        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                channel.set(ctx.channel());
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                received.set(true);
            }
        });
        KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);
        // Requests
        Future<byte[]> future = client.getKvState(serverAddress, new KvStateID(), new byte[0]);
        while (!received.get() && deadline.hasTimeLeft()) {
            Thread.sleep(50);
        }
        assertTrue("Receive timed out", received.get());
        assertEquals(1, stats.getNumConnections());
        channel.get().close().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        try {
            Await.result(future, deadline.timeLeft());
            fail("Did not throw expected server failure");
        } catch (ClosedChannelException ignored) {
            // Expected
        }
        assertEquals(0, stats.getNumConnections());
        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && (stats.getNumSuccessful() != 0 || stats.getNumFailed() != 1)) {
            Thread.sleep(100);
        }
        assertEquals(1, stats.getNumRequests());
        assertEquals(0, stats.getNumSuccessful());
        assertEquals(1, stats.getNumFailed());
    } finally {
        if (client != null) {
            client.shutDown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}
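The received flag in this test is set from a Netty I/O thread and polled from the test thread until a deadline expires. As a standalone illustration of that AtomicBoolean polling idiom, here is a minimal sketch; the class name FlagPollingSketch, the helper waitUntilTrue, the poll interval, and the 5 second timeout are illustrative choices, not part of the Flink code.

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

public final class FlagPollingSketch {

    private static final long POLL_INTERVAL_MS = 50;

    /** Polls the flag until it becomes true or the timeout (in ms) expires. */
    static void waitUntilTrue(AtomicBoolean flag, long timeoutMillis)
            throws InterruptedException, TimeoutException {
        long deadline = System.nanoTime() + timeoutMillis * 1_000_000L;
        while (!flag.get()) {
            if (System.nanoTime() >= deadline) {
                throw new TimeoutException("Flag was not set within " + timeoutMillis + " ms");
            }
            Thread.sleep(POLL_INTERVAL_MS);
        }
    }

    public static void main(String[] args) throws Exception {
        AtomicBoolean received = new AtomicBoolean();
        // Simulate the server-side handler thread flipping the flag.
        new Thread(() -> {
            try {
                Thread.sleep(200);
            } catch (InterruptedException ignored) {
                // not relevant for the sketch
            }
            received.set(true);
        }).start();
        waitUntilTrue(received, 5_000);
        System.out.println("received = " + received.get());
    }
}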
Use of java.util.concurrent.atomic.AtomicBoolean in project hadoop by apache.
The class TestHostnameFilter, method testMissingHostname.
@Test
public void testMissingHostname() throws Exception {
    ServletRequest request = Mockito.mock(ServletRequest.class);
    Mockito.when(request.getRemoteAddr()).thenReturn(null);
    ServletResponse response = Mockito.mock(ServletResponse.class);
    final AtomicBoolean invoked = new AtomicBoolean();
    FilterChain chain = new FilterChain() {
        @Override
        public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
            assertTrue(HostnameFilter.get().contains("???"));
            invoked.set(true);
        }
    };
    Filter filter = new HostnameFilter();
    filter.init(null);
    assertNull(HostnameFilter.get());
    filter.doFilter(request, response, chain);
    assertTrue(invoked.get());
    assertNull(HostnameFilter.get());
    filter.destroy();
}
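The test cannot observe directly whether its anonymous FilterChain was entered, so it records that fact in a final AtomicBoolean and asserts the flag after doFilter returns. A minimal, self-contained sketch of this "invoked flag" idiom follows; Callback and codeUnderTest are hypothetical stand-ins for FilterChain and the filter, not Hadoop classes.

import java.util.concurrent.atomic.AtomicBoolean;

public final class InvokedFlagSketch {

    /** Hypothetical callback interface standing in for FilterChain. */
    interface Callback {
        void run() throws Exception;
    }

    /** Hypothetical code under test that is expected to invoke the callback exactly once. */
    static void codeUnderTest(Callback callback) throws Exception {
        callback.run();
    }

    public static void main(String[] args) throws Exception {
        final AtomicBoolean invoked = new AtomicBoolean();
        codeUnderTest(() -> {
            // Assertions about the state visible to the callback would go here.
            invoked.set(true);
        });
        if (!invoked.get()) {
            throw new AssertionError("callback was never invoked");
        }
        System.out.println("callback invoked = " + invoked.get());
    }
}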
Use of java.util.concurrent.atomic.AtomicBoolean in project hadoop by apache.
The class TestMDCFilter, method mdc.
@Test
public void mdc() throws Exception {
    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
    Mockito.when(request.getUserPrincipal()).thenReturn(null);
    Mockito.when(request.getMethod()).thenReturn("METHOD");
    Mockito.when(request.getPathInfo()).thenReturn("/pathinfo");
    ServletResponse response = Mockito.mock(ServletResponse.class);
    final AtomicBoolean invoked = new AtomicBoolean();
    FilterChain chain = new FilterChain() {
        @Override
        public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
            assertEquals(MDC.get("hostname"), null);
            assertEquals(MDC.get("user"), null);
            assertEquals(MDC.get("method"), "METHOD");
            assertEquals(MDC.get("path"), "/pathinfo");
            invoked.set(true);
        }
    };
    MDC.clear();
    Filter filter = new MDCFilter();
    filter.init(null);
    filter.doFilter(request, response, chain);
    assertTrue(invoked.get());
    assertNull(MDC.get("hostname"));
    assertNull(MDC.get("user"));
    assertNull(MDC.get("method"));
    assertNull(MDC.get("path"));
    Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() {
        @Override
        public String getName() {
            return "name";
        }
    });
    invoked.set(false);
    chain = new FilterChain() {
        @Override
        public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
            assertEquals(MDC.get("hostname"), null);
            assertEquals(MDC.get("user"), "name");
            assertEquals(MDC.get("method"), "METHOD");
            assertEquals(MDC.get("path"), "/pathinfo");
            invoked.set(true);
        }
    };
    filter.doFilter(request, response, chain);
    assertTrue(invoked.get());
    HostnameFilter.HOSTNAME_TL.set("HOST");
    invoked.set(false);
    chain = new FilterChain() {
        @Override
        public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
            assertEquals(MDC.get("hostname"), "HOST");
            assertEquals(MDC.get("user"), "name");
            assertEquals(MDC.get("method"), "METHOD");
            assertEquals(MDC.get("path"), "/pathinfo");
            invoked.set(true);
        }
    };
    filter.doFilter(request, response, chain);
    assertTrue(invoked.get());
    HostnameFilter.HOSTNAME_TL.remove();
    filter.destroy();
}
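For context, a filter of this kind typically fills the SLF4J MDC before delegating to the chain and clears it on the way out, which is why the test sees the values inside the chain but nulls afterwards. The sketch below shows one way such a filter could be written; it is an assumption for illustration, not the actual Hadoop MDCFilter, and it omits the "hostname" entry that the real filter takes from HostnameFilter's thread-local.

import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import org.slf4j.MDC;

/** Illustrative filter that puts request data into the MDC and clears it when the chain returns. */
public class MdcPopulatingFilterSketch implements Filter {

    @Override
    public void init(FilterConfig config) {
        // nothing to configure in this sketch
    }

    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        try {
            if (request instanceof HttpServletRequest) {
                HttpServletRequest http = (HttpServletRequest) request;
                MDC.put("method", http.getMethod());
                if (http.getPathInfo() != null) {
                    MDC.put("path", http.getPathInfo());
                }
                if (http.getUserPrincipal() != null) {
                    MDC.put("user", http.getUserPrincipal().getName());
                }
            }
            chain.doFilter(request, response);
        } finally {
            // Clearing in a finally block is what makes the post-conditions in the test hold.
            MDC.clear();
        }
    }

    @Override
    public void destroy() {
        // nothing to clean up in this sketch
    }
}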
Use of java.util.concurrent.atomic.AtomicBoolean in project hadoop by apache.
The class TestBlockReaderFactory, method testShortCircuitCacheTemporaryFailure.
/**
 * Test the case where a short-circuit read fails at first and then
 * succeeds on a later attempt.
 * Any thread waiting on a cache load should receive the failure (if it
 * occurs); however, the failure result should not be cached. We want
 * to be able to retry later and succeed.
 */
@Test(timeout = 60000)
public void testShortCircuitCacheTemporaryFailure() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicBoolean replicaCreationShouldFail = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            if (replicaCreationShouldFail.get()) {
                // Insert a short delay to increase the chance that one client
                // thread waits for the other client thread's failure via
                // a condition variable.
                Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
                return new ShortCircuitReplicaInfo();
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShortCircuitCacheTemporaryFailure", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int NUM_THREADS = 2;
    final int SEED = 0xFADED;
    final CountDownLatch gotFailureLatch = new CountDownLatch(NUM_THREADS);
    final CountDownLatch shouldRetryLatch = new CountDownLatch(1);
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                // First time should fail.
                List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
                // first block
                LocatedBlock lblock = locatedBlocks.get(0);
                BlockReader blockReader = null;
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
                    Assert.fail("expected getBlockReader to fail the first time.");
                } catch (Throwable t) {
                    Assert.assertTrue("expected to see 'TCP reads were disabled " + "for testing' in exception " + t, t.getMessage().contains("TCP reads were disabled for testing"));
                } finally {
                    // keep findbugs happy
                    if (blockReader != null)
                        blockReader.close();
                }
                gotFailureLatch.countDown();
                shouldRetryLatch.await();
                // Second time should succeed.
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
                } catch (Throwable t) {
                    LOG.error("error trying to retrieve a block reader " + "the second time.", t);
                    throw t;
                } finally {
                    if (blockReader != null)
                        blockReader.close();
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
            }
        }
    };
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    gotFailureLatch.await();
    replicaCreationShouldFail.set(false);
    shouldRetryLatch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
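A failure thrown inside readerRunnable would only kill its own thread, not the test, which is why the catch block records it in the shared testFailed flag that the main thread checks after joining the workers. A stripped-down sketch of that pattern, with WorkerFailureFlagSketch as an illustrative name and the actual block-reading work elided:

import java.util.concurrent.atomic.AtomicBoolean;

public final class WorkerFailureFlagSketch {

    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean testFailed = new AtomicBoolean(false);
        final int numThreads = 2;
        Thread[] threads = new Thread[numThreads];
        for (int i = 0; i < numThreads; i++) {
            threads[i] = new Thread(() -> {
                try {
                    // Work that may throw goes here (reading a block, in the real test).
                } catch (Throwable t) {
                    // An exception in a worker thread does not fail the main thread,
                    // so record it in a flag that the main thread checks after join().
                    testFailed.set(true);
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
        if (testFailed.get()) {
            throw new AssertionError("a worker thread reported a failure");
        }
        System.out.println("all workers succeeded");
    }
}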
Use of java.util.concurrent.atomic.AtomicBoolean in project hadoop by apache.
The class TestBlockReaderFactory, method testMultipleWaitersOnShortCircuitCache.
/**
 * Test the case where we have multiple threads waiting on the
 * ShortCircuitCache delivering a certain ShortCircuitReplica.
 *
 * In this case, there should only be one call to
 * createShortCircuitReplicaInfo. This one replica should be shared
 * by all threads.
 */
@Test(timeout = 60000)
public void testMultipleWaitersOnShortCircuitCache() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            Uninterruptibles.awaitUninterruptibly(latch);
            if (!creationIsBlocked.compareAndSet(true, false)) {
                Assert.fail("there were multiple calls to " + "createShortCircuitReplicaInfo. Only one was expected.");
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testMultipleWaitersOnShortCircuitCache", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADED;
    final int NUM_THREADS = 10;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                byte[] contents = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
                Assert.assertFalse(creationIsBlocked.get());
                byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
                Assert.assertTrue(Arrays.equals(contents, expected));
            } catch (Throwable e) {
                LOG.error("readerRunnable error", e);
                testFailed.set(true);
            }
        }
    };
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    Thread.sleep(500);
    latch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
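Here creationIsBlocked.compareAndSet(true, false) doubles as an atomic "only once" guard: exactly one caller can win the swap, so any second call to createShortCircuitReplicaInfo fails the test. A small standalone sketch of that guard, with SingleInvocationSketch and expensiveOperationExpectedOnce as illustrative names:

import java.util.concurrent.atomic.AtomicBoolean;

public final class SingleInvocationSketch {

    private static final AtomicBoolean firstCall = new AtomicBoolean(true);

    /** Fails if invoked more than once, regardless of which threads do the invoking. */
    static void expensiveOperationExpectedOnce() {
        if (!firstCall.compareAndSet(true, false)) {
            throw new AssertionError("expected exactly one call, but a second one happened");
        }
        // The real work (creating the shared replica, in the HDFS test) would go here.
    }

    public static void main(String[] args) {
        expensiveOperationExpectedOnce();
        System.out.println("flag after first call: " + firstCall.get());
        try {
            expensiveOperationExpectedOnce();
        } catch (AssertionError expected) {
            System.out.println("second call correctly rejected: " + expected.getMessage());
        }
    }
}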