Use of java.util.concurrent.atomic.AtomicBoolean in project hadoop by apache.
The class TestRPC, method testRPCInterrupted.
@Test(timeout = 30000)
public void testRPCInterrupted() throws Exception {
  Server server;
  RPC.Builder builder = newServerBuilder(conf)
      .setNumHandlers(5).setVerbose(true).setSecretManager(null);
  server = setupTestServer(builder);
  int numConcurrentRPC = 200;
  final CyclicBarrier barrier = new CyclicBarrier(numConcurrentRPC);
  final CountDownLatch latch = new CountDownLatch(numConcurrentRPC);
  final AtomicBoolean leaderRunning = new AtomicBoolean(true);
  final AtomicReference<Throwable> error = new AtomicReference<>();
  Thread leaderThread = null;
  try {
    for (int i = 0; i < numConcurrentRPC; i++) {
      final int num = i;
      final TestRpcService proxy = getClient(addr, conf);
      Thread rpcThread = new Thread(new Runnable() {
        @Override
        public void run() {
          try {
            barrier.await();
            while (num == 0 || leaderRunning.get()) {
              proxy.slowPing(null, newSlowPingRequest(false));
            }
            proxy.slowPing(null, newSlowPingRequest(false));
          } catch (Exception e) {
            if (num == 0) {
              leaderRunning.set(false);
            } else {
              error.set(e);
            }
            LOG.error("thread " + num, e);
          } finally {
            latch.countDown();
          }
        }
      });
      rpcThread.start();
      if (leaderThread == null) {
        leaderThread = rpcThread;
      }
    }
    // let threads get past the barrier
    Thread.sleep(1000);
    // stop a single thread
    while (leaderRunning.get()) {
      leaderThread.interrupt();
    }
    latch.await();
    // should not cause any other thread to get an error
    assertTrue("rpc got exception " + error.get(), error.get() == null);
  } finally {
    server.stop();
  }
}
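Distilled from the pattern above, here is a minimal standalone sketch, not part of the Hadoop test (the class name LeaderFlagSketch, the spin-wait body, and the thread counts are illustrative, and Thread.onSpinWait() assumes Java 9+): a leader thread clears a shared AtomicBoolean when it is interrupted, and follower threads poll that flag to decide when to stop, so interrupting one thread never surfaces as an error in the others.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class LeaderFlagSketch {
  public static void main(String[] args) throws Exception {
    final AtomicBoolean leaderRunning = new AtomicBoolean(true);
    final int followers = 3;
    final CountDownLatch done = new CountDownLatch(followers);
    Thread leader = new Thread(() -> {
      try {
        Thread.sleep(Long.MAX_VALUE);   // stand-in for a blocking RPC
      } catch (InterruptedException e) {
        leaderRunning.set(false);       // tell the followers to stop
      }
    });
    leader.start();
    for (int i = 0; i < followers; i++) {
      new Thread(() -> {
        while (leaderRunning.get()) {
          Thread.onSpinWait();          // stand-in for repeated RPC calls
        }
        done.countDown();
      }).start();
    }
    leader.interrupt();                 // only the leader is interrupted
    done.await();                       // every follower observes the cleared flag
    System.out.println("followers exited cleanly");
  }
}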
Use of java.util.concurrent.atomic.AtomicBoolean in project hadoop by apache.
The class TestRPC, method testExternalCall.
@Test(timeout = 30000)
public void testExternalCall() throws Exception {
  final UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting("user123", new String[0]);
  final IOException expectedIOE = new IOException("boom");
  // use 1 handler so the callq can be plugged
  final Server server = setupTestServer(conf, 1);
  try {
    final AtomicBoolean result = new AtomicBoolean();
    ExternalCall<String> remoteUserCall = newExtCall(ugi,
        new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
    ExternalCall<String> exceptionCall = newExtCall(ugi,
        new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            throw expectedIOE;
          }
        });
    final CountDownLatch latch = new CountDownLatch(1);
    final CyclicBarrier barrier = new CyclicBarrier(2);
    ExternalCall<Void> barrierCall = newExtCall(ugi,
        new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            // notify we are in a handler and then wait to keep the callq
            // plugged up
            latch.countDown();
            barrier.await();
            return null;
          }
        });
    server.queueCall(barrierCall);
    server.queueCall(exceptionCall);
    server.queueCall(remoteUserCall);
    // wait for barrier call to enter the handler, check that the other 2
    // calls are actually queued
    latch.await();
    assertEquals(2, server.getCallQueueLen());
    // unplug the callq
    barrier.await();
    barrierCall.get();
    // verify correct ugi is used
    String answer = remoteUserCall.get();
    assertEquals(ugi.getUserName(), answer);
    try {
      exceptionCall.get();
      fail("didn't throw");
    } catch (ExecutionException ee) {
      assertTrue(ee.getCause() instanceof IOException);
      assertEquals(expectedIOE.getMessage(), ee.getCause().getMessage());
    }
  } finally {
    server.stop();
  }
}
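The plug-the-queue idea can be reproduced without the Hadoop Server API. A minimal sketch, not the Hadoop test, assuming only java.util.concurrent (the class name PluggedQueueSketch and the use of ThreadPoolExecutor.getQueue() in place of Server.getCallQueueLen() are illustrative): a single-threaded executor is occupied by a barrier task, a later task is verified to be sitting in the queue, and the barrier is then released.

import java.util.concurrent.*;

public class PluggedQueueSketch {
  public static void main(String[] args) throws Exception {
    ThreadPoolExecutor executor =
        (ThreadPoolExecutor) Executors.newFixedThreadPool(1);
    final CountDownLatch inHandler = new CountDownLatch(1);
    final CyclicBarrier barrier = new CyclicBarrier(2);

    Future<?> barrierTask = executor.submit(() -> {
      inHandler.countDown();          // notify we occupy the only worker thread
      barrier.await();                // keep the queue plugged until released
      return null;
    });
    Future<String> queuedTask = executor.submit(() -> "done");

    inHandler.await();                // barrier task is running, queuedTask waits
    System.out.println("queued tasks: " + executor.getQueue().size()); // prints 1

    barrier.await();                  // unplug the queue
    barrierTask.get();
    System.out.println(queuedTask.get());
    executor.shutdown();
  }
}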
Use of java.util.concurrent.atomic.AtomicBoolean in project hadoop by apache.
The class TestFileConcurrentReader, method runTestUnfinishedBlockCRCError.
private void runTestUnfinishedBlockCRCError(final boolean transferToAllowed,
    final SyncType syncType, final int writeSize, Configuration conf)
    throws IOException {
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
      transferToAllowed);
  init(conf);
  final Path file = new Path("/block-being-written-to");
  final int numWrites = 2000;
  final AtomicBoolean writerDone = new AtomicBoolean(false);
  final AtomicBoolean writerStarted = new AtomicBoolean(false);
  final AtomicBoolean error = new AtomicBoolean(false);
  final Thread writer = new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        FSDataOutputStream outputStream = fileSystem.create(file);
        if (syncType == SyncType.APPEND) {
          outputStream.close();
          outputStream = fileSystem.append(file);
        }
        try {
          for (int i = 0; !error.get() && i < numWrites; i++) {
            final byte[] writeBuf =
                DFSTestUtil.generateSequentialBytes(i * writeSize, writeSize);
            outputStream.write(writeBuf);
            if (syncType == SyncType.SYNC) {
              outputStream.hflush();
            }
            writerStarted.set(true);
          }
        } catch (IOException e) {
          error.set(true);
          LOG.error("error writing to file", e);
        } finally {
          outputStream.close();
        }
        writerDone.set(true);
      } catch (Exception e) {
        LOG.error("error in writer", e);
        throw new RuntimeException(e);
      }
    }
  });
  Thread tailer = new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        long startPos = 0;
        while (!writerDone.get() && !error.get()) {
          if (writerStarted.get()) {
            try {
              startPos = tailFile(file, startPos);
            } catch (IOException e) {
              LOG.error(String.format("error tailing file %s", file), e);
              throw new RuntimeException(e);
            }
          }
        }
      } catch (RuntimeException e) {
        if (e.getCause() instanceof ChecksumException) {
          error.set(true);
        }
        writer.interrupt();
        LOG.error("error in tailer", e);
        throw e;
      }
    }
  });
  writer.start();
  tailer.start();
  try {
    writer.join();
    tailer.join();
    assertFalse("error occurred, see log above", error.get());
  } catch (InterruptedException e) {
    LOG.info("interrupted waiting for writer or tailer to complete");
    Thread.currentThread().interrupt();
  }
}
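The flag handshake between the writer and the tailer can be shown in isolation. A minimal sketch, not the HDFS test, using plain JDK types in place of HDFS streams (the class name WriterTailerSketch and the AtomicInteger stand-in for file contents are illustrative): writerStarted, writerDone, and error are shared AtomicBooleans that let either thread stop as soon as the other finishes or fails.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class WriterTailerSketch {
  public static void main(String[] args) throws Exception {
    final AtomicBoolean writerStarted = new AtomicBoolean(false);
    final AtomicBoolean writerDone = new AtomicBoolean(false);
    final AtomicBoolean error = new AtomicBoolean(false);
    final AtomicInteger written = new AtomicInteger();

    Thread writer = new Thread(() -> {
      try {
        for (int i = 0; !error.get() && i < 1000; i++) {
          written.incrementAndGet();      // stand-in for outputStream.write(...)
          writerStarted.set(true);        // the tailer may begin reading
        }
      } catch (RuntimeException e) {
        error.set(true);                  // surface the failure to the tailer
      } finally {
        writerDone.set(true);
      }
    });

    Thread tailer = new Thread(() -> {
      int seen = 0;
      while (!writerDone.get() && !error.get()) {
        if (writerStarted.get()) {
          seen = written.get();           // stand-in for tailFile(file, startPos)
        }
      }
      System.out.println("tailer saw at least " + seen + " writes");
    });

    writer.start();
    tailer.start();
    writer.join();
    tailer.join();
    System.out.println("error flag: " + error.get());
  }
}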
Use of java.util.concurrent.atomic.AtomicBoolean in project hadoop by apache.
The class TestSaslDataTransfer, method TestPeerFromSocketAndKeyReadTimeout.
/**
 * Verifies that peerFromSocketAndKey honors socket read timeouts.
 */
@Test(timeout = 60000)
public void TestPeerFromSocketAndKeyReadTimeout() throws Exception {
  HdfsConfiguration conf = createSecureConfig("authentication,integrity,privacy");
  AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
  SaslDataTransferClient saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), fallbackToSimpleAuth);
  DatanodeID fakeDatanodeId = new DatanodeID("127.0.0.1", "localhost",
      "beefbeef-beef-beef-beef-beefbeefbeef", 1, 2, 3, 4);
  DataEncryptionKeyFactory dataEncKeyFactory = new DataEncryptionKeyFactory() {
    @Override
    public DataEncryptionKey newDataEncryptionKey() {
      return new DataEncryptionKey(123, "456", new byte[8], new byte[8],
          1234567, "fakeAlgorithm");
    }
  };
  ServerSocket serverSocket = null;
  Socket socket = null;
  try {
    serverSocket = new ServerSocket(0, -1);
    socket = new Socket(serverSocket.getInetAddress(),
        serverSocket.getLocalPort());
    Peer peer = DFSUtilClient.peerFromSocketAndKey(saslClient, socket,
        dataEncKeyFactory, new Token(), fakeDatanodeId, 1);
    peer.close();
    Assert.fail("Expected DFSClient#peerFromSocketAndKey to time out.");
  } catch (SocketTimeoutException e) {
    GenericTestUtils.assertExceptionContains("Read timed out", e);
  } finally {
    IOUtils.cleanup(null, socket, serverSocket);
  }
}
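The timeout condition this test relies on can be reproduced with plain sockets. A minimal sketch, not the Hadoop test and with no SASL handshake at all (the class name ReadTimeoutSketch and the 1-second timeout are illustrative): a ServerSocket that never accepts or writes, so a client read governed by setSoTimeout fails with SocketTimeoutException, which is the condition peerFromSocketAndKey is expected to surface.

import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class ReadTimeoutSketch {
  public static void main(String[] args) throws IOException {
    try (ServerSocket serverSocket =
             new ServerSocket(0, 50, InetAddress.getLoopbackAddress());
         Socket socket = new Socket(serverSocket.getInetAddress(),
             serverSocket.getLocalPort())) {
      socket.setSoTimeout(1000);            // 1 second read timeout
      try {
        socket.getInputStream().read();     // nobody ever writes to this socket
        System.out.println("unexpected: read returned");
      } catch (SocketTimeoutException e) {
        System.out.println("got expected timeout: " + e.getMessage());
      }
    }
  }
}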
Use of java.util.concurrent.atomic.AtomicBoolean in project hadoop by apache.
The class TestLazyPersistFiles, method testConcurrentWrites.
/**
 * Concurrent write with eviction.
 * RAM_DISK can hold 9 replicas; 4 threads each write 5 replicas.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testConcurrentWrites() throws IOException, InterruptedException {
  getClusterBuilder().setRamDiskReplicaCapacity(9).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final int SEED = 0xFADED;
  final int NUM_WRITERS = 4;
  final int NUM_WRITER_PATHS = 5;
  Path[][] paths = new Path[NUM_WRITERS][NUM_WRITER_PATHS];
  for (int i = 0; i < NUM_WRITERS; i++) {
    paths[i] = new Path[NUM_WRITER_PATHS];
    for (int j = 0; j < NUM_WRITER_PATHS; j++) {
      paths[i][j] = new Path("/" + METHOD_NAME + ".Writer" + i + ".File." + j + ".dat");
    }
  }
  final CountDownLatch latch = new CountDownLatch(NUM_WRITERS);
  final AtomicBoolean testFailed = new AtomicBoolean(false);
  ExecutorService executor = Executors.newFixedThreadPool(THREADPOOL_SIZE);
  for (int i = 0; i < NUM_WRITERS; i++) {
    Runnable writer = new WriterRunnable(i, paths[i], SEED, latch, testFailed);
    executor.execute(writer);
  }
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();
  // Stop executor from adding new tasks to finish existing threads in queue
  latch.await();
  assertThat(testFailed.get(), is(false));
}
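The AtomicBoolean here is a shared failure flag: each WriterRunnable sets it on error and counts down the latch, and the test asserts it only after all writers have finished. A minimal standalone sketch of that arrangement, not the Hadoop test, with trivial worker bodies (the class name ConcurrentWritersSketch and the printouts are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConcurrentWritersSketch {
  public static void main(String[] args) throws InterruptedException {
    final int numWriters = 4;
    final CountDownLatch latch = new CountDownLatch(numWriters);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    ExecutorService executor = Executors.newFixedThreadPool(numWriters);

    for (int i = 0; i < numWriters; i++) {
      final int id = i;
      executor.execute(() -> {
        try {
          // stand-in for writing this writer's files
          System.out.println("writer " + id + " finished");
        } catch (RuntimeException e) {
          testFailed.set(true);   // any writer failure flips the shared flag
        } finally {
          latch.countDown();
        }
      });
    }

    latch.await();                // wait for all writers before checking the flag
    executor.shutdown();
    System.out.println("testFailed = " + testFailed.get());
  }
}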