Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.
Class TestMultithreadedTestUtil, method testRepeatingThread.
@Test
public void testRepeatingThread() throws Exception {
  final AtomicInteger counter = new AtomicInteger();
  TestContext ctx = new TestContext();
  ctx.addThread(new RepeatingTestThread(ctx) {

    @Override
    public void doAnAction() throws Exception {
      counter.incrementAndGet();
    }
  });
  ctx.startThreads();
  long st = Time.now();
  ctx.waitFor(3000);
  ctx.stop();
  long et = Time.now();
  long elapsed = et - st;
  // Test should have waited just about 3 seconds
  assertTrue("Test took " + (et - st) + "ms", Math.abs(elapsed - 3000) < 500);
  // Counter should have been incremented lots of times in 3 full seconds
  assertTrue("Counter value = " + counter.get(), counter.get() > 1000);
}
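The counter climbs well past 1000 because a RepeatingTestThread calls doAnAction() in a tight loop until the context is told to stop. A simplified sketch of that looping behavior follows; the field and method names are approximations of the real MultithreadedTestUtil class, not copied from it.

// Simplified sketch only; the actual RepeatingTestThread may differ in names and details.
public abstract static class RepeatingTestThread extends TestingThread {
  public RepeatingTestThread(TestContext ctx) {
    super(ctx);
  }

  // Repeatedly invoke the user-supplied action until the test context stops us.
  @Override
  public final void doWork() throws Exception {
    while (ctx.shouldRun() && !stopped) {
      doAnAction();
    }
  }

  // Subclasses override this with the action to repeat.
  public abstract void doAnAction() throws Exception;
}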
Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.
Class TestMultithreadedTestUtil, method testThreadThrowsCheckedException.
@Test
public void testThreadThrowsCheckedException() throws Exception {
  TestContext ctx = new TestContext();
  ctx.addThread(new TestingThread(ctx) {

    @Override
    public void doWork() throws Exception {
      throw new IOException("my ioe");
    }
  });
  ctx.startThreads();
  long st = Time.now();
  try {
    ctx.waitFor(30000);
    fail("waitFor did not throw");
  } catch (RuntimeException rte) {
    // expected
    assertEquals("my ioe", rte.getCause().getMessage());
  }
  long et = Time.now();
  // Test shouldn't have waited the full 30 seconds, since
  // the thread throws faster than that
  assertTrue("Test took " + (et - st) + "ms", et - st < 5000);
}
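The IOException thrown from doWork() surfaces as the cause of a RuntimeException because the context records the first failure reported by a worker thread and rethrows it, wrapped, when waitFor() polls for errors. A rough sketch of that mechanism is below; the names are illustrative and not the exact Hadoop implementation.

// Illustrative sketch; the real TestContext keeps more state than this.
private Throwable err = null;

// Called by a TestingThread when its doWork() throws.
public synchronized void threadFailed(Throwable t) {
  err = t;
  notifyAll();
}

// Polled from waitFor(): rethrows the recorded failure wrapped in a
// RuntimeException, so callers see the original error as getCause().
public synchronized void checkException() {
  if (err != null) {
    throw new RuntimeException("Deferred", err);
  }
}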
Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.
Class TestDFSZKFailoverController, method setup.
@Before
public void setup() throws Exception {
  conf = new Configuration();
  // Specify the quorum per-nameservice, to ensure that these configs
  // can be nameservice-scoped.
  conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1", hostPort);
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
      AlwaysSucceedFencer.class.getName());
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
  // Get random port numbers in advance. Because the ZKFCs and DFSHAAdmin
  // need the RPC port numbers of all ZKFCs, setting 0 does not work here.
  conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1",
      ServerSocketUtil.getPort(10023, 100));
  conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",
      ServerSocketUtil.getPort(10024, 100));
  // Prefer non-ephemeral ports to avoid port collisions on restartNameNode.
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn1")
              .setIpcPort(ServerSocketUtil.getPort(10021, 100)))
          .addNN(new MiniDFSNNTopology.NNConf("nn2")
              .setIpcPort(ServerSocketUtil.getPort(10022, 100))));
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();
  ctx = new TestContext();
  ctx.addThread(thr1 = new ZKFCThread(ctx, 0));
  assertEquals(0, thr1.zkfc.run(new String[] { "-formatZK" }));
  thr1.start();
  waitForHAState(0, HAServiceState.ACTIVE);
  ctx.addThread(thr2 = new ZKFCThread(ctx, 1));
  thr2.start();
  // Wait for the ZKFCs to fully start up
  ZKFCTestUtil.waitForHealthState(thr1.zkfc,
      HealthMonitor.State.SERVICE_HEALTHY, ctx);
  ZKFCTestUtil.waitForHealthState(thr2.zkfc,
      HealthMonitor.State.SERVICE_HEALTHY, ctx);
  fs = HATestUtil.configureFailoverFs(cluster, conf);
}
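The ZKFCThread helper referenced above is not shown in this snippet; in the Hadoop test it is a small TestingThread wrapper that runs a DFSZKFailoverController for one NameNode of the mini-cluster. A sketch of its likely shape follows; details may differ from the actual test class.

// Sketch of the ZKFCThread helper used above; details may differ.
private class ZKFCThread extends TestingThread {
  private final DFSZKFailoverController zkfc;

  private ZKFCThread(TestContext ctx, int idx) {
    super(ctx);
    // Build a ZKFC for the idx-th NameNode of the mini-cluster.
    this.zkfc = DFSZKFailoverController.create(cluster.getConfiguration(idx));
  }

  @Override
  public void doWork() throws Exception {
    try {
      // Run the failover controller until it is shut down.
      assertEquals(0, zkfc.run(new String[0]));
    } catch (InterruptedException ie) {
      // Expected when the test interrupts the thread during teardown.
    }
  }
}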
Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.
Class RPCCallBenchmark, method setupClientTestContext.
private TestContext setupClientTestContext(final MyOptions opts)
    throws IOException, InterruptedException {
  if (opts.clientThreads <= 0) {
    return null;
  }
  // Set up a separate proxy for each client thread,
  // rather than making them share TCP pipes.
  int numProxies = opts.clientThreads;
  final RpcServiceWrapper[] proxies = new RpcServiceWrapper[numProxies];
  for (int i = 0; i < numProxies; i++) {
    proxies[i] = UserGroupInformation.createUserForTesting("proxy-" + i, new String[] {})
        .doAs(new PrivilegedExceptionAction<RpcServiceWrapper>() {

          @Override
          public RpcServiceWrapper run() throws Exception {
            return createRpcClient(opts);
          }
        });
  }
  // Create an echo message of the desired length
  final StringBuilder msgBuilder = new StringBuilder(opts.msgSize);
  for (int c = 0; c < opts.msgSize; c++) {
    msgBuilder.append('x');
  }
  final String echoMessage = msgBuilder.toString();
  // Create the clients in a test context
  TestContext ctx = new TestContext();
  for (int i = 0; i < opts.clientThreads; i++) {
    final RpcServiceWrapper proxy = proxies[i % numProxies];
    ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {

      @Override
      public void doAnAction() throws Exception {
        proxy.doEcho(echoMessage);
        callCount.incrementAndGet();
      }
    });
  }
  return ctx;
}
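Each client thread talks to the server through an RpcServiceWrapper, a small adapter that hides which RPC engine backs the proxy. Creating one proxy per thread, rather than sharing one, keeps the benchmark from serializing all calls over a single TCP connection. Its shape is roughly as below; this is a sketch, and the benchmark's actual interface may differ.

// Sketch of the adapter each client thread calls; the real interface in
// RPCCallBenchmark may carry additional methods.
private interface RpcServiceWrapper {
  // Send msg to the server and return the echoed reply.
  String doEcho(String msg) throws Exception;
}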
Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.
Class RPCCallBenchmark, method run.
@Override
public int run(String[] args) throws Exception {
  MyOptions opts = new MyOptions(args);
  if (opts.failed) {
    return -1;
  }
  // Set RPC engine to the configured RPC engine
  RPC.setProtocolEngine(conf, TestRpcService.class, opts.rpcEngine);
  Server server = startServer(opts);
  try {
    TestContext ctx = setupClientTestContext(opts);
    if (ctx != null) {
      long totalCalls = 0;
      ctx.startThreads();
      long veryStart = System.nanoTime();
      // Loop printing results every second until the specified
      // time has elapsed
      for (int i = 0; i < opts.secondsToRun; i++) {
        long st = System.nanoTime();
        ctx.waitFor(1000);
        long et = System.nanoTime();
        long ct = callCount.getAndSet(0);
        totalCalls += ct;
        double callsPerSec = (ct * 1000000000) / (et - st);
        System.out.println("Calls per second: " + callsPerSec);
      }
      if (totalCalls > 0) {
        long veryEnd = System.nanoTime();
        double callsPerSec = (totalCalls * 1000000000) / (veryEnd - veryStart);
        long cpuNanosClient = getTotalCpuTime(ctx.getTestThreads());
        long cpuNanosServer = -1;
        if (server != null) {
          cpuNanosServer = getTotalCpuTime(server.getHandlers());
        }
        System.out.println("====== Results ======");
        System.out.println("Options:\n" + opts);
        System.out.println("Total calls per second: " + callsPerSec);
        System.out.println("CPU time per call on client: " + (cpuNanosClient / totalCalls) + " ns");
        if (server != null) {
          System.out.println("CPU time per call on server: " + (cpuNanosServer / totalCalls) + " ns");
        }
      } else {
        System.out.println("No calls!");
      }
      ctx.stop();
    } else {
      while (true) {
        Thread.sleep(10000);
      }
    }
  } finally {
    if (server != null) {
      server.stop();
    }
  }
  return 0;
}
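getTotalCpuTime(), used above for both the client test threads and the server handler threads, can be implemented with the JVM's ThreadMXBean. Below is a minimal sketch, assuming per-thread CPU-time measurement is supported and enabled on the platform; it is not necessarily the benchmark's exact helper.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

// Minimal sketch: sum the CPU time of a group of threads via ThreadMXBean.
// Note: getThreadCpuTime() returns -1 when measurement is unsupported or the
// thread has already exited; production code should check
// ThreadMXBean.isThreadCpuTimeSupported() and handle -1 explicitly.
private static long getTotalCpuTime(Iterable<? extends Thread> threads) {
  ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
  long total = 0;
  for (Thread t : threads) {
    total += threadBean.getThreadCpuTime(t.getId());
  }
  return total;
}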