Use of java.util.concurrent.CountDownLatch in project hadoop by apache.
From the class TestResourceManagerAdministrationProtocolPBClientImpl, method setUpResourceManager.
/**
 * Start the resource manager server.
 */
@BeforeClass
public static void setUpResourceManager() throws IOException, InterruptedException {
  Configuration.addDefaultResource("config-with-security.xml");
  Configuration configuration = new YarnConfiguration();
  resourceManager = new ResourceManager() {
    @Override
    protected void doSecureLogin() throws IOException {
    }
  };
  // A reliable way to wait for the resource manager to fully start.
  final CountDownLatch rmStartedSignal = new CountDownLatch(1);
  ServiceStateChangeListener rmStateChangeListener = new ServiceStateChangeListener() {
    @Override
    public void stateChanged(Service service) {
      if (service.getServiceState() == STATE.STARTED) {
        rmStartedSignal.countDown();
      }
    }
  };
  resourceManager.registerServiceListener(rmStateChangeListener);
  resourceManager.init(configuration);
  new Thread() {
    public void run() {
      resourceManager.start();
    }
  }.start();
  boolean rmStarted = rmStartedSignal.await(60000L, TimeUnit.MILLISECONDS);
  Assert.assertTrue("ResourceManager failed to start up.", rmStarted);
  LOG.info("ResourceManager RMAdmin address: "
      + configuration.get(YarnConfiguration.RM_ADMIN_ADDRESS));
  client = new ResourceManagerAdministrationProtocolPBClientImpl(1L,
      getProtocolAddress(configuration), configuration);
}
Use of java.util.concurrent.CountDownLatch in project hadoop by apache.
From the class TestGetGroups, method setUpResourceManager.
@BeforeClass
public static void setUpResourceManager() throws InterruptedException {
  conf = new YarnConfiguration();
  resourceManager = new ResourceManager() {
    @Override
    protected void doSecureLogin() throws IOException {
    }
  };
  // A reliable way to wait for the resource manager to start.
  final CountDownLatch rmStartedSignal = new CountDownLatch(1);
  ServiceStateChangeListener rmStateChangeListener = new ServiceStateChangeListener() {
    @Override
    public void stateChanged(Service service) {
      if (service.getServiceState() == STATE.STARTED) {
        rmStartedSignal.countDown();
      }
    }
  };
  resourceManager.registerServiceListener(rmStateChangeListener);
  resourceManager.init(conf);
  new Thread() {
    public void run() {
      resourceManager.start();
    }
  }.start();
  boolean rmStarted = rmStartedSignal.await(60000L, TimeUnit.MILLISECONDS);
  Assert.assertTrue("ResourceManager failed to start up.", rmStarted);
  LOG.info("ResourceManager RMAdmin address: "
      + conf.get(YarnConfiguration.RM_ADMIN_ADDRESS));
}
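Both setUpResourceManager methods above use the same idiom: a CountDownLatch initialized to one acts as a one-shot "started" signal that the service-state listener trips, while the test thread blocks on await with a timeout. Below is a minimal, self-contained sketch of that idiom with all Hadoop types stripped out; the class and variable names are hypothetical, and only CountDownLatch, TimeUnit, and Thread come from the JDK.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Minimal sketch of the wait-for-startup idiom above. Names are hypothetical.
public class StartupLatchSketch {

  public static void main(String[] args) throws InterruptedException {
    final CountDownLatch startedSignal = new CountDownLatch(1);

    // The "service" runs on its own thread and counts down once it is up,
    // playing the role of the ServiceStateChangeListener in the tests above.
    Thread service = new Thread(() -> {
      // ... perform startup work here ...
      startedSignal.countDown(); // signal STARTED exactly once
      // ... continue serving ...
    });
    service.start();

    // The test thread blocks until the signal arrives or the timeout expires,
    // mirroring rmStartedSignal.await(60000L, TimeUnit.MILLISECONDS).
    boolean started = startedSignal.await(60, TimeUnit.SECONDS);
    if (!started) {
      throw new IllegalStateException("service failed to start in time");
    }
    System.out.println("service is up; safe to create clients now");
  }
}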
Use of java.util.concurrent.CountDownLatch in project hadoop by apache.
From the class TestAggregatedLogFormat, method writeSrcFileAndALog.
private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long length,
    Path remoteAppLogFile, Path srcFileRoot, ContainerId testContainerId) throws Exception {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }
  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  final int ch = filler;
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(new Configuration(), remoteAppLogFile, ugi);
  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue = spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
      testContainerId, ugi.getShortUserName()));
  final CountDownLatch latch = new CountDownLatch(1);
  Thread t = new Thread() {
    public void run() {
      try {
        // Write the first third of the file, then signal that aggregation may begin.
        for (int i = 0; i < length / 3; i++) {
          osw.write(ch);
        }
        latch.countDown();
        // Keep writing the remaining two thirds while aggregation is in progress.
        for (int i = 0; i < (2 * length) / 3; i++) {
          osw.write(ch);
        }
        osw.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  };
  t.start();
  // Wait until the osw is partially written; aggregation starts once the osw
  // has completed a third of its work.
  latch.await();
  // Aggregate the logs.
  logWriter.append(logKey, logValue);
  logWriter.close();
}
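Here the latch is used differently: the writer thread counts down after roughly a third of the file is on disk, so that log aggregation starts while the file is still being written. A distilled sketch of that mid-write handoff follows, with a thread-safe StringBuffer standing in for the source file; all names are hypothetical and only CountDownLatch is from the JDK.

import java.util.concurrent.CountDownLatch;

// Sketch of the mid-write handoff above: the writer signals after the first
// third of its output so the consumer can start on partially written data.
public class PartialWriteSketch {

  public static void main(String[] args) throws InterruptedException {
    final StringBuffer buffer = new StringBuffer(); // stands in for the src file
    final int length = 300;
    final CountDownLatch oneThirdWritten = new CountDownLatch(1);

    Thread writer = new Thread(() -> {
      for (int i = 0; i < length / 3; i++) {
        buffer.append('x');
      }
      oneThirdWritten.countDown(); // the consumer may start now
      for (int i = 0; i < (2 * length) / 3; i++) {
        buffer.append('x');
      }
    });
    writer.start();

    oneThirdWritten.await(); // corresponds to latch.await() before logWriter.append
    System.out.println("consumer starts with " + buffer.length() + " chars already written");
    writer.join();
  }
}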
Use of java.util.concurrent.CountDownLatch in project hadoop by apache.
From the class TestGridMixClasses, method testSerialReaderThread.
/*
 * Test SerialJobFactory.
 */
@Test(timeout = 120000)
public void testSerialReaderThread() throws Exception {
  Configuration conf = new Configuration();
  File fin = new File("src" + File.separator + "test" + File.separator + "resources"
      + File.separator + "data" + File.separator + "wordcount2.json");
  // Read a couple of jobs from wordcount2.json.
  JobStoryProducer jobProducer = new ZombieJobProducer(new Path(fin.getAbsolutePath()), null, conf);
  CountDownLatch startFlag = new CountDownLatch(1);
  UserResolver resolver = new SubmitterUserResolver();
  FakeJobSubmitter submitter = new FakeJobSubmitter();
  File ws = new File("target" + File.separator + this.getClass().getName());
  if (!ws.exists()) {
    Assert.assertTrue(ws.mkdirs());
  }
  SerialJobFactory jobFactory = new SerialJobFactory(submitter, jobProducer,
      new Path(ws.getAbsolutePath()), conf, startFlag, resolver);
  Path ioPath = new Path(ws.getAbsolutePath());
  jobFactory.setDistCacheEmulator(new DistributedCacheEmulator(conf, ioPath));
  Thread test = jobFactory.createReaderThread();
  test.start();
  Thread.sleep(1000);
  // The reader thread waits for startFlag, so nothing has been submitted yet.
  assertEquals(0, submitter.getJobs().size());
  // Start!
  startFlag.countDown();
  while (test.isAlive()) {
    Thread.sleep(1000);
    jobFactory.update(null);
  }
  // The submitter was called twice.
  assertEquals(2, submitter.getJobs().size());
}
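In this test the latch works as a start gate rather than a completion signal: the reader thread parks on startFlag and submits nothing until the test calls startFlag.countDown(). A minimal sketch of that gate pattern follows; the names are hypothetical and only CountDownLatch is from the JDK.

import java.util.concurrent.CountDownLatch;

// Sketch of the "start flag" gate above: worker threads block on await()
// until the coordinator calls countDown(), which releases all of them at once.
public class StartGateSketch {

  public static void main(String[] args) throws InterruptedException {
    final CountDownLatch startFlag = new CountDownLatch(1);

    for (int i = 0; i < 3; i++) {
      final int id = i;
      new Thread(() -> {
        try {
          startFlag.await(); // gate: no work happens before the flag drops
          System.out.println("worker " + id + " started");
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }).start();
    }

    System.out.println("workers are parked; releasing them now");
    startFlag.countDown(); // releases every waiting thread
  }
}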
Use of java.util.concurrent.CountDownLatch in project hadoop by apache.
From the class TestNodeStatusUpdater, method testConcurrentAccessToSystemCredentials.
@Test
public void testConcurrentAccessToSystemCredentials() {
  final Map<ApplicationId, ByteBuffer> testCredentials = new HashMap<>();
  ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[300]);
  ApplicationId applicationId = ApplicationId.newInstance(123456, 120);
  testCredentials.put(applicationId, byteBuffer);
  final List<Throwable> exceptions = Collections.synchronizedList(new ArrayList<Throwable>());
  final int NUM_THREADS = 10;
  final CountDownLatch allDone = new CountDownLatch(NUM_THREADS);
  final ExecutorService threadPool = HadoopExecutors.newFixedThreadPool(NUM_THREADS);
  final AtomicBoolean stop = new AtomicBoolean(false);
  try {
    for (int i = 0; i < NUM_THREADS; i++) {
      threadPool.submit(new Runnable() {
        @Override
        public void run() {
          try {
            for (int i = 0; i < 100 && !stop.get(); i++) {
              NodeHeartbeatResponse nodeHeartBeatResponse =
                  newNodeHeartbeatResponse(0, NodeAction.NORMAL, null, null, null, null, 0);
              nodeHeartBeatResponse.setSystemCredentialsForApps(testCredentials);
              NodeHeartbeatResponseProto proto =
                  ((NodeHeartbeatResponsePBImpl) nodeHeartBeatResponse).getProto();
              Assert.assertNotNull(proto);
            }
          } catch (Throwable t) {
            exceptions.add(t);
            stop.set(true);
          } finally {
            allDone.countDown();
          }
        }
      });
    }
    int testTimeout = 2;
    Assert.assertTrue("Timeout waiting for more than " + testTimeout + " seconds",
        allDone.await(testTimeout, TimeUnit.SECONDS));
  } catch (InterruptedException ie) {
    exceptions.add(ie);
  } finally {
    threadPool.shutdownNow();
  }
  Assert.assertTrue("Test failed with exception(s): " + exceptions, exceptions.isEmpty());
}
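This last test shows the completion-latch idiom: the latch is initialized to the worker count and each worker counts down in a finally block, so the latch is released even when a worker throws. A self-contained sketch follows, assuming a plain Executors pool in place of HadoopExecutors; all names are hypothetical.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Sketch of the completion-latch idiom above: one count per worker,
// countDown() in finally so a failing worker still releases the latch.
public class CompletionLatchSketch {

  public static void main(String[] args) throws InterruptedException {
    final int numThreads = 10;
    final CountDownLatch allDone = new CountDownLatch(numThreads);
    ExecutorService pool = Executors.newFixedThreadPool(numThreads);
    try {
      for (int i = 0; i < numThreads; i++) {
        pool.submit(() -> {
          try {
            // ... the work under test goes here ...
          } finally {
            allDone.countDown(); // always signal, even on failure
          }
        });
      }
      // Bound the wait so a hung worker fails the test instead of blocking it.
      if (!allDone.await(2, TimeUnit.SECONDS)) {
        throw new IllegalStateException("workers did not finish in time");
      }
    } finally {
      pool.shutdownNow();
    }
    System.out.println("all workers completed");
  }
}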