Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project weave by continuuity.
The class YarnTestSuite, method init.
@BeforeClass
public static final void init() throws IOException {
  // Start ZooKeeper
  zkServer = InMemoryZKServer.builder().build();
  zkServer.startAndWait();
  // Start YARN mini cluster
  config = new YarnConfiguration(new Configuration());
  if (YarnUtils.isHadoop20()) {
    config.set("yarn.resourcemanager.scheduler.class",
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
  } else {
    config.set("yarn.resourcemanager.scheduler.class",
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
    config.set("yarn.scheduler.capacity.resource-calculator",
        "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
  }
  config.set("yarn.minicluster.fixed.ports", "true");
  config.set("yarn.nodemanager.vmem-pmem-ratio", "20.1");
  config.set("yarn.nodemanager.vmem-check-enabled", "false");
  config.set("yarn.scheduler.minimum-allocation-mb", "128");
  config.set("yarn.nodemanager.delete.debug-delay-sec", "3600");
  cluster = new MiniYARNCluster("test-cluster", 1, 1, 1);
  cluster.init(config);
  cluster.start();
  runnerService = createWeaveRunnerService();
  runnerService.startAndWait();
}
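This init leaves three services running, so the suite needs a matching teardown. A minimal sketch, assuming the same static fields as above; the actual YarnTestSuite cleanup method may differ:

@AfterClass
public static final void finish() {
  runnerService.stopAndWait();  // stop the Weave runner first
  cluster.stop();               // then the YARN mini cluster
  zkServer.stopAndWait();       // and finally the in-memory ZooKeeper
}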
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.
The class JHEventHandlerForSigtermTest, method testTimelineEventHandling.
// Have JobHistoryEventHandler handle some events and make sure they get
// stored to the Timeline store
@Test(timeout = 50000)
public void testTimelineEventHandling() throws Exception {
  TestParams t = new TestParams(RunningAppContext.class, false);
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  long currentTime = System.currentTimeMillis();
  try (MiniYARNCluster yarnCluster = new MiniYARNCluster(
      TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1)) {
    yarnCluster.init(conf);
    yarnCluster.start();
    Configuration confJHEH = new YarnConfiguration(conf);
    confJHEH.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
    confJHEH.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
        MiniYARNCluster.getHostname() + ":"
            + yarnCluster.getApplicationHistoryServer().getPort());
    JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
    jheh.init(confJHEH);
    jheh.start();
    TimelineStore ts = yarnCluster.getApplicationHistoryServer().getTimelineStore();

    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1),
        currentTime - 10));
    TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB",
        null, null, null, null, null, null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    TimelineEntity tEntity = entities.getEntities().get(0);
    Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
    Assert.assertEquals(1, tEntity.getEvents().size());
    Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(0).getEventType());
    Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(0).getTimestamp());

    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new JobSubmittedEvent(TypeConverter.fromYarn(t.jobId), "name", "user", 200,
            "/foo/job.xml", new HashMap<JobACL, AccessControlList>(), "default"),
        currentTime + 10));
    entities = ts.getEntities("MAPREDUCE_JOB",
        null, null, null, null, null, null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    tEntity = entities.getEntities().get(0);
    Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
    Assert.assertEquals(2, tEntity.getEvents().size());
    Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
    Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
    Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
    Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp());

    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"),
        currentTime - 20));
    entities = ts.getEntities("MAPREDUCE_JOB",
        null, null, null, null, null, null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    tEntity = entities.getEntities().get(0);
    Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
    Assert.assertEquals(3, tEntity.getEvents().size());
    Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
    Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
    Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(2).getEventType());
    Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
    Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp());
    Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(2).getTimestamp());

    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0,
            new Counters(), new Counters(), new Counters()),
        currentTime));
    entities = ts.getEntities("MAPREDUCE_JOB",
        null, null, null, null, null, null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    tEntity = entities.getEntities().get(0);
    Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
    Assert.assertEquals(4, tEntity.getEvents().size());
    Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
    Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(1).getEventType());
    Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(2).getEventType());
    Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(3).getEventType());
    Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
    Assert.assertEquals(currentTime, tEntity.getEvents().get(1).getTimestamp());
    Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(2).getTimestamp());
    Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(3).getTimestamp());

    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0,
            JobStateInternal.KILLED.toString()),
        currentTime + 20));
    entities = ts.getEntities("MAPREDUCE_JOB",
        null, null, null, null, null, null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    tEntity = entities.getEntities().get(0);
    Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
    Assert.assertEquals(5, tEntity.getEvents().size());
    Assert.assertEquals(EventType.JOB_KILLED.toString(), tEntity.getEvents().get(0).getEventType());
    Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(1).getEventType());
    Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(2).getEventType());
    Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(3).getEventType());
    Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(4).getEventType());
    Assert.assertEquals(currentTime + 20, tEntity.getEvents().get(0).getTimestamp());
    Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(1).getTimestamp());
    Assert.assertEquals(currentTime, tEntity.getEvents().get(2).getTimestamp());
    Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(3).getTimestamp());
    Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(4).getTimestamp());

    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new TaskStartedEvent(t.taskID, 0, TaskType.MAP, "")));
    entities = ts.getEntities("MAPREDUCE_TASK",
        null, null, null, null, null, null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    tEntity = entities.getEntities().get(0);
    Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
    Assert.assertEquals(1, tEntity.getEvents().size());
    Assert.assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(0).getEventType());
    Assert.assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));

    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new TaskStartedEvent(t.taskID, 0, TaskType.REDUCE, "")));
    entities = ts.getEntities("MAPREDUCE_TASK",
        null, null, null, null, null, null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    tEntity = entities.getEntities().get(0);
    Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
    Assert.assertEquals(2, tEntity.getEvents().size());
    Assert.assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
    Assert.assertEquals(TaskType.REDUCE.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));
    Assert.assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(1).getEventInfo().get("TASK_TYPE"));
  }
}
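The handleEvent helper used throughout this test is not shown on this page. A hedged sketch of what such a helper can look like, assuming JHEvenHandlerForTest exposes its internal event queue to the test; the real TestJobHistoryEventHandler helper may differ:

// Push one event through the handler synchronously so the assertions that
// follow each call see the event already written to the Timeline store.
// eventQueue is an assumed test-visible field of JHEvenHandlerForTest.
private void handleEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event)
    throws InterruptedException {
  jheh.handle(event);                        // enqueue the event
  jheh.handleEvent(jheh.eventQueue.take());  // process it on the caller's thread
}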
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.
The class TestHadoopArchiveLogs, method testFilterAppsByAggregatedStatus.
@Test(timeout = 30000)
public void testFilterAppsByAggregatedStatus() throws Exception {
  try (MiniYARNCluster yarnCluster = new MiniYARNCluster(
      TestHadoopArchiveLogs.class.getSimpleName(), 1, 1, 1, 1)) {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
    yarnCluster.init(conf);
    yarnCluster.start();
    conf = yarnCluster.getConfig();
    RMContext rmContext = yarnCluster.getResourceManager().getRMContext();
    RMAppImpl appImpl1 = (RMAppImpl) createRMApp(1, conf, rmContext, LogAggregationStatus.DISABLED);
    RMAppImpl appImpl2 = (RMAppImpl) createRMApp(2, conf, rmContext, LogAggregationStatus.FAILED);
    RMAppImpl appImpl3 = (RMAppImpl) createRMApp(3, conf, rmContext, LogAggregationStatus.NOT_START);
    RMAppImpl appImpl4 = (RMAppImpl) createRMApp(4, conf, rmContext, LogAggregationStatus.SUCCEEDED);
    RMAppImpl appImpl5 = (RMAppImpl) createRMApp(5, conf, rmContext, LogAggregationStatus.RUNNING);
    RMAppImpl appImpl6 = (RMAppImpl) createRMApp(6, conf, rmContext, LogAggregationStatus.RUNNING_WITH_FAILURE);
    RMAppImpl appImpl7 = (RMAppImpl) createRMApp(7, conf, rmContext, LogAggregationStatus.TIME_OUT);
    RMAppImpl appImpl8 = (RMAppImpl) createRMApp(8, conf, rmContext, LogAggregationStatus.SUCCEEDED);
    rmContext.getRMApps().put(appImpl1.getApplicationId(), appImpl1);
    rmContext.getRMApps().put(appImpl2.getApplicationId(), appImpl2);
    rmContext.getRMApps().put(appImpl3.getApplicationId(), appImpl3);
    rmContext.getRMApps().put(appImpl4.getApplicationId(), appImpl4);
    rmContext.getRMApps().put(appImpl5.getApplicationId(), appImpl5);
    rmContext.getRMApps().put(appImpl6.getApplicationId(), appImpl6);
    rmContext.getRMApps().put(appImpl7.getApplicationId(), appImpl7);
    // appImpl8 is not in the RM
    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
    Assert.assertEquals(0, hal.eligibleApplications.size());
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl1.getApplicationId().toString(), USER));
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl2.getApplicationId().toString(), USER));
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl3.getApplicationId().toString(), USER));
    HadoopArchiveLogs.AppInfo app4 = new HadoopArchiveLogs.AppInfo(appImpl4.getApplicationId().toString(), USER);
    hal.eligibleApplications.add(app4);
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl5.getApplicationId().toString(), USER));
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl6.getApplicationId().toString(), USER));
    HadoopArchiveLogs.AppInfo app7 = new HadoopArchiveLogs.AppInfo(appImpl7.getApplicationId().toString(), USER);
    hal.eligibleApplications.add(app7);
    HadoopArchiveLogs.AppInfo app8 = new HadoopArchiveLogs.AppInfo(appImpl8.getApplicationId().toString(), USER);
    hal.eligibleApplications.add(app8);
    Assert.assertEquals(8, hal.eligibleApplications.size());
    hal.filterAppsByAggregatedStatus();
    Assert.assertEquals(3, hal.eligibleApplications.size());
    Assert.assertTrue(hal.eligibleApplications.contains(app4));
    Assert.assertTrue(hal.eligibleApplications.contains(app7));
    Assert.assertTrue(hal.eligibleApplications.contains(app8));
  }
}
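The assertions pin down the rule being tested: of the eight candidates, only app4 (SUCCEEDED), app7 (TIME_OUT), and app8 (unknown to the RM) stay eligible. A hypothetical distillation of that predicate, not the actual HadoopArchiveLogs code:

// Hypothetical restatement of the filtering rule the assertions above
// exercise; the real filterAppsByAggregatedStatus() differs in structure
// but keeps exactly these apps. An app stays eligible only if its log
// aggregation has finished, or if the RM no longer tracks the app at all.
static boolean staysEligible(LogAggregationStatus status) {
  return status == null                             // app unknown to the RM (app8)
      || status == LogAggregationStatus.SUCCEEDED   // app4
      || status == LogAggregationStatus.TIME_OUT;   // app7
}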
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.
The class TestAMRMProxy, method testAMRMProxyE2E.
/*
 * This test validates register, allocate and finish of an application
 * through the AMRMProxy.
 */
@Test(timeout = 120000)
public void testAMRMProxyE2E() throws Exception {
  ApplicationMasterProtocol client;
  try (MiniYARNCluster cluster = new MiniYARNCluster("testAMRMProxyE2E", 1, 1, 1);
       YarnClient rmClient = YarnClient.createYarnClient()) {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
    cluster.init(conf);
    cluster.start();
    final Configuration yarnConf = cluster.getConfig();
    // the client has to connect to the AMRMProxy
    yarnConf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS,
        YarnConfiguration.DEFAULT_AMRM_PROXY_ADDRESS);
    rmClient.init(yarnConf);
    rmClient.start();
    // Submit application
    ApplicationAttemptId appAttmptId = createApp(rmClient, cluster, conf);
    ApplicationId appId = appAttmptId.getApplicationId();
    client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
    LOG.info("testAMRMProxyE2E - Register Application Master");
    RegisterApplicationMasterResponse responseRegister =
        client.registerApplicationMaster(RegisterApplicationMasterRequest
            .newInstance(NetUtils.getHostname(), 1024, ""));
    Assert.assertNotNull(responseRegister);
    Assert.assertNotNull(responseRegister.getQueue());
    Assert.assertNotNull(responseRegister.getApplicationACLs());
    Assert.assertNotNull(responseRegister.getClientToAMTokenMasterKey());
    Assert.assertNotNull(responseRegister.getContainersFromPreviousAttempts());
    Assert.assertNotNull(responseRegister.getSchedulerResourceTypes());
    Assert.assertNotNull(responseRegister.getMaximumResourceCapability());
    RMApp rmApp = cluster.getResourceManager().getRMContext().getRMApps().get(appId);
    Assert.assertEquals(RMAppState.RUNNING, rmApp.getState());
    LOG.info("testAMRMProxyE2E - Allocate Resources Application Master");
    AllocateRequest request =
        createAllocateRequest(rmClient.getNodeReports(NodeState.RUNNING));
    AllocateResponse allocResponse = client.allocate(request);
    Assert.assertNotNull(allocResponse);
    Assert.assertEquals(0, allocResponse.getAllocatedContainers().size());
    request.setAskList(new ArrayList<ResourceRequest>());
    request.setResponseId(request.getResponseId() + 1);
    Thread.sleep(1000);
    // RM should allocate the containers within 2 calls to allocate()
    allocResponse = client.allocate(request);
    Assert.assertNotNull(allocResponse);
    Assert.assertEquals(2, allocResponse.getAllocatedContainers().size());
    LOG.info("testAMRMProxyE2E - Finish Application Master");
    FinishApplicationMasterResponse responseFinish =
        client.finishApplicationMaster(FinishApplicationMasterRequest
            .newInstance(FinalApplicationStatus.SUCCEEDED, "success", null));
    Assert.assertNotNull(responseFinish);
    Thread.sleep(500);
    Assert.assertNotEquals(RMAppState.FINISHED, rmApp.getState());
  }
}
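The helpers createApp, createAMRMProtocol, and createAllocateRequest come from the surrounding TestAMRMProxy class and are not shown on this page. For orientation, a hedged sketch of what createAllocateRequest plausibly builds, given that the test expects two allocated containers; the real helper may differ:

// Hedged sketch: an allocate request asking for two 1 GB / 1 vCore
// containers anywhere on the cluster. Not the actual TestAMRMProxy helper.
private AllocateRequest createAllocateRequest(List<NodeReport> nodes) {
  ResourceRequest anyHost = ResourceRequest.newInstance(
      Priority.newInstance(1), ResourceRequest.ANY,
      Resource.newInstance(1024, 1), 2);
  return AllocateRequest.newInstance(0, 0f,
      Collections.singletonList(anyHost),
      new ArrayList<ContainerId>(), null);
}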
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.
The class TestAMRMProxy, method testAMRMProxyTokenRenewal.
/*
 * This test validates token renewal through the AMRMProxy. It verifies
 * that, within 5 requests, the token received from the AMRMProxy differs
 * from the previous one.
 */
@Test(timeout = 120000)
public void testAMRMProxyTokenRenewal() throws Exception {
  ApplicationMasterProtocol client;
  try (MiniYARNCluster cluster = new MiniYARNCluster("testE2ETokenRenewal", 1, 1, 1);
       YarnClient rmClient = YarnClient.createYarnClient()) {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
    conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1500);
    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1500);
    conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 1500);
    // RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS should be at least
    // RM_AM_EXPIRY_INTERVAL_MS * 1.5 * 3
    conf.setInt(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 6);
    cluster.init(conf);
    cluster.start();
    final Configuration yarnConf = cluster.getConfig();
    yarnConf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS,
        YarnConfiguration.DEFAULT_AMRM_PROXY_ADDRESS);
    rmClient.init(yarnConf);
    rmClient.start();
    // Submit application
    ApplicationAttemptId appAttmptId = createApp(rmClient, cluster, conf);
    ApplicationId appId = appAttmptId.getApplicationId();
    client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
    client.registerApplicationMaster(RegisterApplicationMasterRequest
        .newInstance(NetUtils.getHostname(), 1024, ""));
    LOG.info("testAMRMProxyTokenRenewal - Allocate Resources Application Master");
    AllocateRequest request =
        createAllocateRequest(rmClient.getNodeReports(NodeState.RUNNING));
    Token lastToken = null;
    AllocateResponse response = null;
    for (int i = 0; i < 5; i++) {
      response = client.allocate(request);
      request.setResponseId(request.getResponseId() + 1);
      if (response.getAMRMToken() != null && !response.getAMRMToken().equals(lastToken)) {
        break;
      }
      lastToken = response.getAMRMToken();
      // Give the AMRMProxy time to renew the token
      Thread.sleep(1500);
    }
    Assert.assertFalse(response.getAMRMToken().equals(lastToken));
    LOG.info("testAMRMProxyTokenRenewal - Finish Application Master");
    client.finishApplicationMaster(FinishApplicationMasterRequest
        .newInstance(FinalApplicationStatus.SUCCEEDED, "success", null));
  }
}
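The test only checks that the token changes. A real application master must also install the refreshed token so that subsequent allocate() calls authenticate against the new master key; a hedged sketch of that step, mirroring what AMRMClientImpl does internally (not part of the test above):

// Convert the YARN-level token into an RPC-level token and attach it to the
// current user, replacing the stale AMRMToken. Shown only for illustration.
org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> newToken =
    ConverterUtils.convertFromYarn(response.getAMRMToken(), (Text) null);
UserGroupInformation.getCurrentUser().addToken(newToken);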