use of org.apache.hadoop.yarn.api.records.ApplicationReport in project metron by apache.
the class MaasIntegrationTest method testDSShell.
/**
 * End-to-end MaaS test: writes the MaaS config to ZooKeeper, launches the
 * application master on YARN, ADDs a dummy model and verifies its REST
 * endpoint via service discovery, then REMOVEs it and verifies the endpoint
 * disappears.
 *
 * @param haveDomain when true, additionally exercises YARN timeline-domain
 *                   arguments (view/modify ACLs).
 * @throws Exception on any setup or assertion failure; cleanup() always runs.
 */
public void testDSShell(boolean haveDomain) throws Exception {
// Build the MaaS configuration: service registrations under /maas/service,
// the work queue backed by ZK at /maas/queue.
MaaSConfig config = new MaaSConfig() {
{
setServiceRoot("/maas/service");
setQueueConfig(new HashMap<String, Object>() {
{
put(ZKQueue.ZK_PATH, "/maas/queue");
}
});
}
};
// Persist the serialized config into ZK; create the node (with parents) if
// it does not exist yet, otherwise overwrite it in place.
String configRoot = "/maas/config";
byte[] configData = ConfigUtil.INSTANCE.toBytes(config);
try {
client.setData().forPath(configRoot, configData);
} catch (KeeperException.NoNodeException e) {
client.create().creatingParentsIfNeeded().forPath(configRoot, configData);
}
// Base CLI arguments for the MaaS application-master client.
String[] args = { "--jar", yarnComponent.getAppMasterJar(), "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--master_memory", "512", "--master_vcores", "2" };
if (haveDomain) {
// Append timeline-domain arguments (create a TEST_DOMAIN with ACLs).
String[] domainArgs = { "--domain", "TEST_DOMAIN", "--view_acls", "reader_user reader_group", "--modify_acls", "writer_user writer_group", "--create" };
List<String> argsList = new ArrayList<String>(Arrays.asList(args));
argsList.addAll(Arrays.asList(domainArgs));
args = argsList.toArray(new String[argsList.size()]);
}
YarnConfiguration conf = yarnComponent.getConfig();
LOG.info("Initializing DS Client");
// NOTE(review): this local shadows the curator field `client` used above and
// again later as `this.client` -- consider renaming for readability.
final Client client = new Client(new Configuration(conf));
boolean initSuccess = client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
// Run the (blocking) client on a background thread; the outcome is reported
// through this flag. An exception in run() kills the thread via RuntimeException.
final AtomicBoolean result = new AtomicBoolean(false);
Thread t = new Thread() {
@Override
public void run() {
try {
result.set(client.run());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
t.start();
// Independently poll the RM until the submitted app reports a real host,
// then check it launched on this machine with the expected (unset) RPC port.
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(new Configuration(conf));
yarnClient.start();
String hostName = NetUtils.getHostname();
boolean verified = false;
String errorMessage = "";
while (!verified) {
List<ApplicationReport> apps = yarnClient.getApplications();
if (apps.size() == 0) {
Thread.sleep(10);
continue;
}
ApplicationReport appReport = apps.get(0);
if (appReport.getHost().equals("N/A")) {
// Host not assigned yet; keep polling.
Thread.sleep(10);
continue;
}
errorMessage = "Expected host name to start with '" + hostName + "', was '" + appReport.getHost() + "'. Expected rpc port to be '-1', was '" + appReport.getRpcPort() + "'.";
if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
verified = true;
}
if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
// App finished before verification succeeded; stop polling either way.
break;
}
}
Assert.assertTrue(errorMessage, verified);
FileSystem fs = FileSystem.get(conf);
try {
// ADD a dummy model (1 instance, 100MB) via the submission tool.
new ModelSubmission().execute(FileSystem.get(conf), new String[] { "--name", "dummy", "--version", "1.0", "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--local_model_path", "src/test/resources/maas", "--hdfs_model_path", new Path(fs.getHomeDirectory(), "maas/dummy").toString(), "--num_instances", "1", "--memory", "100", "--mode", "ADD", "--log4j", "src/test/resources/log4j.properties" });
ServiceDiscoverer discoverer = new ServiceDiscoverer(this.client, config.getServiceRoot());
discoverer.start();
{
// Wait (up to ~200s) for exactly one live endpoint that echoes correctly.
boolean passed = false;
for (int i = 0; i < 100; ++i) {
try {
List<ModelEndpoint> endpoints = discoverer.getEndpoints(new Model("dummy", "1.0"));
if (endpoints != null && endpoints.size() == 1) {
LOG.trace("Found endpoints: " + endpoints.get(0));
String output = makeRESTcall(new URL(endpoints.get(0).getEndpoint().getUrl() + "/echo/casey"));
if (output.contains("casey")) {
passed = true;
break;
}
}
} catch (Exception e) {
// Best-effort retry loop: transient discovery/REST failures are expected
// while the endpoint is coming up, so errors are intentionally ignored.
}
Thread.sleep(2000);
}
Assert.assertTrue(passed);
}
{
// Sanity check: exactly one registered endpoint for dummy/1.0.
List<ModelEndpoint> endpoints = discoverer.getEndpoints(new Model("dummy", "1.0"));
Assert.assertNotNull(endpoints);
Assert.assertEquals(1, endpoints.size());
}
// REMOVE the model instance again.
new ModelSubmission().execute(FileSystem.get(conf), new String[] { "--name", "dummy", "--version", "1.0", "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--num_instances", "1", "--mode", "REMOVE" });
{
// Wait (up to ~200s) for the endpoint to disappear from discovery.
boolean passed = false;
for (int i = 0; i < 100; ++i) {
try {
List<ModelEndpoint> endpoints = discoverer.getEndpoints(new Model("dummy", "1.0"));
// ensure that the endpoint is dead.
if (endpoints == null || endpoints.size() == 0) {
passed = true;
break;
}
} catch (Exception e) {
// Best-effort retry loop; see ADD verification above.
}
Thread.sleep(2000);
}
Assert.assertTrue(passed);
}
} finally {
cleanup();
}
}
use of org.apache.hadoop.yarn.api.records.ApplicationReport in project hive by apache.
the class WebHCatJTShim23 method getYarnChildJobs.
/**
 * Queries the ResourceManager for this user's applications carrying the given
 * tag that started between {@code timestamp} and now.
 *
 * @param tag application tag the child jobs were submitted with
 * @param timestamp lower bound (epoch millis) on application start time
 * @return ids of the matching applications (possibly empty, never null)
 * @throws RuntimeException wrapping any RM communication failure
 */
private Set<ApplicationId> getYarnChildJobs(String tag, long timestamp) {
Set<ApplicationId> childYarnJobs = new HashSet<ApplicationId>();
LOG.info(String.format("Querying RM for tag = %s, starting with ts = %s", tag, timestamp));
// Restrict the query: only this user's apps, started in [timestamp, now],
// tagged with the given tag.
GetApplicationsRequest gar = GetApplicationsRequest.newInstance();
gar.setScope(ApplicationsRequestScope.OWN);
gar.setStartRange(timestamp, System.currentTimeMillis());
gar.setApplicationTags(Collections.singleton(tag));
try {
ApplicationClientProtocol proxy = ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
GetApplicationsResponse apps = proxy.getApplications(gar);
List<ApplicationReport> appsList = apps.getApplicationList();
for (ApplicationReport appReport : appsList) {
childYarnJobs.add(appReport.getApplicationId());
}
} catch (IOException | YarnException e) {
// Both failure modes are handled identically, so use a single multi-catch
// arm instead of two duplicated handlers; the cause is preserved.
throw new RuntimeException("Exception occurred while finding child jobs", e);
}
return childYarnJobs;
}
use of org.apache.hadoop.yarn.api.records.ApplicationReport in project hive by apache.
the class LlapStatusServiceDriver method getAppReport.
/**
 * Polls Slider for the YARN application report of the given app.
 *
 * @param appName name of the Slider/LLAP application instance
 * @param sliderClient client used to look up the instance
 * @param timeoutMs how long to keep polling: negative means wait forever,
 *                  0 means poll exactly once, otherwise an upper bound in ms
 * @return the application report, or null if none was found before timeout
 * @throws LlapStatusCliException if the lookup fails or the wait is interrupted
 */
private ApplicationReport getAppReport(String appName, SliderClient sliderClient, long timeoutMs) throws LlapStatusCliException {
long startTime = clock.getTime();
long timeoutTime = timeoutMs < 0 ? Long.MAX_VALUE : (startTime + timeoutMs);
ApplicationReport appReport = null;
while (appReport == null) {
try {
appReport = sliderClient.getYarnAppListClient().findInstance(appName);
if (timeoutMs == 0) {
// break immediately if timeout is 0
break;
}
// Otherwise sleep, and try again.
if (appReport == null) {
// Sleep in chunks of at most 500ms so we never overshoot the deadline by much.
long remainingTime = Math.min(timeoutTime - clock.getTime(), 500L);
if (remainingTime > 0) {
Thread.sleep(remainingTime);
} else {
break;
}
}
} catch (InterruptedException ie) {
// Restore the interrupt status before surfacing the failure; the original
// generic catch silently swallowed it.
Thread.currentThread().interrupt();
throw new LlapStatusCliException(ExitCode.YARN_ERROR, "Failed to get Yarn AppReport", ie);
} catch (Exception e) {
// No point separating IOException vs YarnException vs others
throw new LlapStatusCliException(ExitCode.YARN_ERROR, "Failed to get Yarn AppReport", e);
}
}
return appReport;
}
use of org.apache.hadoop.yarn.api.records.ApplicationReport in project hive by apache.
the class LlapStatusServiceDriver method run.
/**
 * Collects LLAP cluster status: resolves the app name, obtains the YARN app
 * report, then progressively enriches the status from Slider status, Slider
 * diagnostics, and the LLAP registry. Stops at the first failing stage and
 * returns its exit code.
 *
 * @param options parsed CLI options (may carry user-provided configs)
 * @param watchTimeoutMs timeout used when polling the LLAP registry
 * @return an ExitCode value as int; SUCCESS only if every stage succeeded
 */
public int run(LlapStatusOptions options, long watchTimeoutMs) {
appStatusBuilder = new AppStatusBuilder();
try {
// Resolve the application name once; cached in the appName field afterwards.
if (appName == null) {
// user provided configs
for (Map.Entry<Object, Object> props : options.getConf().entrySet()) {
conf.set((String) props.getKey(), (String) props.getValue());
}
appName = options.getName();
if (StringUtils.isEmpty(appName)) {
// Fall back to the configured service hosts; a leading '@' marks a
// Slider-managed app name rather than a literal host list.
appName = HiveConf.getVar(conf, HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS);
if (appName.startsWith("@") && appName.length() > 1) {
// This is a valid slider app name. Parse it out.
appName = appName.substring(1);
} else {
// Invalid app name. Checked later.
appName = null;
}
}
if (StringUtils.isEmpty(appName)) {
String message = "Invalid app name. This must be setup via config or passed in as a parameter." + " This tool works with clusters deployed by Slider/YARN";
LOG.info(message);
return ExitCode.INCORRECT_USAGE.getInt();
}
if (LOG.isDebugEnabled()) {
LOG.debug("Using appName: {}", appName);
}
llapRegistryConf.set(HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + appName);
}
// Lazily create the slider client; reused across invocations.
try {
if (sliderClient == null) {
sliderClient = LlapSliderUtils.createSliderClient(conf);
}
} catch (Exception e) {
LlapStatusCliException le = new LlapStatusCliException(LlapStatusServiceDriver.ExitCode.SLIDER_CLIENT_ERROR_CREATE_FAILED, "Failed to create slider client", e);
logError(le);
return le.getExitCode().getInt();
}
// Get the App report from YARN
ApplicationReport appReport;
try {
appReport = LlapSliderUtils.getAppReport(appName, sliderClient, options.getFindAppTimeoutMs());
} catch (LlapStatusCliException e) {
logError(e);
return e.getExitCode().getInt();
}
// Process the report to decide whether to go to slider.
ExitCode ret;
try {
ret = processAppReport(appReport, appStatusBuilder);
} catch (LlapStatusCliException e) {
logError(e);
return e.getExitCode().getInt();
}
if (ret != ExitCode.SUCCESS) {
return ret.getInt();
} else if (NO_SLIDER_INFO_STATES.contains(appStatusBuilder.getState())) {
// App is in a state where Slider has nothing further to report.
return ExitCode.SUCCESS.getInt();
} else {
// Get information from slider.
try {
ret = populateAppStatusFromSliderStatus(appName, sliderClient, appStatusBuilder);
} catch (LlapStatusCliException e) {
// In case of failure, send back whatever is constructed so far - which would be from the AppReport
logError(e);
return e.getExitCode().getInt();
}
}
if (ret != ExitCode.SUCCESS) {
return ret.getInt();
} else {
// Enrich with Slider diagnostics.
try {
ret = populateAppStatusFromSliderDiagnostics(appName, sliderClient, appStatusBuilder);
} catch (LlapStatusCliException e) {
logError(e);
return e.getExitCode().getInt();
}
}
if (ret != ExitCode.SUCCESS) {
return ret.getInt();
} else {
// Finally, enrich with live info from the LLAP registry (bounded by watchTimeoutMs).
try {
ret = populateAppStatusFromLlapRegistry(appStatusBuilder, watchTimeoutMs);
} catch (LlapStatusCliException e) {
logError(e);
return e.getExitCode().getInt();
}
}
return ret.getInt();
} finally {
if (LOG.isDebugEnabled()) {
LOG.debug("Final AppState: " + appStatusBuilder.toString());
}
}
}
use of org.apache.hadoop.yarn.api.records.ApplicationReport in project hadoop by apache.
the class TestApplicationACLs method verifyInvalidQueueWithAcl.
/**
 * Submits an application to a non-existent queue and verifies that it fails
 * with diagnostics naming the unknown queue.
 */
private void verifyInvalidQueueWithAcl() throws Exception {
isQueueUser = true;
// Obtain a fresh application id from the RM.
ApplicationId appId = rmClient.getNewApplication(recordFactory.newRecordInstance(GetNewApplicationRequest.class)).getApplicationId();
// Build a minimal AM container spec with empty ACLs.
ContainerLaunchContext launchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
launchContext.setApplicationACLs(new HashMap<ApplicationAccessType, String>());
// Assemble a submission context pointing at a queue that does not exist.
ApplicationSubmissionContext submissionContext = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
submissionContext.setApplicationId(appId);
submissionContext.setResource(BuilderUtils.newResource(1024, 1));
submissionContext.setQueue("InvalidQueue");
submissionContext.setAMContainerSpec(launchContext);
SubmitApplicationRequest submission = recordFactory.newRecordInstance(SubmitApplicationRequest.class);
submission.setApplicationSubmissionContext(submissionContext);
rmClient.submitApplication(submission);
// Submission to an unknown queue must drive the application to FAILED.
resourceManager.waitForState(appId, RMAppState.FAILED);
// Fetch the report and confirm the diagnostics explain the failure.
GetApplicationReportRequest reportRequest = recordFactory.newRecordInstance(GetApplicationReportRequest.class);
reportRequest.setApplicationId(appId);
ApplicationReport report = rmClient.getApplicationReport(reportRequest).getApplicationReport();
Assert.assertTrue(report.getDiagnostics().contains("submitted by user owner to unknown queue: InvalidQueue"));
}
Aggregations