Example 81 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project metron by apache.

the class MaasIntegrationTest method testDSShell.

public void testDSShell(boolean haveDomain) throws Exception {
    MaaSConfig config = new MaaSConfig() {

        {
            setServiceRoot("/maas/service");
            setQueueConfig(new HashMap<String, Object>() {

                {
                    put(ZKQueue.ZK_PATH, "/maas/queue");
                }
            });
        }
    };
    String configRoot = "/maas/config";
    byte[] configData = ConfigUtil.INSTANCE.toBytes(config);
    try {
        client.setData().forPath(configRoot, configData);
    } catch (KeeperException.NoNodeException e) {
        client.create().creatingParentsIfNeeded().forPath(configRoot, configData);
    }
    String[] args = { "--jar", yarnComponent.getAppMasterJar(), "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--master_memory", "512", "--master_vcores", "2" };
    if (haveDomain) {
        String[] domainArgs = { "--domain", "TEST_DOMAIN", "--view_acls", "reader_user reader_group", "--modify_acls", "writer_user writer_group", "--create" };
        List<String> argsList = new ArrayList<String>(Arrays.asList(args));
        argsList.addAll(Arrays.asList(domainArgs));
        args = argsList.toArray(new String[argsList.size()]);
    }
    YarnConfiguration conf = yarnComponent.getConfig();
    LOG.info("Initializing DS Client");
    final Client client = new Client(new Configuration(conf));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    final AtomicBoolean result = new AtomicBoolean(false);
    Thread t = new Thread() {

        @Override
        public void run() {
            try {
                result.set(client.run());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    t.start();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(conf));
    yarnClient.start();
    String hostName = NetUtils.getHostname();
    boolean verified = false;
    String errorMessage = "";
    while (!verified) {
        List<ApplicationReport> apps = yarnClient.getApplications();
        if (apps.size() == 0) {
            Thread.sleep(10);
            continue;
        }
        ApplicationReport appReport = apps.get(0);
        if (appReport.getHost().equals("N/A")) {
            Thread.sleep(10);
            continue;
        }
        errorMessage = "Expected host name to start with '" + hostName + "', was '" + appReport.getHost() + "'. Expected rpc port to be '-1', was '" + appReport.getRpcPort() + "'.";
        if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
            verified = true;
        }
        if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
            break;
        }
    }
    Assert.assertTrue(errorMessage, verified);
    FileSystem fs = FileSystem.get(conf);
    try {
        new ModelSubmission().execute(FileSystem.get(conf), new String[] { "--name", "dummy", "--version", "1.0", "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--local_model_path", "src/test/resources/maas", "--hdfs_model_path", new Path(fs.getHomeDirectory(), "maas/dummy").toString(), "--num_instances", "1", "--memory", "100", "--mode", "ADD", "--log4j", "src/test/resources/log4j.properties" });
        ServiceDiscoverer discoverer = new ServiceDiscoverer(this.client, config.getServiceRoot());
        discoverer.start();
        {
            boolean passed = false;
            for (int i = 0; i < 100; ++i) {
                try {
                    List<ModelEndpoint> endpoints = discoverer.getEndpoints(new Model("dummy", "1.0"));
                    if (endpoints != null && endpoints.size() == 1) {
                        LOG.trace("Found endpoints: " + endpoints.get(0));
                        String output = makeRESTcall(new URL(endpoints.get(0).getEndpoint().getUrl() + "/echo/casey"));
                        if (output.contains("casey")) {
                            passed = true;
                            break;
                        }
                    }
                } catch (Exception e) {
                    // The model service may not be registered or reachable yet; ignore and retry.
                }
                Thread.sleep(2000);
            }
            Assert.assertTrue(passed);
        }
        {
            List<ModelEndpoint> endpoints = discoverer.getEndpoints(new Model("dummy", "1.0"));
            Assert.assertNotNull(endpoints);
            Assert.assertEquals(1, endpoints.size());
        }
        new ModelSubmission().execute(FileSystem.get(conf), new String[] { "--name", "dummy", "--version", "1.0", "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--num_instances", "1", "--mode", "REMOVE" });
        {
            boolean passed = false;
            for (int i = 0; i < 100; ++i) {
                try {
                    List<ModelEndpoint> endpoints = discoverer.getEndpoints(new Model("dummy", "1.0"));
                    // ensure that the endpoint is dead.
                    if (endpoints == null || endpoints.size() == 0) {
                        passed = true;
                        break;
                    }
                } catch (Exception e) {
                    // Discovery may fail transiently while the service is being torn down; ignore and retry.
                }
                Thread.sleep(2000);
            }
            Assert.assertTrue(passed);
        }
    } finally {
        cleanup();
    }
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) ModelSubmission(org.apache.metron.maas.submit.ModelSubmission) MaaSConfig(org.apache.metron.maas.config.MaaSConfig) URL(java.net.URL) FileSystem(org.apache.hadoop.fs.FileSystem) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient) ServiceDiscoverer(org.apache.metron.maas.discovery.ServiceDiscoverer) Path(org.apache.hadoop.fs.Path) KeeperException(org.apache.zookeeper.KeeperException) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Model(org.apache.metron.maas.config.Model)
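
The polling loop in this test (wait until the first ApplicationReport exposes a real AM host or the application finishes) can be factored into a small helper. Below is a minimal sketch, not part of the Metron test: the class and method names (AppMasterWait, waitForAppMasterHost) and the explicit timeout parameter are hypothetical, and it uses only stock YarnClient calls.

import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;

public class AppMasterWait {

    /**
     * Polls the RM until the first application reports a real AM host or finishes.
     * Returns the report, or null if the timeout elapses first.
     */
    public static ApplicationReport waitForAppMasterHost(YarnClient yarnClient, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            List<ApplicationReport> apps = yarnClient.getApplications();
            if (!apps.isEmpty()) {
                ApplicationReport report = apps.get(0);
                // "N/A" is what the RM reports before the AM has registered its host.
                if (!"N/A".equals(report.getHost())
                        || report.getYarnApplicationState() == YarnApplicationState.FINISHED) {
                    return report;
                }
            }
            TimeUnit.MILLISECONDS.sleep(10);
        }
        return null;
    }
}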

Example 82 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project hive by apache.

the class WebHCatJTShim23 method getYarnChildJobs.

/**
 * Queries RM for the list of applications with the given tag that have started
 * after the given timestamp.
 */
private Set<ApplicationId> getYarnChildJobs(String tag, long timestamp) {
    Set<ApplicationId> childYarnJobs = new HashSet<ApplicationId>();
    LOG.info(String.format("Querying RM for tag = %s, starting with ts = %s", tag, timestamp));
    GetApplicationsRequest gar = GetApplicationsRequest.newInstance();
    gar.setScope(ApplicationsRequestScope.OWN);
    gar.setStartRange(timestamp, System.currentTimeMillis());
    gar.setApplicationTags(Collections.singleton(tag));
    try {
        ApplicationClientProtocol proxy = ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
        GetApplicationsResponse apps = proxy.getApplications(gar);
        List<ApplicationReport> appsList = apps.getApplicationList();
        for (ApplicationReport appReport : appsList) {
            childYarnJobs.add(appReport.getApplicationId());
        }
    } catch (IOException ioe) {
        throw new RuntimeException("Exception occurred while finding child jobs", ioe);
    } catch (YarnException ye) {
        throw new RuntimeException("Exception occurred while finding child jobs", ye);
    }
    return childYarnJobs;
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) GetApplicationsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse) IOException(java.io.IOException) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) ApplicationClientProtocol(org.apache.hadoop.yarn.api.ApplicationClientProtocol) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) HashSet(java.util.HashSet)
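
When the low-level ClientRMProxy/ApplicationClientProtocol plumbing is not required, roughly the same lookup can be done with the higher-level YarnClient API by filtering reports client-side. The sketch below is an assumption rather than Hive's code: the class and method names are invented, it fetches all applications visible to the caller instead of scoping the request to OWN, and it relies on ApplicationReport.getApplicationTags() and getStartTime() rather than a server-side filter.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;

public class ChildJobLookup {

    /** Finds applications carrying the given tag that started after the given timestamp. */
    public static Set<ApplicationId> getChildJobsViaYarnClient(YarnClient yarnClient, String tag, long timestamp) throws Exception {
        Set<ApplicationId> childYarnJobs = new HashSet<>();
        List<ApplicationReport> reports = yarnClient.getApplications();
        for (ApplicationReport report : reports) {
            // Filter client-side on tag and start time instead of building a GetApplicationsRequest.
            if (report.getApplicationTags().contains(tag) && report.getStartTime() > timestamp) {
                childYarnJobs.add(report.getApplicationId());
            }
        }
        return childYarnJobs;
    }
}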

Example 83 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project hive by apache.

the class LlapStatusServiceDriver method getAppReport.

private ApplicationReport getAppReport(String appName, SliderClient sliderClient, long timeoutMs) throws LlapStatusCliException {
    long startTime = clock.getTime();
    long timeoutTime = timeoutMs < 0 ? Long.MAX_VALUE : (startTime + timeoutMs);
    ApplicationReport appReport = null;
    while (appReport == null) {
        try {
            appReport = sliderClient.getYarnAppListClient().findInstance(appName);
            if (timeoutMs == 0) {
                // break immediately if timeout is 0
                break;
            }
            // Otherwise sleep, and try again.
            if (appReport == null) {
                long remainingTime = Math.min(timeoutTime - clock.getTime(), 500L);
                if (remainingTime > 0) {
                    Thread.sleep(remainingTime);
                } else {
                    break;
                }
            }
        } catch (Exception e) {
            // No point separating IOException vs YarnException vs others
            throw new LlapStatusCliException(ExitCode.YARN_ERROR, "Failed to get Yarn AppReport", e);
        }
    }
    return appReport;
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) URISyntaxException(java.net.URISyntaxException) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) SliderException(org.apache.slider.core.exceptions.SliderException) IOException(java.io.IOException)
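
The same poll-until-available pattern can be written against plain YarnClient when the ApplicationId is already known, since getApplicationReport throws ApplicationNotFoundException until the RM knows about the application. This is a minimal sketch with made-up names, not Hive's implementation.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;

public class ReportPoller {

    /** Polls for an ApplicationReport, returning null if the timeout elapses first. */
    public static ApplicationReport waitForReport(YarnClient yarnClient, ApplicationId appId, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            try {
                return yarnClient.getApplicationReport(appId);
            } catch (ApplicationNotFoundException e) {
                // The RM does not know the application yet; sleep and retry.
                Thread.sleep(500L);
            }
        }
        return null;
    }
}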

Example 84 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project hive by apache.

the class LlapStatusServiceDriver method run.

public int run(LlapStatusOptions options, long watchTimeoutMs) {
    appStatusBuilder = new AppStatusBuilder();
    try {
        if (appName == null) {
            // user provided configs
            for (Map.Entry<Object, Object> props : options.getConf().entrySet()) {
                conf.set((String) props.getKey(), (String) props.getValue());
            }
            appName = options.getName();
            if (StringUtils.isEmpty(appName)) {
                appName = HiveConf.getVar(conf, HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS);
                if (appName.startsWith("@") && appName.length() > 1) {
                    // This is a valid slider app name. Parse it out.
                    appName = appName.substring(1);
                } else {
                    // Invalid app name. Checked later.
                    appName = null;
                }
            }
            if (StringUtils.isEmpty(appName)) {
                String message = "Invalid app name. This must be setup via config or passed in as a parameter." + " This tool works with clusters deployed by Slider/YARN";
                LOG.info(message);
                return ExitCode.INCORRECT_USAGE.getInt();
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Using appName: {}", appName);
            }
            llapRegistryConf.set(HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + appName);
        }
        try {
            if (sliderClient == null) {
                sliderClient = LlapSliderUtils.createSliderClient(conf);
            }
        } catch (Exception e) {
            LlapStatusCliException le = new LlapStatusCliException(LlapStatusServiceDriver.ExitCode.SLIDER_CLIENT_ERROR_CREATE_FAILED, "Failed to create slider client", e);
            logError(le);
            return le.getExitCode().getInt();
        }
        // Get the App report from YARN
        ApplicationReport appReport;
        try {
            appReport = LlapSliderUtils.getAppReport(appName, sliderClient, options.getFindAppTimeoutMs());
        } catch (LlapStatusCliException e) {
            logError(e);
            return e.getExitCode().getInt();
        }
        // Process the report to decide whether to go to slider.
        ExitCode ret;
        try {
            ret = processAppReport(appReport, appStatusBuilder);
        } catch (LlapStatusCliException e) {
            logError(e);
            return e.getExitCode().getInt();
        }
        if (ret != ExitCode.SUCCESS) {
            return ret.getInt();
        } else if (NO_SLIDER_INFO_STATES.contains(appStatusBuilder.getState())) {
            return ExitCode.SUCCESS.getInt();
        } else {
            // Get information from slider.
            try {
                ret = populateAppStatusFromSliderStatus(appName, sliderClient, appStatusBuilder);
            } catch (LlapStatusCliException e) {
                // In case of failure, send back whatever has been constructed so far - which would be from the AppReport
                logError(e);
                return e.getExitCode().getInt();
            }
        }
        if (ret != ExitCode.SUCCESS) {
            return ret.getInt();
        } else {
            try {
                ret = populateAppStatusFromSliderDiagnostics(appName, sliderClient, appStatusBuilder);
            } catch (LlapStatusCliException e) {
                logError(e);
                return e.getExitCode().getInt();
            }
        }
        if (ret != ExitCode.SUCCESS) {
            return ret.getInt();
        } else {
            try {
                ret = populateAppStatusFromLlapRegistry(appStatusBuilder, watchTimeoutMs);
            } catch (LlapStatusCliException e) {
                logError(e);
                return e.getExitCode().getInt();
            }
        }
        return ret.getInt();
    } finally {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Final AppState: " + appStatusBuilder.toString());
        }
    }
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) AppStatusBuilder(org.apache.hadoop.hive.llap.cli.status.LlapStatusHelpers.AppStatusBuilder) Map(java.util.Map) URISyntaxException(java.net.URISyntaxException) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) SliderException(org.apache.slider.core.exceptions.SliderException) IOException(java.io.IOException)
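
The core of run() is turning an ApplicationReport into an overall status before deciding whether to query Slider at all. A stripped-down, hypothetical version of that translation (not the actual processAppReport implementation; the ReportTriage class and Status enum are invented) might look like this.

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;

public class ReportTriage {

    public enum Status { NOT_FOUND, RUNNING, SUCCEEDED, FAILED }

    /** Maps a (possibly null) ApplicationReport to a coarse status. */
    public static Status triage(ApplicationReport report) {
        if (report == null) {
            return Status.NOT_FOUND;
        }
        switch (report.getYarnApplicationState()) {
            case NEW:
            case NEW_SAVING:
            case SUBMITTED:
            case ACCEPTED:
            case RUNNING:
                return Status.RUNNING;
            case FINISHED:
                // For a finished app, the final status tells us whether it actually succeeded.
                return report.getFinalApplicationStatus() == FinalApplicationStatus.SUCCEEDED
                        ? Status.SUCCEEDED : Status.FAILED;
            default:
                // FAILED, KILLED
                return Status.FAILED;
        }
    }
}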

Example 85 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project hadoop by apache.

the class TestApplicationACLs method verifyInvalidQueueWithAcl.

private void verifyInvalidQueueWithAcl() throws Exception {
    isQueueUser = true;
    SubmitApplicationRequest submitRequest = recordFactory.newRecordInstance(SubmitApplicationRequest.class);
    ApplicationSubmissionContext context = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
    ApplicationId applicationId = rmClient.getNewApplication(recordFactory.newRecordInstance(GetNewApplicationRequest.class)).getApplicationId();
    context.setApplicationId(applicationId);
    Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>();
    ContainerLaunchContext amContainer = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    Resource resource = BuilderUtils.newResource(1024, 1);
    context.setResource(resource);
    amContainer.setApplicationACLs(acls);
    context.setQueue("InvalidQueue");
    context.setAMContainerSpec(amContainer);
    submitRequest.setApplicationSubmissionContext(context);
    rmClient.submitApplication(submitRequest);
    resourceManager.waitForState(applicationId, RMAppState.FAILED);
    final GetApplicationReportRequest appReportRequest = recordFactory.newRecordInstance(GetApplicationReportRequest.class);
    appReportRequest.setApplicationId(applicationId);
    GetApplicationReportResponse applicationReport = rmClient.getApplicationReport(appReportRequest);
    ApplicationReport appReport = applicationReport.getApplicationReport();
    Assert.assertTrue(appReport.getDiagnostics().contains("submitted by user owner to unknown queue: InvalidQueue"));
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) GetApplicationReportRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest) HashMap(java.util.HashMap) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) Resource(org.apache.hadoop.yarn.api.records.Resource) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) GetApplicationReportResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse) SubmitApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest)
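
Outside a protocol-record test harness, the same diagnostics lookup is a short call through YarnClient. This is a hedged sketch that assumes a started YarnClient and a known ApplicationId; the helper class and method names are invented.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;

public class DiagnosticsCheck {

    /** Fetches the diagnostics string the RM recorded for the given application. */
    public static String fetchDiagnostics(YarnClient yarnClient, ApplicationId appId) throws Exception {
        ApplicationReport report = yarnClient.getApplicationReport(appId);
        return report.getDiagnostics();
    }
}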

Aggregations

ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport) 143
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 65
Test (org.junit.Test) 52
IOException (java.io.IOException) 41
YarnException (org.apache.hadoop.yarn.exceptions.YarnException) 32
YarnApplicationState (org.apache.hadoop.yarn.api.records.YarnApplicationState) 29
YarnClient (org.apache.hadoop.yarn.client.api.YarnClient) 21
ApplicationNotFoundException (org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException) 19
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId) 18
ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) 16
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) 16
Configuration (org.apache.hadoop.conf.Configuration) 14
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation) 14
ArrayList (java.util.ArrayList) 13
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 13
GetApplicationReportRequest (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest) 11
GetApplicationsRequest (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) 11
GetApplicationReportResponse (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse) 10
ApplicationResourceUsageReport (org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport) 10
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext) 10