Example 46 with YarnRPC

Use of org.apache.hadoop.yarn.ipc.YarnRPC in the Apache hadoop project.

From the class SharedCacheUploadService, method createSCMClient:

private SCMUploaderProtocol createSCMClient(Configuration conf) {
    YarnRPC rpc = YarnRPC.create(conf);
    InetSocketAddress scmAddress = conf.getSocketAddr(
        YarnConfiguration.SCM_UPLOADER_SERVER_ADDRESS,
        YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_ADDRESS,
        YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_PORT);
    return (SCMUploaderProtocol) rpc.getProxy(SCMUploaderProtocol.class, scmAddress, conf);
}
Also used: InetSocketAddress (java.net.InetSocketAddress), YarnRPC (org.apache.hadoop.yarn.ipc.YarnRPC), SCMUploaderProtocol (org.apache.hadoop.yarn.server.api.SCMUploaderProtocol)
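
A hedged sketch of the rest of the proxy lifecycle, which the method above leaves to its caller: proxies obtained from YarnRPC.getProxy should eventually be released via YarnRPC.stopProxy. The ScmClientLifecycle wrapper class and the elided protocol calls are illustrative assumptions, not part of the Hadoop source.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.api.SCMUploaderProtocol;

public class ScmClientLifecycle {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        YarnRPC rpc = YarnRPC.create(conf);
        InetSocketAddress scmAddress = conf.getSocketAddr(
            YarnConfiguration.SCM_UPLOADER_SERVER_ADDRESS,
            YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_ADDRESS,
            YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_PORT);
        SCMUploaderProtocol scmClient = (SCMUploaderProtocol)
            rpc.getProxy(SCMUploaderProtocol.class, scmAddress, conf);
        try {
            // ... issue notify()/canUpload() calls against scmClient here ...
        } finally {
            // Release the underlying RPC connection when the client is done.
            rpc.stopProxy(scmClient, conf);
        }
    }
}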

Example 47 with YarnRPC

Use of org.apache.hadoop.yarn.ipc.YarnRPC in the Apache hadoop project.

From the class TestPBLocalizerRPC, method testLocalizerRPC:

@Test
public void testLocalizerRPC() throws Exception {
    InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 8040);
    LocalizerService server = new LocalizerService(locAddr);
    try {
        server.start();
        Configuration conf = new Configuration();
        YarnRPC rpc = YarnRPC.create(conf);
        LocalizationProtocol client = (LocalizationProtocol) rpc.getProxy(LocalizationProtocol.class, locAddr, conf);
        LocalizerStatus status = recordFactory.newRecordInstance(LocalizerStatus.class);
        status.setLocalizerId("localizer0");
        LocalizerHeartbeatResponse response = client.heartbeat(status);
        assertEquals(dieHBResponse(), response);
    } finally {
        server.stop();
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), LocalizationProtocol (org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol), InetSocketAddress (java.net.InetSocketAddress), LocalizerHeartbeatResponse (org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse), YarnRPC (org.apache.hadoop.yarn.ipc.YarnRPC), LocalizerStatus (org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus), Test (org.junit.Test)
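
LocalizerService and dieHBResponse() are helpers defined elsewhere in TestPBLocalizerRPC. For orientation, a hedged sketch of what the server half of such a client/server pairing typically looks like; the single handler thread and the null secret manager are illustrative defaults, not values taken from the test.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;

public final class LocalizerServerSketch {
    // Stand up a LocalizationProtocol server that a YarnRPC proxy can dial.
    public static Server startServer(LocalizationProtocol impl,
            InetSocketAddress bindAddr, Configuration conf) {
        YarnRPC rpc = YarnRPC.create(conf);
        // A null secret manager (no token auth) and one handler thread are
        // test-sized choices; production servers tune both.
        Server server = rpc.getServer(LocalizationProtocol.class, impl,
            bindAddr, conf, null, 1);
        server.start();
        return server;
    }
}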

Example 48 with YarnRPC

Use of org.apache.hadoop.yarn.ipc.YarnRPC in the Apache hadoop project.

From the class MRClientService, method serviceStart:

protected void serviceStart() throws Exception {
    Configuration conf = getConfig();
    YarnRPC rpc = YarnRPC.create(conf);
    InetSocketAddress address = new InetSocketAddress(0);
    server = rpc.getServer(MRClientProtocol.class, protocolHandler, address,
        conf, appContext.getClientToAMTokenSecretManager(),
        conf.getInt(MRJobConfig.MR_AM_JOB_CLIENT_THREAD_COUNT,
            MRJobConfig.DEFAULT_MR_AM_JOB_CLIENT_THREAD_COUNT),
        MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE);
    // Enable service authorization?
    if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
        refreshServiceAcls(conf, new MRAMPolicyProvider());
    }
    server.start();
    this.bindAddress = NetUtils.createSocketAddrForHost(appContext.getNMHostname(), server.getListenerAddress().getPort());
    LOG.info("Instantiated MRClientService at " + this.bindAddress);
    try {
        // Explicitly disabling SSL for map reduce task as we can't allow MR users
        // to gain access to keystore file for opening SSL listener. We can trust
        // RM/NM to issue SSL certificates but definitely not MR-AM as it is
        // running in user-land.
        webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws")
            .withHttpPolicy(conf, Policy.HTTP_ONLY)
            .withPortRange(conf, MRJobConfig.MR_AM_WEBAPP_PORT_RANGE)
            .start(new AMWebApp());
    } catch (Exception e) {
        LOG.error("Webapps failed to start. Ignoring for now:", e);
    }
    super.serviceStart();
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), AMWebApp (org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp), MRAMPolicyProvider (org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider), YarnRPC (org.apache.hadoop.yarn.ipc.YarnRPC), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), IOException (java.io.IOException), AccessControlException (org.apache.hadoop.security.AccessControlException), MRClientProtocol (org.apache.hadoop.mapreduce.v2.api.MRClientProtocol)
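
Two details above are easy to miss: the server is bound to new InetSocketAddress(0), so the OS assigns a free port, and the advertised bindAddress is then rebuilt from the started server's listener address. A minimal sketch of that ephemeral-port pattern, with the wrapper class added for illustration:

import java.net.InetSocketAddress;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;

public final class EphemeralPortSketch {
    // Binding to port 0 defers port selection to the OS; once the server
    // has started, its listener address is the only authoritative source
    // for the port actually chosen.
    public static InetSocketAddress advertisedAddress(Server server,
            String hostname) {
        int actualPort = server.getListenerAddress().getPort();
        return NetUtils.createSocketAddrForHost(hostname, actualPort);
    }
}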

Example 49 with YarnRPC

Use of org.apache.hadoop.yarn.ipc.YarnRPC in the Apache hadoop project.

From the class TestMRClientService, method testViewAclOnlyCannotModify:

@Test
public void testViewAclOnlyCannotModify() throws Exception {
    final MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
    final Configuration conf = new Configuration();
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
    conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser");
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task task = it.next();
    app.waitForState(task, TaskState.RUNNING);
    TaskAttempt attempt = task.getAttempts().values().iterator().next();
    app.waitForState(attempt, TaskAttemptState.RUNNING);
    UserGroupInformation viewOnlyUser = UserGroupInformation.createUserForTesting("viewonlyuser", new String[] {});
    Assert.assertTrue("viewonlyuser cannot view job", job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB));
    Assert.assertFalse("viewonlyuser can modify job", job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB));
    MRClientProtocol client = viewOnlyUser.doAs(new PrivilegedExceptionAction<MRClientProtocol>() {

        @Override
        public MRClientProtocol run() throws Exception {
            YarnRPC rpc = YarnRPC.create(conf);
            return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, app.clientService.getBindAddress(), conf);
        }
    });
    KillJobRequest killJobRequest = recordFactory.newRecordInstance(KillJobRequest.class);
    killJobRequest.setJobId(app.getJobId());
    try {
        client.killJob(killJobRequest);
        fail("viewonlyuser killed job");
    } catch (AccessControlException e) {
        // pass
    }
    KillTaskRequest killTaskRequest = recordFactory.newRecordInstance(KillTaskRequest.class);
    killTaskRequest.setTaskId(task.getID());
    try {
        client.killTask(killTaskRequest);
        fail("viewonlyuser killed task");
    } catch (AccessControlException e) {
        // pass
    }
    KillTaskAttemptRequest killTaskAttemptRequest = recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
    killTaskAttemptRequest.setTaskAttemptId(attempt.getID());
    try {
        client.killTaskAttempt(killTaskAttemptRequest);
        fail("viewonlyuser killed task attempt");
    } catch (AccessControlException e) {
        // pass
    }
    FailTaskAttemptRequest failTaskAttemptRequest = recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
    failTaskAttemptRequest.setTaskAttemptId(attempt.getID());
    try {
        client.failTaskAttempt(failTaskAttemptRequest);
        fail("viewonlyuser killed task attempt");
    } catch (AccessControlException e) {
        // pass
    }
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), FailTaskAttemptRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest), AccessControlException (org.apache.hadoop.security.AccessControlException), YarnRPC (org.apache.hadoop.yarn.ipc.YarnRPC), IOException (java.io.IOException), MRClientProtocol (org.apache.hadoop.mapreduce.v2.api.MRClientProtocol), KillTaskAttemptRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest), KillJobRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest), KillTaskRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
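
The anonymous PrivilegedExceptionAction above is the pre-Java-8 form; on Java 8+ the same proxy acquisition reads more compactly as a lambda. A hedged sketch, with the helper method and parameter names invented for illustration:

import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.ipc.YarnRPC;

public final class ProxyAsUserSketch {
    // Acquire an MRClientProtocol proxy while running as `user`; the cast
    // selects the PrivilegedExceptionAction overload of doAs.
    static MRClientProtocol proxyAs(UserGroupInformation user,
            InetSocketAddress bindAddress, Configuration conf)
            throws Exception {
        return user.doAs(
            (PrivilegedExceptionAction<MRClientProtocol>) () -> {
                YarnRPC rpc = YarnRPC.create(conf);
                return (MRClientProtocol) rpc.getProxy(
                    MRClientProtocol.class, bindAddress, conf);
            });
    }
}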

Example 50 with YarnRPC

Use of org.apache.hadoop.yarn.ipc.YarnRPC in the Apache hadoop project.

From the class TestMRClientService, method test:

@Test
public void test() throws Exception {
    MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
    Configuration conf = new Configuration();
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task task = it.next();
    app.waitForState(task, TaskState.RUNNING);
    TaskAttempt attempt = task.getAttempts().values().iterator().next();
    app.waitForState(attempt, TaskAttemptState.RUNNING);
    // send the diagnostic
    String diagnostic1 = "Diagnostic1";
    String diagnostic2 = "Diagnostic2";
    app.getContext().getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(attempt.getID(), diagnostic1));
    // build the status update
    TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
    taskAttemptStatus.id = attempt.getID();
    taskAttemptStatus.progress = 0.5f;
    taskAttemptStatus.stateString = "RUNNING";
    taskAttemptStatus.taskState = TaskAttemptState.RUNNING;
    taskAttemptStatus.phase = Phase.MAP;
    // send the status update
    app.getContext().getEventHandler().handle(new TaskAttemptStatusUpdateEvent(attempt.getID(), taskAttemptStatus));
    // verify that all objects are fully populated by invoking RPCs.
    YarnRPC rpc = YarnRPC.create(conf);
    MRClientProtocol proxy = (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, app.clientService.getBindAddress(), conf);
    GetCountersRequest gcRequest = recordFactory.newRecordInstance(GetCountersRequest.class);
    gcRequest.setJobId(job.getID());
    Assert.assertNotNull("Counters is null", proxy.getCounters(gcRequest).getCounters());
    GetJobReportRequest gjrRequest = recordFactory.newRecordInstance(GetJobReportRequest.class);
    gjrRequest.setJobId(job.getID());
    JobReport jr = proxy.getJobReport(gjrRequest).getJobReport();
    verifyJobReport(jr);
    GetTaskAttemptCompletionEventsRequest gtaceRequest = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
    gtaceRequest.setJobId(job.getID());
    gtaceRequest.setFromEventId(0);
    gtaceRequest.setMaxEvents(10);
    Assert.assertNotNull("TaskCompletionEvents is null", proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList());
    GetDiagnosticsRequest gdRequest = recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
    gdRequest.setTaskAttemptId(attempt.getID());
    Assert.assertNotNull("Diagnostics is null", proxy.getDiagnostics(gdRequest).getDiagnosticsList());
    GetTaskAttemptReportRequest gtarRequest = recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
    gtarRequest.setTaskAttemptId(attempt.getID());
    TaskAttemptReport tar = proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport();
    verifyTaskAttemptReport(tar);
    GetTaskReportRequest gtrRequest = recordFactory.newRecordInstance(GetTaskReportRequest.class);
    gtrRequest.setTaskId(task.getID());
    Assert.assertNotNull("TaskReport is null", proxy.getTaskReport(gtrRequest).getTaskReport());
    GetTaskReportsRequest gtreportsRequest = recordFactory.newRecordInstance(GetTaskReportsRequest.class);
    gtreportsRequest.setJobId(job.getID());
    gtreportsRequest.setTaskType(TaskType.MAP);
    Assert.assertNotNull("TaskReports for map is null", proxy.getTaskReports(gtreportsRequest).getTaskReportList());
    gtreportsRequest = recordFactory.newRecordInstance(GetTaskReportsRequest.class);
    gtreportsRequest.setJobId(job.getID());
    gtreportsRequest.setTaskType(TaskType.REDUCE);
    Assert.assertNotNull("TaskReports for reduce is null", proxy.getTaskReports(gtreportsRequest).getTaskReportList());
    List<String> diag = proxy.getDiagnostics(gdRequest).getDiagnosticsList();
    Assert.assertEquals("Num diagnostics not correct", 1, diag.size());
    Assert.assertEquals("Diag 1 not correct", diagnostic1, diag.get(0).toString());
    TaskReport taskReport = proxy.getTaskReport(gtrRequest).getTaskReport();
    Assert.assertEquals("Num diagnostics not correct", 1, taskReport.getDiagnosticsCount());
    // send the done signal to the task
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(task.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    // For invalid jobid, throw IOException
    gtreportsRequest = recordFactory.newRecordInstance(GetTaskReportsRequest.class);
    gtreportsRequest.setJobId(TypeConverter.toYarn(JobID.forName("job_1415730144495_0001")));
    gtreportsRequest.setTaskType(TaskType.REDUCE);
    try {
        proxy.getTaskReports(gtreportsRequest);
        fail("IOException not thrown for invalid job id");
    } catch (IOException e) {
        // Expected
    }
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), GetTaskAttemptReportRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest), MRClientProtocol (org.apache.hadoop.mapreduce.v2.api.MRClientProtocol), JobReport (org.apache.hadoop.mapreduce.v2.api.records.JobReport), GetTaskAttemptCompletionEventsRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest), GetTaskReportRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), GetDiagnosticsRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), GetCountersRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest), TaskReport (org.apache.hadoop.mapreduce.v2.api.records.TaskReport), TaskAttemptDiagnosticsUpdateEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent), TaskAttemptStatus (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), YarnRPC (org.apache.hadoop.yarn.ipc.YarnRPC), IOException (java.io.IOException), GetJobReportRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest), TaskAttemptStatusUpdateEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent), TaskAttemptReport (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport), GetTaskReportsRequest (org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest), Test (org.junit.Test)
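
Examples 47, 49, and 50 all call recordFactory.newRecordInstance(...) on a field the snippets never declare. In Hadoop test code that field is customarily obtained from RecordFactoryProvider; a sketch of the typical declaration, with the holder class added for illustration:

import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class RecordFactoryHolder {
    // Passing null makes the provider fall back to the default
    // configuration when choosing the record factory implementation.
    protected static final RecordFactory recordFactory =
        RecordFactoryProvider.getRecordFactory(null);
}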

Aggregations

Types co-occurring with YarnRPC across the matched sources, with usage counts:

YarnRPC (org.apache.hadoop.yarn.ipc.YarnRPC): 56 usages
Configuration (org.apache.hadoop.conf.Configuration): 39 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 38 usages
InetSocketAddress (java.net.InetSocketAddress): 34 usages
Test (org.junit.Test): 18 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 15 usages
IOException (java.io.IOException): 14 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 12 usages
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 12 usages
MRClientProtocol (org.apache.hadoop.mapreduce.v2.api.MRClientProtocol): 8 usages
ApplicationClientProtocol (org.apache.hadoop.yarn.api.ApplicationClientProtocol): 8 usages
ContainerManagementProtocol (org.apache.hadoop.yarn.api.ContainerManagementProtocol): 8 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 8 usages
HashMap (java.util.HashMap): 7 usages
Server (org.apache.hadoop.ipc.Server): 7 usages
ApplicationMasterProtocol (org.apache.hadoop.yarn.api.ApplicationMasterProtocol): 7 usages
RegisterApplicationMasterRequest (org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest): 7 usages
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 7 usages
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 7 usages
RMAppAttempt (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt): 7 usages