Use of org.apache.hadoop.yarn.ipc.YarnRPC in project hadoop by apache.
The class SharedCacheUploadService, method createSCMClient.
private SCMUploaderProtocol createSCMClient(Configuration conf) {
  YarnRPC rpc = YarnRPC.create(conf);
  InetSocketAddress scmAddress =
      conf.getSocketAddr(YarnConfiguration.SCM_UPLOADER_SERVER_ADDRESS,
          YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_ADDRESS,
          YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_PORT);
  return (SCMUploaderProtocol) rpc.getProxy(SCMUploaderProtocol.class,
      scmAddress, conf);
}
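A proxy obtained this way can be released through the same YarnRPC API once the uploader is finished with it. A minimal caller sketch, assuming the enclosing class keeps the Configuration around; the variable names and placeholder comment are illustrative, not from the source:

SCMUploaderProtocol scmClient = createSCMClient(conf);
try {
  // ... invoke upload-notification calls on scmClient ...
} finally {
  // stopProxy is part of the YarnRPC API; the real uploader may manage shutdown differently.
  YarnRPC.create(conf).stopProxy(scmClient, conf);
}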
Use of org.apache.hadoop.yarn.ipc.YarnRPC in project hadoop by apache.
The class TestPBLocalizerRPC, method testLocalizerRPC.
@Test
public void testLocalizerRPC() throws Exception {
  InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 8040);
  LocalizerService server = new LocalizerService(locAddr);
  try {
    server.start();
    Configuration conf = new Configuration();
    YarnRPC rpc = YarnRPC.create(conf);
    LocalizationProtocol client = (LocalizationProtocol) rpc.getProxy(
        LocalizationProtocol.class, locAddr, conf);
    LocalizerStatus status =
        recordFactory.newRecordInstance(LocalizerStatus.class);
    status.setLocalizerId("localizer0");
    LocalizerHeartbeatResponse response = client.heartbeat(status);
    assertEquals(dieHBResponse(), response);
  } finally {
    server.stop();
  }
  assertTrue(true);
}
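The LocalizerService used above is a small in-process test server. The server side of the same YarnRPC API looks roughly like the sketch below, assuming localizationHandler is an object implementing LocalizationProtocol; the null secret manager and single handler thread are illustrative choices:

// Hedged server-side sketch: expose LocalizationProtocol over YarnRPC.
YarnRPC rpc = YarnRPC.create(conf);
Server server = rpc.getServer(LocalizationProtocol.class, localizationHandler,
    locAddr, conf, null /* secret manager */, 1 /* handler threads */);
server.start();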
Use of org.apache.hadoop.yarn.ipc.YarnRPC in project hadoop by apache.
The class MRClientService, method serviceStart.
protected void serviceStart() throws Exception {
  Configuration conf = getConfig();
  YarnRPC rpc = YarnRPC.create(conf);
  InetSocketAddress address = new InetSocketAddress(0);
  server = rpc.getServer(MRClientProtocol.class, protocolHandler, address,
      conf, appContext.getClientToAMTokenSecretManager(),
      conf.getInt(MRJobConfig.MR_AM_JOB_CLIENT_THREAD_COUNT,
          MRJobConfig.DEFAULT_MR_AM_JOB_CLIENT_THREAD_COUNT),
      MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE);
  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
    refreshServiceAcls(conf, new MRAMPolicyProvider());
  }
  server.start();
  this.bindAddress = NetUtils.createSocketAddrForHost(
      appContext.getNMHostname(), server.getListenerAddress().getPort());
  LOG.info("Instantiated MRClientService at " + this.bindAddress);
  try {
    // Explicitly disabling SSL for map reduce task as we can't allow MR users
    // to gain access to keystore file for opening SSL listener. We can trust
    // RM/NM to issue SSL certificates but definitely not MR-AM as it is
    // running in user-land.
    webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws")
        .withHttpPolicy(conf, Policy.HTTP_ONLY)
        .withPortRange(conf, MRJobConfig.MR_AM_WEBAPP_PORT_RANGE)
        .start(new AMWebApp());
  } catch (Exception e) {
    LOG.error("Webapps failed to start. Ignoring for now:", e);
  }
  super.serviceStart();
}
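Only the startup path is shown above. A minimal shutdown sketch for the same fields, assuming serviceStop simply mirrors serviceStart (the actual MRClientService override may differ):

@Override
protected void serviceStop() throws Exception {
  if (server != null) {
    // stop the MRClientProtocol RPC server created in serviceStart
    server.stop();
  }
  if (webApp != null) {
    // stop the AM web application if it came up
    webApp.stop();
  }
  super.serviceStop();
}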
Use of org.apache.hadoop.yarn.ipc.YarnRPC in project hadoop by apache.
The class TestMRClientService, method testViewAclOnlyCannotModify.
@Test
public void testViewAclOnlyCannotModify() throws Exception {
  final MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
  final Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser");
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  app.waitForState(attempt, TaskAttemptState.RUNNING);
  UserGroupInformation viewOnlyUser =
      UserGroupInformation.createUserForTesting("viewonlyuser", new String[] {});
  Assert.assertTrue("viewonlyuser cannot view job",
      job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB));
  Assert.assertFalse("viewonlyuser can modify job",
      job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB));
  MRClientProtocol client = viewOnlyUser.doAs(
      new PrivilegedExceptionAction<MRClientProtocol>() {
        @Override
        public MRClientProtocol run() throws Exception {
          YarnRPC rpc = YarnRPC.create(conf);
          return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
              app.clientService.getBindAddress(), conf);
        }
      });
  KillJobRequest killJobRequest =
      recordFactory.newRecordInstance(KillJobRequest.class);
  killJobRequest.setJobId(app.getJobId());
  try {
    client.killJob(killJobRequest);
    fail("viewonlyuser killed job");
  } catch (AccessControlException e) {
    // pass
  }
  KillTaskRequest killTaskRequest =
      recordFactory.newRecordInstance(KillTaskRequest.class);
  killTaskRequest.setTaskId(task.getID());
  try {
    client.killTask(killTaskRequest);
    fail("viewonlyuser killed task");
  } catch (AccessControlException e) {
    // pass
  }
  KillTaskAttemptRequest killTaskAttemptRequest =
      recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
  killTaskAttemptRequest.setTaskAttemptId(attempt.getID());
  try {
    client.killTaskAttempt(killTaskAttemptRequest);
    fail("viewonlyuser killed task attempt");
  } catch (AccessControlException e) {
    // pass
  }
  FailTaskAttemptRequest failTaskAttemptRequest =
      recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
  failTaskAttemptRequest.setTaskAttemptId(attempt.getID());
  try {
    client.failTaskAttempt(failTaskAttemptRequest);
fail("viewonlyuser killed task attempt");
  } catch (AccessControlException e) {
    // pass
  }
}
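By contrast, a user listed in the job's modify ACL would be allowed through these same RPC calls. A one-line configuration sketch, with an illustrative user name that is not part of the test above:

// Hedged sketch: granting modify access via the job's modify ACL.
conf.set(MRJobConfig.JOB_ACL_MODIFY_JOB, "modifyuser");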
Use of org.apache.hadoop.yarn.ipc.YarnRPC in project hadoop by apache.
The class TestMRClientService, method test.
@Test
public void test() throws Exception {
  MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
  Configuration conf = new Configuration();
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  app.waitForState(attempt, TaskAttemptState.RUNNING);
  // send the diagnostic
  String diagnostic1 = "Diagnostic1";
  String diagnostic2 = "Diagnostic2";
  app.getContext().getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attempt.getID(), diagnostic1));
  // send the status update
  TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
  taskAttemptStatus.id = attempt.getID();
  taskAttemptStatus.progress = 0.5f;
  taskAttemptStatus.stateString = "RUNNING";
  taskAttemptStatus.taskState = TaskAttemptState.RUNNING;
  taskAttemptStatus.phase = Phase.MAP;
  app.getContext().getEventHandler().handle(
      new TaskAttemptStatusUpdateEvent(attempt.getID(), taskAttemptStatus));
  // verify that all objects are fully populated by invoking RPCs.
  YarnRPC rpc = YarnRPC.create(conf);
  MRClientProtocol proxy = (MRClientProtocol) rpc.getProxy(
      MRClientProtocol.class, app.clientService.getBindAddress(), conf);
  GetCountersRequest gcRequest =
      recordFactory.newRecordInstance(GetCountersRequest.class);
  gcRequest.setJobId(job.getID());
  Assert.assertNotNull("Counters is null",
      proxy.getCounters(gcRequest).getCounters());
  GetJobReportRequest gjrRequest =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  gjrRequest.setJobId(job.getID());
  JobReport jr = proxy.getJobReport(gjrRequest).getJobReport();
  verifyJobReport(jr);
  GetTaskAttemptCompletionEventsRequest gtaceRequest =
      recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
  gtaceRequest.setJobId(job.getID());
  gtaceRequest.setFromEventId(0);
  gtaceRequest.setMaxEvents(10);
  Assert.assertNotNull("TaskCompletionEvents is null",
      proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList());
  GetDiagnosticsRequest gdRequest =
      recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
  gdRequest.setTaskAttemptId(attempt.getID());
  Assert.assertNotNull("Diagnostics is null",
      proxy.getDiagnostics(gdRequest).getDiagnosticsList());
  GetTaskAttemptReportRequest gtarRequest =
      recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
  gtarRequest.setTaskAttemptId(attempt.getID());
  TaskAttemptReport tar =
      proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport();
  verifyTaskAttemptReport(tar);
  GetTaskReportRequest gtrRequest =
      recordFactory.newRecordInstance(GetTaskReportRequest.class);
  gtrRequest.setTaskId(task.getID());
  Assert.assertNotNull("TaskReport is null",
      proxy.getTaskReport(gtrRequest).getTaskReport());
  GetTaskReportsRequest gtreportsRequest =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  gtreportsRequest.setJobId(job.getID());
  gtreportsRequest.setTaskType(TaskType.MAP);
  Assert.assertNotNull("TaskReports for map is null",
      proxy.getTaskReports(gtreportsRequest).getTaskReportList());
  gtreportsRequest = recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  gtreportsRequest.setJobId(job.getID());
  gtreportsRequest.setTaskType(TaskType.REDUCE);
  Assert.assertNotNull("TaskReports for reduce is null",
      proxy.getTaskReports(gtreportsRequest).getTaskReportList());
  List<String> diag = proxy.getDiagnostics(gdRequest).getDiagnosticsList();
  Assert.assertEquals("Num diagnostics not correct", 1, diag.size());
  Assert.assertEquals("Diag 1 not correct", diagnostic1, diag.get(0).toString());
  TaskReport taskReport = proxy.getTaskReport(gtrRequest).getTaskReport();
  Assert.assertEquals("Num diagnostics not correct", 1,
      taskReport.getDiagnosticsCount());
  // send the done signal to the task
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(
      task.getAttempts().values().iterator().next().getID(),
      TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
  // For invalid jobid, throw IOException
  gtreportsRequest = recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  gtreportsRequest.setJobId(
      TypeConverter.toYarn(JobID.forName("job_1415730144495_0001")));
  gtreportsRequest.setTaskType(TaskType.REDUCE);
  try {
    proxy.getTaskReports(gtreportsRequest);
    fail("IOException not thrown for invalid job id");
  } catch (IOException e) {
    // Expected
  }
}
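The test relies on MRApp teardown for cleanup; if the proxy were released explicitly, a sketch using the same YarnRPC handle would be:

// Hedged sketch: explicit release is not required by the test itself.
rpc.stopProxy(proxy, conf);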