Example use of org.apache.hadoop.mapreduce.protocol.ClientProtocol in the Apache Hadoop project: the initialize method of the Cluster class.
/**
 * Selects the first {@link ClientProtocolProvider} able to create a working
 * {@link ClientProtocol} for the configured framework, caching both the
 * provider and the protocol on this Cluster.
 *
 * @param jobTrackAddr optional job tracker address; when {@code null} each
 *     provider is asked to create a protocol from the configuration alone
 * @param conf configuration consulted by the providers
 * @throws IOException if no provider produced a usable protocol; the thrown
 *     exception carries every provider failure as a suppressed exception
 */
private void initialize(InetSocketAddress jobTrackAddr, Configuration conf)
    throws IOException {
  initProviderList();
  // Fixed message grammar: "correspond" -> "corresponding".
  final IOException initEx = new IOException(
      "Cannot initialize Cluster. Please check your configuration for "
          + MRConfig.FRAMEWORK_NAME
          + " and the corresponding server addresses.");
  if (jobTrackAddr != null) {
    LOG.info("Initializing cluster for Job Tracker=" + jobTrackAddr.toString());
  }
  for (ClientProtocolProvider provider : providerList) {
    LOG.debug("Trying ClientProtocolProvider : " + provider.getClass().getName());
    ClientProtocol clientProtocol = null;
    try {
      if (jobTrackAddr == null) {
        clientProtocol = provider.create(conf);
      } else {
        clientProtocol = provider.create(jobTrackAddr, conf);
      }
      if (clientProtocol != null) {
        // First provider returning a non-null protocol wins.
        clientProtocolProvider = provider;
        client = clientProtocol;
        LOG.debug("Picked " + provider.getClass().getName()
            + " as the ClientProtocolProvider");
        break;
      } else {
        LOG.debug("Cannot pick " + provider.getClass().getName()
            + " as the ClientProtocolProvider - returned null protocol");
      }
    } catch (Exception e) {
      // Keep trying the remaining providers; record this failure as a
      // suppressed exception so nothing is silently swallowed.
      final String errMsg = "Failed to use " + provider.getClass().getName()
          + " due to error: ";
      initEx.addSuppressed(new IOException(errMsg, e));
      LOG.info(errMsg, e);
    }
  }
  if (null == clientProtocolProvider || null == client) {
    throw initEx;
  }
}
Example use of org.apache.hadoop.mapreduce.protocol.ClientProtocol in the Apache Hadoop project: the setUp method of the TestJobMonitorAndPrint class.
/**
 * Wires a mocked {@link Cluster} to a mocked {@link ClientProtocol}, then
 * wraps a RUNNING job in a Mockito spy for the monitor/print tests.
 */
@Before
public void setUp() throws IOException {
  conf = new Configuration();
  clientProtocol = mock(ClientProtocol.class);
  Cluster mockedCluster = mock(Cluster.class);
  when(mockedCluster.getConf()).thenReturn(conf);
  when(mockedCluster.getClient()).thenReturn(clientProtocol);
  JobStatus runningStatus = new JobStatus(new JobID("job_000", 1),
      0f, 0f, 0f, 0f, State.RUNNING, JobPriority.HIGH,
      "tmp-user", "tmp-jobname", "tmp-jobfile", "tmp-url");
  job = spy(Job.getInstance(mockedCluster, runningStatus, conf));
}
Example use of org.apache.hadoop.mapreduce.protocol.ClientProtocol in the Apache Ignite project: the tstUnknownJobCounters method of the HadoopClientProtocolSelfTest class.
/**
* Tests job counters retrieval for unknown job id.
*
* @throws Exception If failed.
*/
/**
 * Tests job counters retrieval for unknown job id.
 *
 * @throws Exception If failed.
 */
private void tstUnknownJobCounters() throws Exception {
  ClientProtocol proto = provider().create(config(HadoopAbstractSelfTest.REST_PORT));
  // A random identifier with task number -1 cannot match any submitted job.
  JobID unknownId = new JobID(UUID.randomUUID().toString(), -1);
  try {
    proto.getJobCounters(unknownId);
    fail("exception must be thrown");
  } catch (Exception e) {
    assert e instanceof IOException : "wrong error has been thrown";
  }
}
Example use of org.apache.hadoop.mapreduce.protocol.ClientProtocol in the Apache Ignite project: the tstNextJobId method of the HadoopClientProtocolSelfTest class.
/**
* Test next job ID generation.
*
* @throws Exception If failed.
*/
/**
 * Test next job ID generation.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
private void tstNextJobId() throws Exception {
  IgniteHadoopClientProtocolProvider provider = provider();
  ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));
  // Two consecutive IDs must both be fully populated and distinct.
  JobID first = proto.getNewJobID();
  assert first != null;
  assert first.getJtIdentifier() != null;
  JobID second = proto.getNewJobID();
  assert second != null;
  assert second.getJtIdentifier() != null;
  assert !F.eq(first, second);
}
Example use of org.apache.hadoop.mapreduce.protocol.ClientProtocol in the Apache Hadoop project: the testJobToString method of the TestJob class.
/** Verifies that a FAILED job still renders a non-null string form. */
@Test
public void testJobToString() throws IOException, InterruptedException {
  ClientProtocol protocol = mock(ClientProtocol.class);
  Cluster mockedCluster = mock(Cluster.class);
  when(mockedCluster.getClient()).thenReturn(protocol);
  JobID jobid = new JobID("1014873536921", 6);
  JobStatus failedStatus = new JobStatus(jobid, 0.0f, 0.0f, 0.0f, 0.0f,
      State.FAILED, JobPriority.DEFAULT, "root", "TestJobToString",
      "job file", "tracking url");
  // toString() consults status, task reports and completion events; stub all.
  when(protocol.getJobStatus(jobid)).thenReturn(failedStatus);
  when(protocol.getTaskReports(jobid, TaskType.MAP))
      .thenReturn(new TaskReport[0]);
  when(protocol.getTaskReports(jobid, TaskType.REDUCE))
      .thenReturn(new TaskReport[0]);
  when(protocol.getTaskCompletionEvents(jobid, 0, 10))
      .thenReturn(new TaskCompletionEvent[0]);
  Job job = Job.getInstance(mockedCluster, failedStatus, new JobConf());
  Assert.assertNotNull(job.toString());
}
Aggregations