Use of org.apache.hadoop.mapreduce.Cluster in project hadoop by apache.
The class TestCLI, method testGetJobWithRetry.
@Test
public void testGetJobWithRetry() throws Exception {
  Configuration conf = new Configuration();
  // allow one retry, so a first null result from the cluster is re-requested
  conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 1);
  final Cluster mockCluster = mock(Cluster.class);
  final Job mockJob = Job.getInstance(conf);
  // first call returns null; the retry returns the job
  when(mockCluster.getJob(any(JobID.class)))
      .thenReturn(null).thenReturn(mockJob);
  CLI cli = new CLI(conf);
  cli.cluster = mockCluster;
  Job job = cli.getJob(JobID.forName("job_1234654654_001"));
  Assert.assertNotNull("job is null", job);
}
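For context, the property exercised above is ordinary client configuration; the retry loop lives in client-side wrappers such as CLI.getJob, not in Cluster.getJob itself. A minimal sketch of tuning both retry knobs before fetching a job (fetchJobWithRetries is a hypothetical helper; the values and job ID are illustrative):

static Job fetchJobWithRetries(Configuration conf, String jobIdStr)
    throws IOException, InterruptedException {
  // hypothetical helper; both constants exist on MRJobConfig, values are illustrative
  conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 3);         // extra attempts
  conf.setLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL, 2000L); // ms between attempts
  Cluster cluster = new Cluster(conf);
  try {
    // a wrapper such as CLI.getJob would retry when this returns null
    return cluster.getJob(JobID.forName(jobIdStr));
  } finally {
    cluster.close();
  }
}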
Use of org.apache.hadoop.mapreduce.Cluster in project hadoop by apache.
The class JobClient, method submitJobInternal.
@InterfaceAudience.Private
public RunningJob submitJobInternal(final JobConf conf)
    throws FileNotFoundException, IOException {
  try {
    // default to the old API unless the caller has opted in to the new one
    conf.setBooleanIfUnset("mapred.mapper.new-api", false);
    conf.setBooleanIfUnset("mapred.reducer.new-api", false);
    Job job = clientUgi.doAs(new PrivilegedExceptionAction<Job>() {
      @Override
      public Job run() throws IOException, ClassNotFoundException,
          InterruptedException {
        Job job = Job.getInstance(conf);
        job.submit();
        return job;
      }
    });
    Cluster prev = cluster;
    // update our Cluster instance with the one created by Job for submission
    // (we can't pass our Cluster instance to Job, since Job wraps the config
    // instance, and the two configs would then diverge)
    cluster = job.getCluster();
    // close the previous instance to clean up its resources
    if (prev != null) {
      prev.close();
    }
    return new NetworkedJob(job);
  } catch (InterruptedException ie) {
    throw new IOException("interrupted", ie);
  }
}
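The doAs block above is just the standard new-API submission path. A minimal standalone sketch of that path; ExampleDriver, ExampleMapper, ExampleReducer, and the paths are illustrative placeholders:

// A minimal sketch of the new-API submission that submitJobInternal() delegates to.
Job job = Job.getInstance(conf, "example");
job.setJarByClass(ExampleDriver.class);   // illustrative driver class
job.setMapperClass(ExampleMapper.class);  // illustrative mapper
job.setReducerClass(ExampleReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, new Path("/in"));
FileOutputFormat.setOutputPath(job, new Path("/out"));
job.submit(); // non-blocking; the Job's internal Cluster is what the code above adopts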
Use of org.apache.hadoop.mapreduce.Cluster in project hadoop by apache.
The class TestExternalCall, method testCleanup.
/**
 * Tests the run and execute paths of the DistCp class with a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();
  // resolve and create the job-submission staging directory
  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };
  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
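Since DistCp implements Tool, the same copy can also be driven through ToolRunner, which additionally parses generic options such as -D overrides. A minimal sketch with illustrative paths:

// A minimal sketch; conf is an existing Configuration and the paths are illustrative.
int exitCode = ToolRunner.run(new DistCp(conf, null),
    new String[] { "/src/tmp.txt", "/dst/target.txt" });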
Use of org.apache.hadoop.mapreduce.Cluster in project hadoop by apache.
The class TestIntegration, method testCleanup.
@Test(timeout = 100000)
public void testCleanup() {
  try {
    // a source with an unresolvable scheme forces the copy to fail early
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);
    DistCpOptions options = new DistCpOptions(sources, target);
    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);
    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      // the failed run must leave the staging directory empty
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
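Both cleanup tests resolve the per-user staging directory through JobSubmissionFiles.getStagingDir, which takes a Cluster handle. A minimal sketch of inspecting that directory after a run, assuming an existing Configuration conf:

// A minimal sketch; getStagingDir may create the directory if it is missing.
Cluster cluster = new Cluster(conf);
try {
  Path stagingDir = JobSubmissionFiles.getStagingDir(cluster, conf);
  FileSystem fs = stagingDir.getFileSystem(conf);
  // after a failed DistCp the staging directory should hold no leftover job dirs
  int leftovers = fs.listStatus(stagingDir).length;
  System.out.println("staging entries: " + leftovers);
} finally {
  cluster.close();
}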
Use of org.apache.hadoop.mapreduce.Cluster in project hbase by apache.
The class MapReduceBackupCopyJob, method cancel.
@Override
public void cancel(String jobId) throws IOException {
  JobID id = JobID.forName(jobId);
  Cluster cluster = new Cluster(this.getConf());
  try {
    Job job = cluster.getJob(id);
    if (job == null) {
      LOG.error("No job found for " + id);
      // should we throw an exception?
      return;
    }
    if (job.isComplete() || job.isRetired()) {
      return;
    }
    job.killJob();
    LOG.debug("Killed copy job " + id);
  } catch (InterruptedException e) {
    throw new IOException(e);
  } finally {
    // release the Cluster's RPC resources
    cluster.close();
  }
}
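When only a predicate rather than a full job ID is known, the same Cluster handle can enumerate jobs. A minimal sketch that kills every job still running, assuming an existing Configuration conf:

// A minimal sketch; getAllJobStatuses and isJobComplete are part of the Cluster/JobStatus API.
Cluster cluster = new Cluster(conf);
try {
  for (JobStatus status : cluster.getAllJobStatuses()) {
    if (!status.isJobComplete()) {
      Job job = cluster.getJob(status.getJobID());
      if (job != null) {
        job.killJob();
      }
    }
  }
} catch (InterruptedException e) {
  throw new IOException(e);
} finally {
  cluster.close();
}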