Use of org.apache.ignite.IgniteCheckedException in the Apache Ignite project:
class HadoopV2Job, method initialize.
/**
 * {@inheritDoc}
 */
@Override
public void initialize(final boolean external, final UUID locNodeId) throws IgniteCheckedException {
    assert locNodeId != null;

    this.locNodeId = locNodeId;

    // Run the whole initialization with this job's class loader as context loader,
    // restoring the previous one afterwards.
    ClassLoader prevLdr = HadoopCommonUtils.setContextClassLoader(getClass().getClassLoader());

    try {
        if (jobInfo.credentials() != null) {
            // Credentials present: prepare the job environment on behalf of the submitting user.
            UserGroupInformation user = HadoopUtils.createUGI(jobInfo.user(), jobInfo.credentials());

            try {
                user.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override public Void run() throws Exception {
                        rsrcMgr.prepareJobEnvironment(!external, jobLocalDir(igniteWorkDirectory(), locNodeId, jobId));

                        return null;
                    }
                });
            }
            catch (IOException | InterruptedException e) {
                throw new IgniteCheckedException(e);
            }
        }
        else
            rsrcMgr.prepareJobEnvironment(!external, jobLocalDir(igniteWorkDirectory(), locNodeId, jobId));

        if (HadoopJobProperty.get(jobInfo, JOB_SHARED_CLASSLOADER, true)) {
            // A shared class loader is unsafe when tasks mutate static state; warn the user.
            U.warn(log, JOB_SHARED_CLASSLOADER.propertyName() +
                " job property is set to true; please disable it if job tasks rely on mutable static state.");

            sharedClsLdr = createClassLoader(HadoopClassLoader.nameForJob(jobId));
        }
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
    finally {
        HadoopCommonUtils.restoreContextClassLoader(prevLdr);
    }
}
Use of org.apache.ignite.IgniteCheckedException in the Apache Ignite project:
class HadoopV2JobResourceManager, method prepareJobEnvironment.
/**
 * Prepare job resources. Resolve the classpath list and download it if needed.
 *
 * @param download {@code true} If need to download resources.
 * @param jobLocDir Work directory for the job.
 * @throws IgniteCheckedException If failed.
 */
public void prepareJobEnvironment(boolean download, File jobLocDir) throws IgniteCheckedException {
    try {
        // The local job directory must be created exclusively by this call.
        if (jobLocDir.exists())
            throw new IgniteCheckedException("Local job directory already exists: " + jobLocDir.getAbsolutePath());

        JobConf jobConf = ctx.getJobConf();

        Collection<URL> clsPathUrls = new ArrayList<>();

        String mrDir = jobConf.get(MRJobConfig.MAPREDUCE_JOB_DIR);

        if (mrDir == null) {
            // No staging directory configured: just create an empty local job directory.
            if (!jobLocDir.mkdirs())
                throw new IgniteCheckedException("Failed to create local job directory: " +
                    jobLocDir.getAbsolutePath());
        }
        else {
            stagingDir = new Path(new URI(mrDir));

            if (download) {
                // Copy the job submission (staging) directory to the local file system.
                FileSystem fs = job.fileSystem(stagingDir.toUri(), jobConf);

                if (!fs.exists(stagingDir))
                    throw new IgniteCheckedException(
                        "Failed to find map-reduce submission directory (does not exist): " + stagingDir);

                if (!FileUtil.copy(fs, stagingDir, jobLocDir, false, jobConf))
                    throw new IgniteCheckedException("Failed to copy job submission directory " +
                        "contents to local file system " + "[path=" + stagingDir + ", locDir=" +
                        jobLocDir.getAbsolutePath() + ", jobId=" + jobId + ']');
            }

            // job.jar goes onto the task classpath; job.jar and job.xml are tracked for cleanup.
            File jarJobFile = new File(jobLocDir, "job.jar");

            clsPathUrls.add(jarJobFile.toURI().toURL());

            rsrcSet.add(jarJobFile);
            rsrcSet.add(new File(jobLocDir, "job.xml"));
        }

        // Localize distributed-cache files/archives and collect classpath entries.
        processFiles(jobLocDir, ctx.getCacheFiles(), download, false, null, MRJobConfig.CACHE_LOCALFILES);
        processFiles(jobLocDir, ctx.getCacheArchives(), download, true, null, MRJobConfig.CACHE_LOCALARCHIVES);
        processFiles(jobLocDir, ctx.getFileClassPaths(), download, false, clsPathUrls, null);
        processFiles(jobLocDir, ctx.getArchiveClassPaths(), download, true, clsPathUrls, null);

        if (!clsPathUrls.isEmpty())
            clsPath = clsPathUrls.toArray(new URL[clsPathUrls.size()]);

        setLocalFSWorkingDirectory(jobLocDir);
    }
    catch (URISyntaxException | IOException e) {
        throw new IgniteCheckedException(e);
    }
}
Use of org.apache.ignite.IgniteCheckedException in the Apache Ignite project:
class HadoopV2SetupTask, method run0.
/**
 * {@inheritDoc}
 */
@SuppressWarnings("ConstantConditions")
@Override protected void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
    try {
        JobContextImpl jobCtx = taskCtx.jobContext();

        // Validate output specification first, then let the committer set up the job.
        OutputFormat outputFormat = getOutputFormat(jobCtx);

        outputFormat.checkOutputSpecs(jobCtx);

        OutputCommitter committer = outputFormat.getOutputCommitter(hadoopContext());

        if (committer != null)
            committer.setupJob(jobCtx);
    }
    catch (ClassNotFoundException | IOException e) {
        throw new IgniteCheckedException(e);
    }
    catch (InterruptedException e) {
        // Restore interrupt status before propagating.
        Thread.currentThread().interrupt();

        throw new IgniteInterruptedCheckedException(e);
    }
}
Use of org.apache.ignite.IgniteCheckedException in the Apache Ignite project:
class HadoopV2Splitter, method splitJob.
/**
 * @param ctx Job context.
 * @return Collection of mapped splits.
 * @throws IgniteCheckedException If mapping failed.
 */
public static Collection<HadoopInputSplit> splitJob(JobContext ctx) throws IgniteCheckedException {
    try {
        InputFormat<?, ?> format = ReflectionUtils.newInstance(ctx.getInputFormatClass(), ctx.getConfiguration());

        assert format != null;

        List<InputSplit> splits = format.getSplits(ctx);

        Collection<HadoopInputSplit> mapped = new ArrayList<>(splits.size());

        int splitId = 0;

        for (InputSplit nativeSplit : splits) {
            // File splits map directly to file blocks; everything else is wrapped generically.
            if (nativeSplit instanceof FileSplit) {
                FileSplit fileSplit = (FileSplit)nativeSplit;

                mapped.add(new HadoopFileBlock(fileSplit.getLocations(), fileSplit.getPath().toUri(),
                    fileSplit.getStart(), fileSplit.getLength()));
            }
            else
                mapped.add(HadoopUtils.wrapSplit(splitId, nativeSplit, nativeSplit.getLocations()));

            splitId++;
        }

        return mapped;
    }
    catch (IOException | ClassNotFoundException e) {
        throw new IgniteCheckedException(e);
    }
    catch (InterruptedException e) {
        // Restore interrupt status before propagating.
        Thread.currentThread().interrupt();

        throw new IgniteInterruptedCheckedException(e);
    }
}
Use of org.apache.ignite.IgniteCheckedException in the Apache Ignite project:
class HadoopJobTracker, method submit.
/**
 * Submits execution of Hadoop job to grid.
 *
 * @param jobId Job ID.
 * @param info Job info.
 * @return Job completion future.
 */
@SuppressWarnings("unchecked")
public IgniteInternalFuture<HadoopJobId> submit(HadoopJobId jobId, HadoopJobInfo info) {
    if (!busyLock.tryReadLock()) {
        return new GridFinishedFuture<>(new IgniteCheckedException("Failed to execute map-reduce job " +
            "(grid is stopping): " + info));
    }

    // Hoisted out of the try block so the failure path can unregister it (see catch below).
    GridFutureAdapter<HadoopJobId> completeFut = null;

    try {
        long jobPrepare = U.currentTimeMillis();

        if (jobs.containsKey(jobId) || jobMetaCache().containsKey(jobId))
            throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);

        HadoopJobEx job = job(jobId, info);

        HadoopMapReducePlan mrPlan = mrPlanner.preparePlan(job, ctx.nodes(), null);

        logPlan(info, mrPlan);

        HadoopJobMetadata meta = new HadoopJobMetadata(ctx.localNodeId(), jobId, info);

        meta.mapReducePlan(mrPlan);

        meta.pendingSplits(allSplits(mrPlan));
        meta.pendingReducers(allReducers(mrPlan));

        completeFut = new GridFutureAdapter<>();

        GridFutureAdapter<HadoopJobId> old = activeFinishFuts.put(jobId, completeFut);

        assert old == null : "Duplicate completion future [jobId=" + jobId + ", old=" + old + ']';

        if (log.isDebugEnabled())
            log.debug("Submitting job metadata [jobId=" + jobId + ", meta=" + meta + ']');

        long jobStart = U.currentTimeMillis();

        HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(meta.counters(),
            ctx.localNodeId());

        perfCntr.clientSubmissionEvents(info);
        perfCntr.onJobPrepare(jobPrepare);
        perfCntr.onJobStart(jobStart);

        if (jobMetaCache().getAndPutIfAbsent(jobId, meta) != null)
            throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);

        return completeFut;
    }
    catch (IgniteCheckedException e) {
        // Fix: if submission fails after the completion future was registered (e.g. the
        // getAndPutIfAbsent race above), unregister it — otherwise the never-completed
        // future would leak in activeFinishFuts forever.
        if (completeFut != null)
            activeFinishFuts.remove(jobId, completeFut);

        U.error(log, "Failed to submit job: " + jobId, e);

        return new GridFinishedFuture<>(e);
    }
    finally {
        busyLock.readUnlock();
    }
}
Aggregations