Use of org.apache.ignite.internal.processors.hadoop.HadoopHelperImpl in the Apache Ignite project:
class HadoopChildProcessRunner, method prepareProcess.
/**
 * Initializes process for task execution.
 * <p>
 * Guarded by {@code initGuard}: only the first request performs initialization,
 * duplicate requests are logged and ignored. Success or failure is signalled to
 * waiters through {@code initFut}.
 *
 * @param req Initialization request.
 */
@SuppressWarnings("unchecked")
private void prepareProcess(HadoopPrepareForJobRequest req) {
    // compareAndSet guarantees at most one caller runs the init sequence.
    if (initGuard.compareAndSet(false, true)) {
        try {
            if (log.isDebugEnabled())
                log.debug("Initializing external hadoop task: " + req);

            assert job == null;

            // The job class is loaded by name; presumably so it can be resolved
            // through a dedicated Hadoop class loader — TODO confirm.
            Class jobCls;

            try {
                jobCls = Class.forName(HadoopCommonUtils.JOB_CLS_NAME);
            }
            catch (ClassNotFoundException e) {
                // NOTE(review): IgniteException is unchecked and is NOT caught by the
                // IgniteCheckedException handler below, so a class-load failure escapes
                // this method with initFut left incomplete — confirm callers handle that.
                throw new IgniteException("Failed to load job class: " + HadoopCommonUtils.JOB_CLS_NAME, e);
            }

            job = req.jobInfo().createJob(jobCls, req.jobId(), log, null, new HadoopHelperImpl());

            job.initialize(true, nodeDesc.processId());

            // Shuffle state for this job: reducer counts come from the request.
            shuffleJob = new HadoopShuffleJob<>(comm.localProcessDescriptor(), log, job, mem, req.totalReducerCount(), req.localReducers(), 0, false);

            initializeExecutors();

            if (log.isDebugEnabled())
                log.debug("External process initialized [initWaitTime=" + (U.currentTimeMillis() - startTime) + ']');

            // Signal waiters that initialization completed successfully.
            initFut.onDone();
        }
        catch (IgniteCheckedException e) {
            U.error(log, "Failed to initialize process: " + req, e);

            // Propagate the failure to anyone waiting on the init future.
            initFut.onDone(e);
        }
    }
    else
        log.warning("Duplicate initialize process request received (will ignore): " + req);
}
Use of org.apache.ignite.internal.processors.hadoop.HadoopHelperImpl in the Apache Ignite project:
class HadoopV2JobSelfTest, method testCustomSerializationApplying.
/**
 * Tests that {@link HadoopJobEx} provides wrapped serializer if it's set in configuration.
 *
 * @throws IgniteCheckedException If fails.
 */
public void testCustomSerializationApplying() throws IgniteCheckedException {
    // Configure a custom serialization for the map output key/value types.
    JobConf jobConf = new JobConf();

    jobConf.setMapOutputKeyClass(IntWritable.class);
    jobConf.setMapOutputValueClass(Text.class);
    jobConf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, CustomSerialization.class.getName());

    HadoopDefaultJobInfo jobInfo = createJobInfo(jobConf, null);

    HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);

    HadoopJobEx hadoopJob = jobInfo.createJob(HadoopV2Job.class, jobId, log, null, new HadoopHelperImpl());

    HadoopTaskContext taskCtx = hadoopJob.getTaskContext(new HadoopTaskInfo(HadoopTaskType.MAP, null, 0, 0, null));

    DataInput emptyIn = new DataInputStream(new ByteArrayInputStream(new byte[0]));

    // Key serialization must be wrapped and yield the test value.
    HadoopSerialization keySer = taskCtx.keySerialization();

    assertEquals(HadoopSerializationWrapper.class.getName(), keySer.getClass().getName());
    assertEquals(TEST_SERIALIZED_VALUE, keySer.read(emptyIn, null).toString());

    // Value serialization must behave identically.
    HadoopSerialization valSer = taskCtx.valueSerialization();

    assertEquals(HadoopSerializationWrapper.class.getName(), valSer.getClass().getName());
    assertEquals(TEST_SERIALIZED_VALUE, valSer.read(emptyIn, null).toString());
}
Use of org.apache.ignite.internal.processors.hadoop.HadoopHelperImpl in the Apache Ignite project:
class HadoopSnappyTest, method testSnappy.
/**
 * Checks Snappy codec usage.
 *
 * @throws Throwable On error.
 */
public void testSnappy() throws Throwable {
    // First verify Snappy from the default class loader.
    checkSnappy();

    // Then repeat the check from isolated Hadoop class loaders,
    // simulating separate jobs and tasks.
    for (int ldrIdx = 0; ldrIdx < 2; ldrIdx++) {
        ClassLoader ldr = new HadoopClassLoader(null, "cl-" + ldrIdx, null, new HadoopHelperImpl());

        Class<?> testCls = (Class)Class.forName(HadoopSnappyTest.class.getName(), true, ldr);

        assertEquals(ldr, testCls.getClassLoader());

        U.invoke(testCls, null, "checkSnappy");
    }
}
Use of org.apache.ignite.internal.processors.hadoop.HadoopHelperImpl in the Apache Ignite project:
class HadoopTasksV1Test, method getHadoopJob.
/**
 * Creates WordCount hadoop job for API v1.
 *
 * @param inFile Input file name for the job.
 * @param outFile Output file name for the job.
 * @return Hadoop job.
 * @throws Exception If fails.
 */
@Override public HadoopJobEx getHadoopJob(String inFile, String outFile) throws Exception {
    // Base v1 WordCount configuration for the given input/output paths.
    JobConf cfg = HadoopWordCount1.getJob(inFile, outFile);

    setupFileSystems(cfg);

    // Fixed job id — tests need a deterministic identifier.
    HadoopJobId jobId = new HadoopJobId(new UUID(0, 0), 0);

    return createJobInfo(cfg, null).createJob(HadoopV2Job.class, jobId, log, null, new HadoopHelperImpl());
}
Use of org.apache.ignite.internal.processors.hadoop.HadoopHelperImpl in the Apache Ignite project:
class HadoopTasksV2Test, method getHadoopJob.
/**
 * Creates WordCount hadoop job for API v2.
 *
 * @param inFile Input file name for the job.
 * @param outFile Output file name for the job.
 * @return Hadoop job.
 * @throws Exception if fails.
 */
@Override public HadoopJobEx getHadoopJob(String inFile, String outFile) throws Exception {
    Job cfgJob = Job.getInstance();

    cfgJob.setOutputKeyClass(Text.class);
    cfgJob.setOutputValueClass(IntWritable.class);

    HadoopWordCount2.setTasksClasses(cfgJob, true, true, true, false);

    Configuration cfg = cfgJob.getConfiguration();

    setupFileSystems(cfg);

    FileInputFormat.setInputPaths(cfgJob, new Path(inFile));
    FileOutputFormat.setOutputPath(cfgJob, new Path(outFile));

    cfgJob.setJarByClass(HadoopWordCount2.class);

    // NOTE(review): the job configured above is never submitted — a second job
    // is built here and only ITS configuration feeds createJobInfo, so the
    // earlier setup (incl. setupFileSystems) is discarded. Confirm intentional.
    Job hadoopJob = HadoopWordCount2.getJob(inFile, outFile);

    HadoopDefaultJobInfo jobInfo = createJobInfo(hadoopJob.getConfiguration(), null);

    // Fixed job id — tests need a deterministic identifier.
    HadoopJobId jobId = new HadoopJobId(new UUID(0, 0), 0);

    return jobInfo.createJob(HadoopV2Job.class, jobId, log, null, new HadoopHelperImpl());
}
Aggregations