Use of org.apache.hadoop.mapreduce.OutputCommitter in project hadoop by apache.
In the class TestGridMixClasses, the method testLoadMapper:
/*
 * Test LoadMapper: the mapper should write one record to the writer for each reducer.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test(timeout = 10000)
public void testLoadMapper() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);

  TaskAttemptID taskId = new TaskAttemptID();
  RecordReader<NullWritable, GridmixRecord> reader = new FakeRecordReader();
  LoadRecordGkGrWriter writer = new LoadRecordGkGrWriter();
  OutputCommitter committer = new CustomOutputCommitter();
  StatusReporter reporter = new TaskAttemptContextImpl.DummyReporter();
  LoadSplit split = getLoadSplit();

  MapContext<NullWritable, GridmixRecord, GridmixKey, GridmixRecord> mapContext =
      new MapContextImpl<NullWritable, GridmixRecord, GridmixKey, GridmixRecord>(
          conf, taskId, reader, writer, committer, reporter, split);
  // context
  Context ctx = new WrappedMapper<NullWritable, GridmixRecord, GridmixKey, GridmixRecord>()
      .getMapContext(mapContext);

  reader.initialize(split, ctx);
  ctx.getConfiguration().setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  CompressionEmulationUtil.setCompressionEmulationEnabled(ctx.getConfiguration(), true);

  LoadJob.LoadMapper mapper = new LoadJob.LoadMapper();
  // setup, map, cleanup
  mapper.run(ctx);

  Map<GridmixKey, GridmixRecord> data = writer.getData();
  // check the result: one output record per reducer
  assertEquals(2, data.size());
}
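LoadRecordGkGrWriter and FakeRecordReader are test doubles defined elsewhere in TestGridMixClasses. The sketch below is an illustrative guess at what such an in-memory writer has to provide, not the actual test code: a RecordWriter that simply collects everything the mapper writes so the test can assert on it.

// Hypothetical in-memory RecordWriter in the spirit of LoadRecordGkGrWriter;
// it buffers all records written by the mapper for later assertions.
class CollectingRecordWriter extends RecordWriter<GridmixKey, GridmixRecord> {
  private final Map<GridmixKey, GridmixRecord> data = new LinkedHashMap<>();

  @Override
  public void write(GridmixKey key, GridmixRecord value) {
    data.put(key, value);
  }

  @Override
  public void close(TaskAttemptContext context) {
    // nothing to release for an in-memory writer
  }

  Map<GridmixKey, GridmixRecord> getData() {
    return data;
  }
}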
Use of org.apache.hadoop.mapreduce.OutputCommitter in project hadoop by apache.
In the class TestGridMixClasses, the method testLoadJobLoadReducer:
/*
 * Test LoadReducer.
 */
@Test(timeout = 3000)
public void testLoadJobLoadReducer() throws Exception {
  LoadJob.LoadReducer test = new LoadJob.LoadReducer();

  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);

  TaskAttemptID taskid = new TaskAttemptID();
  RawKeyValueIterator input = new FakeRawKeyValueIterator();
  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  LoadRecordWriter output = new LoadRecordWriter();
  OutputCommitter committer = new CustomOutputCommitter();
  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, GridmixRecord, NullWritable, GridmixRecord> reduceContext =
      new ReduceContextImpl<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>(
          conf, taskid, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, GridmixRecord.class);
  // consume the first key/value pair before wrapping the context
  reduceContext.nextKeyValue();

  org.apache.hadoop.mapreduce.Reducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>.Context context =
      new WrappedReducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>()
          .getReducerContext(reduceContext);
  // test.setup(context);
  test.run(context);

  // 9 records have been read (-1 for the pair consumed above)
  assertEquals(9, counter.getValue());
  assertEquals(10, inputValueCounter.getValue());
  assertEquals(1, output.getData().size());

  GridmixRecord record = output.getData().values().iterator().next();
  assertEquals(1593, record.getSize());
}
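FakeRawKeyValueIterator and FakeRawComparator are likewise test doubles from TestGridMixClasses. The sketch below shows roughly how a RawKeyValueIterator stub can feed serialized GridmixKey/GridmixRecord pairs to ReduceContextImpl; the record count, sizes and constructor arguments are assumptions for illustration, not the values used by the real fake.

// Hypothetical RawKeyValueIterator stub: serves a fixed number of serialized
// key/value pairs. Constructor arguments for GridmixKey/GridmixRecord are assumed.
class TenRecordIterator implements RawKeyValueIterator {
  private final DataInputBuffer key = new DataInputBuffer();
  private final DataInputBuffer value = new DataInputBuffer();
  private int remaining = 10;

  @Override
  public DataInputBuffer getKey() {
    return key;
  }

  @Override
  public DataInputBuffer getValue() {
    return value;
  }

  @Override
  public boolean next() throws IOException {
    if (remaining-- <= 0)
      return false;
    // serialize one key/value pair and point the input buffers at the bytes
    DataOutputBuffer k = new DataOutputBuffer();
    DataOutputBuffer v = new DataOutputBuffer();
    new GridmixKey(GridmixKey.DATA, 100, 1L).write(k);
    new GridmixRecord(100, 1L).write(v);
    key.reset(k.getData(), 0, k.getLength());
    value.reset(v.getData(), 0, v.getLength());
    return true;
  }

  @Override
  public void close() {
  }

  @Override
  public Progress getProgress() {
    return new Progress();
  }
}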
Use of org.apache.hadoop.mapreduce.OutputCommitter in project hive by apache.
In the class TestHCatOutputFormat, the method publishTest:
public void publishTest(Job job) throws Exception {
  HCatOutputFormat hcof = new HCatOutputFormat();
  TaskAttemptContext tac = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
      job.getConfiguration(), ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID());

  OutputCommitter committer = hcof.getOutputCommitter(tac);
  committer.setupJob(job);
  committer.setupTask(tac);
  committer.commitTask(tac);
  committer.commitJob(job);

  Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
  assertNotNull(part);

  StorerInfo storer = InternalUtil.extractStorerInfo(part.getSd(), part.getParameters());
  assertEquals(storer.getProperties().get("hcat.testarg"), "testArgValue");
  assertTrue(part.getSd().getLocation().indexOf("p1") != -1);
}
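The calls above follow the standard OutputCommitter lifecycle: setupJob once per job, setupTask/commitTask per task attempt, then commitJob. For reference, the abstract contract that any committer has to fill in can be satisfied by a minimal no-op sketch like the one below; this is illustrative only, not HCatalog's actual committer.

// Minimal no-op OutputCommitter sketch; real committers create and clean up the
// job output location in setupJob/commitJob and promote task output in commitTask.
class NoOpOutputCommitter extends org.apache.hadoop.mapreduce.OutputCommitter {
  @Override public void setupJob(JobContext jobContext) { }
  @Override public void setupTask(TaskAttemptContext taskContext) { }
  @Override public boolean needsTaskCommit(TaskAttemptContext taskContext) { return true; }
  @Override public void commitTask(TaskAttemptContext taskContext) { }
  @Override public void abortTask(TaskAttemptContext taskContext) { }
}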
Use of org.apache.hadoop.mapreduce.OutputCommitter in project ignite by apache.
In the class HadoopV2SetupTask, the method run0:
/** {@inheritDoc} */
@SuppressWarnings("ConstantConditions")
@Override
protected void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
    try {
        JobContextImpl jobCtx = taskCtx.jobContext();

        OutputFormat outputFormat = getOutputFormat(jobCtx);
        outputFormat.checkOutputSpecs(jobCtx);

        OutputCommitter committer = outputFormat.getOutputCommitter(hadoopContext());

        if (committer != null)
            committer.setupJob(jobCtx);
    }
    catch (ClassNotFoundException | IOException e) {
        throw new IgniteCheckedException(e);
    }
    catch (InterruptedException e) {
        Thread.currentThread().interrupt();

        throw new IgniteInterruptedCheckedException(e);
    }
}
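The setup task only calls setupJob; the job-level counterpart (commit or abort) runs in a separate cleanup task. The following is a hedged sketch of what that counterpart might look like, not the actual Ignite cleanup task; the method name and abort flag are assumptions, only the OutputCommitter calls come from the Hadoop API.

// Hypothetical job-cleanup counterpart to run0 above. Names other than the
// OutputCommitter/JobStatus calls are illustrative assumptions.
protected void cleanup0(HadoopV2TaskContext taskCtx, boolean abort) throws IgniteCheckedException {
    try {
        JobContextImpl jobCtx = taskCtx.jobContext();

        OutputCommitter committer = getOutputFormat(jobCtx).getOutputCommitter(hadoopContext());

        if (committer != null) {
            if (abort)
                committer.abortJob(jobCtx, JobStatus.State.FAILED); // discard partial output
            else
                committer.commitJob(jobCtx); // promote committed task output
        }
    }
    catch (ClassNotFoundException | IOException e) {
        throw new IgniteCheckedException(e);
    }
    catch (InterruptedException e) {
        Thread.currentThread().interrupt();

        throw new IgniteInterruptedCheckedException(e);
    }
}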
Use of org.apache.hadoop.mapreduce.OutputCommitter in project ignite by apache.
In the class HadoopV2Task, the method prepareWriter:
/**
 * Put a record writer into the Hadoop context and return the associated output format instance.
 *
 * @param jobCtx Job context.
 * @return Output format.
 * @throws IgniteCheckedException In case of Grid exception.
 * @throws InterruptedException In case of interrupt.
 */
protected OutputFormat prepareWriter(JobContext jobCtx) throws IgniteCheckedException, InterruptedException {
    try {
        OutputFormat outputFormat = getOutputFormat(jobCtx);

        assert outputFormat != null;

        OutputCommitter outCommitter = outputFormat.getOutputCommitter(hadoopCtx);

        if (outCommitter != null)
            outCommitter.setupTask(hadoopCtx);

        RecordWriter writer = outputFormat.getRecordWriter(hadoopCtx);

        hadoopCtx.writer(writer);

        return outputFormat;
    }
    catch (IOException | ClassNotFoundException e) {
        throw new IgniteCheckedException(e);
    }
}
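prepareWriter only opens the task-side resources. A hedged sketch of the matching close path (illustrative, not the actual HadoopV2Task code) would close the writer and, if the committer asks for it, commit the task attempt; the closeWriter name and the hadoopCtx.writer() getter are assumptions for illustration.

// Hypothetical counterpart to prepareWriter: close the RecordWriter opened above and
// commit the task attempt through the same OutputCommitter. Names are illustrative only.
protected void closeWriter(OutputFormat outputFormat) throws IgniteCheckedException, InterruptedException {
    try {
        RecordWriter writer = hadoopCtx.writer(); // assumed getter matching hadoopCtx.writer(writer)

        if (writer != null)
            writer.close(hadoopCtx);

        OutputCommitter outCommitter = outputFormat.getOutputCommitter(hadoopCtx);

        if (outCommitter != null && outCommitter.needsTaskCommit(hadoopCtx))
            outCommitter.commitTask(hadoopCtx);
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}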