use of org.apache.hadoop.mapreduce.TaskAttemptContext in project cdap by caskdata.
the class MultipleOutputsCommitter method recoverTask.
@Override
public void recoverTask(TaskAttemptContext taskContext) throws IOException {
  for (Map.Entry<String, OutputCommitter> committer : committers.entrySet()) {
    TaskAttemptContext namedTaskContext = MultipleOutputs.getNamedTaskContext(taskContext, committer.getKey());
    committer.getValue().recoverTask(namedTaskContext);
  }
}
use of org.apache.hadoop.mapreduce.TaskAttemptContext in project cdap by caskdata.
the class MultipleOutputsCommitter method setupTask.
@Override
public void setupTask(TaskAttemptContext taskContext) throws IOException {
  for (Map.Entry<String, OutputCommitter> committer : committers.entrySet()) {
    TaskAttemptContext namedTaskContext = MultipleOutputs.getNamedTaskContext(taskContext, committer.getKey());
    committer.getValue().setupTask(namedTaskContext);
  }
}
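The remaining OutputCommitter lifecycle calls follow the same fan-out: derive a per-output TaskAttemptContext, then delegate to each named committer. The sketch below illustrates that pattern in a self-contained form; it is not the CDAP class itself, the class name and constructor are hypothetical, and namedContext stands in for the CDAP-internal MultipleOutputs.getNamedTaskContext.

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Hypothetical sketch of a composite committer; not the CDAP implementation.
public abstract class CompositeCommitterSketch extends OutputCommitter {
  private final Map<String, OutputCommitter> committers;

  protected CompositeCommitterSketch(Map<String, OutputCommitter> committers) {
    this.committers = committers;
  }

  // Stand-in for MultipleOutputs.getNamedTaskContext: derives a context whose
  // configuration is scoped to the named output.
  protected abstract TaskAttemptContext namedContext(TaskAttemptContext context, String name);

  @Override
  public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
    // "Any-of" aggregation: a task commit is needed if any delegate needs one.
    for (Map.Entry<String, OutputCommitter> entry : committers.entrySet()) {
      if (entry.getValue().needsTaskCommit(namedContext(taskContext, entry.getKey()))) {
        return true;
      }
    }
    return false;
  }

  @Override
  public void commitTask(TaskAttemptContext taskContext) throws IOException {
    for (Map.Entry<String, OutputCommitter> entry : committers.entrySet()) {
      entry.getValue().commitTask(namedContext(taskContext, entry.getKey()));
    }
  }

  @Override
  public void abortTask(TaskAttemptContext taskContext) throws IOException {
    for (Map.Entry<String, OutputCommitter> entry : committers.entrySet()) {
      entry.getValue().abortTask(namedContext(taskContext, entry.getKey()));
    }
  }
}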
use of org.apache.hadoop.mapreduce.TaskAttemptContext in project hbase by apache.
the class TableInputFormatBase method createRecordReader.
/**
* Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses
* the default.
*
* @param split The split to work with.
* @param context The current context.
* @return The newly created record reader.
* @throws IOException When creating the reader fails.
* @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader(
* org.apache.hadoop.mapreduce.InputSplit,
* org.apache.hadoop.mapreduce.TaskAttemptContext)
*/
@Override
public RecordReader<ImmutableBytesWritable, Result> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
  // Just in case a subclass is relying on JobConfigurable magic.
  if (table == null) {
    initialize(context);
  }
  // null check in case our child overrides getTable to not throw.
  try {
    if (getTable() == null) {
      // initialize() must not have been implemented in the subclass.
      throw new IOException(INITIALIZATION_ERROR);
    }
  } catch (IllegalStateException exception) {
    throw new IOException(INITIALIZATION_ERROR, exception);
  }
  TableSplit tSplit = (TableSplit) split;
  LOG.info("Input split length: " + StringUtils.humanReadableInt(tSplit.getLength()) + " bytes.");
  final TableRecordReader trr = this.tableRecordReader != null ? this.tableRecordReader : new TableRecordReader();
  Scan sc = new Scan(this.scan);
  sc.setStartRow(tSplit.getStartRow());
  sc.setStopRow(tSplit.getEndRow());
  trr.setScan(sc);
  trr.setTable(getTable());
  return new RecordReader<ImmutableBytesWritable, Result>() {

    @Override
    public void close() throws IOException {
      trr.close();
      closeTable();
    }

    @Override
    public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException {
      return trr.getCurrentKey();
    }

    @Override
    public Result getCurrentValue() throws IOException, InterruptedException {
      return trr.getCurrentValue();
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
      return trr.getProgress();
    }

    @Override
    public void initialize(InputSplit inputsplit, TaskAttemptContext context) throws IOException, InterruptedException {
      trr.initialize(inputsplit, context);
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
      return trr.nextKeyValue();
    }
  };
}
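For context on how this record reader ends up in a job, the sketch below shows the usual way an HBase scan job is wired up with TableMapReduceUtil.initTableMapperJob, which installs TableInputFormat (a TableInputFormatBase subclass) on the job. The table name "my_table", the class names, and the counter are placeholders, not part of the snippet above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

// Hypothetical job wiring sketch for a table-scan MapReduce job.
public class TableScanJobSketch {

  // Trivial mapper: the record reader built above hands each row to map()
  // as (row key, Result).
  static class RowCountMapper extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable rowKey, Result columns, Context context)
        throws IOException, InterruptedException {
      context.getCounter("sketch", "rows").increment(1);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "table-scan-sketch");
    job.setJarByClass(TableScanJobSketch.class);
    Scan scan = new Scan();
    scan.setCaching(500);        // larger batches per RPC for scan-heavy MR jobs
    scan.setCacheBlocks(false);  // avoid polluting the block cache from MR scans
    TableMapReduceUtil.initTableMapperJob("my_table", scan, RowCountMapper.class,
        ImmutableBytesWritable.class, Result.class, job);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}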
use of org.apache.hadoop.mapreduce.TaskAttemptContext in project hbase by apache.
the class TestHFileOutputFormat2 method createTestTaskAttemptContext.
private TaskAttemptContext createTestTaskAttemptContext(final Job job) throws Exception {
  HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class);
  TaskAttemptContext context = hadoop.createTestTaskAttemptContext(job, "attempt_201402131733_0001_m_000000_0");
  return context;
}
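The HadoopShims indirection hides differences in where the TaskAttemptContext implementation lives across Hadoop versions. As a rough illustration only (not what the test does), on Hadoop 2+ an equivalent context could be built directly from the same attempt id string; the class and method names below are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

// Hypothetical helper: builds a TaskAttemptContext from an attempt id string
// of the form "attempt_<jobid>_m_<task>_<attempt>".
public class TaskAttemptContextSketch {
  static TaskAttemptContext newContext(Job job, String attemptId) {
    Configuration conf = job.getConfiguration();
    return new TaskAttemptContextImpl(conf, TaskAttemptID.forName(attemptId));
  }
}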
use of org.apache.hadoop.mapreduce.TaskAttemptContext in project hbase by apache.
the class TestHFileOutputFormat2 method test_TIMERANGE.
/*
* Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE
* metadata used by time-restricted scans.
*/
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
public void test_TIMERANGE() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, Cell> writer = null;
  TaskAttemptContext context = null;
  Path dir = util.getDataTestDir("test_TIMERANGE_present");
  LOG.info("Timerange dir writing to dir: " + dir);
  try {
    // build a record writer using HFileOutputFormat2
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat2 hof = new HFileOutputFormat2();
    writer = hof.getRecordWriter(context);
    // Pass two key values with explicit timestamps
    final byte[] b = Bytes.toBytes("b");
    // value 1 with timestamp 2000
    KeyValue kv = new KeyValue(b, b, b, 2000, b);
    KeyValue original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // value 2 with timestamp 1000
    kv = new KeyValue(b, b, b, 1000, b);
    original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // verify that the file has the proper FileInfo.
    writer.close(context);
    // the generated file lives 1 directory down from the attempt directory
    // and is the only file, e.g.
    // _attempt__0000_r_000000_0/b/1979617994050536795
    FileSystem fs = FileSystem.get(conf);
    Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
    FileStatus[] sub1 = fs.listStatus(attemptDirectory);
    FileStatus[] file = fs.listStatus(sub1[0].getPath());
    // open as HFile Reader and pull out TIMERANGE FileInfo.
    HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), conf);
    Map<byte[], byte[]> finfo = rd.loadFileInfo();
    byte[] range = finfo.get("TIMERANGE".getBytes());
    assertNotNull(range);
    // unmarshall and check values.
    TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
    Writables.copyWritable(range, timeRangeTracker);
    LOG.info(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax());
    assertEquals(1000, timeRangeTracker.getMin());
    assertEquals(2000, timeRangeTracker.getMax());
    rd.close();
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
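For reference, the TIMERANGE file info asserted above is what makes time-restricted scans cheap: store files whose recorded time range falls entirely outside the requested range can be skipped without being read. A minimal sketch of such a scan covering both timestamps written by the test (class and method names are hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;

// Hypothetical sketch of a time-restricted scan.
public class TimeRangeScanSketch {
  static Scan timeRestrictedScan() throws IOException {
    Scan scan = new Scan();
    scan.setTimeRange(1000L, 2001L);  // [min, max): includes both 1000 and 2000
    return scan;
  }
}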