Use of org.apache.tez.mapreduce.lib.MRReaderMapReduce in project tez by apache.
Class MRInput, method initializeInternal.
@Private
void initializeInternal() throws IOException {
  // Primarily for visibility
  rrLock.lock();
  try {
    if (splitInfoViaEvents) {
      // Split details arrive later via an InputDataInformationEvent, so the
      // reader is constructed without a split here.
      if (useNewApi) {
        mrReader = new MRReaderMapReduce(jobConf, getContext().getCounters(), inputRecordCounter,
            getContext().getApplicationId().getClusterTimestamp(),
            getContext().getTaskVertexIndex(), getContext().getApplicationId().getId(),
            getContext().getTaskIndex(), getContext().getTaskAttemptNumber(), getContext());
      } else {
        mrReader = new MRReaderMapred(jobConf, getContext().getCounters(), inputRecordCounter,
            getContext());
      }
    } else {
      // Split meta information is read directly from disk.
      TaskSplitMetaInfo[] allMetaInfo = MRInputUtils.readSplits(jobConf);
      TaskSplitMetaInfo thisTaskMetaInfo = allMetaInfo[getContext().getTaskIndex()];
      TaskSplitIndex splitMetaInfo = new TaskSplitIndex(thisTaskMetaInfo.getSplitLocation(),
          thisTaskMetaInfo.getStartOffset());
      long splitLength = -1;
      if (useNewApi) {
        org.apache.hadoop.mapreduce.InputSplit newInputSplit =
            MRInputUtils.getNewSplitDetailsFromDisk(splitMetaInfo, jobConf,
                getContext().getCounters().findCounter(TaskCounter.SPLIT_RAW_BYTES));
        try {
          splitLength = newInputSplit.getLength();
        } catch (InterruptedException e) {
          LOG.warn("Got interrupted while reading split length: ", e);
        }
        mrReader = new MRReaderMapReduce(jobConf, newInputSplit, getContext().getCounters(),
            inputRecordCounter, getContext().getApplicationId().getClusterTimestamp(),
            getContext().getTaskVertexIndex(), getContext().getApplicationId().getId(),
            getContext().getTaskIndex(), getContext().getTaskAttemptNumber(), getContext());
      } else {
        org.apache.hadoop.mapred.InputSplit oldInputSplit =
            MRInputUtils.getOldSplitDetailsFromDisk(splitMetaInfo, jobConf,
                getContext().getCounters().findCounter(TaskCounter.SPLIT_RAW_BYTES));
        splitLength = oldInputSplit.getLength();
        mrReader = new MRReaderMapred(jobConf, oldInputSplit, getContext().getCounters(),
            inputRecordCounter, getContext());
      }
      if (splitLength != -1) {
        getContext().getCounters().findCounter(TaskCounter.INPUT_SPLIT_LENGTH_BYTES)
            .increment(splitLength);
      }
    }
  } finally {
    rrLock.unlock();
  }
  LOG.info("Initialized MRInput: " + getContext().getSourceVertexName());
}
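
Once initializeInternal() returns, a processor reads records through the KeyValueReader that MRInput exposes; the reader delegates to whichever MRReader implementation was constructed above. A minimal consumption sketch, assuming an already-initialized MRInput instance (the MRInputReadSketch class and countRecords method are hypothetical):

import java.io.IOException;
import org.apache.tez.mapreduce.input.MRInput;
import org.apache.tez.runtime.library.api.KeyValueReader;

final class MRInputReadSketch {
  // Drains all records from an initialized MRInput and returns the count.
  static long countRecords(MRInput mrInput) throws IOException {
    KeyValueReader reader = mrInput.getReader();
    long count = 0;
    while (reader.next()) {
      Object key = reader.getCurrentKey();     // current record's key
      Object value = reader.getCurrentValue(); // current record's value
      count++;
    }
    return count;
  }
}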
Use of org.apache.tez.mapreduce.lib.MRReaderMapReduce in project tez by apache.
Class MultiMRInput, method initFromEvent.
private MRReader initFromEvent(InputDataInformationEvent event) throws IOException {
  Preconditions.checkState(event != null, "Event must be specified");
  if (LOG.isDebugEnabled()) {
    LOG.debug(getContext().getSourceVertexName() + " initializing Reader: " + eventCount.get());
  }
  // Deserialize the split carried in the event payload.
  MRSplitProto splitProto = MRSplitProto.parseFrom(ByteString.copyFrom(event.getUserPayload()));
  MRReader reader = null;
  JobConf localJobConf = new JobConf(jobConf);
  long splitLength = -1;
  if (useNewApi) {
    InputSplit split = MRInputUtils.getNewSplitDetailsFromEvent(splitProto, localJobConf);
    try {
      splitLength = split.getLength();
    } catch (InterruptedException e) {
      LOG.warn("Got interrupted while reading split length: ", e);
    }
    reader = new MRReaderMapReduce(localJobConf, split, getContext().getCounters(),
        inputRecordCounter, getContext().getApplicationId().getClusterTimestamp(),
        getContext().getTaskVertexIndex(), getContext().getApplicationId().getId(),
        getContext().getTaskIndex(), getContext().getTaskAttemptNumber(), getContext());
    if (LOG.isDebugEnabled()) {
      LOG.debug(getContext().getSourceVertexName() + " split Details -> SplitClass: "
          + split.getClass().getName() + ", NewSplit: " + split + ", length: " + splitLength);
    }
  } else {
    org.apache.hadoop.mapred.InputSplit split =
        MRInputUtils.getOldSplitDetailsFromEvent(splitProto, localJobConf);
    splitLength = split.getLength();
    reader = new MRReaderMapred(localJobConf, split, getContext().getCounters(),
        inputRecordCounter, getContext());
    if (LOG.isDebugEnabled()) {
      LOG.debug(getContext().getSourceVertexName() + " split Details -> SplitClass: "
          + split.getClass().getName() + ", OldSplit: " + split + ", length: " + splitLength);
    }
  }
  if (splitLength != -1) {
    getContext().getCounters().findCounter(TaskCounter.INPUT_SPLIT_LENGTH_BYTES)
        .increment(splitLength);
  }
  LOG.info(getContext().getSourceVertexName() + " initialized RecordReader from event");
  return reader;
}
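
Processors normally do not call initFromEvent() themselves: MultiMRInput invokes it once per InputDataInformationEvent and collects the resulting readers. A minimal consumption sketch, assuming MultiMRInput's getKeyValueReaders() accessor waits until all expected events have been processed (the MultiMRInputReadSketch class and countRecords method are hypothetical):

import java.io.IOException;
import java.util.Collection;
import org.apache.tez.mapreduce.input.MultiMRInput;
import org.apache.tez.runtime.library.api.KeyValueReader;

final class MultiMRInputReadSketch {
  // Iterates the records of every split delivered to this input.
  static long countRecords(MultiMRInput input) throws IOException, InterruptedException {
    Collection<KeyValueReader> readers = input.getKeyValueReaders();
    long count = 0;
    for (KeyValueReader reader : readers) {
      while (reader.next()) {
        count++; // getCurrentKey()/getCurrentValue() expose the record here
      }
    }
    return count;
  }
}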