Use of org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext in project hive by apache.
The class VectorMapOperator, method process.
@Override
public void process(Writable value) throws HiveException {
// A mapper can span multiple files/partitions.
// The VectorPartitionContext needs to be changed if the input file has changed.
ExecMapperContext context = getExecContext();
if (context != null && context.inputFileChanged()) {
// Have the child operators clean up if the input file has changed.
cleanUpInputFileChanged();
}
if (!oneRootOperator.getDone()) {
/*
* 3 different kinds of vectorized reading supported:
*
* 1) Read the Vectorized Input File Format which returns VectorizedRowBatch as the row.
*
* 2) Read using VectorDeserializeRow to deserialize each row into the VectorizedRowBatch.
*
* 3) And read using the regular partition deserializer to get the row object and assign
* the row object into the VectorizedRowBatch with VectorAssignRow.
*/
try {
if (currentReadType == VectorMapOperatorReadType.VECTORIZED_INPUT_FILE_FORMAT) {
if (!deliverVectorizedRowBatch(value)) {
// Operator tree is now done.
return;
}
} else if (value instanceof VectorizedRowBatch) {
/*
* Clear out any rows we may have processed in row-mode for the current partition.
*/
if (!flushDeserializerBatch()) {
// Operator tree is now done.
return;
}
if (!deliverVectorizedRowBatch(value)) {
// Operator tree is now done.
return;
}
} else {
/*
* We have a "regular" single row from the Input File Format reader that we will need
* to deserialize.
*/
Preconditions.checkState(currentReadType == VectorMapOperatorReadType.VECTOR_DESERIALIZE || currentReadType == VectorMapOperatorReadType.ROW_DESERIALIZE);
if (deserializerBatch.size == deserializerBatch.DEFAULT_SIZE) {
numRows += deserializerBatch.size;
/*
* Feed current full batch to operator tree.
*/
batchCounter++;
oneRootOperator.process(deserializerBatch, 0);
resetVectorizedRowBatchForDeserialize();
if (oneRootOperator.getDone()) {
setDone(true);
return;
}
}
/*
* Do the {vector|row} deserialization of the one row into the VectorizedRowBatch.
*/
switch(currentReadType) {
case VECTOR_DESERIALIZE:
{
BinaryComparable binComp = (BinaryComparable) value;
currentDeserializeRead.set(binComp.getBytes(), 0, binComp.getLength());
// Deserialize and append new row using the current batch size as the index.
try {
currentVectorDeserializeRow.deserialize(deserializerBatch, deserializerBatch.size++);
} catch (Exception e) {
throw new HiveException("\nDeserializeRead detail: " + currentVectorDeserializeRow.getDetailedReadPositionString(), e);
}
}
break;
case ROW_DESERIALIZE:
{
Object deserialized = currentPartDeserializer.deserialize(value);
// Note: Regardless of what the Input File Format returns, we have determined
// with VectorAssignRow.initConversion that only currentDataColumnCount columns
// have values we want.
//
// Any extra columns needed by the table schema were set to repeating null
// in the batch by setupPartitionContextVars.
// Convert input row to standard objects.
List<Object> standardObjects = new ArrayList<Object>();
try {
ObjectInspectorUtils.copyToStandardObject(standardObjects, deserialized, currentPartRawRowObjectInspector, ObjectInspectorCopyOption.WRITABLE);
} catch (Exception e) {
throw new HiveException("copyToStandardObject failed: " + e);
}
if (standardObjects.size() < currentDataColumnCount) {
throw new HiveException("Input File Format returned row with too few columns");
}
// Append the deserialized standard object row using the current batch size
// as the index.
currentVectorAssign.assignRow(deserializerBatch, deserializerBatch.size++, standardObjects, currentDataColumnCount);
}
break;
default:
throw new RuntimeException("Unexpected vector MapOperator read type " + currentReadType.name());
}
}
} catch (Exception e) {
throw new HiveException("Hive Runtime Error while processing row ", e);
}
}
}
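For orientation, the ExecMapperContext usage at the top of process() can be isolated into a small pattern: check inputFileChanged() before handing each row to the operator tree, and reset per-file state when it reports a change. The sketch below is not Hive source; HypotheticalRowProcessor and deliverRow() are made up for illustration, while getExecContext(), inputFileChanged() and cleanUpInputFileChanged() are the calls used above.
import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.io.Writable;
// Hypothetical sketch, not Hive source: the file-change check pattern in isolation.
public abstract class HypotheticalRowProcessor {
  // Supplied by the concrete operator; in Hive this comes from Operator.getExecContext().
  protected abstract ExecMapperContext getExecContext();
  // Resets any per-file/partition state (see cleanUpInputFileChanged above).
  protected abstract void cleanUpInputFileChanged() throws HiveException;
  // Hands the row to the rest of the operator tree (hypothetical helper).
  protected abstract void deliverRow(Writable value) throws HiveException;
  public void process(Writable value) throws HiveException {
    ExecMapperContext context = getExecContext();
    // inputFileChanged() reports when the underlying input file has changed since the
    // last check, so per-file/partition state can be reset before this row is processed.
    if (context != null && context.inputFileChanged()) {
      cleanUpInputFileChanged();
    }
    deliverRow(value);
  }
}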
Use of org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext in project hive by apache.
The class MergeFileRecordProcessor, method init.
@Override
void init(MRTaskReporter mrReporter, Map<String, LogicalInput> inputs, Map<String, LogicalOutput> outputs) throws Exception {
// TODO HIVE-14042. Abort handling.
perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
super.init(mrReporter, inputs, outputs);
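// ExecMapperContext carries per-task mapper state: the JobConf, any map-side
// local work, and the bookkeeping used to detect input file changes.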
execContext = new ExecMapperContext(jconf);
// Update the JobConf using MRInput; info like the filename comes via this.
mrInput = getMRInput(inputs);
Configuration updatedConf = mrInput.getConfigUpdates();
if (updatedConf != null) {
for (Map.Entry<String, String> entry : updatedConf) {
jconf.set(entry.getKey(), entry.getValue());
}
}
createOutputMap();
// Start all the Outputs.
for (Map.Entry<String, LogicalOutput> outputEntry : outputs.entrySet()) {
outputEntry.getValue().start();
((TezProcessor.TezKVOutputCollector) outMap.get(outputEntry.getKey())).initialize();
}
String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID);
cache = ObjectCacheFactory.getCache(jconf, queryId, true);
try {
execContext.setJc(jconf);
cacheKey = MAP_PLAN_KEY;
MapWork mapWork = (MapWork) cache.retrieve(cacheKey, new Callable<Object>() {
@Override
public Object call() {
return Utilities.getMapWork(jconf);
}
});
Utilities.setMapWork(jconf, mapWork);
if (mapWork instanceof MergeFileWork) {
mfWork = (MergeFileWork) mapWork;
} else {
throw new RuntimeException("MapWork should be an instance of MergeFileWork.");
}
String alias = mfWork.getAliasToWork().keySet().iterator().next();
mergeOp = mfWork.getAliasToWork().get(alias);
LOG.info(mergeOp.dump(0));
MapredContext.init(true, new JobConf(jconf));
((TezContext) MapredContext.get()).setInputs(inputs);
mergeOp.passExecContext(execContext);
mergeOp.initializeLocalWork(jconf);
mergeOp.initialize(jconf, null);
OperatorUtils.setChildrenCollector(mergeOp.getChildOperators(), outMap);
mergeOp.setReporter(reporter);
MapredContext.get().setReporter(reporter);
} catch (Throwable e) {
if (e instanceof OutOfMemoryError) {
// Don't create a new object if we are already out of memory
throw (OutOfMemoryError) e;
} else if (e instanceof InterruptedException) {
LOG.info("Hit an interrupt while initializing MergeFileRecordProcessor. Message={}", e.getMessage());
throw (InterruptedException) e;
} else {
throw new RuntimeException("Map operator initialization failed", e);
}
}
perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
}
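The plan lookup above (ObjectCacheFactory.getCache plus cache.retrieve with a Callable) is a reusable pattern: the MapWork plan is deserialized at most once per query and then shared through the object cache. A minimal sketch, with the helper class, method and parameter names assumed for illustration:
import java.util.concurrent.Callable;
import org.apache.hadoop.hive.ql.exec.ObjectCache;
import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.MapWork;
import org.apache.hadoop.mapred.JobConf;
// Hypothetical sketch, not Hive source: the per-query plan cache pattern used above.
public class MapWorkCacheSketch {
  public static MapWork loadMapWork(final JobConf jconf, String queryId, String planKey)
      throws Exception {
    // The boolean flag mirrors the getCache(jconf, queryId, true) call above.
    ObjectCache cache = ObjectCacheFactory.getCache(jconf, queryId, true);
    return (MapWork) cache.retrieve(planKey, new Callable<Object>() {
      @Override
      public Object call() {
        // Runs only on a cache miss: deserialize the map plan from the job configuration.
        return Utilities.getMapWork(jconf);
      }
    });
  }
}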
Use of org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext in project hive by apache.
The class MapRecordProcessor, method init.
@Override
void init(MRTaskReporter mrReporter, Map<String, LogicalInput> inputs, Map<String, LogicalOutput> outputs) throws Exception {
perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
super.init(mrReporter, inputs, outputs);
checkAbortCondition();
String key = processorContext.getTaskVertexName() + MAP_PLAN_KEY;
cacheKeys.add(key);
// create map and fetch operators
if (!isInCompaction) {
mapWork = cache.retrieve(key, () -> Utilities.getMapWork(jconf));
} else {
// During query-based compaction, we don't want to retrieve old MapWork from the cache; we want a new mapper
// and a new UDF validate_acid_sort_order instance for each bucket, otherwise validate_acid_sort_order will fail.
mapWork = Utilities.getMapWork(jconf);
}
// TODO HIVE-14042. Cleanup may be required if exiting early.
Utilities.setMapWork(jconf, mapWork);
for (PartitionDesc part : mapWork.getAliasToPartnInfo().values()) {
TableDesc tableDesc = part.getTableDesc();
Utilities.copyJobSecretToTableProperties(tableDesc);
}
String prefixes = jconf.get(DagUtils.TEZ_MERGE_WORK_FILE_PREFIXES);
if (prefixes != null) {
mergeWorkList = new ArrayList<>();
for (final String prefix : prefixes.split(",")) {
if (prefix == null || prefix.isEmpty()) {
continue;
}
key = processorContext.getTaskVertexName() + prefix;
cacheKeys.add(key);
checkAbortCondition();
mergeWorkList.add((MapWork) cache.retrieve(key, () -> Utilities.getMergeWork(jconf, prefix)));
}
}
MapredContext.init(true, new JobConf(jconf));
((TezContext) MapredContext.get()).setInputs(inputs);
((TezContext) MapredContext.get()).setTezProcessorContext(processorContext);
// Update the JobConf using MRInput; info like the filename comes via this.
checkAbortCondition();
legacyMRInput = getMRInput(inputs);
if (legacyMRInput != null) {
Configuration updatedConf = legacyMRInput.getConfigUpdates();
if (updatedConf != null) {
for (Entry<String, String> entry : updatedConf) {
jconf.set(entry.getKey(), entry.getValue());
}
}
}
checkAbortCondition();
createOutputMap();
// Start all the Outputs.
for (Entry<String, LogicalOutput> outputEntry : outputs.entrySet()) {
LOG.debug("Starting Output: " + outputEntry.getKey());
outputEntry.getValue().start();
((TezKVOutputCollector) outMap.get(outputEntry.getKey())).initialize();
}
checkAbortCondition();
try {
CompilationOpContext runtimeCtx = new CompilationOpContext();
if (mapWork.getVectorMode()) {
mapOp = new VectorMapOperator(runtimeCtx);
} else {
mapOp = new MapOperator(runtimeCtx);
}
// Not synchronizing creation of mapOp with an invocation. Check immediately
// after creation in case abort has been set.
// Relying on the regular flow to clean up the actual operator. i.e. If an exception is
// thrown, an attempt will be made to cleanup the op.
// If we are here, exit out via an exception. If we're in the middle of the operator.initialize
// call further down, we rely upon op.abort().
checkAbortCondition();
mapOp.clearConnectedOperators();
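// Attach the shared ExecMapperContext so the map operator can track input
// file / partition changes while processing.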
mapOp.setExecContext(execContext);
boolean fromCache = false;
if (mergeWorkList != null) {
AbstractMapOperator mergeMapOp = null;
for (BaseWork mergeWork : mergeWorkList) {
// TODO HIVE-14042. What is mergeWork, and why is it not part of the regular operator chain.
// The mergeMapOp.initialize call further down can block, and will not receive information
// about an abort request.
MapWork mergeMapWork = (MapWork) mergeWork;
if (mergeMapWork.getVectorMode()) {
mergeMapOp = new VectorMapOperator(runtimeCtx);
} else {
mergeMapOp = new MapOperator(runtimeCtx);
}
mergeMapOpList.add(mergeMapOp);
// initialize the merge operators first.
if (mergeMapOp != null) {
mergeMapOp.setConf(mergeMapWork);
LOG.info("Input name is {}", mergeMapWork.getName());
jconf.set(Utilities.INPUT_NAME, mergeMapWork.getName());
mergeMapOp.initialize(jconf, null);
// if there are no files/partitions to read, we need to skip trying to read
MultiMRInput multiMRInput = multiMRInputMap.get(mergeMapWork.getName());
boolean skipRead = false;
if (multiMRInput == null) {
LOG.info("Multi MR Input for work {} is null. Skipping read.", mergeMapWork.getName());
skipRead = true;
} else {
Collection<KeyValueReader> keyValueReaders = multiMRInput.getKeyValueReaders();
if ((keyValueReaders == null) || (keyValueReaders.isEmpty())) {
LOG.info("Key value readers are null or empty and hence skipping read. " + "KeyValueReaders = {}", keyValueReaders);
skipRead = true;
}
}
if (skipRead) {
List<Operator<?>> children = new ArrayList<>();
children.addAll(mergeMapOp.getConf().getAliasToWork().values());
// do the same thing as setChildren when there is nothing to read.
// the setChildren method initializes the object inspector needed by the operators
// based on path and partition information which we don't have in this case.
mergeMapOp.initEmptyInputChildren(children, jconf);
} else {
// the setChildren method initializes the object inspector needed by the operators
// based on path and partition information.
mergeMapOp.setChildren(jconf);
}
Operator<? extends OperatorDesc> finalOp = getFinalOp(mergeMapOp);
if (finalOp instanceof TezDummyStoreOperator) {
// we ensure that we don't try to read any data in case of skip read.
((TezDummyStoreOperator) finalOp).setFetchDone(skipRead);
mapOp.setConnectedOperators(mergeMapWork.getTag(), (DummyStoreOperator) finalOp);
} else {
// Found the plan is already connected, which means it was derived from the cache.
fromCache = true;
}
mergeMapOp.passExecContext(new ExecMapperContext(jconf));
mergeMapOp.initializeLocalWork(jconf);
}
}
}
if (!fromCache) {
// if not from cache, we still need to hook up the plans.
((TezContext) (MapredContext.get())).setDummyOpsMap(mapOp.getConnectedOperators());
}
// initialize map operator
mapOp.setConf(mapWork);
LOG.info("Main input name is " + mapWork.getName());
jconf.set(Utilities.INPUT_NAME, mapWork.getName());
mapOp.initialize(jconf, null);
checkAbortCondition();
mapOp.setChildren(jconf);
mapOp.passExecContext(execContext);
LOG.info(mapOp.dump(0));
// set memory available for operators
long memoryAvailableToTask = processorContext.getTotalMemoryAvailableToTask();
if (mapOp.getConf() != null) {
mapOp.getConf().setMaxMemoryAvailable(memoryAvailableToTask);
LOG.info("Memory available for operators set to {}", LlapUtil.humanReadableByteCount(memoryAvailableToTask));
}
OperatorUtils.setMemoryAvailable(mapOp.getChildOperators(), memoryAvailableToTask);
mapOp.initializeLocalWork(jconf);
// Setup values registry
checkAbortCondition();
String valueRegistryKey = DynamicValue.DYNAMIC_VALUE_REGISTRY_CACHE_KEY;
// On LLAP, the dynamic value registry might already be cached.
final DynamicValueRegistryTez registryTez = dynamicValueCache.retrieve(valueRegistryKey, () -> new DynamicValueRegistryTez());
dynamicValueCacheKeys.add(valueRegistryKey);
RegistryConfTez registryConf = new RegistryConfTez(jconf, mapWork, processorContext, inputs);
registryTez.init(registryConf);
checkAbortCondition();
initializeMapRecordSources();
mapOp.initializeMapOperator(jconf);
if ((mergeMapOpList != null) && !mergeMapOpList.isEmpty()) {
for (AbstractMapOperator mergeMapOp : mergeMapOpList) {
jconf.set(Utilities.INPUT_NAME, mergeMapOp.getConf().getName());
// TODO HIVE-14042. abort handling: Handling of mergeMapOp
mergeMapOp.initializeMapOperator(jconf);
}
}
// Initialization isn't finished until all parents of all operators
// are initialized. For broadcast joins that means initializing the
// dummy parent operators as well.
List<HashTableDummyOperator> dummyOps = mapWork.getDummyOps();
jconf.set(Utilities.INPUT_NAME, mapWork.getName());
if (dummyOps != null) {
for (Operator<? extends OperatorDesc> dummyOp : dummyOps) {
dummyOp.setExecContext(execContext);
// TODO HIVE-14042. Handling of dummyOps, and propagating abort information to them
dummyOp.initialize(jconf, null);
}
}
OperatorUtils.setChildrenCollector(mapOp.getChildOperators(), outMap);
mapOp.setReporter(reporter);
MapredContext.get().setReporter(reporter);
} catch (Throwable e) {
setAborted(true);
if (e instanceof OutOfMemoryError) {
// Don't create a new object if we are already out of memory
throw (OutOfMemoryError) e;
} else if (e instanceof InterruptedException) {
LOG.info("Hit an interrupt while initializing MapRecordProcessor. Message={}", e.getMessage());
throw (InterruptedException) e;
} else {
throw new RuntimeException("Map operator initialization failed", e);
}
}
perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
}
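Stripped of the Tez-specific setup, the ExecMapperContext wiring in this initializer reduces to a simple rule: the main map operator shares the processor-wide context, while each merge map operator receives a fresh one. A hedged sketch of just that wiring follows; the helper class and method signature are assumptions, the operator calls are the ones used above.
import java.util.List;
import org.apache.hadoop.hive.ql.exec.AbstractMapOperator;
import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.mapred.JobConf;
// Hypothetical sketch, not Hive source: ExecMapperContext wiring as in MapRecordProcessor.init.
public class ExecContextWiringSketch {
  public static void wire(JobConf jconf, AbstractMapOperator mapOp,
      List<AbstractMapOperator> mergeMapOps, ExecMapperContext sharedContext)
      throws HiveException {
    // Main operator: attach and propagate the processor-wide context.
    mapOp.setExecContext(sharedContext);
    for (AbstractMapOperator mergeMapOp : mergeMapOps) {
      // Merge operators: each gets its own fresh context, as in
      // mergeMapOp.passExecContext(new ExecMapperContext(jconf)) above.
      mergeMapOp.passExecContext(new ExecMapperContext(jconf));
      mergeMapOp.initializeLocalWork(jconf);
    }
    mapOp.passExecContext(sharedContext);
    mapOp.initializeLocalWork(jconf);
  }
}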
Use of org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext in project hive by apache.
The class SparkReduceRecordHandler, method init.
@Override
@SuppressWarnings("unchecked")
public void init(JobConf job, OutputCollector output, Reporter reporter) throws Exception {
perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
super.init(job, output, reporter);
rowObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
ObjectInspector keyObjectInspector;
ReduceWork gWork = Utilities.getReduceWork(job);
reducer = gWork.getReducer();
vectorized = gWork.getVectorMode();
// Clear out any parents as reducer is the root.
reducer.setParentOperators(null);
batchContext = gWork.getVectorizedRowBatchCtx();
isTagged = gWork.getNeedsTagging();
try {
keyTableDesc = gWork.getKeyDesc();
inputKeySerDe = ReflectionUtils.newInstance(keyTableDesc.getSerDeClass(), null);
inputKeySerDe.initialize(null, keyTableDesc.getProperties(), null);
keyObjectInspector = inputKeySerDe.getObjectInspector();
valueTableDesc = new TableDesc[gWork.getTagToValueDesc().size()];
if (vectorized) {
final int maxTags = gWork.getTagToValueDesc().size();
// CONSIDER: Cleaning up this code and eliminating the arrays. Vectorization only handles
// one operator tree.
Preconditions.checkState(maxTags == 1);
keyStructInspector = (StructObjectInspector) keyObjectInspector;
firstValueColumnOffset = keyStructInspector.getAllStructFieldRefs().size();
buffer = new DataOutputBuffer();
}
for (int tag = 0; tag < gWork.getTagToValueDesc().size(); tag++) {
// We should initialize the SerDe with the TypeInfo when available.
valueTableDesc[tag] = gWork.getTagToValueDesc().get(tag);
AbstractSerDe inputValueSerDe = ReflectionUtils.newInstance(valueTableDesc[tag].getSerDeClass(), null);
inputValueSerDe.initialize(null, valueTableDesc[tag].getProperties(), null);
inputValueDeserializer[tag] = inputValueSerDe;
valueObjectInspector[tag] = inputValueSerDe.getObjectInspector();
ArrayList<ObjectInspector> ois = new ArrayList<ObjectInspector>();
if (vectorized) {
/* vectorization only works with struct object inspectors */
valueStructInspector = (StructObjectInspector) valueObjectInspector[tag];
final int totalColumns = firstValueColumnOffset + valueStructInspector.getAllStructFieldRefs().size();
rowObjectInspector[tag] = Utilities.constructVectorizedReduceRowOI(keyStructInspector, valueStructInspector);
batch = gWork.getVectorizedRowBatchCtx().createVectorizedRowBatch();
// Setup vectorized deserialization for the key and value.
BinarySortableSerDe binarySortableSerDe = (BinarySortableSerDe) inputKeySerDe;
keyBinarySortableDeserializeToRow = new VectorDeserializeRow<BinarySortableDeserializeRead>(new BinarySortableDeserializeRead(VectorizedBatchUtil.typeInfosFromStructObjectInspector(keyStructInspector), (batchContext.getRowdataTypePhysicalVariations().length > firstValueColumnOffset) ? Arrays.copyOfRange(batchContext.getRowdataTypePhysicalVariations(), 0, firstValueColumnOffset) : batchContext.getRowdataTypePhysicalVariations(), /* useExternalBuffer */
true, binarySortableSerDe.getSortOrders(), binarySortableSerDe.getNullMarkers(), binarySortableSerDe.getNotNullMarkers()));
keyBinarySortableDeserializeToRow.init(0);
final int valuesSize = valueStructInspector.getAllStructFieldRefs().size();
if (valuesSize > 0) {
valueLazyBinaryDeserializeToRow = new VectorDeserializeRow<LazyBinaryDeserializeRead>(new LazyBinaryDeserializeRead(VectorizedBatchUtil.typeInfosFromStructObjectInspector(valueStructInspector), (batchContext.getRowdataTypePhysicalVariations().length >= totalColumns) ? Arrays.copyOfRange(batchContext.getRowdataTypePhysicalVariations(), firstValueColumnOffset, totalColumns) : null, /* useExternalBuffer */
true));
valueLazyBinaryDeserializeToRow.init(firstValueColumnOffset);
// Create data buffers for value bytes column vectors.
for (int i = firstValueColumnOffset; i < batch.numCols; i++) {
ColumnVector colVector = batch.cols[i];
if (colVector instanceof BytesColumnVector) {
BytesColumnVector bytesColumnVector = (BytesColumnVector) colVector;
bytesColumnVector.initBuffer();
}
}
}
} else {
ois.add(keyObjectInspector);
ois.add(valueObjectInspector[tag]);
// reducer.setGroupKeyObjectInspector(keyObjectInspector);
rowObjectInspector[tag] = ObjectInspectorFactory.getStandardStructObjectInspector(Utilities.reduceFieldNameList, ois);
}
}
} catch (Exception e) {
throw new RuntimeException(e);
}
ExecMapperContext execContext = new ExecMapperContext(job);
localWork = gWork.getMapRedLocalWork();
execContext.setJc(jc);
execContext.setLocalWork(localWork);
reducer.passExecContext(execContext);
reducer.setReporter(rp);
OperatorUtils.setChildrenCollector(Arrays.<Operator<? extends OperatorDesc>>asList(reducer), output);
// initialize reduce operator tree
try {
LOG.info(reducer.dump(0));
reducer.initialize(jc, rowObjectInspector);
if (localWork != null) {
for (Operator<? extends OperatorDesc> dummyOp : localWork.getDummyParentOp()) {
dummyOp.setExecContext(execContext);
dummyOp.initialize(jc, null);
}
}
} catch (Throwable e) {
abort = true;
if (e instanceof OutOfMemoryError) {
// Don't create a new object if we are already out of memory
throw (OutOfMemoryError) e;
} else {
throw new RuntimeException("Reduce operator initialization failed", e);
}
}
perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
}
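On the reduce side, the ExecMapperContext is built from the JobConf, pointed at the map-red local work, and handed to the reducer and the dummy parent operators before they are initialized. A minimal sketch of just that wiring, with the helper name and signature assumed for illustration:
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.mapred.JobConf;
// Hypothetical sketch, not Hive source: reduce-side ExecMapperContext wiring as above.
public class ReduceExecContextSketch {
  public static ExecMapperContext attach(JobConf job,
      Operator<? extends OperatorDesc> reducer, MapredLocalWork localWork) {
    ExecMapperContext execContext = new ExecMapperContext(job);
    execContext.setJc(job);
    execContext.setLocalWork(localWork);
    // The reducer propagates the context down its operator tree.
    reducer.passExecContext(execContext);
    if (localWork != null) {
      // Dummy parent operators (e.g. map-join build sides) get the same context
      // so they see consistent per-task state.
      for (Operator<? extends OperatorDesc> dummyOp : localWork.getDummyParentOp()) {
        dummyOp.setExecContext(execContext);
      }
    }
    return execContext;
  }
}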