Use of org.apache.drill.exec.record.MaterializedField in project drill by apache.
The class PruneScanRule, method doOnMatch.
protected void doOnMatch(RelOptRuleCall call, Filter filterRel, Project projectRel, TableScan scanRel) {
final String pruningClassName = getClass().getName();
logger.info("Beginning partition pruning, pruning class: {}", pruningClassName);
Stopwatch totalPruningTime = Stopwatch.createStarted();
final PlannerSettings settings = PrelUtil.getPlannerSettings(call.getPlanner());
PartitionDescriptor descriptor = getPartitionDescriptor(settings, scanRel);
final BufferAllocator allocator = optimizerContext.getAllocator();
final Object selection = getDrillTable(scanRel).getSelection();
MetadataContext metaContext = null;
if (selection instanceof FormatSelection) {
metaContext = ((FormatSelection) selection).getSelection().getMetaContext();
}
RexNode condition = null;
if (projectRel == null) {
condition = filterRel.getCondition();
} else {
// get the filter as if it were below the projection.
condition = RelOptUtil.pushFilterPastProject(filterRel.getCondition(), projectRel);
}
RewriteAsBinaryOperators visitor = new RewriteAsBinaryOperators(true, filterRel.getCluster().getRexBuilder());
condition = condition.accept(visitor);
Map<Integer, String> fieldNameMap = Maps.newHashMap();
List<String> fieldNames = scanRel.getRowType().getFieldNames();
BitSet columnBitset = new BitSet();
BitSet partitionColumnBitSet = new BitSet();
Map<Integer, Integer> partitionMap = Maps.newHashMap();
int relColIndex = 0;
for (String field : fieldNames) {
final Integer partitionIndex = descriptor.getIdIfValid(field);
if (partitionIndex != null) {
fieldNameMap.put(partitionIndex, field);
partitionColumnBitSet.set(partitionIndex);
columnBitset.set(relColIndex);
// mapping between the relColIndex and partitionIndex
partitionMap.put(relColIndex, partitionIndex);
}
relColIndex++;
}
if (partitionColumnBitSet.isEmpty()) {
logger.info("No partition columns are projected from the scan..continue. " + "Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
return;
}
// stop watch to track how long we spend in different phases of pruning
Stopwatch miscTimer = Stopwatch.createUnstarted();
// track how long we spend building the filter tree
miscTimer.start();
FindPartitionConditions c = new FindPartitionConditions(columnBitset, filterRel.getCluster().getRexBuilder());
c.analyze(condition);
RexNode pruneCondition = c.getFinalCondition();
BitSet referencedDirsBitSet = c.getReferencedDirs();
logger.info("Total elapsed time to build and analyze filter tree: {} ms", miscTimer.elapsed(TimeUnit.MILLISECONDS));
miscTimer.reset();
if (pruneCondition == null) {
logger.info("No conditions were found eligible for partition pruning." + "Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
return;
}
// set up the partitions
List<PartitionLocation> newPartitions = Lists.newArrayList();
// total number of partitions
long numTotal = 0;
int batchIndex = 0;
PartitionLocation firstLocation = null;
LogicalExpression materializedExpr = null;
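// spInfo, maxIndex and matchBitSet track the directory values shared by all
// qualifying composite partitions; they drive the cacheFileRoot selection below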
String[] spInfo = null;
int maxIndex = -1;
BitSet matchBitSet = new BitSet();
// Outer loop: iterate over a list of batches of PartitionLocations
for (List<PartitionLocation> partitions : descriptor) {
numTotal += partitions.size();
logger.debug("Evaluating partition pruning for batch {}", batchIndex);
if (batchIndex == 0) {
// save the first location in case everything is pruned
firstLocation = partitions.get(0);
}
final NullableBitVector output = new NullableBitVector(MaterializedField.create("", Types.optional(MinorType.BIT)), allocator);
final VectorContainer container = new VectorContainer();
try {
final ValueVector[] vectors = new ValueVector[descriptor.getMaxHierarchyLevel()];
for (int partitionColumnIndex : BitSets.toIter(partitionColumnBitSet)) {
SchemaPath column = SchemaPath.getSimplePath(fieldNameMap.get(partitionColumnIndex));
MajorType type = descriptor.getVectorType(column, settings);
MaterializedField field = MaterializedField.create(column.getAsUnescapedPath(), type);
ValueVector v = TypeHelper.getNewVector(field, allocator);
v.allocateNew();
vectors[partitionColumnIndex] = v;
container.add(v);
}
// track how long we spend populating partition column vectors
miscTimer.start();
// populate partition vectors.
descriptor.populatePartitionVectors(vectors, partitions, partitionColumnBitSet, fieldNameMap);
logger.info("Elapsed time to populate partitioning column vectors: {} ms within batchIndex: {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex);
miscTimer.reset();
// materialize the expression; only need to do this once
if (batchIndex == 0) {
materializedExpr = materializePruneExpr(pruneCondition, settings, scanRel, container);
if (materializedExpr == null) {
// continue without partition pruning; no need to log anything here since
// materializePruneExpr logs it already
logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
return;
}
}
output.allocateNew(partitions.size());
// start the timer to evaluate how long we spend in the interpreter evaluation
miscTimer.start();
InterpreterEvaluator.evaluate(partitions.size(), optimizerContext, container, output, materializedExpr);
logger.info("Elapsed time in interpreter evaluation: {} ms within batchIndex: {} with # of partitions : {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex, partitions.size());
miscTimer.reset();
int recordCount = 0;
int qualifiedCount = 0;
// apply the single-partition check only for composite partitions
if (descriptor.supportsMetadataCachePruning() && partitions.get(0).isCompositePartition()) {
// Inner loop: within each batch iterate over the PartitionLocations
for (PartitionLocation part : partitions) {
assert part.isCompositePartition();
if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
newPartitions.add(part);
// Rather than using the PartitionLocation, get the array of partition values for the directories that are
// referenced by the filter since we are not interested in directory references in other parts of the query.
Pair<String[], Integer> p = composePartition(referencedDirsBitSet, partitionMap, vectors, recordCount);
String[] parts = p.getLeft();
int tmpIndex = p.getRight();
maxIndex = Math.max(maxIndex, tmpIndex);
if (spInfo == null) {
// initialization
spInfo = parts;
for (int j = 0; j <= tmpIndex; j++) {
if (parts[j] != null) {
matchBitSet.set(j);
}
}
} else {
// compare the new partition with existing partition
for (int j = 0; j <= tmpIndex; j++) {
if (parts[j] == null || spInfo[j] == null) {
// nulls don't match
matchBitSet.clear(j);
} else {
if (!parts[j].equals(spInfo[j])) {
matchBitSet.clear(j);
}
}
}
}
qualifiedCount++;
}
recordCount++;
}
} else {
// Inner loop: within each batch iterate over the PartitionLocations
for (PartitionLocation part : partitions) {
if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
newPartitions.add(part);
qualifiedCount++;
}
recordCount++;
}
}
logger.debug("Within batch {}: total records: {}, qualified records: {}", batchIndex, recordCount, qualifiedCount);
batchIndex++;
} catch (Exception e) {
logger.warn("Exception while trying to prune partition.", e);
logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
// continue without partition pruning
return;
} finally {
container.clear();
if (output != null) {
output.clear();
}
}
}
try {
if (newPartitions.size() == numTotal) {
logger.info("No partitions were eligible for pruning");
return;
}
// handle the case where all partitions are filtered out.
boolean canDropFilter = true;
boolean wasAllPartitionsPruned = false;
String cacheFileRoot = null;
if (newPartitions.isEmpty()) {
assert firstLocation != null;
// Add the first non-composite partition location, since execution requires schema.
// In that case, we must not drop the filter.
newPartitions.add(firstLocation.getPartitionLocationRecursive().get(0));
canDropFilter = false;
// NOTE: with DRILL-4530, the PruneScanRule may be called with only a list of
// directories first and the non-composite partition location will still return
// directories, not files. So, additional processing is done depending on this flag
wasAllPartitionsPruned = true;
logger.info("All {} partitions were pruned; added back a single partition to allow creating a schema", numTotal);
// set the cacheFileRoot appropriately
if (firstLocation.isCompositePartition()) {
cacheFileRoot = descriptor.getBaseTableLocation() + firstLocation.getCompositePartitionPath();
}
}
logger.info("Pruned {} partitions down to {}", numTotal, newPartitions.size());
List<RexNode> conjuncts = RelOptUtil.conjunctions(condition);
List<RexNode> pruneConjuncts = RelOptUtil.conjunctions(pruneCondition);
conjuncts.removeAll(pruneConjuncts);
RexNode newCondition = RexUtil.composeConjunction(filterRel.getCluster().getRexBuilder(), conjuncts, false);
RewriteCombineBinaryOperators reverseVisitor = new RewriteCombineBinaryOperators(true, filterRel.getCluster().getRexBuilder());
condition = condition.accept(reverseVisitor);
pruneCondition = pruneCondition.accept(reverseVisitor);
if (descriptor.supportsMetadataCachePruning() && !wasAllPartitionsPruned) {
// if metadata cache file could potentially be used, then assign a proper cacheFileRoot
int index = -1;
if (!matchBitSet.isEmpty()) {
String path = "";
index = matchBitSet.length() - 1;
for (int j = 0; j < matchBitSet.length(); j++) {
if (!matchBitSet.get(j)) {
// stop at the first index with no match and use the immediate
// previous index
index = j - 1;
break;
}
}
for (int j = 0; j <= index; j++) {
path += "/" + spInfo[j];
}
cacheFileRoot = descriptor.getBaseTableLocation() + path;
}
if (index != maxIndex) {
// if multiple partitions are being selected, we should not drop the filter
// since we are reading the cache file at a parent/ancestor level
canDropFilter = false;
}
}
RelNode inputRel = descriptor.supportsMetadataCachePruning() ? descriptor.createTableScan(newPartitions, cacheFileRoot, wasAllPartitionsPruned, metaContext) : descriptor.createTableScan(newPartitions, wasAllPartitionsPruned);
if (projectRel != null) {
inputRel = projectRel.copy(projectRel.getTraitSet(), Collections.singletonList(inputRel));
}
if (newCondition.isAlwaysTrue() && canDropFilter) {
call.transformTo(inputRel);
} else {
final RelNode newFilter = filterRel.copy(filterRel.getTraitSet(), Collections.singletonList(inputRel));
call.transformTo(newFilter);
}
setPruneStatus(metaContext, PruneStatus.PRUNED);
} catch (Exception e) {
logger.warn("Exception while using the pruned partitions.", e);
} finally {
logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
}
}
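The interpreter-based pruning above relies on two MaterializedField usages: an anonymous nullable BIT vector that receives the filter verdict per partition, and one typed vector per partition column. A minimal sketch of that allocation pattern, assuming only an existing BufferAllocator; the class and helper names are ours, not Drill's:

import org.apache.drill.common.types.TypeProtos.MajorType;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;
import org.apache.drill.exec.expr.TypeHelper;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.vector.NullableBitVector;
import org.apache.drill.exec.vector.ValueVector;

public final class PruneVectorSketch {

  // The output vector is unnamed: only its per-record BIT values matter.
  static NullableBitVector newOutputVector(BufferAllocator allocator, int capacity) {
    MaterializedField field = MaterializedField.create("", Types.optional(MinorType.BIT));
    NullableBitVector output = new NullableBitVector(field, allocator);
    output.allocateNew(capacity);
    return output;
  }

  // Each partition column gets a vector of the type the descriptor reports for it.
  static ValueVector newPartitionVector(BufferAllocator allocator, String column, MajorType type) {
    MaterializedField field = MaterializedField.create(column, type);
    ValueVector v = TypeHelper.getNewVector(field, allocator);
    v.allocateNew();
    return v;
  }
}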
Use of org.apache.drill.exec.record.MaterializedField in project drill by apache.
The class MapUtility, method writeToMapFromReader.
/*
 * Reads a value from the field reader, detects its type, and copies the value
 * into the map through the matching MapWriter method.
 */
// TODO: This should be templatized and generated using FreeMarker
public static void writeToMapFromReader(FieldReader fieldReader, BaseWriter.MapWriter mapWriter) {
try {
MajorType valueMajorType = fieldReader.getType();
MinorType valueMinorType = valueMajorType.getMinorType();
boolean repeated = false;
if (valueMajorType.getMode() == TypeProtos.DataMode.REPEATED) {
repeated = true;
}
switch(valueMinorType) {
case TINYINT:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).tinyInt());
} else {
fieldReader.copyAsValue(mapWriter.tinyInt(MappifyUtility.fieldValue));
}
break;
case SMALLINT:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).smallInt());
} else {
fieldReader.copyAsValue(mapWriter.smallInt(MappifyUtility.fieldValue));
}
break;
case BIGINT:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).bigInt());
} else {
fieldReader.copyAsValue(mapWriter.bigInt(MappifyUtility.fieldValue));
}
break;
case INT:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).integer());
} else {
fieldReader.copyAsValue(mapWriter.integer(MappifyUtility.fieldValue));
}
break;
case UINT1:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).uInt1());
} else {
fieldReader.copyAsValue(mapWriter.uInt1(MappifyUtility.fieldValue));
}
break;
case UINT2:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).uInt2());
} else {
fieldReader.copyAsValue(mapWriter.uInt2(MappifyUtility.fieldValue));
}
break;
case UINT4:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).uInt4());
} else {
fieldReader.copyAsValue(mapWriter.uInt4(MappifyUtility.fieldValue));
}
break;
case UINT8:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).uInt8());
} else {
fieldReader.copyAsValue(mapWriter.uInt8(MappifyUtility.fieldValue));
}
break;
case DECIMAL9:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).decimal9());
} else {
fieldReader.copyAsValue(mapWriter.decimal9(MappifyUtility.fieldValue));
}
break;
case DECIMAL18:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).decimal18());
} else {
fieldReader.copyAsValue(mapWriter.decimal18(MappifyUtility.fieldValue));
}
break;
case DECIMAL28SPARSE:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).decimal28Sparse());
} else {
fieldReader.copyAsValue(mapWriter.decimal28Sparse(MappifyUtility.fieldValue));
}
break;
case DECIMAL38SPARSE:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).decimal38Sparse());
} else {
fieldReader.copyAsValue(mapWriter.decimal38Sparse(MappifyUtility.fieldValue));
}
break;
case DATE:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).date());
} else {
fieldReader.copyAsValue(mapWriter.date(MappifyUtility.fieldValue));
}
break;
case TIME:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).time());
} else {
fieldReader.copyAsValue(mapWriter.time(MappifyUtility.fieldValue));
}
break;
case TIMESTAMP:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).timeStamp());
} else {
fieldReader.copyAsValue(mapWriter.timeStamp(MappifyUtility.fieldValue));
}
break;
case INTERVAL:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).interval());
} else {
fieldReader.copyAsValue(mapWriter.interval(MappifyUtility.fieldValue));
}
break;
case INTERVALDAY:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).intervalDay());
} else {
fieldReader.copyAsValue(mapWriter.intervalDay(MappifyUtility.fieldValue));
}
break;
case INTERVALYEAR:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).intervalYear());
} else {
fieldReader.copyAsValue(mapWriter.intervalYear(MappifyUtility.fieldValue));
}
break;
case FLOAT4:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).float4());
} else {
fieldReader.copyAsValue(mapWriter.float4(MappifyUtility.fieldValue));
}
break;
case FLOAT8:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).float8());
} else {
fieldReader.copyAsValue(mapWriter.float8(MappifyUtility.fieldValue));
}
break;
case BIT:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).bit());
} else {
fieldReader.copyAsValue(mapWriter.bit(MappifyUtility.fieldValue));
}
break;
case VARCHAR:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).varChar());
} else {
fieldReader.copyAsValue(mapWriter.varChar(MappifyUtility.fieldValue));
}
break;
case VARBINARY:
if (repeated) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).varBinary());
} else {
fieldReader.copyAsValue(mapWriter.varBinary(MappifyUtility.fieldValue));
}
break;
case MAP:
if (valueMajorType.getMode() == TypeProtos.DataMode.REPEATED) {
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).map());
} else {
fieldReader.copyAsValue(mapWriter.map(MappifyUtility.fieldValue));
}
break;
case LIST:
fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).list());
break;
default:
throw new DrillRuntimeException(String.format("kvgen does not support input of type: %s", valueMinorType));
}
} catch (ClassCastException e) {
final MaterializedField field = fieldReader.getField();
throw new DrillRuntimeException(String.format(TYPE_MISMATCH_ERROR, field.getPath(), field.getType()));
}
}
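Every case in the switch has the same two-way shape: a REPEATED value is routed through a list writer, a scalar through the named map entry, and the catch block uses the reader's MaterializedField to name the offending column. A condensed sketch of one branch plus the error path; the class name, helper names, and the error message wording are ours (the real code uses the TYPE_MISMATCH_ERROR format string):

import org.apache.drill.common.exceptions.DrillRuntimeException;
import org.apache.drill.common.types.TypeProtos;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.vector.complex.reader.FieldReader;
import org.apache.drill.exec.vector.complex.writer.BaseWriter;

final class KvgenCopySketch {

  // One branch of the big switch: VARCHAR, repeated or scalar.
  static void copyVarChar(FieldReader reader, BaseWriter.MapWriter mapWriter, String valueFieldName) {
    if (reader.getType().getMode() == TypeProtos.DataMode.REPEATED) {
      reader.copyAsValue(mapWriter.list(valueFieldName).varChar());
    } else {
      reader.copyAsValue(mapWriter.varChar(valueFieldName));
    }
  }

  // The error path: the reader's MaterializedField supplies the column name and type.
  static DrillRuntimeException typeMismatch(FieldReader reader) {
    MaterializedField field = reader.getField();
    return new DrillRuntimeException(String.format(
        "kvgen does not support a value of type %s in column '%s'",
        field.getType(), field.getPath()));
  }
}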
Use of org.apache.drill.exec.record.MaterializedField in project drill by apache.
The class ProjectRecordBatch, method setupNewSchema.
@Override
protected boolean setupNewSchema() throws SchemaChangeException {
if (allocationVectors != null) {
for (final ValueVector v : allocationVectors) {
v.clear();
}
}
this.allocationVectors = Lists.newArrayList();
if (complexWriters != null) {
container.clear();
} else {
container.zeroVectors();
}
final List<NamedExpression> exprs = getExpressionList();
final ErrorCollector collector = new ErrorCollectorImpl();
final List<TransferPair> transfers = Lists.newArrayList();
final ClassGenerator<Projector> cg = CodeGenerator.getRoot(Projector.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
cg.getCodeGenerator().plainJavaCapable(true);
// Uncomment this line to debug the generated code.
// cg.getCodeGenerator().saveCodeForDebugging(true);
final IntHashSet transferFieldIds = new IntHashSet();
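// records incoming field ids already satisfied by a transfer, so the same vector is never transferred twice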
final boolean isAnyWildcard = isAnyWildcard(exprs);
final ClassifierResult result = new ClassifierResult();
final boolean classify = isClassificationNeeded(exprs);
for (int i = 0; i < exprs.size(); i++) {
final NamedExpression namedExpression = exprs.get(i);
result.clear();
if (classify && namedExpression.getExpr() instanceof SchemaPath) {
classifyExpr(namedExpression, incoming, result);
if (result.isStar) {
// The value indicates which wildcard we are processing now
final Integer value = result.prefixMap.get(result.prefix);
if (value != null && value.intValue() == 1) {
int k = 0;
for (final VectorWrapper<?> wrapper : incoming) {
final ValueVector vvIn = wrapper.getValueVector();
if (k > result.outputNames.size() - 1) {
assert false;
}
// get the renamed column names
final String name = result.outputNames.get(k++);
if (name == EMPTY_STRING) {
continue;
}
if (isImplicitFileColumn(vvIn)) {
continue;
}
final FieldReference ref = new FieldReference(name);
final ValueVector vvOut = container.addOrGet(MaterializedField.create(ref.getAsNamePart().getName(), vvIn.getField().getType()), callBack);
final TransferPair tp = vvIn.makeTransferPair(vvOut);
transfers.add(tp);
}
} else if (value != null && value.intValue() > 1) {
// subsequent wildcards should do a copy of incoming valuevectors
int k = 0;
for (final VectorWrapper<?> wrapper : incoming) {
final ValueVector vvIn = wrapper.getValueVector();
final SchemaPath originalPath = SchemaPath.getSimplePath(vvIn.getField().getPath());
if (k > result.outputNames.size() - 1) {
assert false;
}
// get the renamed column names
final String name = result.outputNames.get(k++);
if (name == EMPTY_STRING) {
continue;
}
if (isImplicitFileColumn(vvIn)) {
continue;
}
final LogicalExpression expr = ExpressionTreeMaterializer.materialize(originalPath, incoming, collector, context.getFunctionRegistry());
if (collector.hasErrors()) {
throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
}
final MaterializedField outputField = MaterializedField.create(name, expr.getMajorType());
final ValueVector vv = container.addOrGet(outputField, callBack);
allocationVectors.add(vv);
final TypedFieldId fid = container.getValueVectorId(SchemaPath.getSimplePath(outputField.getPath()));
final ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, true);
final HoldingContainer hc = cg.addExpr(write, ClassGenerator.BlkCreateMode.TRUE_IF_BOUND);
}
}
continue;
}
} else {
// For columns that do not need to be classified,
// it is still necessary to ensure the output column name is unique
result.outputNames = Lists.newArrayList();
final String outputName = getRef(namedExpression).getRootSegment().getPath();
addToResultMaps(outputName, result, true);
}
String outputName = getRef(namedExpression).getRootSegment().getPath();
if (result != null && result.outputNames != null && result.outputNames.size() > 0) {
boolean isMatched = false;
for (int j = 0; j < result.outputNames.size(); j++) {
if (!result.outputNames.get(j).equals(EMPTY_STRING)) {
outputName = result.outputNames.get(j);
isMatched = true;
break;
}
}
if (!isMatched) {
continue;
}
}
final LogicalExpression expr = ExpressionTreeMaterializer.materialize(namedExpression.getExpr(), incoming, collector, context.getFunctionRegistry(), true, unionTypeEnabled);
final MaterializedField outputField = MaterializedField.create(outputName, expr.getMajorType());
if (collector.hasErrors()) {
throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
}
// add value vector to transfer if direct reference and this is allowed, otherwise, add to evaluation stack.
if (expr instanceof ValueVectorReadExpression && incoming.getSchema().getSelectionVectorMode() == SelectionVectorMode.NONE && !((ValueVectorReadExpression) expr).hasReadPath() && !isAnyWildcard && !transferFieldIds.contains(((ValueVectorReadExpression) expr).getFieldId().getFieldIds()[0])) {
final ValueVectorReadExpression vectorRead = (ValueVectorReadExpression) expr;
final TypedFieldId id = vectorRead.getFieldId();
final ValueVector vvIn = incoming.getValueAccessorById(id.getIntermediateClass(), id.getFieldIds()).getValueVector();
Preconditions.checkNotNull(incoming);
final FieldReference ref = getRef(namedExpression);
final ValueVector vvOut = container.addOrGet(MaterializedField.create(ref.getAsUnescapedPath(), vectorRead.getMajorType()), callBack);
final TransferPair tp = vvIn.makeTransferPair(vvOut);
transfers.add(tp);
transferFieldIds.add(vectorRead.getFieldId().getFieldIds()[0]);
} else if (expr instanceof DrillFuncHolderExpr && ((DrillFuncHolderExpr) expr).getHolder().isComplexWriterFuncHolder()) {
// Lazy initialization of the list of complex writers, if not done yet.
if (complexWriters == null) {
complexWriters = Lists.newArrayList();
} else {
complexWriters.clear();
}
// The reference name will be passed to ComplexWriter, used as the name of the output vector from the writer.
((DrillFuncHolderExpr) expr).getFieldReference(namedExpression.getRef());
cg.addExpr(expr, ClassGenerator.BlkCreateMode.TRUE_IF_BOUND);
if (complexFieldReferencesList == null) {
complexFieldReferencesList = Lists.newArrayList();
}
// save the field reference for later for getting schema when input is empty
complexFieldReferencesList.add(namedExpression.getRef());
} else {
// need to do evaluation.
final ValueVector vector = container.addOrGet(outputField, callBack);
allocationVectors.add(vector);
final TypedFieldId fid = container.getValueVectorId(SchemaPath.getSimplePath(outputField.getPath()));
final boolean useSetSafe = !(vector instanceof FixedWidthVector);
final ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, useSetSafe);
final HoldingContainer hc = cg.addExpr(write, ClassGenerator.BlkCreateMode.TRUE_IF_BOUND);
// We cannot do multiple transfers from the same vector. However we still need to instantiate the output vector.
if (expr instanceof ValueVectorReadExpression) {
final ValueVectorReadExpression vectorRead = (ValueVectorReadExpression) expr;
if (!vectorRead.hasReadPath()) {
final TypedFieldId id = vectorRead.getFieldId();
final ValueVector vvIn = incoming.getValueAccessorById(id.getIntermediateClass(), id.getFieldIds()).getValueVector();
vvIn.makeTransferPair(vector);
}
}
logger.debug("Added eval for project expression.");
}
}
try {
CodeGenerator<Projector> codeGen = cg.getCodeGenerator();
codeGen.plainJavaCapable(true);
// Uncomment this line to debug the generated code.
// codeGen.saveCodeForDebugging(true);
this.projector = context.getImplementationClass(codeGen);
projector.setup(context, incoming, this, transfers);
} catch (ClassTransformationException | IOException e) {
throw new SchemaChangeException("Failure while attempting to load generated class", e);
}
if (container.isSchemaChanged()) {
container.buildSchema(SelectionVectorMode.NONE);
return true;
} else {
return false;
}
}
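Both the wildcard expansion and the direct-reference fast path above reduce to one MaterializedField pattern: declare an output field carrying the output name and the incoming vector's type, addOrGet it on the container, and move the buffers with a TransferPair instead of generating per-record copy code. A minimal sketch of that pattern, assuming the container and schema-change callback the method already holds; the helper name is ours:

import java.util.List;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.record.TransferPair;
import org.apache.drill.exec.record.VectorContainer;
import org.apache.drill.exec.vector.SchemaChangeCallBack;
import org.apache.drill.exec.vector.ValueVector;

final class TransferSketch {

  static ValueVector addTransfer(VectorContainer container, SchemaChangeCallBack callBack,
      ValueVector vvIn, String outputName, List<TransferPair> transfers) {
    // Same name as the output column, same type as the incoming vector.
    MaterializedField outputField = MaterializedField.create(outputName, vvIn.getField().getType());
    ValueVector vvOut = container.addOrGet(outputField, callBack);
    // A transfer moves the underlying buffers; no per-record copy is generated.
    transfers.add(vvIn.makeTransferPair(vvOut));
    return vvOut;
  }
}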
Use of org.apache.drill.exec.record.MaterializedField in project drill by apache.
The class ProjectRecordBatch, method getExpressionList.
private List<NamedExpression> getExpressionList() {
if (popConfig.getExprs() != null) {
return popConfig.getExprs();
}
final List<NamedExpression> exprs = Lists.newArrayList();
for (final MaterializedField field : incoming.getSchema()) {
if (Types.isComplex(field.getType()) || Types.isRepeated(field.getType())) {
final LogicalExpression convertToJson = FunctionCallFactory.createConvert(ConvertExpression.CONVERT_TO, "JSON", SchemaPath.getSimplePath(field.getPath()), ExpressionPosition.UNKNOWN);
final String castFuncName = CastFunctions.getCastFunc(MinorType.VARCHAR);
final List<LogicalExpression> castArgs = Lists.newArrayList();
//input_expr
castArgs.add(convertToJson);
// cast to varchar with an undefined length, since the actual source length is unknown; an undefined length preserves the source length
castArgs.add(new ValueExpressions.LongExpression(Types.MAX_VARCHAR_LENGTH, null));
final FunctionCall castCall = new FunctionCall(castFuncName, castArgs, ExpressionPosition.UNKNOWN);
exprs.add(new NamedExpression(castCall, new FieldReference(field.getPath())));
} else {
exprs.add(new NamedExpression(SchemaPath.getSimplePath(field.getPath()), new FieldReference(field.getPath())));
}
}
return exprs;
}
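For a complex or repeated column the method synthesizes cast(convert_toJSON(col) as VARCHAR) so the projected output is printable. A self-contained version of that expression construction, mirroring the source; only the class and method names are ours:

import java.util.List;
import org.apache.drill.common.expression.ConvertExpression;
import org.apache.drill.common.expression.ExpressionPosition;
import org.apache.drill.common.expression.FieldReference;
import org.apache.drill.common.expression.FunctionCall;
import org.apache.drill.common.expression.FunctionCallFactory;
import org.apache.drill.common.expression.LogicalExpression;
import org.apache.drill.common.expression.SchemaPath;
import org.apache.drill.common.expression.ValueExpressions;
import org.apache.drill.common.expression.fn.CastFunctions;
import org.apache.drill.common.logical.data.NamedExpression;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;
import org.apache.drill.exec.record.MaterializedField;
import com.google.common.collect.Lists;

final class JsonProjectionSketch {

  static NamedExpression toJsonVarChar(MaterializedField field) {
    // convert_toJSON(col)
    LogicalExpression convertToJson = FunctionCallFactory.createConvert(
        ConvertExpression.CONVERT_TO, "JSON",
        SchemaPath.getSimplePath(field.getPath()), ExpressionPosition.UNKNOWN);
    // cast(... as VARCHAR) with an undefined length, preserving the source length
    List<LogicalExpression> castArgs = Lists.newArrayList();
    castArgs.add(convertToJson);
    castArgs.add(new ValueExpressions.LongExpression(Types.MAX_VARCHAR_LENGTH, null));
    FunctionCall castCall = new FunctionCall(
        CastFunctions.getCastFunc(MinorType.VARCHAR), castArgs, ExpressionPosition.UNKNOWN);
    return new NamedExpression(castCall, new FieldReference(field.getPath()));
  }
}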
Use of org.apache.drill.exec.record.MaterializedField in project drill by apache.
The class OrderedPartitionRecordBatch, method getCopier.
/**
* Creates a copier that does a project for every Nth record from a VectorContainer incoming into VectorContainer
* outgoing. Each Ordering in orderings generates a column, and evaluation of the expression associated with each
* Ordering determines the value of each column. These records will later be sorted based on the values in each
* column, in the same order as the orderings.
*
* @param sv4 selection vector identifying the sampled records in incoming
* @param incoming container holding the records to sample
* @param outgoing container that receives the projected ordering columns
* @param orderings ordering expressions, one generated output column each
* @param localAllocationVectors collects the vectors allocated for the output columns
* @return a SampleCopier that projects the selected records into outgoing
* @throws SchemaChangeException if materializing an ordering expression fails
*/
private SampleCopier getCopier(SelectionVector4 sv4, VectorContainer incoming, VectorContainer outgoing, List<Ordering> orderings, List<ValueVector> localAllocationVectors) throws SchemaChangeException {
final ErrorCollector collector = new ErrorCollectorImpl();
final ClassGenerator<SampleCopier> cg = CodeGenerator.getRoot(SampleCopier.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
// Note: disabled for now. This may require some debugging:
// no tests are available for this operator.
// cg.getCodeGenerator().plainJavaCapable(true);
// Uncomment this line to debug the generated code.
// cg.getCodeGenerator().saveCodeForDebugging(true);
int i = 0;
for (Ordering od : orderings) {
final LogicalExpression expr = ExpressionTreeMaterializer.materialize(od.getExpr(), incoming, collector, context.getFunctionRegistry());
SchemaPath schemaPath = SchemaPath.getSimplePath("f" + i++);
TypeProtos.MajorType.Builder builder = TypeProtos.MajorType.newBuilder().mergeFrom(expr.getMajorType()).clearMode().setMode(TypeProtos.DataMode.REQUIRED);
TypeProtos.MajorType newType = builder.build();
MaterializedField outputField = MaterializedField.create(schemaPath.getAsUnescapedPath(), newType);
if (collector.hasErrors()) {
throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
}
@SuppressWarnings("resource") ValueVector vector = TypeHelper.getNewVector(outputField, oContext.getAllocator());
localAllocationVectors.add(vector);
TypedFieldId fid = outgoing.add(vector);
ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, true);
HoldingContainer hc = cg.addExpr(write);
cg.getEvalBlock()._if(hc.getValue().eq(JExpr.lit(0)))._then()._return(JExpr.FALSE);
}
cg.rotateBlock();
cg.getEvalBlock()._return(JExpr.TRUE);
outgoing.buildSchema(BatchSchema.SelectionVectorMode.NONE);
try {
SampleCopier sampleCopier = context.getImplementationClass(cg);
sampleCopier.setupCopier(context, sv4, incoming, outgoing);
return sampleCopier;
} catch (ClassTransformationException | IOException e) {
throw new SchemaChangeException(e);
}
}
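The one non-obvious step above is the type rewrite: whatever mode the materialized ordering expression carries, the sample column's MaterializedField is forced to REQUIRED before the vector is created. The same rewrite as a standalone sketch; the helper name is ours:

import org.apache.drill.common.expression.LogicalExpression;
import org.apache.drill.common.types.TypeProtos;
import org.apache.drill.exec.record.MaterializedField;

final class RequiredFieldSketch {

  static MaterializedField requiredField(String name, LogicalExpression expr) {
    // Copy the expression's type, then clear and reset the mode to REQUIRED.
    TypeProtos.MajorType newType = TypeProtos.MajorType.newBuilder()
        .mergeFrom(expr.getMajorType())
        .clearMode()
        .setMode(TypeProtos.DataMode.REQUIRED)
        .build();
    return MaterializedField.create(name, newType);
  }
}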