Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto:
class OrderedPartitionRecordBatch, method getCopier.
/**
 * Creates a copier that does a project for every Nth record from a VectorContainer incoming into VectorContainer
 * outgoing. Each Ordering in orderings generates a column, and evaluation of the expression associated with each
 * Ordering determines the value of each column. These records will later be sorted based on the values in each
 * column, in the same order as the orderings.
 *
 * @param sv4 selection vector identifying the sampled records to copy
 * @param incoming source container the ordering expressions are materialized against
 * @param outgoing destination container; one REQUIRED-mode vector per ordering is added to it
 * @param orderings sort keys; each produces one generated output column named "f0", "f1", ...
 * @param localAllocationVectors receives the vectors created here so the caller can allocate/manage them
 * @return a generated SampleCopier wired to sv4, incoming and outgoing
 * @throws SchemaChangeException if expression materialization or code generation/loading fails
 */
private SampleCopier getCopier(SelectionVector4 sv4, VectorContainer incoming, VectorContainer outgoing, List<Ordering> orderings, List<ValueVector> localAllocationVectors) throws SchemaChangeException {
  final ErrorCollector collector = new ErrorCollectorImpl();
  final ClassGenerator<SampleCopier> cg = CodeGenerator.getRoot(SampleCopier.TEMPLATE_DEFINITION, context.getOptions());
  // Note: disabled for now. This may require some debugging:
  // no tests are available for this operator.
  // cg.getCodeGenerator().plainOldJavaCapable(true);
  // Uncomment out this line to debug the generated code.
  // cg.getCodeGenerator().saveCodeForDebugging(true);
  int i = 0;
  for (Ordering od : orderings) {
    final LogicalExpression expr = ExpressionTreeMaterializer.materialize(od.getExpr(), incoming, collector, context.getFunctionRegistry());
    // Fail fast BEFORE dereferencing expr: when materialization reported errors the
    // returned expression may be unusable, and expr.getMajorType() could NPE. The
    // original code only checked after building the output type.
    if (collector.hasErrors()) {
      throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
    }
    // Output columns are forced to REQUIRED mode: the copier writes a value for every sampled row.
    TypeProtos.MajorType newType = TypeProtos.MajorType.newBuilder()
        .mergeFrom(expr.getMajorType())
        .clearMode()
        .setMode(TypeProtos.DataMode.REQUIRED)
        .build();
    MaterializedField outputField = MaterializedField.create("f" + i++, newType);
    @SuppressWarnings("resource")
    ValueVector vector = TypeHelper.getNewVector(outputField, oContext.getAllocator());
    localAllocationVectors.add(vector);
    TypedFieldId fid = outgoing.add(vector);
    ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, true);
    HoldingContainer hc = cg.addExpr(write);
    // Generated code bails out (returns false) as soon as any column write fails.
    cg.getEvalBlock()._if(hc.getValue().eq(JExpr.lit(0)))._then()._return(JExpr.FALSE);
  }
  cg.rotateBlock();
  cg.getEvalBlock()._return(JExpr.TRUE);
  outgoing.buildSchema(BatchSchema.SelectionVectorMode.NONE);
  try {
    SampleCopier sampleCopier = context.getImplementationClass(cg);
    sampleCopier.setupCopier(context, sv4, incoming, outgoing);
    return sampleCopier;
  } catch (ClassTransformationException | IOException e) {
    // Preserve the compilation/IO failure as the cause.
    throw new SchemaChangeException(e);
  }
}
Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto:
class FlattenRecordBatch, method getFlattenFieldTransferPair.
/**
 * Builds the transfer pair used to move the flattened column's data without per-value copies.
 * Because a repeated vector stores its actual values in an inner vector whose layout matches the
 * equivalent scalar vector of the same SQL type, the inner vector can be transferred wholesale to
 * the output scalar vector; only the offsets/list structure is discarded. The transfer is performed
 * after the record-fitting pass that copies the remaining (non-flattened) columns alongside each
 * emitted value.
 *
 * Returns null when the repeated vector's inner data vector is not yet available (fast schema
 * return for repeated list vectors).
 */
@SuppressWarnings("resource")
private TransferPair getFlattenFieldTransferPair(FieldReference reference) {
  final TypedFieldId fieldId = incoming.getValueVectorId(popConfig.getColumn());
  final Class<?> vectorClass = incoming.getSchema().getColumn(fieldId.getFieldIds()[0]).getValueClass();
  final ValueVector flattenField = incoming.getValueAccessorById(vectorClass, fieldId.getFieldIds()).getValueVector();
  final String outputName = reference.getAsNamePart().getName();

  if (flattenField instanceof RepeatedMapVector) {
    // Repeated maps have a dedicated transfer that collapses the list of maps into a single map.
    return ((RepeatedMapVector) flattenField).getTransferPairToSingleMap(outputName, oContext.getAllocator());
  }

  if (flattenField instanceof RepeatedValueVector) {
    final ValueVector inner = RepeatedValueVector.class.cast(flattenField).getDataVector();
    // inner may be null because of fast schema return for repeated list vectors
    return inner == null ? null : inner.getTransferPair(outputName, oContext.getAllocator());
  }

  // Non-repeated input: only tolerated when the batch carries no records, because the
  // observed type is not yet solid in that case.
  if (incoming.getRecordCount() != 0) {
    throw UserException.unsupportedError().message("Flatten does not support inputs of non-list values.").build(logger);
  }
  logger.error("Cannot cast {} to RepeatedValueVector", flattenField);
  final ValueVector placeholder = new RepeatedMapVector(flattenField.getField(), oContext.getAllocator(), null);
  return RepeatedValueVector.class.cast(placeholder).getTransferPair(outputName, oContext.getAllocator());
}
Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto:
class FlattenRecordBatch, method setFlattenVector.
/**
 * Resolves the column being flattened to a RepeatedValueVector and hands it to the flattener.
 * When the incoming batch is empty, a non-repeated vector is tolerated (the observed type is not
 * yet solid) and an empty RepeatedMapVector placeholder is installed instead of failing.
 */
@SuppressWarnings("resource")
private void setFlattenVector() {
  final TypedFieldId id = incoming.getValueVectorId(popConfig.getColumn());
  final MaterializedField column = incoming.getSchema().getColumn(id.getFieldIds()[0]);
  final ValueVector source = incoming.getValueAccessorById(column.getValueClass(), id.getFieldIds()).getValueVector();

  if (source instanceof RepeatedValueVector) {
    flattener.setFlattenField(RepeatedValueVector.class.cast(source));
    return;
  }

  // Non-repeated input is only acceptable when there are no records to flatten.
  if (incoming.getRecordCount() != 0) {
    throw UserException.unsupportedError().message("Flatten does not support inputs of non-list values.").build(logger);
  }
  logger.error("setFlattenVector cast failed and recordcount is 0, create empty vector anyway.");
  flattener.setFlattenField(new RepeatedMapVector(column, oContext.getAllocator(), null));
}
Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto:
class ParquetRecordWriter, method updateSchema.
/**
 * Tracks the schema of incoming batches. On the first batch, on any schema change, or whenever the
 * previous schema contained complex vectors, the current rows are flushed (if any were written) and
 * a new Parquet schema is built. Also rebinds the partition-comparator bit vector when the batch
 * carries one.
 *
 * @param batch the incoming batch whose schema and vectors are inspected
 * @throws IOException if flushing the previously buffered rows fails
 */
@Override
public void updateSchema(VectorAccessible batch) throws IOException {
  if (this.batchSchema == null || !this.batchSchema.equals(batch.getSchema()) || containsComplexVectors(this.batchSchema)) {
    if (this.batchSchema != null) {
      // Flush rows written under the previous schema before switching to the new one.
      flush();
    }
    this.batchSchema = batch.getSchema();
    newSchema();
  }
  TypedFieldId fieldId = batch.getValueVectorId(SchemaPath.getSimplePath(WriterPrel.PARTITION_COMPARATOR_FIELD));
  if (fieldId != null) {
    // Wildcard-parameterized instead of the original raw VectorWrapper reference.
    VectorWrapper<?> w = batch.getValueAccessorById(BitVector.class, fieldId.getFieldIds());
    setPartitionVector((BitVector) w.getValueVector());
  }
}
Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto:
class CopyUtil, method generateCopies.
/**
 * Emits one copy statement per value vector of {@code batch} into the eval block of the generator
 * {@code g}, pairing incoming and outgoing vectors positionally by field id. Fixed-width scalar
 * columns use the cheaper {@code copyFrom}; variable-width, repeated and complex columns use
 * {@code copyFromSafe}, which can re-allocate on overflow.
 *
 * @param g     class generator receiving the declarations and copy calls
 * @param batch batch whose vectors define the columns to copy
 * @param hyper true when the incoming side is a hyper batch, in which case inIndex packs
 *              (batchIndex &lt;&lt; 16 | recordOffset) and must be decomposed
 */
public static void generateCopies(ClassGenerator<?> g, VectorAccessible batch, boolean hyper) {
  // Parallel ids for incoming/outgoing vectors mean no explicit id management is needed.
  final JExpression inIndex = JExpr.direct("inIndex");
  final JExpression outIndex = JExpr.direct("outIndex");
  int fieldId = 0;
  for (VectorWrapper<?> wrapper : batch) {
    final String copyMethod =
        (!Types.isFixedWidthType(wrapper.getField().getType())
            || Types.isRepeated(wrapper.getField().getType())
            || Types.isComplex(wrapper.getField().getType()))
        ? "copyFromSafe"
        : "copyFrom";
    g.rotateBlock();
    final JVar in = g.declareVectorValueSetupAndMember("incoming",
        new TypedFieldId(wrapper.getField().getType(), wrapper.isHyper(), fieldId));
    final JVar out = g.declareVectorValueSetupAndMember("outgoing",
        new TypedFieldId(wrapper.getField().getType(), false, fieldId));
    if (hyper) {
      // Low 16 bits of inIndex select the record; the high bits select the batch component.
      g.getEvalBlock().add(
          out.invoke(copyMethod)
              .arg(inIndex.band(JExpr.lit((int) Character.MAX_VALUE)))
              .arg(outIndex)
              .arg(in.component(inIndex.shrz(JExpr.lit(16)))));
    } else {
      g.getEvalBlock().add(out.invoke(copyMethod).arg(inIndex).arg(outIndex).arg(in));
    }
    g.rotateBlock();
    fieldId++;
  }
}
Aggregations