use of org.apache.drill.exec.vector.ValueVector in project drill by apache.
the class UnionAllRecordBatch method doWork.
@SuppressWarnings("resource")
private IterOutcome doWork() throws ClassTransformationException, IOException, SchemaChangeException {
if (allocationVectors != null) {
for (ValueVector v : allocationVectors) {
v.clear();
}
}
allocationVectors = Lists.newArrayList();
transfers.clear();
// If both sides of Union-All are empty
if (unionAllInput.isBothSideEmpty()) {
for (int i = 0; i < outputFields.size(); ++i) {
final String colName = outputFields.get(i).getPath();
final MajorType majorType = MajorType.newBuilder().setMinorType(MinorType.INT).setMode(DataMode.OPTIONAL).build();
MaterializedField outputField = MaterializedField.create(colName, majorType);
ValueVector vv = container.addOrGet(outputField, callBack);
allocationVectors.add(vv);
}
container.buildSchema(BatchSchema.SelectionVectorMode.NONE);
return IterOutcome.OK_NEW_SCHEMA;
}
final ClassGenerator<UnionAller> cg = CodeGenerator.getRoot(UnionAller.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
cg.getCodeGenerator().plainJavaCapable(true);
  // Uncomment this line to debug the generated code.
  // cg.getCodeGenerator().saveCodeForDebugging(true);
  int index = 0;
  for (VectorWrapper<?> vw : current) {
    ValueVector vvIn = vw.getValueVector();
    // get the original input column names
    SchemaPath inputPath = SchemaPath.getSimplePath(vvIn.getField().getPath());
    // get the renamed column names
    SchemaPath outputPath = SchemaPath.getSimplePath(outputFields.get(index).getPath());
    final ErrorCollector collector = new ErrorCollectorImpl();
    // cast data types (MinorType or DataMode)
    if (hasSameTypeAndMode(outputFields.get(index), vw.getValueVector().getField())) {
      // Transfer column
      MajorType outputFieldType = outputFields.get(index).getType();
      MaterializedField outputField = MaterializedField.create(outputPath.getAsUnescapedPath(), outputFieldType);
      /*
        todo: Fix the if condition when DRILL-4824 is merged.
        The condition should be changed to:
        `if (outputFields.get(index).getPath().equals(inputPath.getAsUnescapedPath())) {`
        DRILL-5419 changed the condition to the correct one, but this caused a regression (DRILL-5521).
        The root cause is a missing indication of a child column in map types when it is null.
        DRILL-4824 is re-working the json reader implementation, including map types, and will fix this problem.
        Reverting the condition to the previous one to avoid the regression until DRILL-4824 is merged.
        Unit test - TestJsonReader.testKvgenWithUnionAll().
      */
      if (outputFields.get(index).getPath().equals(inputPath)) {
        ValueVector vvOut = container.addOrGet(outputField);
        TransferPair tp = vvIn.makeTransferPair(vvOut);
        transfers.add(tp);
      // Copy data in order to rename the column
      } else {
        final LogicalExpression expr = ExpressionTreeMaterializer.materialize(inputPath, current, collector, context.getFunctionRegistry());
        if (collector.hasErrors()) {
          throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
        }
        ValueVector vv = container.addOrGet(outputField, callBack);
        allocationVectors.add(vv);
        TypedFieldId fid = container.getValueVectorId(SchemaPath.getSimplePath(outputField.getPath()));
        ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, true);
        cg.addExpr(write);
      }
    // Cast is necessary
    } else {
      LogicalExpression expr = ExpressionTreeMaterializer.materialize(inputPath, current, collector, context.getFunctionRegistry());
      if (collector.hasErrors()) {
        throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
      }

      // cast to the one with the least restriction
      if (vvIn.getField().getType().getMode() == DataMode.REQUIRED && outputFields.get(index).getType().getMode() != DataMode.REQUIRED) {
        expr = ExpressionTreeMaterializer.convertToNullableType(expr, vvIn.getField().getType().getMinorType(), context.getFunctionRegistry(), collector);
        if (collector.hasErrors()) {
          throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
        }
      }

      // Insert a cast before the Union operation
      if (vvIn.getField().getType().getMinorType() != outputFields.get(index).getType().getMinorType()) {
        expr = ExpressionTreeMaterializer.addCastExpression(expr, outputFields.get(index).getType(), context.getFunctionRegistry(), collector);
        if (collector.hasErrors()) {
          throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
        }
      }

      final MaterializedField outputField = MaterializedField.create(outputPath.getAsUnescapedPath(), expr.getMajorType());
      ValueVector vector = container.addOrGet(outputField, callBack);
      allocationVectors.add(vector);
      TypedFieldId fid = container.getValueVectorId(SchemaPath.getSimplePath(outputField.getPath()));
      boolean useSetSafe = !(vector instanceof FixedWidthVector);
      ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, useSetSafe);
      cg.addExpr(write);
    }
    ++index;
  }

  unionall = context.getImplementationClass(cg.getCodeGenerator());
  unionall.setup(context, current, this, transfers);

  if (!schemaAvailable) {
    container.buildSchema(BatchSchema.SelectionVectorMode.NONE);
    schemaAvailable = true;
  }

  if (!doAlloc()) {
    return IterOutcome.OUT_OF_MEMORY;
  }

  recordCount = unionall.unionRecords(0, current.getRecordCount(), 0);
  setValueCount(recordCount);
  return IterOutcome.OK;
}
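The transfer-versus-copy split above hinges on hasSameTypeAndMode, which is not part of this excerpt. A minimal sketch of such a check, assuming it compares only the MinorType and DataMode of the two fields:

// Hypothetical sketch: true when two fields agree on both MinorType and
// DataMode, so a zero-copy TransferPair is safe instead of generated copies.
private static boolean hasSameTypeAndMode(MaterializedField left, MaterializedField right) {
  return left.getType().getMinorType() == right.getType().getMinorType()
      && left.getType().getMode() == right.getType().getMode();
}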
use of org.apache.drill.exec.vector.ValueVector in project drill by apache.
the class BatchValidator method validateRepeatedVector.
private void validateRepeatedVector(String name, BaseRepeatedValueVector vector) {
  int dataLength = Integer.MAX_VALUE;
  if (vector instanceof RepeatedVarCharVector) {
    dataLength = ((RepeatedVarCharVector) vector).getOffsetVector().getValueCapacity();
  } else if (vector instanceof RepeatedFixedWidthVectorLike) {
    dataLength = ((BaseDataValueVector) ((BaseRepeatedValueVector) vector).getDataVector()).getBuffer().capacity();
  }
  int itemCount = validateOffsetVector(name + "-offsets", vector.getOffsetVector(), rowCount, dataLength);
  // Special handling of repeated VarChar vectors:
  // the nested data vectors are not quite like top-level vectors.
@SuppressWarnings("resource") ValueVector dataVector = vector.getDataVector();
if (dataVector instanceof VariableWidthVector) {
validateVariableWidthVector(name + "-data", (VariableWidthVector) dataVector, itemCount);
}
}
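validateOffsetVector is also not shown in this excerpt. A plausible sketch, assuming the offset vector holds rowCount + 1 UInt4 entries and that a hypothetical error(String) helper records validation failures:

// Hypothetical sketch: offsets must start at zero, never decrease, and never
// point past the end of the data buffer; the last offset doubles as the item
// count of the nested data vector.
private int validateOffsetVector(String name, UInt4Vector offsetVector, int valueCount, int maxDataLength) {
  UInt4Vector.Accessor accessor = offsetVector.getAccessor();
  if (accessor.get(0) != 0) {
    error(name + ": offset[0] = " + accessor.get(0) + ", expected 0"); // error() is assumed
  }
  int prevOffset = 0;
  for (int i = 1; i <= valueCount; i++) {
    int offset = accessor.get(i);
    if (offset < prevOffset || offset > maxDataLength) {
      error(name + ": invalid offset " + offset + " at index " + i);
    }
    prevOffset = offset;
  }
  return prevOffset;
}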
use of org.apache.drill.exec.vector.ValueVector in project drill by apache.
the class CopierHolder method createCopier.
/**
* Prepare a copier which will write a collection of vectors to disk. The copier
* uses generated code to do the actual writes. If the copier has not yet been
* created, generate code and create it. If it has been created, close it and
* prepare it for a new collection of batches.
*
* @param batch the (hyper) batch of vectors to be copied
* @param batchGroupList same batches as above, but represented as a list
* of individual batches
* @param outputContainer the container into which to copy the batches
*/
@SuppressWarnings("unchecked")
private void createCopier(VectorAccessible batch, List<? extends BatchGroup> batchGroupList, VectorContainer outputContainer) {
if (copier != null) {
opCodeGen.closeCopier();
} else {
copier = opCodeGen.getCopier(batch);
}
for (VectorWrapper<?> i : batch) {
@SuppressWarnings("resource") ValueVector v = TypeHelper.getNewVector(i.getField(), allocator);
outputContainer.add(v);
}
try {
copier.setup(context, allocator, batch, (List<BatchGroup>) batchGroupList, outputContainer);
} catch (SchemaChangeException e) {
throw UserException.unsupportedError(e).message("Unexpected schema change - likely code error.").build(logger);
}
}
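A hypothetical sketch of how a caller might then drive the prepared copier, assuming the generated copier exposes a next(targetRecordCount) method that returns the number of rows merged into outputContainer, and that targetRecordCount is chosen by the caller:

// Hypothetical driver loop: pull merged rows in chunks until the batch
// groups are exhausted (next() returns 0), handing each chunk downstream.
int copied;
while ((copied = copier.next(targetRecordCount)) > 0) {
  outputContainer.setRecordCount(copied);
  // ... spill the container to disk or return it to the downstream operator
}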
use of org.apache.drill.exec.vector.ValueVector in project drill by apache.
the class FileSystemPartitionDescriptor method populatePartitionVectors.
@Override
public void populatePartitionVectors(ValueVector[] vectors, List<PartitionLocation> partitions, BitSet partitionColumnBitSet, Map<Integer, String> fieldNameMap) {
  int record = 0;
  for (PartitionLocation partitionLocation : partitions) {
    for (int partitionColumnIndex : BitSets.toIter(partitionColumnBitSet)) {
      if (partitionLocation.getPartitionValue(partitionColumnIndex) == null) {
        // set null if dirX does not exist for the location.
        ((NullableVarCharVector) vectors[partitionColumnIndex]).getMutator().setNull(record);
      } else {
        byte[] bytes = partitionLocation.getPartitionValue(partitionColumnIndex).getBytes(Charsets.UTF_8);
        ((NullableVarCharVector) vectors[partitionColumnIndex]).getMutator().setSafe(record, bytes, 0, bytes.length);
      }
    }
    record++;
  }

  for (ValueVector v : vectors) {
    if (v == null) {
      continue;
    }
    v.getMutator().setValueCount(partitions.size());
  }
}
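For reference, the mutator pattern used above in isolation: write a value or a null per row index, then seal the vector with setValueCount. A minimal sketch, assuming a MaterializedField field and a BufferAllocator allocator are in scope:

// Minimal standalone sketch of the NullableVarCharVector mutator pattern.
NullableVarCharVector v = new NullableVarCharVector(field, allocator);
v.allocateNew();
byte[] bytes = "1994".getBytes(Charsets.UTF_8);
v.getMutator().setSafe(0, bytes, 0, bytes.length); // row 0: a partition value
v.getMutator().setNull(1);                         // row 1: dirX absent
v.getMutator().setValueCount(2);                   // seal the vector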
use of org.apache.drill.exec.vector.ValueVector in project drill by apache.
the class ExternalSortBatch method constructHyperBatch.
private VectorContainer constructHyperBatch(List<BatchGroup> batchGroupList) {
  VectorContainer cont = new VectorContainer();
  for (MaterializedField field : schema) {
    ValueVector[] vectors = new ValueVector[batchGroupList.size()];
    int i = 0;
    for (BatchGroup group : batchGroupList) {
      vectors[i++] = group.getValueAccessorById(field.getValueClass(),
          group.getValueVectorId(SchemaPath.getSimplePath(field.getPath())).getFieldIds())
          .getValueVector();
    }
    cont.add(vectors);
  }
  cont.buildSchema(BatchSchema.SelectionVectorMode.FOUR_BYTE);
  return cont;
}
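FOUR_BYTE mode signals that rows in this hyper batch are addressed through a four-byte selection vector (SV4): the upper 16 bits of each entry select a batch, the lower 16 bits select the row within it. A hypothetical reading sketch, with sv4, i, and a per-field vectors array assumed in scope:

// Hypothetical sketch of SV4 addressing over the hyper batch built above.
int entry = sv4.get(i);           // sv4 and i are assumed to be in scope
int batchIndex = entry >>> 16;    // which batch within the hyper batch
int rowIndex = entry & 0xFFFF;    // which row within that batch
Object value = vectors[batchIndex].getAccessor().getObject(rowIndex);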