Use of org.apache.drill.common.expression.SchemaPath in project drill by apache.
The class ParquetMetaStatCollector, method collectColStat.
@Override
public Map<SchemaPath, ColumnStatistics> collectColStat(Set<SchemaPath> fields) {
  Stopwatch timer = Stopwatch.createStarted();
  // map from column to ColumnMetadata
  final Map<SchemaPath, Metadata.ColumnMetadata> columnMetadataMap = new HashMap<>();
  // map from column name to column statistics.
  final Map<SchemaPath, ColumnStatistics> statMap = new HashMap<>();
  for (final Metadata.ColumnMetadata columnMetadata : columnMetadataList) {
    SchemaPath schemaPath = SchemaPath.getCompoundPath(columnMetadata.getName());
    columnMetadataMap.put(schemaPath, columnMetadata);
  }
  for (final SchemaPath schemaPath : fields) {
    final PrimitiveType.PrimitiveTypeName primitiveType;
    final OriginalType originalType;
    final Metadata.ColumnMetadata columnMetadata = columnMetadataMap.get(schemaPath);
    if (columnMetadata != null) {
      final Object min = columnMetadata.getMinValue();
      final Object max = columnMetadata.getMaxValue();
      final Long numNull = columnMetadata.getNulls();
      primitiveType = this.parquetTableMetadata.getPrimitiveType(columnMetadata.getName());
      originalType = this.parquetTableMetadata.getOriginalType(columnMetadata.getName());
      final Integer repetitionLevel = this.parquetTableMetadata.getRepetitionLevel(columnMetadata.getName());
      statMap.put(schemaPath, getStat(min, max, numNull, primitiveType, originalType, repetitionLevel));
    } else {
      final String columnName = schemaPath.getRootSegment().getPath();
      if (implicitColValues.containsKey(columnName)) {
        TypeProtos.MajorType type = Types.required(TypeProtos.MinorType.VARCHAR);
        Statistics stat = new BinaryStatistics();
        stat.setNumNulls(0);
        byte[] val = implicitColValues.get(columnName).getBytes();
        stat.setMinMaxFromBytes(val, val);
        statMap.put(schemaPath, new ColumnStatistics(stat, type));
      }
    }
  }
  if (logger.isDebugEnabled()) {
    logger.debug("Took {} ms to collect column statistics for row group", timer.elapsed(TimeUnit.MILLISECONDS));
  }
  return statMap;
}
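
For orientation, here is a minimal usage sketch of the method above. The collector instance and the column name n_nationkey are hypothetical; only SchemaPath.getSimplePath and ordinary Map lookups are assumed.

Set<SchemaPath> fields = new HashSet<>();
fields.add(SchemaPath.getSimplePath("n_nationkey"));
// statCollector is a hypothetical, already-initialized ParquetMetaStatCollector
Map<SchemaPath, ColumnStatistics> stats = statCollector.collectColStat(fields);
ColumnStatistics colStat = stats.get(SchemaPath.getSimplePath("n_nationkey"));
if (colStat == null) {
  // the column was neither in the row-group metadata nor an implicit column, so no statistics were collected
}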
Use of org.apache.drill.common.expression.SchemaPath in project drill by apache.
The class DrillParquetReader, method getProjection.
public static MessageType getProjection(MessageType schema, Collection<SchemaPath> columns, List<SchemaPath> columnsNotFound) {
  MessageType projection = null;
  String messageName = schema.getName();
  List<ColumnDescriptor> schemaColumns = schema.getColumns();
  // parquet type.union() seems to lose ConvertedType info when merging two columns that are the same type. This can
  // happen when selecting two elements from an array. So to work around this, we use a set of SchemaPath to avoid duplicates
  // and then merge the types at the end
  Set<SchemaPath> selectedSchemaPaths = Sets.newLinkedHashSet();
  // get a list of modified columns which have the array elements removed from the schema path, since the parquet schema doesn't include array elements
  List<SchemaPath> modifiedColumns = Lists.newLinkedList();
  for (SchemaPath path : columns) {
    List<String> segments = Lists.newArrayList();
    PathSegment seg = path.getRootSegment();
    do {
      if (seg.isNamed()) {
        segments.add(seg.getNameSegment().getPath());
      }
    } while ((seg = seg.getChild()) != null);
    String[] pathSegments = new String[segments.size()];
    segments.toArray(pathSegments);
    SchemaPath modifiedSchemaPath = SchemaPath.getCompoundPath(pathSegments);
    modifiedColumns.add(modifiedSchemaPath);
  }
  // convert the columns in the parquet schema to a list of SchemaPath columns so that they can be compared
  // to the projection columns in a case-insensitive manner
  List<SchemaPath> schemaPaths = Lists.newLinkedList();
  for (ColumnDescriptor columnDescriptor : schemaColumns) {
    String[] schemaColDesc = Arrays.copyOf(columnDescriptor.getPath(), columnDescriptor.getPath().length);
    SchemaPath schemaPath = SchemaPath.getCompoundPath(schemaColDesc);
    schemaPaths.add(schemaPath);
  }
  // loop through projection columns and add any columns that are missing from the parquet schema to the columnsNotFound list
  for (SchemaPath columnPath : modifiedColumns) {
    boolean notFound = true;
    for (SchemaPath schemaPath : schemaPaths) {
      if (schemaPath.contains(columnPath)) {
        selectedSchemaPaths.add(schemaPath);
        notFound = false;
      }
    }
    if (notFound) {
      columnsNotFound.add(columnPath);
    }
  }
  // convert the SchemaPaths in selectedSchemaPaths to parquet types and merge them into the projection schema
  for (SchemaPath schemaPath : selectedSchemaPaths) {
    List<String> segments = Lists.newArrayList();
    PathSegment seg = schemaPath.getRootSegment();
    do {
      segments.add(seg.getNameSegment().getPath());
    } while ((seg = seg.getChild()) != null);
    String[] pathSegments = new String[segments.size()];
    segments.toArray(pathSegments);
    Type t = getType(pathSegments, 0, schema);
    if (projection == null) {
      projection = new MessageType(messageName, t);
    } else {
      projection = projection.union(new MessageType(messageName, t));
    }
  }
  return projection;
}
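
A minimal sketch of calling getProjection. Here fileSchema stands for a MessageType read from a Parquet footer, and the column names id and address.zip are hypothetical.

List<SchemaPath> columns = Arrays.asList(
    SchemaPath.getSimplePath("id"),
    SchemaPath.getCompoundPath("address", "zip")); // nested field address.zip
List<SchemaPath> columnsNotFound = new ArrayList<>();
MessageType projection = DrillParquetReader.getProjection(fileSchema, columns, columnsNotFound);
if (projection == null) {
  // none of the requested columns exist in the file schema; the caller falls back to the full schema
}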
Use of org.apache.drill.common.expression.SchemaPath in project drill by apache.
The class DrillParquetReader, method setup.
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  try {
    this.operatorContext = context;
    schema = footer.getFileMetaData().getSchema();
    MessageType projection = null;
    if (isStarQuery()) {
      projection = schema;
    } else {
      columnsNotFound = new ArrayList<SchemaPath>();
      projection = getProjection(schema, getColumns(), columnsNotFound);
      if (projection == null) {
        projection = schema;
      }
      if (columnsNotFound != null && columnsNotFound.size() > 0) {
        nullFilledVectors = new ArrayList<>();
        for (SchemaPath col : columnsNotFound) {
          nullFilledVectors.add((NullableIntVector) output.addField(
              MaterializedField.create(col.getAsUnescapedPath(), org.apache.drill.common.types.Types.optional(TypeProtos.MinorType.INT)),
              (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)));
        }
        if (columnsNotFound.size() == getColumns().size()) {
          noColumnsFound = true;
        }
      }
    }
    logger.debug("Requesting schema {}", projection);
    ColumnIOFactory factory = new ColumnIOFactory(false);
    MessageColumnIO columnIO = factory.getColumnIO(projection, schema);
    Map<ColumnPath, ColumnChunkMetaData> paths = new HashMap<>();
    for (ColumnChunkMetaData md : footer.getBlocks().get(entry.getRowGroupIndex()).getColumns()) {
      paths.put(md.getPath(), md);
    }
    Path filePath = new Path(entry.getPath());
    BlockMetaData blockMetaData = footer.getBlocks().get(entry.getRowGroupIndex());
    recordCount = (int) blockMetaData.getRowCount();
    pageReadStore = new ColumnChunkIncReadStore(recordCount,
        CodecFactory.createDirectCodecFactory(fileSystem.getConf(), new ParquetDirectByteBufferAllocator(operatorContext.getAllocator()), 0),
        operatorContext.getAllocator(), fileSystem, filePath);
    for (String[] path : schema.getPaths()) {
      Type type = schema.getType(path);
      if (type.isPrimitive()) {
        ColumnChunkMetaData md = paths.get(ColumnPath.get(path));
        pageReadStore.addColumn(schema.getColumnDescription(path), md);
      }
    }
    if (!noColumnsFound) {
      writer = new VectorContainerWriter(output);
      // Discard the columns not found in the schema when creating the DrillParquetRecordMaterializer, since they have already been added to the output.
      final Collection<SchemaPath> columns = columnsNotFound == null || columnsNotFound.size() == 0
          ? getColumns()
          : CollectionUtils.subtract(getColumns(), columnsNotFound);
      recordMaterializer = new DrillParquetRecordMaterializer(output, writer, projection, columns, fragmentContext.getOptions(), containsCorruptedDates);
      primitiveVectors = writer.getMapVector().getPrimitiveVectors();
      recordReader = columnIO.getRecordReader(pageReadStore, recordMaterializer);
    }
  } catch (Exception e) {
    handleAndRaise("Failure in setting up reader", e);
  }
}
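
The columnsNotFound branch above declares each missing column as an OPTIONAL INT so it can be returned as all nulls. A stand-alone sketch of that declaration, with the column name not_in_file being hypothetical:

MaterializedField missing = MaterializedField.create("not_in_file",
    org.apache.drill.common.types.Types.optional(TypeProtos.MinorType.INT));
// output.addField(...) with this field produces a NullableIntVector; the reader keeps it
// in nullFilledVectors and returns nulls for it on every row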
Use of org.apache.drill.common.expression.SchemaPath in project drill by apache.
The class UnionAllRecordBatch, method doWork.
@SuppressWarnings("resource")
private IterOutcome doWork() throws ClassTransformationException, IOException, SchemaChangeException {
  if (allocationVectors != null) {
    for (ValueVector v : allocationVectors) {
      v.clear();
    }
  }
  allocationVectors = Lists.newArrayList();
  transfers.clear();
  // If both sides of Union-All are empty
  if (unionAllInput.isBothSideEmpty()) {
    for (int i = 0; i < outputFields.size(); ++i) {
      final String colName = outputFields.get(i).getPath();
      final MajorType majorType = MajorType.newBuilder().setMinorType(MinorType.INT).setMode(DataMode.OPTIONAL).build();
      MaterializedField outputField = MaterializedField.create(colName, majorType);
      ValueVector vv = container.addOrGet(outputField, callBack);
      allocationVectors.add(vv);
    }
    container.buildSchema(BatchSchema.SelectionVectorMode.NONE);
    return IterOutcome.OK_NEW_SCHEMA;
  }
  final ClassGenerator<UnionAller> cg = CodeGenerator.getRoot(UnionAller.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
  cg.getCodeGenerator().plainJavaCapable(true);
  // Uncomment this line to debug the generated code.
  // cg.getCodeGenerator().saveCodeForDebugging(true);
  int index = 0;
  for (VectorWrapper<?> vw : current) {
    ValueVector vvIn = vw.getValueVector();
    // get the original input column names
    SchemaPath inputPath = SchemaPath.getSimplePath(vvIn.getField().getPath());
    // get the renamed column names
    SchemaPath outputPath = SchemaPath.getSimplePath(outputFields.get(index).getPath());
    final ErrorCollector collector = new ErrorCollectorImpl();
    // cast data types (MinorType or DataMode)
    if (hasSameTypeAndMode(outputFields.get(index), vw.getValueVector().getField())) {
      // Transfer column
      MajorType outputFieldType = outputFields.get(index).getType();
      MaterializedField outputField = MaterializedField.create(outputPath.getAsUnescapedPath(), outputFieldType);
      /*
        todo: Fix if condition when DRILL-4824 is merged
        If condition should be changed to:
        `if (outputFields.get(index).getPath().equals(inputPath.getAsUnescapedPath())) {`
        DRILL-5419 has changed the condition to the correct one but this caused a regression (DRILL-5521).
        Root cause is the missing indication of a child column in map types when it is null.
        DRILL-4824 is re-working the json reader implementation, including map types, and will fix this problem.
        Reverting the condition to the previous one to avoid the regression till DRILL-4824 is merged.
        Unit test - TestJsonReader.testKvgenWithUnionAll().
      */
      if (outputFields.get(index).getPath().equals(inputPath)) {
        ValueVector vvOut = container.addOrGet(outputField);
        TransferPair tp = vvIn.makeTransferPair(vvOut);
        transfers.add(tp);
      // Copy data in order to rename the column
      } else {
        final LogicalExpression expr = ExpressionTreeMaterializer.materialize(inputPath, current, collector, context.getFunctionRegistry());
        if (collector.hasErrors()) {
          throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
        }
        ValueVector vv = container.addOrGet(outputField, callBack);
        allocationVectors.add(vv);
        TypedFieldId fid = container.getValueVectorId(SchemaPath.getSimplePath(outputField.getPath()));
        ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, true);
        cg.addExpr(write);
      }
    // Cast is necessary
    } else {
      LogicalExpression expr = ExpressionTreeMaterializer.materialize(inputPath, current, collector, context.getFunctionRegistry());
      if (collector.hasErrors()) {
        throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
      }
      // cast to the one with the least restriction
      if (vvIn.getField().getType().getMode() == DataMode.REQUIRED && outputFields.get(index).getType().getMode() != DataMode.REQUIRED) {
        expr = ExpressionTreeMaterializer.convertToNullableType(expr, vvIn.getField().getType().getMinorType(), context.getFunctionRegistry(), collector);
        if (collector.hasErrors()) {
          throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
        }
      }
      // Insert a cast before the Union operation
      if (vvIn.getField().getType().getMinorType() != outputFields.get(index).getType().getMinorType()) {
        expr = ExpressionTreeMaterializer.addCastExpression(expr, outputFields.get(index).getType(), context.getFunctionRegistry(), collector);
        if (collector.hasErrors()) {
          throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema. Errors:\n %s.", collector.toErrorString()));
        }
      }
      final MaterializedField outputField = MaterializedField.create(outputPath.getAsUnescapedPath(), expr.getMajorType());
      ValueVector vector = container.addOrGet(outputField, callBack);
      allocationVectors.add(vector);
      TypedFieldId fid = container.getValueVectorId(SchemaPath.getSimplePath(outputField.getPath()));
      boolean useSetSafe = !(vector instanceof FixedWidthVector);
      ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, useSetSafe);
      cg.addExpr(write);
    }
    ++index;
  }
  unionall = context.getImplementationClass(cg.getCodeGenerator());
  unionall.setup(context, current, this, transfers);
  if (!schemaAvailable) {
    container.buildSchema(BatchSchema.SelectionVectorMode.NONE);
    schemaAvailable = true;
  }
  if (!doAlloc()) {
    return IterOutcome.OUT_OF_MEMORY;
  }
  recordCount = unionall.unionRecords(0, current.getRecordCount(), 0);
  setValueCount(recordCount);
  return IterOutcome.OK;
}
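
The first branch above keys off hasSameTypeAndMode, which this snippet does not show. A minimal sketch of what that check amounts to, assuming it compares only the MinorType and DataMode of the two fields:

// hypothetical restatement of the check; the real helper lives elsewhere in UnionAllRecordBatch
private static boolean sameTypeAndMode(MaterializedField left, MaterializedField right) {
  MajorType l = left.getType();
  MajorType r = right.getType();
  return l.getMinorType() == r.getMinorType() && l.getMode() == r.getMode();
}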
Use of org.apache.drill.common.expression.SchemaPath in project drill by apache.
The class HBaseRecordReader, method transformColumns.
@Override
protected Collection<SchemaPath> transformColumns(Collection<SchemaPath> columns) {
  Set<SchemaPath> transformed = Sets.newLinkedHashSet();
  rowKeyOnly = true;
  if (!isStarQuery()) {
    for (SchemaPath column : columns) {
      if (column.getRootSegment().getPath().equalsIgnoreCase(ROW_KEY)) {
        transformed.add(ROW_KEY_PATH);
        continue;
      }
      rowKeyOnly = false;
      NameSegment root = column.getRootSegment();
      byte[] family = root.getPath().getBytes();
      transformed.add(SchemaPath.getSimplePath(root.getPath()));
      PathSegment child = root.getChild();
      if (child != null && child.isNamed()) {
        byte[] qualifier = child.getNameSegment().getPath().getBytes();
        hbaseScan.addColumn(family, qualifier);
      } else {
        hbaseScan.addFamily(family);
      }
    }
    /* if only the row key was requested, add a FirstKeyOnlyFilter to the scan
     * to fetch only one KV from each row. If a filter is already part of this
     * scan, add the FirstKeyOnlyFilter as the LAST filter of a MUST_PASS_ALL
     * FilterList.
     */
    if (rowKeyOnly) {
      hbaseScan.setFilter(HBaseUtils.andFilterAtIndex(hbaseScan.getFilter(), HBaseUtils.LAST_FILTER, new FirstKeyOnlyFilter()));
    }
  } else {
    rowKeyOnly = false;
    transformed.add(ROW_KEY_PATH);
  }
  return transformed;
}
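
A hypothetical projection list illustrating the three SchemaPath shapes this method distinguishes: the row key, a whole column family, and a single family/qualifier column.

Collection<SchemaPath> columns = Arrays.asList(
    SchemaPath.getSimplePath("row_key"),   // mapped to ROW_KEY_PATH
    SchemaPath.getSimplePath("f"),         // whole column family "f" -> hbaseScan.addFamily
    SchemaPath.getCompoundPath("f", "c")); // family "f", qualifier "c" -> hbaseScan.addColumn
// if "row_key" were the only entry, rowKeyOnly would stay true and a FirstKeyOnlyFilter would be added to the scan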