Use of org.apache.drill.common.expression.SchemaPath in project drill by apache: class MongoCompareFunctionProcessor, method process.
/**
 * Builds a {@link MongoCompareFunctionProcessor} for the given comparison
 * function call and evaluates it against the call's arguments.
 *
 * <p>For a binary call written as {@code <literal> op <column>}, the operands
 * are swapped and the operator name is replaced via
 * {@code COMPARE_FUNCTIONS_TRANSPOSE_MAP} so the column always ends up on the
 * left-hand side. For a unary call, success requires the single argument to be
 * a column reference ({@link SchemaPath}).
 *
 * @param call the comparison function call to analyze
 * @return the processor; inspect its {@code success} flag for the outcome
 */
public static MongoCompareFunctionProcessor process(FunctionCall call) {
  String functionName = call.getName();
  MongoCompareFunctionProcessor processor = new MongoCompareFunctionProcessor(functionName);
  LogicalExpression columnArg = call.args.get(0);
  LogicalExpression valueArg = call.args.size() == 2 ? call.args.get(1) : null;
  if (valueArg == null) {
    // Unary function: only a plain column reference is accepted.
    if (columnArg instanceof SchemaPath) {
      processor.success = true;
      processor.path = (SchemaPath) columnArg;
    }
  } else {
    // Binary function: normalize "<literal> op <column>" into
    // "<column> transposed-op <literal>" before visiting.
    if (VALUE_EXPRESSION_CLASSES.contains(columnArg.getClass())) {
      LogicalExpression tmp = columnArg;
      columnArg = valueArg;
      valueArg = tmp;
      processor.functionName = COMPARE_FUNCTIONS_TRANSPOSE_MAP.get(functionName);
    }
    processor.success = columnArg.accept(processor, valueArg);
  }
  return processor;
}
Use of org.apache.drill.common.expression.SchemaPath in project drill by apache: class MongoScanBatchCreator, method getBatch.
@Override
/**
 * Creates a {@link ScanBatch} holding one {@link MongoRecordReader} per chunk
 * of the sub-scan.
 *
 * @param context  fragment execution context passed through to each reader
 * @param subScan  the Mongo sub-scan describing chunks, columns, and plugin
 * @param children must be empty; scans have no upstream batches
 * @return a scan batch over all created record readers
 * @throws ExecutionSetupException if any reader fails to initialize
 */
@Override
public ScanBatch getBatch(FragmentContext context, MongoSubScan subScan, List<RecordBatch> children) throws ExecutionSetupException {
  Preconditions.checkArgument(children.isEmpty());
  // The projected columns belong to the sub-scan, not to an individual chunk:
  // resolve them once instead of re-reading them on every loop iteration.
  List<SchemaPath> columns = subScan.getColumns();
  if (columns == null) {
    columns = GroupScan.ALL_COLUMNS;
  }
  List<RecordReader> readers = Lists.newArrayList();
  for (MongoSubScan.MongoSubScanSpec scanSpec : subScan.getChunkScanSpecList()) {
    try {
      readers.add(new MongoRecordReader(scanSpec, columns, context, subScan.getMongoStoragePlugin()));
    } catch (Exception e) {
      // Log context and stack trace together rather than as two separate
      // error entries, then preserve the cause in the thrown exception.
      logger.error("MongoRecordReader creation failed for subScan: " + subScan + ".", e);
      throw new ExecutionSetupException(e);
    }
  }
  // Parameterized logging avoids string concatenation when INFO is disabled.
  logger.info("Number of record readers initialized : {}", readers.size());
  return new ScanBatch(subScan, context, readers.iterator());
}
Use of org.apache.drill.common.expression.SchemaPath in project drill by apache: class ParquetPartitionDescriptor, method populatePartitionVectors.
@Override
/**
 * Fills the partition-column value vectors: one row per partition location,
 * one populated cell per partition column set in {@code partitionColumnBitSet}.
 *
 * @param vectors               value vectors indexed by partition column; slots
 *                              for non-partition columns may be {@code null}
 * @param partitions            partition locations, one per output record
 * @param partitionColumnBitSet bits identify which column indexes to populate
 * @param fieldNameMap          maps a partition column index to its field name
 */
@Override
public void populatePartitionVectors(ValueVector[] vectors, List<PartitionLocation> partitions, BitSet partitionColumnBitSet, Map<Integer, String> fieldNameMap) {
  // The group scan does not change during this call; cast it once up front
  // instead of re-evaluating the cast for every (record, column) cell.
  ParquetGroupScan groupScan = (ParquetGroupScan) scanRel.getGroupScan();
  int record = 0;
  for (PartitionLocation partitionLocation : partitions) {
    for (int partitionColumnIndex : BitSets.toIter(partitionColumnBitSet)) {
      SchemaPath column = SchemaPath.getSimplePath(fieldNameMap.get(partitionColumnIndex));
      groupScan.populatePruningVector(vectors[partitionColumnIndex], record, column, partitionLocation.getEntirePartitionLocation());
    }
    record++;
  }
  // Finalize the value count on every populated vector; skip unused slots.
  for (ValueVector v : vectors) {
    if (v == null) {
      continue;
    }
    v.getMutator().setValueCount(partitions.size());
  }
}
Use of org.apache.drill.common.expression.SchemaPath in project drill by apache: class BsonRecordReader, method ensureAtLeastOneField.
/**
 * Guarantees that at least one field exists in the output batch. If nothing
 * was written during the read, the first requested column is materialized as
 * an empty integer field so that downstream operators (e.g. count queries)
 * still receive a schema and row data.
 *
 * @param writer the complex writer backing the output batch
 */
public void ensureAtLeastOneField(ComplexWriter writer) {
  if (atLeastOneWrite) {
    return;
  }
  // Nothing was written: create one empty field from the first projected
  // column so we can return some data for count purposes.
  PathSegment segment = columns.get(0).getRootSegment();
  BaseWriter.MapWriter mapWriter = writer.rootAsMap();
  // Descend through nested name segments, stopping before any array segment.
  while (segment.getChild() != null && !segment.getChild().isArray()) {
    mapWriter = mapWriter.map(segment.getNameSegment().getPath());
    segment = segment.getChild();
  }
  mapWriter.integer(segment.getNameSegment().getPath());
}
Use of org.apache.drill.common.expression.SchemaPath in project drill by apache: class JsonReader, method ensureAtLeastOneField.
/**
 * Guarantees that at least one field exists in the output batch. Projected
 * columns that ended up empty are materialized as empty fields (VARCHAR in
 * all-text mode, otherwise INT), and uninitialized array writers are given
 * their element type, so downstream operators always see a schema.
 *
 * <p>Runs in two passes: the first walks each column's path and records which
 * leaf maps are empty; the second writes the placeholder fields. The emptiness
 * check must happen before any placeholder is written, because writing one
 * placeholder could make a shared parent map appear non-empty.
 */
@SuppressWarnings("resource")
@Override
public void ensureAtLeastOneField(ComplexWriter writer) {
List<BaseWriter.MapWriter> writerList = Lists.newArrayList();
List<PathSegment> fieldPathList = Lists.newArrayList();
BitSet emptyStatus = new BitSet(columns.size());
// first pass: collect which fields are empty
for (int i = 0; i < columns.size(); i++) {
SchemaPath sp = columns.get(i);
// Walk down the column's nested name segments, creating intermediate map
// writers, and stop before any array segment.
PathSegment fieldPath = sp.getRootSegment();
BaseWriter.MapWriter fieldWriter = writer.rootAsMap();
while (fieldPath.getChild() != null && !fieldPath.getChild().isArray()) {
fieldWriter = fieldWriter.map(fieldPath.getNameSegment().getPath());
fieldPath = fieldPath.getChild();
}
writerList.add(fieldWriter);
fieldPathList.add(fieldPath);
if (fieldWriter.isEmptyMap()) {
emptyStatus.set(i, true);
}
if (i == 0 && !allTextMode) {
// In non-all-text mode only the first column is materialized, to
// avoid schema change exceptions by downstream operators.
break;
}
}
// Second pass: writing a placeholder may change what isEmptyMap() reports,
// so we rely on the emptyStatus.
for (int j = 0; j < fieldPathList.size(); j++) {
BaseWriter.MapWriter fieldWriter = writerList.get(j);
PathSegment fieldPath = fieldPathList.get(j);
if (emptyStatus.get(j)) {
if (allTextMode) {
fieldWriter.varChar(fieldPath.getNameSegment().getPath());
} else {
fieldWriter.integer(fieldPath.getNameSegment().getPath());
}
}
}
// Give uninitialized (never-written) array writers an element type as well.
for (ListWriter field : emptyArrayWriters) {
// checks that array has not been initialized
if (field.getValueCapacity() == 0) {
if (allTextMode) {
field.varChar();
} else {
field.integer();
}
}
}
}
Aggregations