Use of org.apache.drill.exec.memory.BufferAllocator in project drill by axbaretto.
The class ExternalSortBatch, method createCopier.
private void createCopier(VectorAccessible batch, List<BatchGroup> batchGroupList, VectorContainer outputContainer, boolean spilling) throws SchemaChangeException {
  try {
    if (copier == null) {
      CodeGenerator<PriorityQueueCopier> cg = CodeGenerator.get(PriorityQueueCopier.TEMPLATE_DEFINITION, context.getOptions());
      cg.plainJavaCapable(true);
      // Uncomment this line to debug the generated code.
      // cg.saveCodeForDebugging(true);
      ClassGenerator<PriorityQueueCopier> g = cg.getRoot();
      generateComparisons(g, batch);
      g.setMappingSet(COPIER_MAPPING_SET);
      CopyUtil.generateCopies(g, batch, true);
      g.setMappingSet(MAIN_MAPPING);
      copier = context.getImplementationClass(cg);
    } else {
      copier.close();
    }
    // Use the dedicated copier allocator when spilling, the operator allocator otherwise.
    @SuppressWarnings("resource")
    BufferAllocator allocator = spilling ? copierAllocator : oAllocator;
    for (VectorWrapper<?> i : batch) {
      @SuppressWarnings("resource")
      ValueVector v = TypeHelper.getNewVector(i.getField(), allocator);
      outputContainer.add(v);
    }
    copier.setup(context, allocator, batch, batchGroupList, outputContainer);
  } catch (ClassTransformationException | IOException e) {
    throw new RuntimeException(e);
  }
}
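The ternary on spilling above is the key BufferAllocator detail: output vectors destined for spill files are charged to a dedicated copier allocator rather than the operator's main allocator. Below is a minimal, self-contained sketch of that selection pattern, assuming the RootAllocatorFactory.newRoot(long) and BufferAllocator.newChildAllocator(name, reservation, limit) factory methods; the allocator names and limits are illustrative assumptions, not Drill's actual configuration.

import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocatorFactory;

public class AllocatorSelectionSketch {
  public static void main(String[] args) {
    // Hypothetical limits; real operators size these from configuration.
    try (BufferAllocator root = RootAllocatorFactory.newRoot(1 << 28)) {
      BufferAllocator copierAllocator = root.newChildAllocator("copier", 0, 1 << 24);
      BufferAllocator oAllocator = root.newChildAllocator("operator", 0, 1 << 26);
      boolean spilling = true;
      // Same selection as in createCopier: spill output is charged to the copier allocator.
      BufferAllocator allocator = spilling ? copierAllocator : oAllocator;
      System.out.println("selected allocator: " + allocator.getName());
      copierAllocator.close();
      oAllocator.close();
    }
  }
}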
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by axbaretto.
The class BufferedBatches, method bufferBatch.
@SuppressWarnings("resource")
private void bufferBatch(VectorContainer convertedBatch, SelectionVector2 sv2, long netSize) {
  BufferAllocator allocator = context.getAllocator();
  RecordBatchData rbd = new RecordBatchData(convertedBatch, allocator);
  try {
    rbd.setSv2(sv2);
    bufferedBatches.add(new BatchGroup.InputBatch(rbd.getContainer(), rbd.getSv2(), allocator, netSize));
  } catch (Throwable t) {
    rbd.clear();
    throw t;
  }
}
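bufferBatch hands ownership of the incoming buffers to RecordBatchData and must release them if registration fails, otherwise the allocator reports a leak at close. A self-contained sketch of that guard pattern follows; OwnedBatch is a hypothetical stand-in for RecordBatchData, not a Drill type.

import java.util.ArrayList;
import java.util.List;

public class CleanupOnFailureSketch {
  // Hypothetical resource type standing in for RecordBatchData.
  static class OwnedBatch implements AutoCloseable {
    @Override public void close() { System.out.println("batch released"); }
  }

  static final List<OwnedBatch> buffered = new ArrayList<>();

  static void bufferBatch(boolean failRegistration) {
    OwnedBatch batch = new OwnedBatch();   // takes ownership of the memory
    try {
      if (failRegistration) {
        throw new IllegalStateException("simulated failure");
      }
      buffered.add(batch);                 // ownership handed to the buffered list
    } catch (Throwable t) {
      batch.close();                       // release on any failure path
      throw t;                             // rethrow the original error
    }
  }

  public static void main(String[] args) {
    bufferBatch(false);
    try {
      bufferBatch(true);
    } catch (IllegalStateException expected) {
      System.out.println("failure handled, buffered batches: " + buffered.size());
    }
  }
}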
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by axbaretto.
The class PruneScanRule, method doOnMatch.
protected void doOnMatch(RelOptRuleCall call, Filter filterRel, Project projectRel, TableScan scanRel) {
  final String pruningClassName = getClass().getName();
  logger.info("Beginning partition pruning, pruning class: {}", pruningClassName);
  Stopwatch totalPruningTime = Stopwatch.createStarted();
  final PlannerSettings settings = PrelUtil.getPlannerSettings(call.getPlanner());
  PartitionDescriptor descriptor = getPartitionDescriptor(settings, scanRel);
  final BufferAllocator allocator = optimizerContext.getAllocator();
  final Object selection = getDrillTable(scanRel).getSelection();
  MetadataContext metaContext = null;
  if (selection instanceof FormatSelection) {
    metaContext = ((FormatSelection) selection).getSelection().getMetaContext();
  }
  RexNode condition;
  if (projectRel == null) {
    condition = filterRel.getCondition();
  } else {
    // get the filter as if it were below the projection.
    condition = RelOptUtil.pushFilterPastProject(filterRel.getCondition(), projectRel);
  }
  RewriteAsBinaryOperators visitor = new RewriteAsBinaryOperators(true, filterRel.getCluster().getRexBuilder());
  condition = condition.accept(visitor);
  Map<Integer, String> fieldNameMap = Maps.newHashMap();
  List<String> fieldNames = scanRel.getRowType().getFieldNames();
  BitSet columnBitset = new BitSet();
  BitSet partitionColumnBitSet = new BitSet();
  Map<Integer, Integer> partitionMap = Maps.newHashMap();
  int relColIndex = 0;
  for (String field : fieldNames) {
    final Integer partitionIndex = descriptor.getIdIfValid(field);
    if (partitionIndex != null) {
      fieldNameMap.put(partitionIndex, field);
      partitionColumnBitSet.set(partitionIndex);
      columnBitset.set(relColIndex);
      // mapping between the relColIndex and partitionIndex
      partitionMap.put(relColIndex, partitionIndex);
    }
    relColIndex++;
  }
  if (partitionColumnBitSet.isEmpty()) {
    logger.info("No partition columns are projected from the scan; continuing. Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
    setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
    return;
  }
  // stop watch to track how long we spend in different phases of pruning
  Stopwatch miscTimer = Stopwatch.createUnstarted();
  // track how long we spend building the filter tree
  miscTimer.start();
  FindPartitionConditions c = new FindPartitionConditions(columnBitset, filterRel.getCluster().getRexBuilder());
  c.analyze(condition);
  RexNode pruneCondition = c.getFinalCondition();
  BitSet referencedDirsBitSet = c.getReferencedDirs();
  logger.info("Total elapsed time to build and analyze filter tree: {} ms", miscTimer.elapsed(TimeUnit.MILLISECONDS));
  miscTimer.reset();
  if (pruneCondition == null) {
    logger.info("No conditions were found eligible for partition pruning. Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
    setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
    return;
  }
  // set up the partitions
  List<PartitionLocation> newPartitions = Lists.newArrayList();
  // total number of partitions
  long numTotal = 0;
  int batchIndex = 0;
  PartitionLocation firstLocation = null;
  LogicalExpression materializedExpr = null;
  String[] spInfo = null;
  int maxIndex = -1;
  BitSet matchBitSet = new BitSet();
  // Outer loop: iterate over a list of batches of PartitionLocations
  for (List<PartitionLocation> partitions : descriptor) {
    numTotal += partitions.size();
    logger.debug("Evaluating partition pruning for batch {}", batchIndex);
    if (batchIndex == 0) {
      // save the first location in case everything is pruned
      firstLocation = partitions.get(0);
    }
    final NullableBitVector output = new NullableBitVector(MaterializedField.create("", Types.optional(MinorType.BIT)), allocator);
    final VectorContainer container = new VectorContainer();
    try {
      final ValueVector[] vectors = new ValueVector[descriptor.getMaxHierarchyLevel()];
      for (int partitionColumnIndex : BitSets.toIter(partitionColumnBitSet)) {
        SchemaPath column = SchemaPath.getSimplePath(fieldNameMap.get(partitionColumnIndex));
        MajorType type = descriptor.getVectorType(column, settings);
        MaterializedField field = MaterializedField.create(column.getLastSegment().getNameSegment().getPath(), type);
        ValueVector v = TypeHelper.getNewVector(field, allocator);
        v.allocateNew();
        vectors[partitionColumnIndex] = v;
        container.add(v);
      }
      // track how long we spend populating partition column vectors
      miscTimer.start();
      // populate partition vectors.
      descriptor.populatePartitionVectors(vectors, partitions, partitionColumnBitSet, fieldNameMap);
      logger.info("Elapsed time to populate partitioning column vectors: {} ms within batchIndex: {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex);
      miscTimer.reset();
      // materialize the expression; only need to do this once
      if (batchIndex == 0) {
        materializedExpr = materializePruneExpr(pruneCondition, settings, scanRel, container);
        if (materializedExpr == null) {
          // continue without partition pruning; no need to log anything here since
          // materializePruneExpr logs it already
          logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
          setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
          return;
        }
      }
      output.allocateNew(partitions.size());
      // start the timer to evaluate how long we spend in the interpreter evaluation
      miscTimer.start();
      InterpreterEvaluator.evaluate(partitions.size(), optimizerContext, container, output, materializedExpr);
      logger.info("Elapsed time in interpreter evaluation: {} ms within batchIndex: {} with # of partitions : {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex, partitions.size());
      miscTimer.reset();
      int recordCount = 0;
      int qualifiedCount = 0;
      if (descriptor.supportsMetadataCachePruning() && partitions.get(0).isCompositePartition()) {
        // apply the single-partition check only for composite partitions
        // Inner loop: within each batch iterate over the PartitionLocations
        for (PartitionLocation part : partitions) {
          assert part.isCompositePartition();
          if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
            newPartitions.add(part);
            // Rather than using the PartitionLocation, get the array of partition values for the directories that are
            // referenced by the filter since we are not interested in directory references in other parts of the query.
            Pair<String[], Integer> p = composePartition(referencedDirsBitSet, partitionMap, vectors, recordCount);
            String[] parts = p.getLeft();
            int tmpIndex = p.getRight();
            maxIndex = Math.max(maxIndex, tmpIndex);
            if (spInfo == null) {
              // initialization
              spInfo = parts;
              for (int j = 0; j <= tmpIndex; j++) {
                if (parts[j] != null) {
                  matchBitSet.set(j);
                }
              }
            } else {
              // compare the new partition with the existing partition
              for (int j = 0; j <= tmpIndex; j++) {
                if (parts[j] == null || spInfo[j] == null) {
                  // nulls don't match
                  matchBitSet.clear(j);
                } else if (!parts[j].equals(spInfo[j])) {
                  matchBitSet.clear(j);
                }
              }
            }
            qualifiedCount++;
          }
          recordCount++;
        }
      } else {
        // Inner loop: within each batch iterate over the PartitionLocations
        for (PartitionLocation part : partitions) {
          if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
            newPartitions.add(part);
            qualifiedCount++;
          }
          recordCount++;
        }
      }
      logger.debug("Within batch {}: total records: {}, qualified records: {}", batchIndex, recordCount, qualifiedCount);
      batchIndex++;
    } catch (Exception e) {
      logger.warn("Exception while trying to prune partition.", e);
      logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
      setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
      // continue without partition pruning
      return;
    } finally {
      container.clear();
      if (output != null) {
        output.clear();
      }
    }
  }
  try {
    if (newPartitions.size() == numTotal) {
      logger.info("No partitions were eligible for pruning");
      return;
    }
    // handle the case where all partitions are filtered out.
    boolean canDropFilter = true;
    boolean wasAllPartitionsPruned = false;
    String cacheFileRoot = null;
    if (newPartitions.isEmpty()) {
      assert firstLocation != null;
      // Add the first non-composite partition location, since execution requires a schema.
      // In that case, we should not drop the filter.
      newPartitions.add(firstLocation.getPartitionLocationRecursive().get(0));
      canDropFilter = false;
      // NOTE: with DRILL-4530, the PruneScanRule may be called with only a list of
      // directories first and the non-composite partition location will still return
      // directories, not files. So, additional processing is done depending on this flag
      wasAllPartitionsPruned = true;
      logger.info("All {} partitions were pruned; added back a single partition to allow creating a schema", numTotal);
      // set the cacheFileRoot appropriately
      if (firstLocation.isCompositePartition()) {
        cacheFileRoot = descriptor.getBaseTableLocation() + firstLocation.getCompositePartitionPath();
      }
    }
    logger.info("Pruned {} partitions down to {}", numTotal, newPartitions.size());
    List<RexNode> conjuncts = RelOptUtil.conjunctions(condition);
    List<RexNode> pruneConjuncts = RelOptUtil.conjunctions(pruneCondition);
    conjuncts.removeAll(pruneConjuncts);
    RexNode newCondition = RexUtil.composeConjunction(filterRel.getCluster().getRexBuilder(), conjuncts, false);
    RewriteCombineBinaryOperators reverseVisitor = new RewriteCombineBinaryOperators(true, filterRel.getCluster().getRexBuilder());
    condition = condition.accept(reverseVisitor);
    pruneCondition = pruneCondition.accept(reverseVisitor);
    if (descriptor.supportsMetadataCachePruning() && !wasAllPartitionsPruned) {
      // if a metadata cache file could potentially be used, then assign a proper cacheFileRoot
      int index = -1;
      if (!matchBitSet.isEmpty()) {
        String path = "";
        index = matchBitSet.length() - 1;
        for (int j = 0; j < matchBitSet.length(); j++) {
          if (!matchBitSet.get(j)) {
            // stop at the first index with no match and use the immediately
            // preceding index
            index = j - 1;
            break;
          }
        }
        for (int j = 0; j <= index; j++) {
          path += "/" + spInfo[j];
        }
        cacheFileRoot = descriptor.getBaseTableLocation() + path;
      }
      if (index != maxIndex) {
        // if multiple partitions are being selected, we should not drop the filter
        // since we are reading the cache file at a parent/ancestor level
        canDropFilter = false;
      }
    }
    RelNode inputRel = descriptor.supportsMetadataCachePruning()
        ? descriptor.createTableScan(newPartitions, cacheFileRoot, wasAllPartitionsPruned, metaContext)
        : descriptor.createTableScan(newPartitions, wasAllPartitionsPruned);
    if (projectRel != null) {
      inputRel = projectRel.copy(projectRel.getTraitSet(), Collections.singletonList(inputRel));
    }
    if (newCondition.isAlwaysTrue() && canDropFilter) {
      call.transformTo(inputRel);
    } else {
      final RelNode newFilter = filterRel.copy(filterRel.getTraitSet(), Collections.singletonList(inputRel));
      call.transformTo(newFilter);
    }
    setPruneStatus(metaContext, PruneStatus.PRUNED);
  } catch (Exception e) {
    logger.warn("Exception while using the pruned partitions.", e);
  } finally {
    logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
  }
}
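The pruning loop's use of BufferAllocator follows a strict allocate/use/clear lifecycle: the NullableBitVector holding per-partition filter results is allocated from the optimizer's allocator and cleared in finally. Below is a minimal standalone sketch of that lifecycle using the same vector APIs as above; the setSafe/setValueCount mutator calls are an assumption based on the standard nullable-vector API, and the sizes are arbitrary.

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocatorFactory;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.vector.NullableBitVector;

public class PruneOutputVectorSketch {
  public static void main(String[] args) {
    try (BufferAllocator allocator = RootAllocatorFactory.newRoot(16 * 1024 * 1024)) {
      NullableBitVector output = new NullableBitVector(
          MaterializedField.create("", Types.optional(MinorType.BIT)), allocator);
      try {
        output.allocateNew(3);
        output.getMutator().setSafe(0, 1);   // partition 0 qualifies
        output.getMutator().setSafe(2, 0);   // partition 2 does not; index 1 stays null
        output.getMutator().setValueCount(3);
        for (int i = 0; i < 3; i++) {
          // Same null-then-value check as the inner loops in doOnMatch.
          boolean qualifies = !output.getAccessor().isNull(i)
              && output.getAccessor().get(i) == 1;
          System.out.println("partition " + i + " qualifies: " + qualifies);
        }
      } finally {
        output.clear();   // return the buffers to the allocator
      }
    }
  }
}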
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by axbaretto.
The class WebUserConnection, method sendData.
@Override
public void sendData(RpcOutcomeListener<Ack> listener, QueryWritableBatch result) {
  // Check whether there is any data. The cast below can overflow, but DrillBuf doesn't support
  // allocating with a byte count expressed as a long, so we preserve the earlier behavior and
  // log a debug message in that case.
  final int dataByteCount = (int) result.getByteCount();
  if (dataByteCount <= 0) {
    if (logger.isDebugEnabled()) {
      logger.debug("Either no data received in this batch or there is BufferOverflow in dataByteCount: {}", dataByteCount);
    }
    listener.success(Acks.OK, null);
    return;
  }
  // If we reach here there is some data for sure. Create a ByteBuf with all the data in it.
  final int rows = result.getHeader().getRowCount();
  final BufferAllocator allocator = webSessionResources.getAllocator();
  final DrillBuf bufferWithData = allocator.buffer(dataByteCount);
  try {
    final ByteBuf[] resultDataBuffers = result.getBuffers();
    for (final ByteBuf buffer : resultDataBuffers) {
      bufferWithData.writeBytes(buffer);
      buffer.release();
    }
    final RecordBatchLoader loader = new RecordBatchLoader(allocator);
    try {
      loader.load(result.getHeader().getDef(), bufferWithData);
      // Note: load() no longer throws SchemaChangeException, so the catch clause below
      // could be checked/cleaned.
      for (int i = 0; i < loader.getSchema().getFieldCount(); ++i) {
        columns.add(loader.getSchema().getColumn(i).getName());
      }
      for (int i = 0; i < rows; ++i) {
        final Map<String, String> record = Maps.newHashMap();
        for (VectorWrapper<?> vw : loader) {
          final String field = vw.getValueVector().getMetadata().getNamePart().getName();
          final Accessor accessor = vw.getValueVector().getAccessor();
          final Object value = i < accessor.getValueCount() ? accessor.getObject(i) : null;
          final String display = value == null ? null : value.toString();
          record.put(field, display);
        }
        results.add(record);
      }
    } finally {
      loader.clear();
    }
  } catch (Exception e) {
    exception = UserException.systemError(e).build(logger);
  } finally {
    // Notify the listener with ACK.OK in both the error and success cases because the data was
    // sent successfully from the Drillbit.
    bufferWithData.release();
    listener.success(Acks.OK, null);
  }
}
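The buffer handling here is a consolidation pattern: allocate a single DrillBuf for the whole payload, copy each incoming ByteBuf into it while releasing the source, and release the aggregate in finally. Below is a minimal sketch of the same pattern outside the RPC context, assuming RootAllocatorFactory.newRoot(long); the Unpooled test buffers stand in for the query result buffers.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.DrillBuf;
import io.netty.buffer.Unpooled;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocatorFactory;

public class ConsolidateBuffersSketch {
  public static void main(String[] args) {
    try (BufferAllocator allocator = RootAllocatorFactory.newRoot(16 * 1024 * 1024)) {
      ByteBuf[] incoming = {
          Unpooled.wrappedBuffer(new byte[] {1, 2}),
          Unpooled.wrappedBuffer(new byte[] {3, 4, 5}),
      };
      int total = 0;
      for (ByteBuf b : incoming) {
        total += b.readableBytes();
      }
      DrillBuf combined = allocator.buffer(total);
      try {
        for (ByteBuf b : incoming) {
          combined.writeBytes(b);   // copy the readable bytes
          b.release();              // source buffer is no longer needed
        }
        System.out.println("consolidated " + combined.readableBytes() + " bytes");
      } finally {
        combined.release();         // always release, success or failure
      }
    }
  }
}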
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by axbaretto.
The class TestLoad, method testMapSchemaChange.
@Test
public void testMapSchemaChange() throws SchemaChangeException {
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(drillConfig);
  final RecordBatchLoader batchLoader = new RecordBatchLoader(allocator);
  // Initial schema: a: INT, m: MAP{}
  BatchSchema schema1 = new SchemaBuilder().add("a", MinorType.INT).addMap("m").resumeSchema().build();
  {
    assertTrue(loadBatch(allocator, batchLoader, schema1));
    assertTrue(schema1.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }
  // Same schema
  // Schema change: No
  {
    assertFalse(loadBatch(allocator, batchLoader, schema1));
    assertTrue(schema1.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }
  // Add column to map: a: INT, m: MAP{b: VARCHAR}
  // Schema change: Yes
  BatchSchema schema2 = new SchemaBuilder().add("a", MinorType.INT).addMap("m").add("b", MinorType.VARCHAR).resumeSchema().build();
  {
    assertTrue(loadBatch(allocator, batchLoader, schema2));
    assertTrue(schema2.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }
  // Same schema
  // Schema change: No
  {
    assertFalse(loadBatch(allocator, batchLoader, schema2));
    assertTrue(schema2.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }
  // Add column: a: INT, m: MAP{b: VARCHAR, c: INT}
  // Schema change: Yes
  {
    BatchSchema schema = new SchemaBuilder().add("a", MinorType.INT).addMap("m").add("b", MinorType.VARCHAR).add("c", MinorType.INT).resumeSchema().build();
    assertTrue(loadBatch(allocator, batchLoader, schema));
    assertTrue(schema.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }
  // Drop a column: a: INT, m: MAP{b: VARCHAR}
  // Schema change: Yes
  {
    BatchSchema schema = new SchemaBuilder().add("a", MinorType.INT).addMap("m").add("b", MinorType.VARCHAR).resumeSchema().build();
    assertTrue(loadBatch(allocator, batchLoader, schema));
    assertTrue(schema.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }
  // Change type: a: INT, m: MAP{b: INT}
  // Schema change: Yes
  {
    BatchSchema schema = new SchemaBuilder().add("a", MinorType.INT).addMap("m").add("b", MinorType.INT).resumeSchema().build();
    assertTrue(loadBatch(allocator, batchLoader, schema));
    assertTrue(schema.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }
  // Empty map: a: INT, m: MAP{}
  {
    assertTrue(loadBatch(allocator, batchLoader, schema1));
    assertTrue(schema1.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }
  // Drop map: a: INT
  {
    BatchSchema schema = new SchemaBuilder().add("a", MinorType.INT).build();
    assertTrue(loadBatch(allocator, batchLoader, schema));
    assertTrue(schema.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }
  batchLoader.clear();
  allocator.close();
}
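The final allocator.close() doubles as the test's leak check: a Drill BufferAllocator refuses to close while any of its buffers are outstanding, which is why every block above clears its vectors via zeroVectors() and the test ends with batchLoader.clear(). A minimal sketch of that idiom follows, assuming close() raises an IllegalStateException on leaked memory; the exact failure behavior is an assumption about the allocator's leak accounting.

import io.netty.buffer.DrillBuf;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocatorFactory;

public class AllocatorLeakCheckSketch {
  public static void main(String[] args) {
    BufferAllocator allocator = RootAllocatorFactory.newRoot(16 * 1024 * 1024);
    DrillBuf buf = allocator.buffer(256);
    buf.release();       // comment this out and close() below reports the leak
    allocator.close();   // fails if any allocated buffer was not released
  }
}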