Example usage of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: method canDoWork of class WindowFrameRecordBatch.
/**
 * Checks whether every registered window function has buffered enough data to
 * process the batch at the front of the queue.
 *
 * @return true when all window functions are ready to process the current batch
 *         (the first batch currently held in memory)
 */
private boolean canDoWork() {
  // With fewer than two buffered batches we cannot look past the current partition yet.
  if (batches.size() < 2) {
    return false;
  }

  final VectorAccessible oldest = batches.get(0);
  final VectorAccessible newest = batches.get(batches.size() - 1);
  final int oldestCount = oldest.getRecordCount();
  final int newestCount = newest.getRecordCount();

  try {
    // Compare the last row of the oldest batch against the last row of the newest batch.
    final boolean partitionEnded =
        !framers[0].isSamePartition(oldestCount - 1, oldest, newestCount - 1, newest);
    final boolean frameEnded =
        partitionEnded || !framers[0].isPeer(oldestCount - 1, oldest, newestCount - 1, newest);

    // Every window function must agree it can make progress.
    for (final WindowFunction fn : functions) {
      if (!fn.canDoWork(batches.size(), popConfig, frameEnded, partitionEnded)) {
        return false;
      }
    }
  } catch (SchemaChangeException e) {
    throw new UnsupportedOperationException(e);
  }

  return true;
}
Example usage of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: method getGenerated4Copier of class RemovingRecordBatch.
/**
 * Generates, compiles, and sets up a {@link Copier} for four-byte selection
 * vectors, mirroring each of the batch's value vectors into the output container.
 *
 * @throws SchemaChangeException if the generated copier class cannot be compiled or loaded
 */
public static Copier getGenerated4Copier(RecordBatch batch, FragmentContext context, BufferAllocator allocator, VectorContainer container, RecordBatch outgoing, SchemaChangeCallBack callBack) throws SchemaChangeException {
  // Mirror every incoming vector in the output container and wire a transfer pair to it.
  for (final VectorWrapper<?> wrapper : batch) {
    @SuppressWarnings("resource")
    final ValueVector vector = wrapper.getValueVectors()[0];
    vector.makeTransferPair(container.addOrGet(vector.getField(), callBack));
  }
  try {
    final CodeGenerator<Copier> codeGen = CodeGenerator.get(Copier.TEMPLATE_DEFINITION4, context.getFunctionRegistry(), context.getOptions());
    CopyUtil.generateCopies(codeGen.getRoot(), batch, true);
    codeGen.plainJavaCapable(true);
    // Uncomment to save the generated source for debugging.
    // codeGen.saveCodeForDebugging(true);
    final Copier copier = context.getImplementationClass(codeGen);
    copier.setupRemover(context, batch, outgoing);
    return copier;
  } catch (ClassTransformationException | IOException e) {
    throw new SchemaChangeException("Failure while attempting to load generated class", e);
  }
}
Example usage of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: method setupNewSchema of class TraceRecordBatch.
/**
 * Rebuilds the output container so it matches the incoming batch's new schema.
 *
 * @return true, since the container was rebuilt
 * @throws SchemaChangeException if the incoming batch uses a four-byte (hyper) selection vector
 */
@Override
protected boolean setupNewSchema() throws SchemaChangeException {
  // The trace operator does not support hyper vectors yet.
  if (incoming.getSchema().getSelectionVectorMode() == SelectionVectorMode.FOUR_BYTE) {
    throw new SchemaChangeException("Trace operator does not work with hyper vectors");
  }

  // A new schema arrived: discard the vectors built for the previous one.
  container.clear();

  // Transfer every incoming value vector into our container.
  for (final VectorWrapper<?> wrapper : incoming) {
    final TransferPair pair = wrapper.getValueVector().getTransferPair(oContext.getAllocator());
    container.add(pair.getTo());
  }

  container.buildSchema(incoming.getSchema().getSelectionVectorMode());
  return true;
}
Example usage of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: method createMerger of class MergingRecordBatch.
// private boolean isOutgoingFull() {
// return outgoingPosition == DEFAULT_ALLOC_RECORD_COUNT;
// }
/**
 * Generates and instantiates the merger implementation that copies and compares
 * records across all incoming batch loaders.
 *
 * @return instance of a new merger based on generated code
 * @throws SchemaChangeException if the generated class cannot be compiled or loaded
 */
private MergingReceiverGeneratorBase createMerger() throws SchemaChangeException {
  try {
    final CodeGenerator<MergingReceiverGeneratorBase> codeGen = CodeGenerator.get(MergingReceiverGeneratorBase.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
    codeGen.plainJavaCapable(true);
    // Uncomment to save the generated source for debugging.
    // codeGen.saveCodeForDebugging(true);
    final ClassGenerator<MergingReceiverGeneratorBase> root = codeGen.getRoot();

    // Collect every loader's vectors into one hyper container.
    ExpandableHyperContainer hyperBatch = null;
    for (final RecordBatchLoader loader : batchLoaders) {
      if (hyperBatch == null) {
        hyperBatch = new ExpandableHyperContainer(loader);
      } else {
        hyperBatch.addBatch(loader);
      }
    }

    generateComparisons(root, hyperBatch);

    root.setMappingSet(COPIER_MAPPING_SET);
    CopyUtil.generateCopies(root, hyperBatch, true);
    root.setMappingSet(MAIN_MAPPING);

    final MergingReceiverGeneratorBase merger = context.getImplementationClass(codeGen);
    merger.doSetup(context, hyperBatch, outgoingContainer);
    return merger;
  } catch (ClassTransformationException | IOException e) {
    throw new SchemaChangeException(e);
  }
}
Example usage of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: method setup of class CompliantTextRecordReader.
/**
 * Performs the initial setup required for the record reader: allocates the
 * read/whitespace buffers, opens the input stream, wires the output record
 * batch, and creates and starts the actual text reader.
 *
 * @param context operator context from which buffers will be allocated and managed
 * @param outputMutator used to create the schema in the output record batch
 * @throws ExecutionSetupException if the reader cannot be initialized
 */
@SuppressWarnings("resource")
@Override
public void setup(OperatorContext context, OutputMutator outputMutator) throws ExecutionSetupException {
  oContext = context;

  // Deliberately NOT managed buffers: managed buffers live until the fragment
  // shuts down, so scanning many files would pin large amounts of memory
  // (e.g. 1000 files x 1 MB = 1 GB). These explicit allocations must be freed.
  // readBuffer = context.getManagedBuffer(READ_BUFFER);
  // whitespaceBuffer = context.getManagedBuffer(WHITE_SPACE_BUFFER);
  readBuffer = context.getAllocator().buffer(READ_BUFFER);
  whitespaceBuffer = context.getAllocator().buffer(WHITE_SPACE_BUFFER);
  // NOTE(review): if setup fails below, these buffers are not released here —
  // presumably close() frees them; confirm against the caller's cleanup path.

  try {
    // Output: named VarChar columns from the header, or a single repeated VarChar column.
    final TextOutput textOutput;
    if (settings.isHeaderExtractionEnabled()) {
      // Extract the header and use it to set up a set of VarCharVectors.
      final String[] fieldNames = extractHeader();
      textOutput = new FieldVarCharOutput(outputMutator, fieldNames, getColumns(), isStarQuery());
    } else {
      textOutput = new RepeatedVarCharOutput(outputMutator, getColumns(), isStarQuery());
    }

    // Input: open the (possibly compressed) split and window it to this split's byte range.
    logger.trace("Opening file {}", split.getPath());
    final InputStream inputStream = dfs.openPossiblyCompressedStream(split.getPath());
    final TextInput textInput = new TextInput(settings, inputStream, readBuffer, split.getStart(), split.getStart() + split.getLength());

    // Reader: glue input and output together and start scanning.
    reader = new TextReader(settings, textInput, textOutput, whitespaceBuffer);
    reader.start();
  } catch (SchemaChangeException | IOException e) {
    throw new ExecutionSetupException(String.format("Failure while setting up text reader for file %s", split.getPath()), e);
  } catch (IllegalArgumentException e) {
    throw UserException.dataReadError(e).addContext("File Path", split.getPath().toString()).build(logger);
  }
}
End of aggregated usage examples.