Use of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: class ProjectorTemplate, method projectRecords.
/**
 * Projects up to {@code recordCount} records from the incoming batch into the
 * outgoing batch, dispatching on the incoming selection-vector mode.
 *
 * @param startIndex       first incoming record to project (NONE mode only)
 * @param recordCount      number of records requested
 * @param firstOutputIndex first slot in the outgoing batch to write
 * @return the number of records actually projected
 */
@Override
public final int projectRecords(int startIndex, final int recordCount, int firstOutputIndex) {
  switch (svMode) {
    case FOUR_BYTE:
      // 4-byte selection vectors are not supported by this projector.
      throw new UnsupportedOperationException();

    case TWO_BYTE: {
      // Indirect copy: each logical position is mapped through the 2-byte
      // selection vector before evaluation.
      int outIndex = firstOutputIndex;
      for (int pos = 0; pos < recordCount; pos++, outIndex++) {
        try {
          doEval(vector2.getIndex(pos), outIndex);
        } catch (SchemaChangeException e) {
          // Schema is fixed once code generation has happened.
          throw new UnsupportedOperationException(e);
        }
      }
      return recordCount;
    }

    case NONE: {
      // Direct copy: incoming indices are consumed sequentially from startIndex.
      final int stop = startIndex + recordCount;
      int readIndex = startIndex;
      int writeIndex = firstOutputIndex;
      while (readIndex < stop) {
        try {
          doEval(readIndex, writeIndex);
        } catch (SchemaChangeException e) {
          throw new UnsupportedOperationException(e);
        }
        readIndex++;
        writeIndex++;
      }
      // A partial projection or a non-zero start requires slicing the
      // transferred vectors instead of handing them over wholesale.
      if (readIndex < stop || startIndex > 0) {
        for (TransferPair t : transfers) {
          t.splitAndTransfer(startIndex, readIndex - startIndex);
        }
        return readIndex - startIndex;
      }
      for (TransferPair t : transfers) {
        t.transfer();
      }
      return recordCount;
    }

    default:
      throw new UnsupportedOperationException();
  }
}
Use of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: class PartitionSenderRootExec, method createClassInstances.
/**
 * Generates, compiles, and instantiates {@code actualPartitions} {@link Partitioner}
 * implementations from the operator's partitioning expression.
 *
 * @param actualPartitions number of partitioner instances to create
 * @return the freshly compiled partitioner instances
 * @throws SchemaChangeException if the expression cannot be materialized against the
 *         incoming schema, or if the generated class fails to compile/load
 */
private List<Partitioner> createClassInstances(int actualPartitions) throws SchemaChangeException {
  // Set up the partitioning function.
  final LogicalExpression partitionExpr = operator.getExpr();
  final ErrorCollector errors = new ErrorCollectorImpl();

  final ClassGenerator<Partitioner> rootGen =
      CodeGenerator.getRoot(Partitioner.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
  rootGen.getCodeGenerator().plainJavaCapable(true);
  // Uncomment to save the generated source for debugging:
  // rootGen.getCodeGenerator().saveCodeForDebugging(true);
  final ClassGenerator<Partitioner> innerGen = rootGen.getInnerGenerator("OutgoingRecordBatch");

  final LogicalExpression materialized =
      ExpressionTreeMaterializer.materialize(partitionExpr, incoming, errors, context.getFunctionRegistry());
  if (errors.hasErrors()) {
    throw new SchemaChangeException(String.format(
        "Failure while trying to materialize incoming schema. Errors:\n %s.", errors.toErrorString()));
  }

  // Generate: int bucket = <expr> % outGoingBatchCount; return Math.abs(bucket);
  // The absolute value keeps negative hash values inside the partition range.
  final JExpression bucketRef = JExpr.direct("bucket");
  final ClassGenerator.HoldingContainer hashHolder = rootGen.addExpr(materialized);
  rootGen.getEvalBlock().decl(JType.parse(rootGen.getModel(), "int"), "bucket",
      hashHolder.getValue().mod(JExpr.lit(outGoingBatchCount)));
  rootGen.getEvalBlock()._return(rootGen.getModel().ref(Math.class).staticInvoke("abs").arg(bucketRef));

  // Generate per-column copy routines from the incoming batch into each
  // partition's outgoing value vectors.
  CopyUtil.generateCopies(innerGen, incoming,
      incoming.getSchema().getSelectionVectorMode() == SelectionVectorMode.FOUR_BYTE);

  try {
    // Compile and set up the generated code.
    return context.getImplementationClass(rootGen, actualPartitions);
  } catch (ClassTransformationException | IOException e) {
    throw new SchemaChangeException("Failure while attempting to load generated class", e);
  }
}
Use of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: class PartitionSenderRootExec, method innerNext.
/**
 * Drives one iteration of the sender: pulls the next batch from the upstream
 * operator and routes its records through the partitioner. Returns true while
 * more batches are expected, false once the stream is finished or has failed.
 */
@Override
public boolean innerNext() {
// Stop immediately if a previous iteration recorded a failure.
if (!ok) {
return false;
}
IterOutcome out;
if (!done) {
out = next(incoming);
} else {
// Upstream is already finished; terminate it and treat the stream as ended.
incoming.kill(true);
out = IterOutcome.NONE;
}
logger.debug("Partitioner.next(): got next record batch with status {}", out);
// Promote the very first OK to OK_NEW_SCHEMA so a partitioner is built before
// any records are partitioned.
if (first && out == IterOutcome.OK) {
out = IterOutcome.OK_NEW_SCHEMA;
}
switch(out) {
case NONE:
try {
// send any pending batches
if (partitioner != null) {
partitioner.flushOutgoingBatches(true, false);
} else {
// No partitioner was ever created (no data seen); still emit an empty
// terminal batch downstream.
sendEmptyBatch(true);
}
} catch (IOException e) {
incoming.kill(false);
logger.error("Error while creating partitioning sender or flushing outgoing batches", e);
context.fail(e);
}
return false;
case OUT_OF_MEMORY:
throw new OutOfMemoryException();
case STOP:
// Upstream requested a stop; release partitioner resources and end iteration.
if (partitioner != null) {
partitioner.clear();
}
return false;
case OK_NEW_SCHEMA:
try {
// send all existing batches
if (partitioner != null) {
partitioner.flushOutgoingBatches(false, true);
partitioner.clear();
}
// Rebuild the partitioner against the new incoming schema.
createPartitioner();
if (first) {
// Send an empty batch for fast schema
first = false;
sendEmptyBatch(false);
}
} catch (IOException e) {
incoming.kill(false);
logger.error("Error while flushing outgoing batches", e);
context.fail(e);
return false;
} catch (SchemaChangeException e) {
incoming.kill(false);
logger.error("Error while setting up partitioner", e);
context.fail(e);
return false;
}
// NOTE: intentional fall-through into OK — the OK_NEW_SCHEMA batch still
// carries records that must be partitioned by the handling below.
case OK:
try {
partitioner.partitionBatch(incoming);
} catch (IOException e) {
context.fail(e);
incoming.kill(false);
return false;
}
// Release the incoming buffers now that their contents have been copied out.
for (VectorWrapper<?> v : incoming) {
v.clear();
}
return true;
case NOT_YET:
default:
throw new IllegalStateException();
}
}
Use of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: class PartitionerTemplate, method doCopy.
/**
 * Routes a single record to its destination partition's outgoing batch.
 * Records whose partition falls outside this sender's [start, end) range
 * are skipped (they belong to a different sub-partitioner).
 *
 * @param svIndex index of the record to copy
 * @throws IOException if copying into the outgoing batch fails
 */
private void doCopy(int svIndex) throws IOException {
  final int partition;
  try {
    partition = doEval(svIndex);
  } catch (SchemaChangeException e) {
    // Schema is fixed after code generation; a change here is unsupported.
    throw new UnsupportedOperationException(e);
  }
  if (partition >= start && partition < end) {
    final OutgoingRecordBatch destination = outgoingBatches.get(partition - start);
    destination.copy(svIndex);
  }
}
Use of org.apache.drill.exec.exception.SchemaChangeException in the Apache Drill project: class OrderedPartitionRecordBatch, method getCopier.
/**
 * Builds a {@link SampleCopier} that projects every Nth record from the incoming
 * container into the outgoing container. One output column is generated per
 * {@link Ordering}, its value produced by that ordering's expression; the sampled
 * records are later sorted on these columns in ordering sequence.
 *
 * @param sv4                     selection vector identifying the sampled records
 * @param incoming                container the samples are read from
 * @param outgoing                container the generated columns are written into
 * @param orderings               one ordering per generated output column
 * @param localAllocationVectors  receives the newly created output value vectors
 * @return a compiled and initialized copier
 * @throws SchemaChangeException if expression materialization fails or the
 *         generated class cannot be compiled/loaded
 */
private SampleCopier getCopier(SelectionVector4 sv4, VectorContainer incoming, VectorContainer outgoing, List<Ordering> orderings, List<ValueVector> localAllocationVectors) throws SchemaChangeException {
  final ErrorCollector errorCollector = new ErrorCollectorImpl();
  final ClassGenerator<SampleCopier> gen =
      CodeGenerator.getRoot(SampleCopier.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
  // Note: disabled for now. This may require some debugging:
  // no tests are available for this operator.
  // gen.getCodeGenerator().plainOldJavaCapable(true);
  // Uncomment to save the generated source for debugging:
  // gen.getCodeGenerator().saveCodeForDebugging(true);

  int fieldIndex = 0;
  for (Ordering ordering : orderings) {
    final LogicalExpression expr =
        ExpressionTreeMaterializer.materialize(ordering.getExpr(), incoming, errorCollector, context.getFunctionRegistry());
    // Output columns are named f0, f1, ... in ordering sequence.
    final SchemaPath fieldPath = SchemaPath.getSimplePath("f" + fieldIndex++);
    // Force REQUIRED mode so sample columns are materialized as non-nullable.
    final TypeProtos.MajorType requiredType = TypeProtos.MajorType.newBuilder()
        .mergeFrom(expr.getMajorType())
        .clearMode()
        .setMode(TypeProtos.DataMode.REQUIRED)
        .build();
    final MaterializedField outputField = MaterializedField.create(fieldPath.getAsUnescapedPath(), requiredType);
    if (errorCollector.hasErrors()) {
      throw new SchemaChangeException(String.format(
          "Failure while trying to materialize incoming schema. Errors:\n %s.", errorCollector.toErrorString()));
    }
    @SuppressWarnings("resource") final ValueVector vector = TypeHelper.getNewVector(outputField, oContext.getAllocator());
    localAllocationVectors.add(vector);
    final TypedFieldId fieldId = outgoing.add(vector);
    final ValueVectorWriteExpression write = new ValueVectorWriteExpression(fieldId, expr, true);
    final HoldingContainer holder = gen.addExpr(write);
    // A write returning 0 means the copy failed; bail out with false.
    gen.getEvalBlock()._if(holder.getValue().eq(JExpr.lit(0)))._then()._return(JExpr.FALSE);
  }
  gen.rotateBlock();
  gen.getEvalBlock()._return(JExpr.TRUE);
  outgoing.buildSchema(BatchSchema.SelectionVectorMode.NONE);

  try {
    final SampleCopier copier = context.getImplementationClass(gen);
    copier.setupCopier(context, sv4, incoming, outgoing);
    return copier;
  } catch (ClassTransformationException | IOException e) {
    throw new SchemaChangeException(e);
  }
}
Aggregations