Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class JdbcRecordReader, method setup:
@Override
public void setup(OperatorContext operatorContext, OutputMutator output) throws ExecutionSetupException {
  try {
    this.operatorContext = operatorContext;
    connection = source.getConnection();
    statement = connection.createStatement();
    resultSet = statement.executeQuery(sql);
    final ResultSetMetaData meta = resultSet.getMetaData();
    final int columns = meta.getColumnCount();
    ImmutableList.Builder<ValueVector> vectorBuilder = ImmutableList.builder();
    ImmutableList.Builder<Copier<?>> copierBuilder = ImmutableList.builder();
    // JDBC column indexes are 1-based.
    for (int i = 1; i <= columns; i++) {
      final String name = meta.getColumnLabel(i);
      final int jdbcType = meta.getColumnType(i);
      final int width = meta.getPrecision(i);
      final int scale = meta.getScale(i);
      MinorType minorType = JDBC_TYPE_MAPPINGS.get(jdbcType);
      if (minorType == null) {
        // Columns with no Drill type mapping are skipped, not treated as errors.
        logger.warn("Ignoring column that is unsupported.",
            UserException.unsupportedError()
                .message("A column you queried has a data type that is not currently supported by the JDBC storage plugin. "
                    + "The column's name was %s and its JDBC data type was %s. ", name, nameFromType(jdbcType))
                .addContext("sql", sql)
                .addContext("column Name", name)
                .addContext("plugin", storagePluginName)
                .build(logger));
        continue;
      }
      final MajorType type = Types.optional(minorType);
      final MaterializedField field = MaterializedField.create(name, type);
      final Class<? extends ValueVector> clazz =
          (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(minorType, type.getMode());
      ValueVector vector = output.addField(field, clazz);
      vectorBuilder.add(vector);
      copierBuilder.add(getCopier(jdbcType, i, resultSet, vector));
    }
    vectors = vectorBuilder.build();
    copiers = copierBuilder.build();
  } catch (SQLException | SchemaChangeException e) {
    throw UserException.dataReadError(e)
        .message("The JDBC storage plugin failed while trying to set up the SQL query. ")
        .addContext("sql", sql)
        .addContext("plugin", storagePluginName)
        .build(logger);
  }
}
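For context, JDBC_TYPE_MAPPINGS (referenced above but not shown) maps java.sql.Types codes to Drill MinorType values. A minimal sketch of what such a table could look like, assuming Guava's ImmutableMap; the entries here are illustrative, not Drill's actual mapping:

import java.sql.Types;
import com.google.common.collect.ImmutableMap;
import org.apache.drill.common.types.TypeProtos.MinorType;

public class JdbcTypeMappingSketch {
  // Illustrative subset only; the real table in Drill covers many more types.
  static final ImmutableMap<Integer, MinorType> JDBC_TYPE_MAPPINGS =
      ImmutableMap.<Integer, MinorType>builder()
          .put(Types.INTEGER, MinorType.INT)          // 32-bit integers
          .put(Types.BIGINT, MinorType.BIGINT)        // 64-bit integers
          .put(Types.DOUBLE, MinorType.FLOAT8)        // double precision
          .put(Types.VARCHAR, MinorType.VARCHAR)      // variable-length strings
          .put(Types.DATE, MinorType.DATE)
          .put(Types.TIMESTAMP, MinorType.TIMESTAMP)
          .build();
}

Any jdbcType absent from this map makes the lookup in setup return null, which triggers the "Ignoring column that is unsupported" warning above.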
Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class PrintingResultsListener, method dataArrived:
@Override
public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
  final QueryData header = result.getHeader();
  final DrillBuf data = result.getData();
  if (data != null) {
    count.addAndGet(header.getRowCount());
    try {
      loader.load(header.getDef(), data);
      // TODO: Clean: DRILL-2933: That load(...) no longer throws
      // SchemaChangeException, so check/clean catch clause below.
    } catch (SchemaChangeException e) {
      submissionFailed(UserException.systemError(e).build(logger));
    }
    switch (format) {
      case TABLE:
        VectorUtil.showVectorAccessibleContent(loader, columnWidth);
        break;
      case TSV:
        VectorUtil.showVectorAccessibleContent(loader, "\t");
        break;
      case CSV:
        VectorUtil.showVectorAccessibleContent(loader, ",");
        break;
    }
    loader.clear();
  }
  result.release();
}
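Note that if loading or printing throws an unchecked exception, the result.release() on the last line is never reached and the batch buffer leaks. A defensive variant (a sketch, not the actual Drill source) guarantees the release with try/finally:

@Override
public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
  try {
    final QueryData header = result.getHeader();
    final DrillBuf data = result.getData();
    if (data != null) {
      count.addAndGet(header.getRowCount());
      // ... load the batch, print it in the selected format, and clear the loader, as above ...
    }
  } finally {
    // Release the batch buffer even if loading or printing failed.
    result.release();
  }
}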
Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class ExternalSortBatch, method createNewMSorter:
private MSorter createNewMSorter(FragmentContext context, List<Ordering> orderings, VectorAccessible batch,
    MappingSet mainMapping, MappingSet leftMapping, MappingSet rightMapping)
    throws ClassTransformationException, IOException, SchemaChangeException {
  CodeGenerator<MSorter> cg = CodeGenerator.get(MSorter.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
  ClassGenerator<MSorter> g = cg.getRoot();
  g.setMappingSet(mainMapping);
  for (Ordering od : orderings) {
    // First, we rewrite the evaluation stack for each side of the comparison.
    ErrorCollector collector = new ErrorCollectorImpl();
    final LogicalExpression expr = ExpressionTreeMaterializer.materialize(od.getExpr(), batch, collector, context.getFunctionRegistry());
    if (collector.hasErrors()) {
      throw new SchemaChangeException("Failure while materializing expression. " + collector.toErrorString());
    }
    g.setMappingSet(leftMapping);
    HoldingContainer left = g.addExpr(expr, ClassGenerator.BlkCreateMode.FALSE);
    g.setMappingSet(rightMapping);
    HoldingContainer right = g.addExpr(expr, ClassGenerator.BlkCreateMode.FALSE);
    g.setMappingSet(mainMapping);
    // Next, we wrap the two comparison sides and add the expression block for the comparison.
    LogicalExpression fh = FunctionGenerationHelper.getOrderingComparator(od.nullsSortHigh(), left, right, context.getFunctionRegistry());
    HoldingContainer out = g.addExpr(fh, ClassGenerator.BlkCreateMode.FALSE);
    JConditional jc = g.getEvalBlock()._if(out.getValue().ne(JExpr.lit(0)));
    if (od.getDirection() == Direction.ASCENDING) {
      jc._then()._return(out.getValue());
    } else {
      jc._then()._return(out.getValue().minus());
    }
    g.rotateBlock();
  }
  g.rotateBlock();
  g.getEvalBlock()._return(JExpr.lit(0));
  // This class can generate plain-old Java.
  cg.plainJavaCapable(true);
  // cg.saveCodeForDebugging(true);
  return context.getImplementationClass(cg);
}
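Conceptually, the comparison method this generator emits is a multi-key comparator: keys are tried in order, the first non-zero comparison decides, and descending keys negate the result. A plain-Java analogue (an illustrative sketch, not the code Drill actually generates):

import java.util.Comparator;
import java.util.List;

final class MultiKeyComparatorSketch<T> implements Comparator<T> {
  private final List<Comparator<T>> keyComparators; // one per Ordering
  private final List<Boolean> ascending;            // direction per key

  MultiKeyComparatorSketch(List<Comparator<T>> keyComparators, List<Boolean> ascending) {
    this.keyComparators = keyComparators;
    this.ascending = ascending;
  }

  @Override
  public int compare(T left, T right) {
    for (int i = 0; i < keyComparators.size(); i++) {
      int cmp = keyComparators.get(i).compare(left, right);
      if (cmp != 0) {
        // Mirrors jc._then()._return(...): DESCENDING negates the comparison.
        return ascending.get(i) ? cmp : -cmp;
      }
    }
    // Mirrors g.getEvalBlock()._return(JExpr.lit(0)): all keys compare equal.
    return 0;
  }
}

Drill generates this logic as code specialized to the batch's schema (hence the SchemaChangeException when expressions cannot be materialized against the current schema) rather than paying the cost of virtual dispatch per comparison.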
Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class ExternalSortBatch, method innerNext:
@SuppressWarnings("resource")
@Override
public IterOutcome innerNext() {
  if (schema != null) {
    if (spillCount == 0) {
      return (getSelectionVector4().next()) ? IterOutcome.OK : IterOutcome.NONE;
    } else {
      Stopwatch w = Stopwatch.createStarted();
      int count = copier.next(targetRecordCount);
      if (count > 0) {
        long t = w.elapsed(TimeUnit.MICROSECONDS);
        logger.debug("Took {} us to merge {} records", t, count);
        container.setRecordCount(count);
        return IterOutcome.OK;
      } else {
        logger.debug("copier returned 0 records");
        return IterOutcome.NONE;
      }
    }
  }
  int totalCount = 0;
  // Total number of batches received so far.
  int totalBatches = 0;
  try {
    container.clear();
    outer:
    while (true) {
      IterOutcome upstream;
      if (first) {
        upstream = IterOutcome.OK_NEW_SCHEMA;
      } else {
        upstream = next(incoming);
      }
      if (upstream == IterOutcome.OK && sorter == null) {
        upstream = IterOutcome.OK_NEW_SCHEMA;
      }
      switch (upstream) {
        case NONE:
          if (first) {
            return upstream;
          }
          break outer;
        case NOT_YET:
          throw new UnsupportedOperationException();
        case STOP:
          return upstream;
        case OK_NEW_SCHEMA:
        case OK:
          VectorContainer convertedBatch;
          // Only act when the schema truly changes; artificial schema changes are ignored.
          if (upstream == IterOutcome.OK_NEW_SCHEMA && !incoming.getSchema().equals(schema)) {
            if (schema != null) {
              if (unionTypeEnabled) {
                this.schema = SchemaUtil.mergeSchemas(schema, incoming.getSchema());
              } else {
                throw SchemaChangeException.schemaChanged("Schema changes not supported in External Sort. Please enable Union type", schema, incoming.getSchema());
              }
            } else {
              schema = incoming.getSchema();
            }
            convertedBatch = SchemaUtil.coerceContainer(incoming, schema, oContext);
            for (BatchGroup b : batchGroups) {
              b.setSchema(schema);
            }
            for (BatchGroup b : spilledBatchGroups) {
              b.setSchema(schema);
            }
            this.sorter = createNewSorter(context, convertedBatch);
          } else {
            convertedBatch = SchemaUtil.coerceContainer(incoming, schema, oContext);
          }
          if (first) {
            first = false;
          }
          if (convertedBatch.getRecordCount() == 0) {
            for (VectorWrapper<?> w : convertedBatch) {
              w.clear();
            }
            break;
          }
          SelectionVector2 sv2;
          if (incoming.getSchema().getSelectionVectorMode() == BatchSchema.SelectionVectorMode.TWO_BYTE) {
            sv2 = incoming.getSelectionVector2().clone();
          } else {
            try {
              sv2 = newSV2();
            } catch (InterruptedException e) {
              return IterOutcome.STOP;
            } catch (OutOfMemoryException e) {
              throw new OutOfMemoryException(e);
            }
          }
          int count = sv2.getCount();
          totalCount += count;
          totalBatches++;
          sorter.setup(context, sv2, convertedBatch);
          sorter.sort(sv2);
          RecordBatchData rbd = new RecordBatchData(convertedBatch, oAllocator);
          boolean success = false;
          try {
            rbd.setSv2(sv2);
            batchGroups.add(new BatchGroup(rbd.getContainer(), rbd.getSv2(), oContext));
            if (peakNumBatches < batchGroups.size()) {
              peakNumBatches = batchGroups.size();
              stats.setLongStat(Metric.PEAK_BATCHES_IN_MEMORY, peakNumBatches);
            }
            batchesSinceLastSpill++;
            // If we haven't spilled so far, do we have enough memory for the MSorter
            // if this turns out to be the last incoming batch?
            if ((spillCount == 0 && !hasMemoryForInMemorySort(totalCount))
                // If we haven't spilled so far, make sure we don't exceed the maximum
                // number of batches SV4 can address.
                || (spillCount == 0 && totalBatches > Character.MAX_VALUE)
                // Current memory used is more than 95% of this operator's memory limit.
                || (oAllocator.getAllocatedMemory() > .95 * oAllocator.getLimit())
                // The number of batches collected since the last spill exceeds the defined limit.
                || (batchGroups.size() > SPILL_THRESHOLD && batchesSinceLastSpill >= SPILL_BATCH_GROUP_SIZE)) {
              if (firstSpillBatchCount == 0) {
                firstSpillBatchCount = batchGroups.size();
              }
              if (spilledBatchGroups.size() > firstSpillBatchCount / 2) {
                logger.info("Merging spills");
                final BatchGroup merged = mergeAndSpill(spilledBatchGroups);
                if (merged != null) {
                  spilledBatchGroups.addFirst(merged);
                }
              }
              final BatchGroup merged = mergeAndSpill(batchGroups);
              if (merged != null) {
                // Make sure we don't add null to spilledBatchGroups.
                spilledBatchGroups.add(merged);
                batchesSinceLastSpill = 0;
              }
            }
            success = true;
          } finally {
            if (!success) {
              rbd.clear();
            }
          }
          break;
        case OUT_OF_MEMORY:
          logger.debug("received OUT_OF_MEMORY, trying to spill");
          if (batchesSinceLastSpill > 2) {
            final BatchGroup merged = mergeAndSpill(batchGroups);
            if (merged != null) {
              spilledBatchGroups.add(merged);
              batchesSinceLastSpill = 0;
            }
          } else {
            logger.debug("not enough batches to spill, sending OUT_OF_MEMORY downstream");
            return IterOutcome.OUT_OF_MEMORY;
          }
          break;
        default:
          throw new UnsupportedOperationException();
      }
    }
    if (totalCount == 0) {
      return IterOutcome.NONE;
    }
    if (spillCount == 0) {
      if (builder != null) {
        builder.clear();
        builder.close();
      }
      builder = new SortRecordBatchBuilder(oAllocator);
      for (BatchGroup group : batchGroups) {
        RecordBatchData rbd = new RecordBatchData(group.getContainer(), oAllocator);
        rbd.setSv2(group.getSv2());
        builder.add(rbd);
      }
      builder.build(context, container);
      sv4 = builder.getSv4();
      mSorter = createNewMSorter();
      mSorter.setup(context, oAllocator, getSelectionVector4(), this.container);
      // For memory-leak testing, inject an exception after mSorter finishes setup.
      injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SETUP);
      mSorter.sort(this.container);
      // The sort may have exited prematurely because shouldContinue() returned false.
      if (!context.shouldContinue()) {
        return IterOutcome.STOP;
      }
      // For memory-leak testing, inject an exception after mSorter finishes sorting.
      injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SORT);
      sv4 = mSorter.getSV4();
      container.buildSchema(SelectionVectorMode.FOUR_BYTE);
    } else {
      // Some batches were spilled.
      final BatchGroup merged = mergeAndSpill(batchGroups);
      if (merged != null) {
        spilledBatchGroups.add(merged);
      }
      batchGroups.addAll(spilledBatchGroups);
      // No need to clean up spilledBatchGroups; all its batches are in batchGroups now.
      spilledBatchGroups = null;
      logger.warn("Starting to merge. {} batch groups. Current allocated memory: {}", batchGroups.size(), oAllocator.getAllocatedMemory());
      VectorContainer hyperBatch = constructHyperBatch(batchGroups);
      createCopier(hyperBatch, batchGroups, container, false);
      int estimatedRecordSize = 0;
      for (VectorWrapper<?> w : batchGroups.get(0)) {
        try {
          estimatedRecordSize += TypeHelper.getSize(w.getField().getType());
        } catch (UnsupportedOperationException e) {
          estimatedRecordSize += 50;
        }
      }
      targetRecordCount = Math.min(MAX_BATCH_SIZE, Math.max(1, COPIER_BATCH_MEM_LIMIT / estimatedRecordSize));
      int count = copier.next(targetRecordCount);
      container.buildSchema(SelectionVectorMode.NONE);
      container.setRecordCount(count);
    }
    return IterOutcome.OK_NEW_SCHEMA;
  } catch (SchemaChangeException ex) {
    kill(false);
    context.fail(UserException.unsupportedError(ex).message("Sort doesn't currently support sorts with changing schemas").build(logger));
    return IterOutcome.STOP;
  } catch (ClassTransformationException | IOException ex) {
    kill(false);
    context.fail(ex);
    return IterOutcome.STOP;
  } catch (UnsupportedOperationException e) {
    throw new RuntimeException(e);
  }
}
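The four spill triggers buried in the middle of innerNext are easier to read factored out. A hedged restatement as a standalone predicate (illustrative only; shouldSpill is not a method in the Drill source, and the parameters mirror the fields and constants used above):

static boolean shouldSpill(int spillCount, boolean hasMemoryForInMemorySort,
    int totalBatches, long allocatedMemory, long memoryLimit,
    int batchGroupCount, int batchesSinceLastSpill,
    int spillThreshold, int spillBatchGroupSize) {
  return (spillCount == 0 && !hasMemoryForInMemorySort)          // in-memory MSorter may not fit if this is the last batch
      || (spillCount == 0 && totalBatches > Character.MAX_VALUE) // an SV4 can address at most 65535 batches
      || (allocatedMemory > 0.95 * memoryLimit)                  // above 95% of the operator's memory limit
      || (batchGroupCount > spillThreshold
          && batchesSinceLastSpill >= spillBatchGroupSize);      // spill cadence reached
}

Once any trigger fires, the operator merges and spills the in-memory batch groups, and it also periodically re-merges the spilled groups (the firstSpillBatchCount / 2 check) so the number of spill files stays bounded.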
Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class ExternalSortBatch, method generateComparisons:
private void generateComparisons(ClassGenerator<?> g, VectorAccessible batch) throws SchemaChangeException {
  g.setMappingSet(MAIN_MAPPING);
  for (Ordering od : popConfig.getOrderings()) {
    // First, we rewrite the evaluation stack for each side of the comparison.
    ErrorCollector collector = new ErrorCollectorImpl();
    final LogicalExpression expr = ExpressionTreeMaterializer.materialize(od.getExpr(), batch, collector, context.getFunctionRegistry());
    if (collector.hasErrors()) {
      throw new SchemaChangeException("Failure while materializing expression. " + collector.toErrorString());
    }
    g.setMappingSet(LEFT_MAPPING);
    HoldingContainer left = g.addExpr(expr, ClassGenerator.BlkCreateMode.FALSE);
    g.setMappingSet(RIGHT_MAPPING);
    HoldingContainer right = g.addExpr(expr, ClassGenerator.BlkCreateMode.FALSE);
    g.setMappingSet(MAIN_MAPPING);
    // Next, we wrap the two comparison sides and add the expression block for the comparison.
    LogicalExpression fh = FunctionGenerationHelper.getOrderingComparator(od.nullsSortHigh(), left, right, context.getFunctionRegistry());
    HoldingContainer out = g.addExpr(fh, ClassGenerator.BlkCreateMode.FALSE);
    JConditional jc = g.getEvalBlock()._if(out.getValue().ne(JExpr.lit(0)));
    if (od.getDirection() == Direction.ASCENDING) {
      jc._then()._return(out.getValue());
    } else {
      jc._then()._return(out.getValue().minus());
    }
    g.rotateBlock();
  }
  g.rotateBlock();
  g.getEvalBlock()._return(JExpr.lit(0));
}
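For intuition, the direction and nulls-ordering decisions the generator encodes (od.getDirection(), od.nullsSortHigh()) map naturally onto JDK comparators. This is an analogy only; Drill generates schema-specialized code rather than using java.util.Comparator:

import java.util.Comparator;

public class NullsOrderingAnalogy {
  public static void main(String[] args) {
    // nullsSortHigh on an ascending key behaves like Comparator.nullsLast.
    Comparator<String> ascNullsHigh = Comparator.nullsLast(Comparator.<String>naturalOrder());
    // Direction.DESCENDING corresponds to negating the comparison result,
    // like out.getValue().minus() in the generated code.
    Comparator<String> descNullsLow = ascNullsHigh.reversed();
    System.out.println(ascNullsHigh.compare("a", null) < 0); // true: "a" sorts before null
    System.out.println(descNullsLow.compare("a", null) > 0); // true: null sorts first when reversed
  }
}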