Use of org.apache.drill.common.exceptions.UserException in project drill by apache.
The class ExternalSortBatch, method doMergeAndSpill.
private BatchGroup.SpilledRun doMergeAndSpill(LinkedList<? extends BatchGroup> batchGroups, int spillCount) {
  List<BatchGroup> batchesToSpill = Lists.newArrayList();
  spillCount = Math.min(batchGroups.size(), spillCount);
  assert spillCount > 0 : "Spill count to mergeAndSpill must not be zero";
  for (int i = 0; i < spillCount; i++) {
    batchesToSpill.add(batchGroups.pollFirst());
  }

  // Merge the selected set of matches and write them to the
  // spill file. After each write, we release the memory associated
  // with the just-written batch.
  String outputFile = spillSet.getNextSpillFile();
  stats.setLongStat(Metric.SPILL_COUNT, spillSet.getFileCount());
  BatchGroup.SpilledRun newGroup = null;
  try (AutoCloseable ignored = AutoCloseables.all(batchesToSpill);
       CopierHolder.BatchMerger merger = copierHolder.startMerge(schema, batchesToSpill, spillBatchRowCount)) {
    logger.trace("Spilling {} of {} batches, spill batch size = {} rows, memory = {}, write to {}",
        batchesToSpill.size(), bufferedBatches.size() + batchesToSpill.size(),
        spillBatchRowCount, allocator.getAllocatedMemory(), outputFile);
    newGroup = new BatchGroup.SpilledRun(spillSet, outputFile, oContext);
    while (merger.next()) {
      // Add a new batch of records (given by merger.getOutput()) to the spill
      // file.
      //
      // Note that addBatch also clears the merger's output container.
      newGroup.addBatch(merger.getOutput());
    }
    injector.injectChecked(context.getExecutionControls(), INTERRUPTION_WHILE_SPILLING, IOException.class);
    newGroup.closeOutputStream();
    logger.trace("Spilled {} batches, {} records; memory = {} to {}",
        merger.getBatchCount(), merger.getRecordCount(), allocator.getAllocatedMemory(), outputFile);
    newGroup.setBatchSize(merger.getEstBatchSize());
    return newGroup;
  } catch (Throwable e) {
    // We only need to clean up newGroup if the spill failed.
    try {
      if (newGroup != null) {
        AutoCloseables.close(e, newGroup);
      }
    } catch (Throwable t) {
      // close() may hit the same I/O issue; just ignore it.
    }
    try {
      throw e;
    } catch (UserException ue) {
      throw ue;
    } catch (Throwable ex) {
      throw UserException.resourceError(ex)
          .message("External Sort encountered an error while spilling to disk")
          .build(logger);
    }
  }
}
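For reference, a minimal standalone sketch of the rethrow idiom in the catch block above, with a hypothetical doWork() standing in for the merge-and-spill body: a UserException already carries its categorization and context, so it is passed through untouched, while any other failure is wrapped as a resource error.

import org.apache.drill.common.exceptions.UserException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SpillErrorIdiom {
  private static final Logger logger = LoggerFactory.getLogger(SpillErrorIdiom.class);

  public void spillSafely() {
    try {
      doWork();
    } catch (UserException ue) {
      // Already a categorized Drill error with context; rethrow unchanged.
      throw ue;
    } catch (Throwable ex) {
      // Wrap everything else as a resource error; build(logger) logs the
      // error before returning it.
      throw UserException.resourceError(ex)
          .message("External Sort encountered an error while spilling to disk")
          .build(logger);
    }
  }

  private void doWork() {
    // Hypothetical placeholder for the merge-and-spill work.
  }
}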
Use of org.apache.drill.common.exceptions.UserException in project drill by apache.
The class MaprDBJsonRecordReader, method next.
@Override
public int next() {
  Stopwatch watch = Stopwatch.createUnstarted();
  watch.start();
  vectorWriter.allocate();
  vectorWriter.reset();
  int recordCount = 0;
  DBDocumentReaderBase reader = null;
  while (recordCount < BaseValueVector.INITIAL_VALUE_ALLOCATION) {
    vectorWriter.setPosition(recordCount);
    try {
      reader = nextDocumentReader();
      if (reader == null) {
        // No more documents for this scanner.
        break;
      } else if (isSkipQuery()) {
        vectorWriter.rootAsMap().bit("count").writeBit(1);
      } else {
        MapOrListWriterImpl writer = new MapOrListWriterImpl(vectorWriter.rootAsMap());
        if (idOnly) {
          writeId(writer, reader.getId());
        } else {
          if (reader.next() != EventType.START_MAP) {
            throw dataReadError("The document did not start with START_MAP!");
          }
          writeToListOrMap(writer, reader);
        }
      }
      recordCount++;
    } catch (UserException e) {
      throw UserException.unsupportedError(e)
          .addContext(String.format("Table: %s, document id: '%s'",
              table.getPath(), reader == null ? null : IdCodec.asString(reader.getId())))
          .build(logger);
    } catch (SchemaChangeException e) {
      if (ignoreSchemaChange) {
        logger.warn("{}. Dropping the row from result.", e.getMessage());
        logger.debug("Stack trace:", e);
      } else {
        throw dataReadError(e);
      }
    }
  }
  vectorWriter.setValueCount(recordCount);
  logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), recordCount);
  return recordCount;
}
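The catch clause above shows the addContext idiom: record-level details (table path, document id) are attached to the rethrown error so they appear in the message the client sees. A minimal sketch of just that idiom, with hypothetical tablePath and documentId parameters standing in for the reader state:

import org.apache.drill.common.exceptions.UserException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DocumentReadErrorIdiom {
  private static final Logger logger = LoggerFactory.getLogger(DocumentReadErrorIdiom.class);

  public void rethrowWithContext(UserException e, String tablePath, String documentId) {
    // Attach per-record details before rethrowing, as the reader above does.
    throw UserException.unsupportedError(e)
        .addContext(String.format("Table: %s, document id: '%s'", tablePath, documentId))
        .build(logger);
  }
}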
Use of org.apache.drill.common.exceptions.UserException in project drill by apache.
The class JSONRecordReader, method handleAndRaise.
protected void handleAndRaise(String suffix, Exception e) throws UserException {
  String message = e.getMessage();
  int columnNr = -1;
  if (e instanceof JsonParseException) {
    final JsonParseException ex = (JsonParseException) e;
    message = ex.getOriginalMessage();
    columnNr = ex.getLocation().getColumnNr();
  }
  UserException.Builder exceptionBuilder = UserException.dataReadError(e)
      .message("%s - %s", suffix, message);
  if (columnNr > 0) {
    exceptionBuilder.pushContext("Column ", columnNr);
  }
  if (hadoopPath != null) {
    exceptionBuilder.pushContext("Record ", currentRecordNumberInFile())
        .pushContext("File ", hadoopPath.toUri().getPath());
  }
  throw exceptionBuilder.build(logger);
}
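handleAndRaise composes a UserException.Builder conditionally: pushContext frames are added only when the corresponding detail (column number, file path) is actually known. A minimal sketch of that conditional-builder pattern, with hypothetical filePath and recordNumber parameters in place of the reader's fields:

import org.apache.drill.common.exceptions.UserException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ConditionalContextIdiom {
  private static final Logger logger = LoggerFactory.getLogger(ConditionalContextIdiom.class);

  public void raise(Exception cause, String suffix, String message,
      int columnNr, String filePath, long recordNumber) {
    UserException.Builder builder = UserException.dataReadError(cause)
        .message("%s - %s", suffix, message);
    if (columnNr > 0) {
      // Only add the column frame when the parser reported a location.
      builder.pushContext("Column ", columnNr);
    }
    if (filePath != null) {
      builder.pushContext("Record ", recordNumber)
          .pushContext("File ", filePath);
    }
    throw builder.build(logger);
  }
}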
Use of org.apache.drill.common.exceptions.UserException in project drill by apache.
The class DrillCursor, method nextRowInternally.
/**
 * ...
 * <p>
 * Is to be called (once) from {@link #loadInitialSchema} for
 * {@link DrillResultSetImpl#execute()}, and then (repeatedly) from
 * {@link #next()} for {@link AvaticaResultSet#next()}.
 * </p>
 *
 * @return whether cursor is positioned at a row (false when after end of
 *         results)
 */
private boolean nextRowInternally() throws SQLException {
  if (currentRecordNumber + 1 < currentBatchHolder.getRecordCount()) {
    // Have next row in current batch--just advance index and report "at a row."
    currentRecordNumber++;
    return true;
  } else {
    try {
      QueryDataBatch qrb = resultsListener.getNext();
      // Skip any spurious empty batches (batches with zero rows or null data),
      // other than the first batch, which carries
      // the (initial) schema but no rows.
      if (afterFirstBatch) {
        while (qrb != null && (qrb.getHeader().getRowCount() == 0 || qrb.getData() == null)) {
          // Empty message--dispose of it and try to get another.
          logger.warn("Spurious batch read: {}", qrb);
          qrb.release();
          qrb = resultsListener.getNext();
        }
      }
      afterFirstBatch = true;
      if (qrb == null) {
        // End of batches--clean up, set state to done, report after last row.
        // (We load it so we clear it.)
        currentBatchHolder.clear();
        afterLastRow = true;
        return false;
      } else {
        // Got next (or first) batch--reset record offset to beginning;
        // assimilate schema if changed; set up return value for first call
        // to next().
        currentRecordNumber = 0;
        final boolean schemaChanged;
        try {
          schemaChanged = currentBatchHolder.load(qrb.getHeader().getDef(), qrb.getData());
        } finally {
          qrb.release();
        }
        schema = currentBatchHolder.getSchema();
        if (schemaChanged) {
          updateColumns();
        }
        if (returnTrueForNextCallToNext && currentBatchHolder.getRecordCount() == 0) {
          returnTrueForNextCallToNext = false;
        }
        return true;
      }
    } catch (UserException e) {
      // Normally expected case: a server-side error (e.g., a SQL syntax error).
      // Construct the SQLException with message text from the UserException.
      // (TODO: map the UserException error type to a SQLException subclass once
      // the error type is accessible, of course. :-( )
      throw new SQLException(e.getMessage(), e);
    } catch (InterruptedException e) {
      // Not normally expected from Drill in this area,
      // but JDBC client certainly could.
      throw new SQLException("Interrupted.", e);
    } catch (SchemaChangeException e) {
      // RecordBatchLoader.load(...) is no longer expected to actually
      // throw SchemaChangeException, so check/clean this catch clause.
      throw new SQLException("Unexpected SchemaChangeException from RecordBatchLoader.load(...)");
    } catch (RuntimeException e) {
      throw new SQLException("Unexpected RuntimeException: " + e.toString(), e);
    }
  }
}
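nextRowInternally illustrates the JDBC-boundary convention: every Drill-specific or unexpected exception is translated into java.sql.SQLException, keeping the original as the cause. A minimal sketch of that translation, with a Runnable standing in for the batch-fetching body:

import java.sql.SQLException;
import org.apache.drill.common.exceptions.UserException;

public class JdbcBoundaryIdiom {

  public void translate(Runnable body) throws SQLException {
    try {
      body.run();
    } catch (UserException e) {
      // Server-side errors arrive as UserException; surface the message and
      // keep the UserException as the cause for callers that inspect it.
      throw new SQLException(e.getMessage(), e);
    } catch (RuntimeException e) {
      throw new SQLException("Unexpected RuntimeException: " + e, e);
    }
  }
}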
Use of org.apache.drill.common.exceptions.UserException in project drill by apache.
The class TestDrillbitResilience, method assertExceptionMessage.
/**
* Check that the injected exception is what we were expecting.
*
* @param throwable the throwable that was caught (by the test)
* @param exceptionClass the expected exception class
* @param desc the expected exception site description
*/
private static void assertExceptionMessage(final Throwable throwable,
    final Class<? extends Throwable> exceptionClass, final String desc) {
  assertTrue("Throwable was not of UserException type.", throwable instanceof UserException);
  final ExceptionWrapper cause = ((UserException) throwable).getOrCreatePBError(false).getException();
  assertEquals("Exception class names should match.", exceptionClass.getName(), cause.getExceptionClass());
  assertEquals("Exception sites should match.", desc, cause.getMessage());
}
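A hypothetical usage of the helper above: run a query that is expected to fail with an injected exception, then verify the class name and site description carried inside the UserException. The names runQueryExpectedToFail and EXPECTED_DESC are placeholders, not part of the actual test class.

try {
  runQueryExpectedToFail();    // hypothetical test action that should fail
  fail("Expected the query to fail with the injected exception.");
} catch (Throwable t) {
  assertExceptionMessage(t, RuntimeException.class, EXPECTED_DESC);
}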