
Example 6 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by apache.

the class ExternalSortBatch method doMergeAndSpill.

private BatchGroup.SpilledRun doMergeAndSpill(LinkedList<? extends BatchGroup> batchGroups, int spillCount) {
    List<BatchGroup> batchesToSpill = Lists.newArrayList();
    spillCount = Math.min(batchGroups.size(), spillCount);
    assert spillCount > 0 : "Spill count to mergeAndSpill must not be zero";
    for (int i = 0; i < spillCount; i++) {
        batchesToSpill.add(batchGroups.pollFirst());
    }
    // Merge the selected set of batches and write them to the
    // spill file. After each write, we release the memory associated
    // with the just-written batch.
    String outputFile = spillSet.getNextSpillFile();
    stats.setLongStat(Metric.SPILL_COUNT, spillSet.getFileCount());
    BatchGroup.SpilledRun newGroup = null;
    try (AutoCloseable ignored = AutoCloseables.all(batchesToSpill);
        CopierHolder.BatchMerger merger = copierHolder.startMerge(schema, batchesToSpill, spillBatchRowCount)) {
        logger.trace("Spilling {} of {} batches, spill batch size = {} rows, memory = {}, write to {}", batchesToSpill.size(), bufferedBatches.size() + batchesToSpill.size(), spillBatchRowCount, allocator.getAllocatedMemory(), outputFile);
        newGroup = new BatchGroup.SpilledRun(spillSet, outputFile, oContext);
        while (merger.next()) {
            // Add a new batch of records (given by merger.getOutput()) to the spill
            // file.
            //
            // note that addBatch also clears the merger's output container
            newGroup.addBatch(merger.getOutput());
        }
        injector.injectChecked(context.getExecutionControls(), INTERRUPTION_WHILE_SPILLING, IOException.class);
        newGroup.closeOutputStream();
        logger.trace("Spilled {} batches, {} records; memory = {} to {}", merger.getBatchCount(), merger.getRecordCount(), allocator.getAllocatedMemory(), outputFile);
        newGroup.setBatchSize(merger.getEstBatchSize());
        return newGroup;
    } catch (Throwable e) {
        // we only need to clean up newGroup if spill failed
        try {
            if (newGroup != null) {
                AutoCloseables.close(e, newGroup);
            }
        } catch (Throwable t) {
        /* close() may hit the same IO issue; just ignore */
        }
        try {
            throw e;
        } catch (UserException ue) {
            throw ue;
        } catch (Throwable ex) {
            throw UserException.resourceError(ex).message("External Sort encountered an error while spilling to disk").build(logger);
        }
    }
}
Also used : SpilledRun(org.apache.drill.exec.physical.impl.xsort.managed.BatchGroup.SpilledRun) UserException(org.apache.drill.common.exceptions.UserException)
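
The catch block above illustrates a common Drill idiom: an existing UserException is rethrown untouched, while any other failure is wrapped in a categorized UserException so the client receives a classified, logged error instead of a raw stack trace. Below is a minimal standalone sketch of the same idiom; the class name, the spillToDisk() method, and the logger are hypothetical stand-ins, not part of the Drill source.

import java.io.IOException;

import org.apache.drill.common.exceptions.UserException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class SpillErrorHandlingSketch {
    private static final Logger logger = LoggerFactory.getLogger(SpillErrorHandlingSketch.class);

    void spillOrThrow() {
        try {
            // Hypothetical I/O-heavy operation standing in for the spill itself.
            spillToDisk();
        } catch (UserException ue) {
            // Already a user-facing error; propagate unchanged.
            throw ue;
        } catch (Exception ex) {
            // Classify everything else as a resource error, attach a message,
            // and log it as part of building the exception.
            throw UserException.resourceError(ex)
                    .message("External Sort encountered an error while spilling to disk")
                    .build(logger);
        }
    }

    private void spillToDisk() throws IOException {
        // placeholder for the actual merge-and-write work
    }
}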

Example 7 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by apache.

the class MaprDBJsonRecordReader method next.

@Override
public int next() {
    Stopwatch watch = Stopwatch.createUnstarted();
    watch.start();
    vectorWriter.allocate();
    vectorWriter.reset();
    int recordCount = 0;
    DBDocumentReaderBase reader = null;
    while (recordCount < BaseValueVector.INITIAL_VALUE_ALLOCATION) {
        vectorWriter.setPosition(recordCount);
        try {
            reader = nextDocumentReader();
            if (reader == null) {
                // no more documents for this scanner
                break;
            } else if (isSkipQuery()) {
                vectorWriter.rootAsMap().bit("count").writeBit(1);
            } else {
                MapOrListWriterImpl writer = new MapOrListWriterImpl(vectorWriter.rootAsMap());
                if (idOnly) {
                    writeId(writer, reader.getId());
                } else {
                    if (reader.next() != EventType.START_MAP) {
                        throw dataReadError("The document did not start with START_MAP!");
                    }
                    writeToListOrMap(writer, reader);
                }
            }
            recordCount++;
        } catch (UserException e) {
            throw UserException.unsupportedError(e).addContext(String.format("Table: %s, document id: '%s'", table.getPath(), reader == null ? null : IdCodec.asString(reader.getId()))).build(logger);
        } catch (SchemaChangeException e) {
            if (ignoreSchemaChange) {
                logger.warn("{}. Dropping the row from result.", e.getMessage());
                logger.debug("Stack trace:", e);
            } else {
                throw dataReadError(e);
            }
        }
    }
    vectorWriter.setValueCount(recordCount);
    logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), recordCount);
    return recordCount;
}
Also used : SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) DBDocumentReaderBase(com.mapr.db.ojai.DBDocumentReaderBase) MapOrListWriterImpl(org.apache.drill.exec.vector.complex.impl.MapOrListWriterImpl) Stopwatch(com.google.common.base.Stopwatch) UserException(org.apache.drill.common.exceptions.UserException)
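
The loop above throws errors through a dataReadError(...) helper that is not shown in this snippet. The sketch below is one plausible shape for such a helper, assuming the varargs message(String, Object...) builder method seen in the other examples and the no-argument UserException.dataReadError() factory overload; the actual helper in MaprDBJsonRecordReader may differ.

// Hypothetical helpers; the real signatures and message text may differ.
protected UserException dataReadError(String format, Object... args) {
    return UserException.dataReadError()
            .message(format, args)
            .build(logger);
}

protected UserException dataReadError(Throwable t) {
    return UserException.dataReadError(t)
            .message("Failure while reading document")  // assumed message text
            .build(logger);
}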

Example 8 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by apache.

the class JSONRecordReader method handleAndRaise.

protected void handleAndRaise(String suffix, Exception e) throws UserException {
    String message = e.getMessage();
    int columnNr = -1;
    if (e instanceof JsonParseException) {
        final JsonParseException ex = (JsonParseException) e;
        message = ex.getOriginalMessage();
        columnNr = ex.getLocation().getColumnNr();
    }
    UserException.Builder exceptionBuilder = UserException.dataReadError(e).message("%s - %s", suffix, message);
    if (columnNr > 0) {
        exceptionBuilder.pushContext("Column ", columnNr);
    }
    if (hadoopPath != null) {
        exceptionBuilder.pushContext("Record ", currentRecordNumberInFile()).pushContext("File ", hadoopPath.toUri().getPath());
    }
    throw exceptionBuilder.build(logger);
}
Also used : UserException(org.apache.drill.common.exceptions.UserException) JsonParseException(com.fasterxml.jackson.core.JsonParseException)
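
handleAndRaise centralizes error reporting for the JSON reader: the failure is classified as a data-read error, the Jackson message and column number are extracted when available, and the record number and file path are pushed as context before the exception is thrown. A hypothetical call site is sketched below; the jsonReader field and its write(...) call are assumed for illustration and are not taken from this snippet.

// Hypothetical per-record read step inside the reader's next() loop.
try {
    jsonReader.write(writer);
} catch (JsonParseException e) {
    // handleAndRaise extracts the original message and column number.
    handleAndRaise("Error parsing JSON", e);
} catch (IOException e) {
    handleAndRaise("Error reading JSON", e);
}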

Example 9 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by apache.

the class DrillCursor method nextRowInternally.

/**
   * ...
   * <p>
   *   Is to be called (once) from {@link #loadInitialSchema} for
   *   {@link DrillResultSetImpl#execute()}, and then (repeatedly) from
   *   {@link #next()} for {@link AvaticaResultSet#next()}.
   * </p>
   *
   * @return  whether cursor is positioned at a row (false when after end of
   *   results)
   */
private boolean nextRowInternally() throws SQLException {
    if (currentRecordNumber + 1 < currentBatchHolder.getRecordCount()) {
        // Have next row in current batch--just advance index and report "at a row."
        currentRecordNumber++;
        return true;
    } else {
        try {
            QueryDataBatch qrb = resultsListener.getNext();
            // Skip any spurious empty batches (e.g., a batch carrying only
            // the (initial) schema but no rows).
            if (afterFirstBatch) {
                while (qrb != null && (qrb.getHeader().getRowCount() == 0 || qrb.getData() == null)) {
                    // Empty message--dispose of and try to get another.
                    logger.warn("Spurious batch read: {}", qrb);
                    qrb.release();
                    qrb = resultsListener.getNext();
                }
            }
            afterFirstBatch = true;
            if (qrb == null) {
                // End of batches--clean up, set state to done, report after last row.
                // (We loaded into the batch holder, so we are responsible for clearing it.)
                currentBatchHolder.clear();
                afterLastRow = true;
                return false;
            } else {
                // Got next (or first) batch--reset record offset to beginning;
                // assimilate schema if changed; set up return value for first call
                // to next().
                currentRecordNumber = 0;
                final boolean schemaChanged;
                try {
                    schemaChanged = currentBatchHolder.load(qrb.getHeader().getDef(), qrb.getData());
                } finally {
                    qrb.release();
                }
                schema = currentBatchHolder.getSchema();
                if (schemaChanged) {
                    updateColumns();
                }
                if (returnTrueForNextCallToNext && currentBatchHolder.getRecordCount() == 0) {
                    returnTrueForNextCallToNext = false;
                }
                return true;
            }
        } catch (UserException e) {
            // Server-side error (e.g., a SQL syntax error); surface its message as a SQLException.
            throw new SQLException(e.getMessage(), e);
        } catch (InterruptedException e) {
            // Drill itself is not expected to interrupt here, but a JDBC client certainly could.
            throw new SQLException("Interrupted.", e);
        } catch (SchemaChangeException e) {
            // RecordBatchLoader.load(...) is not expected to throw SchemaChangeException here; report it if it does.
            throw new SQLException("Unexpected SchemaChangeException from RecordBatchLoader.load(...)");
        } catch (RuntimeException e) {
            throw new SQLException("Unexpected RuntimeException: " + e.toString(), e);
        }
    }
}
Also used : SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) QueryDataBatch(org.apache.drill.exec.rpc.user.QueryDataBatch) SQLException(java.sql.SQLException) UserException(org.apache.drill.common.exceptions.UserException)
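
From the JDBC client's perspective the UserException is never seen directly; nextRowInternally wraps it in a SQLException with the original exception as the cause. The sketch below shows how a caller might recover the server-side message; the connection handling and the query text are assumptions for illustration.

// Hypothetical JDBC caller; the UserException surfaces as the SQLException's cause.
static void consumeQuery(java.sql.Connection connection) throws java.sql.SQLException {
    try (java.sql.Statement statement = connection.createStatement();
         java.sql.ResultSet rs = statement.executeQuery("SELECT * FROM cp.`employee.json`")) {
        while (rs.next()) {
            // consume rows; next() ultimately drives DrillCursor.nextRowInternally()
        }
    } catch (java.sql.SQLException e) {
        if (e.getCause() instanceof UserException) {
            // The server-side error text and context are available here.
            UserException ue = (UserException) e.getCause();
            System.err.println("Query failed: " + ue.getMessage());
        }
        throw e;
    }
}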

Example 10 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by apache.

the class TestDrillbitResilience method assertExceptionMessage.

/**
   * Check that the injected exception is what we were expecting.
   *
   * @param throwable      the throwable that was caught (by the test)
   * @param exceptionClass the expected exception class
   * @param desc           the expected exception site description
   */
private static void assertExceptionMessage(final Throwable throwable, final Class<? extends Throwable> exceptionClass, final String desc) {
    assertTrue("Throwable was not of UserException type.", throwable instanceof UserException);
    final ExceptionWrapper cause = ((UserException) throwable).getOrCreatePBError(false).getException();
    assertEquals("Exception class names should match.", exceptionClass.getName(), cause.getExceptionClass());
    assertEquals("Exception sites should match.", desc, cause.getMessage());
}
Also used : UserException(org.apache.drill.common.exceptions.UserException) ExceptionWrapper(org.apache.drill.exec.proto.UserBitShared.ExceptionWrapper)

Aggregations

UserException (org.apache.drill.common.exceptions.UserException): 11 usages
Stopwatch (com.google.common.base.Stopwatch): 3 usages
IOException (java.io.IOException): 2 usages
DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException): 2 usages
ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException): 2 usages
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException): 2 usages
Test (org.junit.Test): 2 usages
JsonParseException (com.fasterxml.jackson.core.JsonParseException): 1 usage
DBDocumentReaderBase (com.mapr.db.ojai.DBDocumentReaderBase): 1 usage
DrillBuf (io.netty.buffer.DrillBuf): 1 usage
SQLException (java.sql.SQLException): 1 usage
ArrayList (java.util.ArrayList): 1 usage
LinkedList (java.util.LinkedList): 1 usage
RelDataType (org.apache.calcite.rel.type.RelDataType): 1 usage
RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory): 1 usage
RexBuilder (org.apache.calcite.rex.RexBuilder): 1 usage
RexFieldCollation (org.apache.calcite.rex.RexFieldCollation): 1 usage
RexNode (org.apache.calcite.rex.RexNode): 1 usage
SqlOperator (org.apache.calcite.sql.SqlOperator): 1 usage
SqlTypeFactoryImpl (org.apache.calcite.sql.type.SqlTypeFactoryImpl): 1 usage