Example 96 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by apache.

The class TestDrillbitResilience, method assertExceptionMessage.

/**
 * Check that the injected exception is what we were expecting.
 *
 * @param throwable      the throwable that was caught (by the test)
 * @param exceptionClass the expected exception class
 * @param desc           the expected exception site description
 */
private void assertExceptionMessage(final Throwable throwable, final Class<? extends Throwable> exceptionClass, final String desc) {
    assertTrue(throwable instanceof UserException, "Throwable was not of UserException type");
    final ExceptionWrapper cause = ((UserException) throwable).getOrCreatePBError(false).getException();
    assertEquals(exceptionClass.getName(), cause.getExceptionClass(), "Exception class names should match");
    assertEquals(desc, cause.getMessage(), "Exception sites should match.");
}
Also used : UserException(org.apache.drill.common.exceptions.UserException) ExceptionWrapper(org.apache.drill.exec.proto.UserBitShared.ExceptionWrapper)
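
A minimal sketch (not from the Drill sources) of how a test can produce a UserException for the helper above to inspect; the injected IllegalStateException stands in for a controlled failure. The builder methods used here (unsupportedError, message, build) and the getOrCreatePBError accessor all appear in the examples on this page.

import org.apache.drill.common.exceptions.UserException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class UserExceptionAssertionSketch {
    private static final Logger logger = LoggerFactory.getLogger(UserExceptionAssertionSketch.class);

    public static void main(String[] args) {
        try {
            // Simulate an injected failure wrapped in a UserException.
            throw UserException.unsupportedError(new IllegalStateException("injected"))
                .message("Simulated injection site")
                .build(logger);
        } catch (UserException e) {
            // The protobuf error carries the wrapped exception's class name and message,
            // which is what assertExceptionMessage() compares against.
            System.out.println(e.getOrCreatePBError(false).getException().getExceptionClass());
        }
    }
}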

Example 97 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by apache.

The class MaprDBJsonRecordReader, method next.

@Override
public int next() {
    Stopwatch watch = Stopwatch.createUnstarted();
    watch.start();
    vectorWriter.allocate();
    vectorWriter.reset();
    int recordCount = 0;
    reader = null;
    document = null;
    int maxRecordsForThisBatch = this.maxRecordsToRead >= 0 ? Math.min(BaseValueVector.INITIAL_VALUE_ALLOCATION, this.maxRecordsToRead) : BaseValueVector.INITIAL_VALUE_ALLOCATION;
    try {
        // If the last document caused a SchemaChange create a new output schema for this scan batch
        if (schemaState == SchemaState.SCHEMA_CHANGE && !ignoreSchemaChange) {
            // Clear the ScanBatch vector container writer/mutator in order to be able to generate the new schema
            vectorWriterMutator.clear();
            vectorWriter = new VectorContainerWriter(vectorWriterMutator, unionEnabled);
            logger.debug("Encountered schema change earlier use new writer {}", vectorWriter.toString());
            document = lastDocument;
            setupWriter();
            if (recordCount < maxRecordsForThisBatch) {
                vectorWriter.setPosition(recordCount);
                if (document != null) {
                    reader = (DBDocumentReaderBase) document.asReader();
                    documentWriter.writeDBDocument(vectorWriter, reader);
                    recordCount++;
                }
            }
        }
    } catch (SchemaChangeException e) {
        String err_row = reader.getId().asJsonString();
        if (ignoreSchemaChange) {
            logger.warn("{}. Dropping row '{}' from result.", e.getMessage(), err_row);
            logger.debug("Stack trace:", e);
        } else {
            /* We should not encounter a SchemaChangeException here since this is the first document for this
             * new schema. Something is very wrong - cannot handle any further!
             */
            throw dataReadError(logger, e, "SchemaChangeException for row '%s'.", err_row);
        }
    }
    schemaState = SchemaState.SCHEMA_INIT;
    while (recordCount < maxRecordsForThisBatch) {
        vectorWriter.setPosition(recordCount);
        try {
            document = nextDocument();
            if (document == null) {
                // no more documents for this reader
                break;
            } else {
                documentWriter.writeDBDocument(vectorWriter, (DBDocumentReaderBase) document.asReader());
            }
            recordCount++;
        } catch (UserException e) {
            throw UserException.unsupportedError(e).addContext(String.format("Table: %s, document id: '%s'", table.getPath(), document.asReader() == null ? null : IdCodec.asString(((DBDocumentReaderBase) document.asReader()).getId()))).build(logger);
        } catch (SchemaChangeException e) {
            String err_row = ((DBDocumentReaderBase) document.asReader()).getId().asJsonString();
            if (ignoreSchemaChange) {
                logger.warn("{}. Dropping row '{}' from result.", e.getMessage(), err_row);
                logger.debug("Stack trace:", e);
            } else {
                /* Save the current document reader for the next iteration. The recordCount is not updated, so we
                 * would start from this reader on the next next() call.
                 */
                lastDocument = document;
                schemaState = SchemaState.SCHEMA_CHANGE;
                break;
            }
        }
    }
    if (nonExistentColumnsProjection && recordCount > 0) {
        if (schema == null || schema.isEmpty()) {
            JsonReaderUtils.ensureAtLeastOneField(vectorWriter, getColumns(), allTextMode, Collections.emptyList());
        } else {
            JsonReaderUtils.writeColumnsUsingSchema(vectorWriter, getColumns(), schema, allTextMode);
        }
    }
    vectorWriter.setValueCount(recordCount);
    if (maxRecordsToRead > 0) {
        maxRecordsToRead -= recordCount;
    }
    logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), recordCount);
    return recordCount;
}
Also used : SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) VectorContainerWriter(org.apache.drill.exec.vector.complex.impl.VectorContainerWriter) DBDocumentReaderBase(com.mapr.db.ojai.DBDocumentReaderBase) Stopwatch(org.apache.drill.shaded.guava.com.google.common.base.Stopwatch) UserException(org.apache.drill.common.exceptions.UserException)
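
An illustrative, self-contained sketch (all names hypothetical) of the save-and-replay pattern next() uses above: when a record triggers a schema change mid-batch, the reader ends the batch, remembers the offending record, and replays it as the first record of the next batch against a rebuilt writer.

import java.util.Iterator;
import java.util.List;

public class SaveAndReplaySketch {
    private String pendingRecord;        // analogous to lastDocument
    private boolean schemaChangePending; // analogous to SchemaState.SCHEMA_CHANGE

    /** Fills the batch and returns the number of records written. */
    public int nextBatch(Iterator<String> source, List<String> batch, int maxRecords) {
        if (schemaChangePending) {
            // Replay the record that ended the previous batch; a real reader
            // would also rebuild its vector writer here before writing.
            batch.add(pendingRecord);
            schemaChangePending = false;
        }
        while (batch.size() < maxRecords && source.hasNext()) {
            String record = source.next();
            if (record.startsWith("!")) { // stand-in for a SchemaChangeException
                pendingRecord = record;
                schemaChangePending = true;
                break;                    // end this batch at the old schema
            }
            batch.add(record);
        }
        return batch.size();
    }
}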

Example 98 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by apache.

The class RestrictedJsonRecordReader, method next.

@Override
public int next() {
    Stopwatch watch = Stopwatch.createUnstarted();
    watch.start();
    RestrictedMapRDBSubScanSpec rss = ((RestrictedMapRDBSubScanSpec) this.subScanSpec);
    vectorWriter.allocate();
    vectorWriter.reset();
    if (!rss.readyToGetRowKey()) {
        // when we are in the build schema phase
        if (rss.isBuildSchemaPhase()) {
            readToInitSchema();
        }
        return 0;
    }
    Table table = super.formatPlugin.getJsonTableCache().getTable(subScanSpec.getTableName(), subScanSpec.getUserName());
    final MultiGet multiGet = new MultiGet((BaseJsonTable) table, condition, false, projections);
    int recordCount = 0;
    DBDocumentReaderBase reader = null;
    int maxRecordsForThisBatch = this.maxRecordsToRead > 0
        ? Math.min(rss.getMaxRowKeysToBeRead(), this.maxRecordsToRead)
        : this.maxRecordsToRead == -1 ? rss.getMaxRowKeysToBeRead() : 0;
    Stopwatch timer = Stopwatch.createUnstarted();
    while (recordCount < maxRecordsForThisBatch) {
        ByteBuffer[] rowKeyIds = rss.getRowKeyIdsToRead(batchSize);
        if (rowKeyIds == null) {
            break;
        }
        try {
            timer.start();
            final List<Document> docList = multiGet.doGet(rowKeyIds);
            int index = 0;
            long docsToRead = docList.size();
            // If a limit was pushed down, stop once we have `limit` rows from the multi-get, i.e. maxRecordsForThisBatch
            if (this.maxRecordsToRead != -1) {
                docsToRead = Math.min(docsToRead, maxRecordsForThisBatch);
            }
            while (index < docsToRead) {
                vectorWriter.setPosition(recordCount);
                reader = (DBDocumentReaderBase) docList.get(index).asReader();
                documentWriter.writeDBDocument(vectorWriter, reader);
                recordCount++;
                index++;
            }
            timer.stop();
        } catch (UserException e) {
            throw UserException.unsupportedError(e)
                .addContext(String.format("Table: %s, document id: '%s'", getTable().getPath(),
                    reader == null ? null : IdCodec.asString(reader.getId())))
                .build(logger);
        } catch (SchemaChangeException e) {
            if (getIgnoreSchemaChange()) {
                logger.warn("{}. Dropping the row from result.", e.getMessage());
                logger.debug("Stack trace:", e);
            } else {
                throw dataReadError(logger, e);
            }
        }
    }
    vectorWriter.setValueCount(recordCount);
    if (maxRecordsToRead > 0) {
        if (maxRecordsToRead - recordCount >= 0) {
            maxRecordsToRead -= recordCount;
        } else {
            maxRecordsToRead = 0;
        }
    }
    logger.debug("Took {} ms to get {} records, getrowkey {}", watch.elapsed(TimeUnit.MILLISECONDS), recordCount, timer.elapsed(TimeUnit.MILLISECONDS));
    return recordCount;
}
Also used : BaseJsonTable(com.mapr.db.impl.BaseJsonTable) Table(com.mapr.db.Table) RestrictedMapRDBSubScanSpec(org.apache.drill.exec.store.mapr.db.RestrictedMapRDBSubScanSpec) Stopwatch(org.apache.drill.shaded.guava.com.google.common.base.Stopwatch) Document(org.ojai.Document) ByteBuffer(java.nio.ByteBuffer) MultiGet(com.mapr.db.impl.MultiGet) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) DBDocumentReaderBase(com.mapr.db.ojai.DBDocumentReaderBase) UserException(org.apache.drill.common.exceptions.UserException)
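
A minimal sketch of the per-batch limit arithmetic in the method above, where maxRecordsToRead == -1 means "no limit pushed down". The parameter names mirror the reader's fields, but the helper method itself is hypothetical.

public class BatchLimitSketch {
    static int maxRecordsForBatch(int maxRecordsToRead, int rowKeysAvailable) {
        if (maxRecordsToRead > 0) {
            return Math.min(rowKeysAvailable, maxRecordsToRead); // honor the pushed-down limit
        }
        return maxRecordsToRead == -1 ? rowKeysAvailable : 0;    // -1: unlimited; 0: limit satisfied
    }

    public static void main(String[] args) {
        System.out.println(maxRecordsForBatch(10, 4096)); // 10: the limit caps the batch
        System.out.println(maxRecordsForBatch(-1, 4096)); // 4096: no limit pushed down
        System.out.println(maxRecordsForBatch(0, 4096));  // 0: nothing left to read
    }
}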

Example 99 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by apache.

The class RestrictedJsonRecordReader, method readToInitSchema.

public void readToInitSchema() {
    DBDocumentReaderBase reader = null;
    vectorWriter.setPosition(0);
    try (DocumentStream dstream = table.find()) {
        reader = (DBDocumentReaderBase) dstream.iterator().next().asReader();
        documentWriter.writeDBDocument(vectorWriter, reader);
    } catch (UserException e) {
        throw UserException.unsupportedError(e)
            .addContext(String.format("Table: %s, document id: '%s'", getTable().getPath(),
                reader == null ? null : IdCodec.asString(reader.getId())))
            .build(logger);
    } catch (SchemaChangeException e) {
        if (getIgnoreSchemaChange()) {
            logger.warn("{}. Dropping the row from result.", e.getMessage());
            logger.debug("Stack trace:", e);
        } else {
            throw dataReadError(logger, e);
        }
    } finally {
        vectorWriter.setPosition(0);
    }
}
Also used : SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) DBDocumentReaderBase(com.mapr.db.ojai.DBDocumentReaderBase) UserException(org.apache.drill.common.exceptions.UserException) DocumentStream(org.ojai.DocumentStream)
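
A short sketch of the wrap-with-context idiom shared by the three readers above: a caught UserException is rewrapped so the table path and document id travel with it. The wrap helper and its argument values are stand-ins; the builder calls are the ones shown in the examples.

import org.apache.drill.common.exceptions.UserException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class WrapWithContextSketch {
    private static final Logger logger = LoggerFactory.getLogger(WrapWithContextSketch.class);

    static UserException wrap(UserException e, String tablePath, String documentId) {
        // build() logs the rewrapped exception and returns it for the caller to throw.
        return UserException.unsupportedError(e)
            .addContext(String.format("Table: %s, document id: '%s'", tablePath, documentId))
            .build(logger);
    }
}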

Example 100 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by apache.

The class RestQueryRunner, method submitQuery.

private QueryResult submitQuery() {
    webUserConnection.setAutoLimitRowCount(maxRows);
    startQuery(QueryType.valueOf(query.getQueryType()), query.getQuery(), webUserConnection);
    // Heap usage threshold/trigger to provide resiliency on web server for queries submitted via HTTP
    double memoryFailureThreshold = workManager.getContext().getConfig().getDouble(ExecConstants.HTTP_MEMORY_HEAP_FAILURE_THRESHOLD);
    boolean isComplete = false;
    boolean nearlyOutOfHeapSpace = false;
    float usagePercent = getHeapUsage();
    // Wait until the query execution is complete or there is error submitting the query
    logger.debug("Wait until the query execution is complete or there is error submitting the query");
    do {
        try {
            // periodically timeout 1 sec to check heap
            isComplete = webUserConnection.await(TimeUnit.SECONDS.toMillis(1));
        } catch (InterruptedException e) {
            // Swallow the interrupt; the loop re-checks completion and heap usage.
        }
        usagePercent = getHeapUsage();
        if (memoryFailureThreshold > 0 && usagePercent > memoryFailureThreshold) {
            nearlyOutOfHeapSpace = true;
        }
    } while (!isComplete && !nearlyOutOfHeapSpace);
    // Fail if nearly out of heap space
    if (nearlyOutOfHeapSpace) {
        UserException almostOutOfHeapException = UserException.resourceError()
            .message("There is not enough heap memory to run this query using the web interface. ")
            .addContext("Please try a query with fewer columns or with a filter or limit condition to limit the data returned. ")
            .addContext("You can also try an ODBC/JDBC client. ")
            .build(logger);
        // Add event
        workManager.getBee().getForemanForQueryId(queryId).addToEventQueue(QueryState.FAILED, almostOutOfHeapException);
        // Fail the query with the near-out-of-heap exception
        throw almostOutOfHeapException;
    }
    logger.trace("Query {} is completed ", queryId);
    if (webUserConnection.getError() != null) {
        throw new UserRemoteException(webUserConnection.getError());
    }
    return new QueryResult(queryId, webUserConnection, webUserConnection.results);
}
Also used : UserRemoteException(org.apache.drill.common.exceptions.UserRemoteException) UserException(org.apache.drill.common.exceptions.UserException)
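
A plausible sketch of what a getHeapUsage()-style helper could look like; the Drill implementation is not shown on this page, so this is an assumption built on the standard MemoryMXBean. It returns current heap use as a fraction of the maximum, matching the threshold comparison in the loop above.

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

public class HeapUsageSketch {
    // Hypothetical stand-in for getHeapUsage() in RestQueryRunner.
    static float heapUsageFraction() {
        MemoryUsage heap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        // getMax() is defined for the heap on standard JVMs; guard anyway.
        return heap.getMax() > 0 ? (float) heap.getUsed() / heap.getMax() : 0f;
    }

    public static void main(String[] args) {
        double memoryFailureThreshold = 0.85; // analogous to HTTP_MEMORY_HEAP_FAILURE_THRESHOLD
        if (heapUsageFraction() > memoryFailureThreshold) {
            System.out.println("nearly out of heap space");
        }
    }
}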

Aggregations

UserException (org.apache.drill.common.exceptions.UserException): 102
Test (org.junit.Test): 76
EvfTest (org.apache.drill.categories.EvfTest): 39
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata): 30
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 30
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder): 28
RowBatchReader (org.apache.drill.exec.physical.impl.scan.RowBatchReader): 12
ManagedReader (org.apache.drill.exec.physical.impl.scan.v3.ManagedReader): 12
ScanLifecycleBuilder (org.apache.drill.exec.physical.impl.scan.v3.ScanLifecycleBuilder): 11
SchemaNegotiator (org.apache.drill.exec.physical.impl.scan.v3.SchemaNegotiator): 11
ScanOperatorExec (org.apache.drill.exec.physical.impl.scan.ScanOperatorExec): 9
ScanFixture (org.apache.drill.exec.physical.impl.scan.ScanTestUtils.ScanFixture): 9
SchemaPath (org.apache.drill.common.expression.SchemaPath): 8
ResultSetOptions (org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl.ResultSetOptions): 8
RowSet (org.apache.drill.exec.physical.rowSet.RowSet): 8
MockRecordBatch (org.apache.drill.exec.physical.impl.MockRecordBatch): 6
ArrayList (java.util.ArrayList): 5
OperatorTest (org.apache.drill.categories.OperatorTest): 5
DrillException (org.apache.drill.common.exceptions.DrillException): 5
BaseTest (org.apache.drill.test.BaseTest): 5