Search in sources :

Example 51 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by axbaretto.

In class DrillOptiqTest, the method testUnsupportedRexNode.

/**
 * Verifies that {@link DrillOptiq#toDrill} raises a {@link UserException}
 * whose message contains {@link DrillOptiq#UNSUPPORTED_REX_NODE_ERROR} when
 * handed a RexNode that cannot be converted to an equivalent Drill
 * expression (here a dummy RexOver window expression).
 */
@Test
public void testUnsupportedRexNode() {
    try {
        // Create the data type factory.
        RelDataTypeFactory relFactory = new SqlTypeFactoryImpl(DrillRelDataTypeSystem.DRILL_REL_DATATYPE_SYSTEM);
        // Create the rex builder
        RexBuilder rex = new RexBuilder(relFactory);
        RelDataType anyType = relFactory.createSqlType(SqlTypeName.ANY);
        List<RexNode> emptyList = new LinkedList<>();
        // Renamed from "e": the original name collided with the catch parameter below.
        ImmutableList<RexFieldCollation> orderKeys = ImmutableList.copyOf(new RexFieldCollation[0]);
        // create a dummy RexOver object.
        RexNode window = rex.makeOver(anyType, SqlStdOperatorTable.AVG, emptyList, emptyList, orderKeys, null, null, true, false, false, false);
        DrillOptiq.toDrill(null, (RelNode) null, window);
    } catch (UserException e) {
        if (e.getMessage().contains(DrillOptiq.UNSUPPORTED_REX_NODE_ERROR)) {
            // got expected error return
            return;
        }
        // Include the actual message so a failure is diagnosable from the test report alone.
        Assert.fail("Hit exception with unexpected error message: " + e.getMessage());
    }
    Assert.fail("Failed to raise the expected exception");
}
Also used : SqlTypeFactoryImpl(org.apache.calcite.sql.type.SqlTypeFactoryImpl) RelDataTypeFactory(org.apache.calcite.rel.type.RelDataTypeFactory) RexBuilder(org.apache.calcite.rex.RexBuilder) RelDataType(org.apache.calcite.rel.type.RelDataType) UserException(org.apache.drill.common.exceptions.UserException) RexFieldCollation(org.apache.calcite.rex.RexFieldCollation) LinkedList(java.util.LinkedList) RexNode(org.apache.calcite.rex.RexNode) Test(org.junit.Test) PlannerTest(org.apache.drill.categories.PlannerTest)

Example 52 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by axbaretto.

In class JSONRecordReader, the method handleAndRaise.

/**
 * Wraps the given failure in a "data read error" {@link UserException},
 * attaching column, record, and file context where available, and throws it.
 *
 * @param suffix text placed before the underlying exception's message
 * @param e      the original failure being reported
 * @throws UserException always
 */
protected void handleAndRaise(String suffix, Exception e) throws UserException {
    final String message;
    final int columnNr;
    if (e instanceof JsonParseException) {
        // Jackson parse errors carry a cleaner message plus a source location.
        final JsonParseException parseError = (JsonParseException) e;
        message = parseError.getOriginalMessage();
        columnNr = parseError.getLocation().getColumnNr();
    } else {
        message = e.getMessage();
        // No location information for non-parse failures.
        columnNr = -1;
    }
    final UserException.Builder builder = UserException.dataReadError(e).message("%s - %s", suffix, message);
    if (columnNr > 0) {
        builder.pushContext("Column ", columnNr);
    }
    if (hadoopPath != null) {
        builder.pushContext("Record ", currentRecordNumberInFile()).pushContext("File ", hadoopPath.toUri().getPath());
    }
    throw builder.build(logger);
}
Also used : UserException(org.apache.drill.common.exceptions.UserException) JsonParseException(com.fasterxml.jackson.core.JsonParseException)

Example 53 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by axbaretto.

In class DrillCursor, the method nextRowInternally.

/**
 * Advances the cursor to the next row, fetching and loading a new batch
 * from the results listener when the current batch is exhausted.
 * <p>
 *   Is to be called (once) from {@link #loadInitialSchema} for
 *   {@link DrillResultSetImpl#execute()}, and then (repeatedly) from
 *   {@link #next()} for {@link AvaticaResultSet#next()}.
 * </p>
 *
 * @return  whether cursor is positioned at a row (false when after end of
 *   results)
 * @throws SQLException on query failure, interruption, an unexpected schema
 *   change during batch load, or any unexpected runtime error
 */
private boolean nextRowInternally() throws SQLException {
    if (currentRecordNumber + 1 < currentBatchHolder.getRecordCount()) {
        // Have next row in current batch--just advance index and report "at a row."
        currentRecordNumber++;
        return true;
    } else {
        try {
            QueryDataBatch qrb = resultsListener.getNext();
            // Skip spurious empty batches (zero rows and/or null data), except
            // before the first batch, which may legitimately carry only the
            // (initial) schema but no rows.
            if (afterFirstBatch) {
                while (qrb != null && (qrb.getHeader().getRowCount() == 0 || qrb.getData() == null)) {
                    // Empty message--dispose of and try to get another.
                    logger.warn("Spurious batch read: {}", qrb);
                    qrb.release();
                    qrb = resultsListener.getNext();
                }
            }
            afterFirstBatch = true;
            if (qrb == null) {
                // End of batches--clean up, set state to done, report after last row.
                // (We load it so we clear it.)
                currentBatchHolder.clear();
                afterLastRow = true;
                return false;
            } else {
                // Got next (or first) batch--reset record offset to beginning;
                // assimilate schema if changed; set up return value for first call
                // to next().
                currentRecordNumber = 0;
                final boolean schemaChanged;
                try {
                    schemaChanged = currentBatchHolder.load(qrb.getHeader().getDef(), qrb.getData());
                } finally {
                    qrb.release();
                }
                schema = currentBatchHolder.getSchema();
                if (schemaChanged) {
                    updateColumns();
                }
                if (returnTrueForNextCallToNext && currentBatchHolder.getRecordCount() == 0) {
                    returnTrueForNextCallToNext = false;
                }
                return true;
            }
        } catch (UserException e) {
            // error type is accessible, of course. :-( )
            throw new SQLException(e.getMessage(), e);
        } catch (InterruptedException e) {
            // but JDBC client certainly could.
            throw new SQLException("Interrupted.", e);
        } catch (SchemaChangeException e) {
            // throws SchemaChangeException, so check/clean catch clause.
            // Pass the cause along so the original stack trace is not lost.
            throw new SQLException("Unexpected SchemaChangeException from RecordBatchLoader.load(...)", e);
        } catch (RuntimeException e) {
            throw new SQLException("Unexpected RuntimeException: " + e.toString(), e);
        }
    }
}
Also used : SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) QueryDataBatch(org.apache.drill.exec.rpc.user.QueryDataBatch) SQLException(java.sql.SQLException) UserException(org.apache.drill.common.exceptions.UserException)

Example 54 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by axbaretto.

In class FragmentExecutor, the method sendFinalState.

/**
 * Reports this fragment's terminal state to the status reporter, then
 * closes the reporter. On failure, the deferred exception is wrapped in a
 * system-error {@link UserException} that carries the endpoint identity and
 * the "major:minor" fragment handle as context.
 */
private void sendFinalState() {
    final FragmentState finalState = fragmentState.get();
    if (finalState != FragmentState.FAILED) {
        statusReporter.stateChanged(finalState);
    } else {
        final FragmentHandle handle = getContext().getHandle();
        final String fragmentId = handle.getMajorFragmentId() + ":" + handle.getMinorFragmentId();
        final UserException failure = UserException.systemError(deferredException.getAndClear())
                .addIdentity(getContext().getEndpoint())
                .addContext("Fragment", fragmentId)
                .build(logger);
        statusReporter.fail(failure);
    }
    statusReporter.close();
}
Also used : FragmentState(org.apache.drill.exec.proto.UserBitShared.FragmentState) FragmentHandle(org.apache.drill.exec.proto.ExecProtos.FragmentHandle) UserException(org.apache.drill.common.exceptions.UserException)

Example 55 with UserException

use of org.apache.drill.common.exceptions.UserException in project drill by axbaretto.

In class TestTimedRunnable, the method withTasksExceedingTimeout.

/**
 * Runs 100 tasks (a few fast, most far exceeding the timeout) at
 * parallelism 16 and verifies that {@link TimedRunnable#run} raises a
 * {@link UserException} with the expected timeout message.
 */
@Test
public void withTasksExceedingTimeout() throws Exception {
    UserException caught = null;
    try {
        List<TimedRunnable<TestTask>> tasks = Lists.newArrayList();
        for (int i = 0; i < 100; i++) {
            // Indices of the form 2^k - 1 satisfy (i & (i + 1)) == 0 and get a
            // short task; every other index gets a task that outlives the timeout.
            final int sleepMillis = ((i & (i + 1)) == 0) ? 2000 : 20000;
            tasks.add(new TestTask(sleepMillis));
        }
        TimedRunnable.run("Execution with some tasks triggering timeout", logger, tasks, 16);
    } catch (UserException e) {
        caught = e;
    }
    assertNotNull("Expected a UserException", caught);
    assertThat(caught.getMessage(), containsString("Waited for 93750ms, but tasks for 'Execution with some tasks triggering timeout' are not " + "complete. Total runnable size 100, parallelism 16."));
}
Also used : UserException(org.apache.drill.common.exceptions.UserException) Test(org.junit.Test) SlowTest(org.apache.drill.categories.SlowTest) DrillTest(org.apache.drill.test.DrillTest)

Aggregations

UserException (org.apache.drill.common.exceptions.UserException)102 Test (org.junit.Test)76 EvfTest (org.apache.drill.categories.EvfTest)39 TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)30 SubOperatorTest (org.apache.drill.test.SubOperatorTest)30 SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder)28 RowBatchReader (org.apache.drill.exec.physical.impl.scan.RowBatchReader)12 ManagedReader (org.apache.drill.exec.physical.impl.scan.v3.ManagedReader)12 ScanLifecycleBuilder (org.apache.drill.exec.physical.impl.scan.v3.ScanLifecycleBuilder)11 SchemaNegotiator (org.apache.drill.exec.physical.impl.scan.v3.SchemaNegotiator)11 ScanOperatorExec (org.apache.drill.exec.physical.impl.scan.ScanOperatorExec)9 ScanFixture (org.apache.drill.exec.physical.impl.scan.ScanTestUtils.ScanFixture)9 SchemaPath (org.apache.drill.common.expression.SchemaPath)8 ResultSetOptions (org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl.ResultSetOptions)8 RowSet (org.apache.drill.exec.physical.rowSet.RowSet)8 MockRecordBatch (org.apache.drill.exec.physical.impl.MockRecordBatch)6 ArrayList (java.util.ArrayList)5 OperatorTest (org.apache.drill.categories.OperatorTest)5 DrillException (org.apache.drill.common.exceptions.DrillException)5 BaseTest (org.apache.drill.test.BaseTest)5