Example 46 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by apache.

From the class TestScanSchemaTracker, method testWildcard.

/**
 * Wildcard projection, schema change allowed.
 */
@Test
public void testWildcard() {
    // Simulate SELECT * ...
    final ScanSchemaConfigBuilder builder = new ScanSchemaConfigBuilder().projection(RowSetTestUtils.projectAll());
    final ScanSchemaTracker schemaTracker = builder.build();
    assertSame(ProjectionType.ALL, schemaTracker.projectionType());
    assertFalse(schemaTracker.isResolved());
    // Reader input schema is dynamic
    final TupleMetadata reader1InputSchema = schemaTracker.readerInputSchema();
    assertTrue(reader1InputSchema.isEmpty());
    ProjectionFilter filter1 = schemaTracker.projectionFilter(ERROR_CONTEXT);
    assertSame(ProjectionFilter.PROJECT_ALL, filter1);
    // Pretend the reader discovers two columns.
    final TupleMetadata reader1OutputSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.BIGINT)
        .buildSchema();
    schemaTracker.applyReaderSchema(reader1OutputSchema, ERROR_CONTEXT);
    assertTrue(schemaTracker.isResolved());
    // Schema sent downstream after first batch
    final TupleMetadata outputSchema1 = schemaTracker.outputSchema();
    assertEquals(reader1OutputSchema, outputSchema1);
    // Next reader schema is partially defined
    final TupleMetadata reader2InputSchema = schemaTracker.readerInputSchema();
    assertEquals(reader1OutputSchema, reader2InputSchema);
    ProjectionFilter filter2 = schemaTracker.projectionFilter(ERROR_CONTEXT);
    assertTrue(filter2 instanceof DynamicSchemaFilter);
    assertTrue(filter2.projection(reader1OutputSchema.metadata("a")).isProjected);
    assertTrue(filter2.projection(reader1OutputSchema.metadata("b")).isProjected);
    assertTrue(filter2.isProjected("c"));
    try {
        filter2.projection(MetadataUtils.newScalar("a", Types.required(MinorType.VARCHAR)));
        fail();
    } catch (UserException e) {
        // Expected
    }
    // The next reader defines another column.
    // This triggers a schema change in output.
    final TupleMetadata reader2OutputSchema = new SchemaBuilder().add("c", MinorType.VARCHAR).buildSchema();
    schemaTracker.applyReaderSchema(reader2OutputSchema, ERROR_CONTEXT);
    // Schema sent downstream after second reader
    final TupleMetadata outputSchema2 = schemaTracker.outputSchema();
    final TupleMetadata expectedOutput = new SchemaBuilder()
        .addAll(reader1OutputSchema)
        .addAll(reader2OutputSchema)
        .buildSchema();
    assertEquals(expectedOutput, outputSchema2);
}
Also used: ProjectionFilter(org.apache.drill.exec.physical.resultSet.impl.ProjectionFilter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) UserException(org.apache.drill.common.exceptions.UserException) Test(org.junit.Test) BaseTest(org.apache.drill.test.BaseTest) EvfTest(org.apache.drill.categories.EvfTest)
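
The interesting assertion here is the try/catch at the end: once the scan schema is resolved, re-declaring column `a` with a conflicting type (VARCHAR instead of INT) must fail with a UserException rather than silently changing the schema. Below is a minimal sketch of how Drill code typically raises such an error, using the same builder pattern visible in Example 48; the validationError() factory exists on UserException, but the check and message are illustrative, not the tracker's actual code.

import org.apache.drill.common.exceptions.UserException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SchemaConflictSketch {
    private static final Logger logger = LoggerFactory.getLogger(SchemaConflictSketch.class);

    // Hypothetical check: a reader re-declares a column with a type that
    // conflicts with the schema already resolved from an earlier reader.
    static void checkColumnType(String column, String resolvedType, String readerType) {
        if (!resolvedType.equals(readerType)) {
            throw UserException.validationError()
                .message("Column `%s` was resolved as %s but the reader produced %s",
                    column, resolvedType, readerType)
                .build(logger);
        }
    }
}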

Example 47 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by apache.

From the class AsyncPageReader, method readDictionaryPage.

// Read and decode the dictionary and the header
private void readDictionaryPage(final ColumnReader<?> parentStatus) throws UserException {
    try {
        Stopwatch timer = Stopwatch.createStarted();
        ReadStatus readStatus = null;
        synchronized (pageQueue) {
            boolean pageQueueFull = pageQueue.remainingCapacity() == 0;
            // get the result of execution
            asyncPageRead.poll().get();
            // get the data if no exception has been thrown
            readStatus = pageQueue.take();
            assert (readStatus.pageData != null);
            // If the queue was full before we took a page out, then there would
            // have been no new read tasks scheduled. In that case, schedule a new read.
            if (pageQueueFull) {
                asyncPageRead.offer(threadPool.submit(new AsyncPageReaderTask(debugName, pageQueue)));
            }
        }
        long timeBlocked = timer.elapsed(TimeUnit.NANOSECONDS);
        stats.timeDiskScanWait.addAndGet(timeBlocked);
        stats.timeDiskScan.addAndGet(readStatus.getDiskScanTime());
        stats.numDictPageLoads.incrementAndGet();
        stats.timeDictPageLoads.addAndGet(timeBlocked + readStatus.getDiskScanTime());
        readDictionaryPageData(readStatus, parentStatus);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    } catch (Exception e) {
        handleAndThrowException(e, "Error reading dictionary page.");
    }
}
Also used: Stopwatch(com.google.common.base.Stopwatch) UserException(org.apache.drill.common.exceptions.UserException) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) IOException(java.io.IOException)
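
handleAndThrowException is a private helper of AsyncPageReader whose body is not shown in this snippet. A plausible sketch of the pattern it stands for, assuming dataReadError is the chosen category; the context line is illustrative rather than the helper's real contents:

// Sketch only: wrap a low-level failure in a UserException so the client
// receives a categorized, contextualized error instead of a raw stack trace.
private void handleAndThrowException(Exception e, String msg) throws UserException {
    throw UserException.dataReadError(e)
        .message(msg)
        .addContext("Async page reader", debugName) // illustrative context
        .build(logger);
}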

Example 48 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by apache.

From the class ExternalSortBatch, method doMergeAndSpill.

private BatchGroup.SpilledRun doMergeAndSpill(LinkedList<? extends BatchGroup> batchGroups, int spillCount) {
    List<BatchGroup> batchesToSpill = Lists.newArrayList();
    spillCount = Math.min(batchGroups.size(), spillCount);
    assert spillCount > 0 : "Spill count to mergeAndSpill must not be zero";
    for (int i = 0; i < spillCount; i++) {
        batchesToSpill.add(batchGroups.pollFirst());
    }
    // Merge the selected set of batches and write them to the
    // spill file. After each write, we release the memory associated
    // with the just-written batch.
    String outputFile = spillSet.getNextSpillFile();
    stats.setLongStat(Metric.SPILL_COUNT, spillSet.getFileCount());
    BatchGroup.SpilledRun newGroup = null;
    try (AutoCloseable ignored = AutoCloseables.all(batchesToSpill);
        CopierHolder.BatchMerger merger = copierHolder.startMerge(schema, batchesToSpill, spillBatchRowCount)) {
        logger.trace("Spilling {} of {} batches, spill batch size = {} rows, memory = {}, write to {}", batchesToSpill.size(), bufferedBatches.size() + batchesToSpill.size(), spillBatchRowCount, allocator.getAllocatedMemory(), outputFile);
        newGroup = new BatchGroup.SpilledRun(spillSet, outputFile, oContext);
        while (merger.next()) {
            // Add a new batch of records (given by merger.getOutput()) to the spill
            // file.
            //
            // note that addBatch also clears the merger's output container
            newGroup.addBatch(merger.getOutput());
        }
        injector.injectChecked(context.getExecutionControls(), INTERRUPTION_WHILE_SPILLING, IOException.class);
        newGroup.closeOutputStream();
        logger.trace("Spilled {} batches, {} records; memory = {} to {}", merger.getBatchCount(), merger.getRecordCount(), allocator.getAllocatedMemory(), outputFile);
        newGroup.setBatchSize(merger.getEstBatchSize());
        return newGroup;
    } catch (Throwable e) {
        // we only need to clean up newGroup if spill failed
        try {
            if (newGroup != null) {
                AutoCloseables.close(e, newGroup);
            }
        } catch (Throwable t) {
            /* close() may hit the same IO issue; just ignore */
        }
        try {
            throw e;
        } catch (UserException ue) {
            throw ue;
        } catch (Throwable ex) {
            throw UserException.resourceError(ex)
                .message("External Sort encountered an error while spilling to disk")
                .build(logger);
        }
    }
}
Also used: SpilledRun(org.apache.drill.exec.physical.impl.xsort.managed.BatchGroup.SpilledRun) UserException(org.apache.drill.common.exceptions.UserException)
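
The inner try { throw e; } catch (...) construct is a standard Java trick for re-dispatching an already-caught Throwable by type. Flattened, the idiom reads as below, with spill() standing in as a placeholder for the merge loop: a UserException already carries a user-facing category and context, so it passes through untouched, while anything else is wrapped so the client still sees a categorized resource error.

try {
    spill(); // placeholder for the merge-and-write logic above
} catch (UserException ue) {
    throw ue; // already categorized; do not double-wrap
} catch (Throwable ex) {
    throw UserException.resourceError(ex)
        .message("External Sort encountered an error while spilling to disk")
        .build(logger);
}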

Example 49 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by apache.

From the class TestTimedRunnable, method withTasksExceedingTimeout.

@Test
public void withTasksExceedingTimeout() throws Exception {
    UserException ex = null;
    try {
        List<TimedRunnable<TestTask>> tasks = Lists.newArrayList();
        for (int i = 0; i < 100; i++) {
            if ((i & (i + 1)) == 0) {
                tasks.add(new TestTask(2000));
            } else {
                tasks.add(new TestTask(20000));
            }
        }
        TimedRunnable.run("Execution with some tasks triggering timeout", logger, tasks, 16);
    } catch (UserException e) {
        ex = e;
    }
    assertNotNull("Expected a UserException", ex);
    assertThat(ex.getMessage(), containsString("Waited for 93750ms, but tasks for 'Execution with some tasks triggering timeout' are not " + "complete. Total runnable size 100, parallelism 16."));
}
Also used: UserException(org.apache.drill.common.exceptions.UserException) Test(org.junit.Test) DrillTest(org.apache.drill.test.DrillTest)
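
The catch-and-flag pattern above predates JUnit's assertThrows. From JUnit 4.13 onward the same check can be written more compactly; a sketch, assuming the same tasks list has already been built:

// assertThrows returns the caught exception, so the message check follows directly.
UserException ex = assertThrows(UserException.class,
    () -> TimedRunnable.run("Execution with some tasks triggering timeout", logger, tasks, 16));
assertThat(ex.getMessage(), containsString("Waited for 93750ms"));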

Example 50 with UserException

Use of org.apache.drill.common.exceptions.UserException in project drill by axbaretto.

From the class TestResultSetLoaderOverflow, method testLargeArray.

/**
 * Create an array that contains more than 64K values. Drill has no numeric
 * limit on array lengths. (Well, it does, but the limit is about 2 billion
 * which, even for bytes, is too large to fit into a vector...)
 */
@Test
public void testLargeArray() {
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
    RowSetLoader rootWriter = rsLoader.writer();
    MaterializedField field = SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REPEATED);
    rootWriter.addColumn(field);
    // Create a single array as the column value in the first row. When
    // the value vector overflows, an exception is thrown: the one in-flight
    // row cannot be split across batches, so overflow handling is impossible.
    rsLoader.startBatch();
    rootWriter.start();
    ScalarWriter array = rootWriter.array(0).scalar();
    try {
        for (int i = 0; i < Integer.MAX_VALUE; i++) {
            array.setInt(i + 1);
        }
        fail();
    } catch (UserException e) {
        // Expected
    }
    rsLoader.close();
}
Also used: ResultSetLoader(org.apache.drill.exec.physical.rowSet.ResultSetLoader) MaterializedField(org.apache.drill.exec.record.MaterializedField) UserException(org.apache.drill.common.exceptions.UserException) RowSetLoader(org.apache.drill.exec.physical.rowSet.RowSetLoader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
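
Both this test and Example 46 swallow the expected UserException without inspecting it. When the error category matters, UserException exposes it via getErrorType(); a sketch of a stricter catch block, where the expected enum value is an assumption chosen for illustration:

try {
    for (int i = 0; i < Integer.MAX_VALUE; i++) {
        array.setInt(i + 1);
    }
    fail();
} catch (UserException e) {
    // getErrorType() reports the category assigned by the builder factory
    // (VALIDATION, DATA_READ, RESOURCE, ...). UNSUPPORTED_OPERATION is an
    // assumed value here, not necessarily what the result set loader uses.
    assertEquals(UserBitShared.DrillPBError.ErrorType.UNSUPPORTED_OPERATION,
        e.getErrorType());
}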

Aggregations

UserException (org.apache.drill.common.exceptions.UserException): 102 uses
Test (org.junit.Test): 76 uses
EvfTest (org.apache.drill.categories.EvfTest): 39 uses
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata): 30 uses
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 30 uses
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder): 28 uses
RowBatchReader (org.apache.drill.exec.physical.impl.scan.RowBatchReader): 12 uses
ManagedReader (org.apache.drill.exec.physical.impl.scan.v3.ManagedReader): 12 uses
ScanLifecycleBuilder (org.apache.drill.exec.physical.impl.scan.v3.ScanLifecycleBuilder): 11 uses
SchemaNegotiator (org.apache.drill.exec.physical.impl.scan.v3.SchemaNegotiator): 11 uses
ScanOperatorExec (org.apache.drill.exec.physical.impl.scan.ScanOperatorExec): 9 uses
ScanFixture (org.apache.drill.exec.physical.impl.scan.ScanTestUtils.ScanFixture): 9 uses
SchemaPath (org.apache.drill.common.expression.SchemaPath): 8 uses
ResultSetOptions (org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl.ResultSetOptions): 8 uses
RowSet (org.apache.drill.exec.physical.rowSet.RowSet): 8 uses
MockRecordBatch (org.apache.drill.exec.physical.impl.MockRecordBatch): 6 uses
ArrayList (java.util.ArrayList): 5 uses
OperatorTest (org.apache.drill.categories.OperatorTest): 5 uses
DrillException (org.apache.drill.common.exceptions.DrillException): 5 uses
BaseTest (org.apache.drill.test.BaseTest): 5 uses