Example 86 with DrillRuntimeException

Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by axbaretto.

The class FileSelection, method createFromDirectories.

public static FileSelection createFromDirectories(final List<String> dirPaths, final FileSelection selection, final String cacheFileRoot) {
    Stopwatch timer = Stopwatch.createStarted();
    final String root = selection.getSelectionRoot();
    if (Strings.isNullOrEmpty(root)) {
        throw new DrillRuntimeException("Selection root is null or empty" + root);
    }
    if (dirPaths == null || dirPaths.isEmpty()) {
        throw new DrillRuntimeException("List of directories is null or empty");
    }
    List<String> dirs = Lists.newArrayList();
    if (selection.hadWildcard()) {
        // for wildcard the directory list should have already been expanded
        for (FileStatus status : selection.getFileStatuses()) {
            dirs.add(status.getPath().toString());
        }
    } else {
        for (String s : dirPaths) {
            dirs.add(s);
        }
    }
    final Path rootPath = handleWildCard(root);
    // final URI uri = dirPaths.get(0).toUri();
    final URI uri = selection.getFileStatuses().get(0).getPath().toUri();
    final Path path = new Path(uri.getScheme(), uri.getAuthority(), rootPath.toUri().getPath());
    FileSelection fileSel = new FileSelection(null, dirs, path.toString(), cacheFileRoot, false);
    fileSel.setHadWildcard(selection.hadWildcard());
    logger.info("FileSelection.createFromDirectories() took {} ms ", timer.elapsed(TimeUnit.MILLISECONDS));
    return fileSel;
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Stopwatch(com.google.common.base.Stopwatch) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) URI(java.net.URI)
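
As a hedged aside, the fail-fast guards at the top of createFromDirectories() are worth isolating. The stand-alone sketch below assumes only drill-common and Guava on the classpath; SelectionGuardDemo and validateSelection are hypothetical names for illustration, not Drill API.

import java.util.Arrays;
import java.util.List;

import com.google.common.base.Strings;

import org.apache.drill.common.exceptions.DrillRuntimeException;

public class SelectionGuardDemo {

    // mirrors the guard clauses of createFromDirectories(): validate inputs
    // up front and fail fast before any file-system work is done
    public static void validateSelection(String selectionRoot, List<String> dirPaths) {
        if (Strings.isNullOrEmpty(selectionRoot)) {
            // keep a separator before the value so the message stays readable
            throw new DrillRuntimeException("Selection root is null or empty: " + selectionRoot);
        }
        if (dirPaths == null || dirPaths.isEmpty()) {
            throw new DrillRuntimeException("List of directories is null or empty");
        }
    }

    public static void main(String[] args) {
        validateSelection("/data/logs", Arrays.asList("/data/logs/2018"));  // passes
        validateSelection("", Arrays.asList("/data/logs/2018"));            // throws
    }
}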

Example 87 with DrillRuntimeException

Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by axbaretto.

The class AsyncPageReader, method nextInternal.

@Override
protected void nextInternal() throws IOException {
    ReadStatus readStatus = null;
    try {
        Stopwatch timer = Stopwatch.createStarted();
        parentColumnReader.parentReader.getOperatorContext().getStats().startWait();
        try {
            // get the result of execution
            waitForExecutionResult();
            synchronized (pageQueueSyncronize) {
                boolean pageQueueFull = pageQueue.remainingCapacity() == 0;
                // get the data if no exception has been thrown
                readStatus = pageQueue.take();
                if (readStatus.pageData == null || readStatus == ReadStatus.EMPTY) {
                    throw new DrillRuntimeException("Unexpected end of data");
                }
                // if the queue was full before we took this page, then there may
                // have been no new read tasks scheduled. In that case, schedule a new read.
                if (!parentColumnReader.isShuttingDown && pageQueueFull) {
                    asyncPageRead.offer(threadPool.submit(new AsyncPageReaderTask(debugName, pageQueue)));
                }
            }
        } finally {
            parentColumnReader.parentReader.getOperatorContext().getStats().stopWait();
        }
        long timeBlocked = timer.elapsed(TimeUnit.NANOSECONDS);
        stats.timeDiskScanWait.addAndGet(timeBlocked);
        stats.timeDiskScan.addAndGet(readStatus.getDiskScanTime());
        if (readStatus.isDictionaryPage) {
            stats.numDictPageLoads.incrementAndGet();
            stats.timeDictPageLoads.addAndGet(timeBlocked + readStatus.getDiskScanTime());
        } else {
            stats.numDataPageLoads.incrementAndGet();
            stats.timeDataPageLoads.addAndGet(timeBlocked + readStatus.getDiskScanTime());
        }
        pageHeader = readStatus.getPageHeader();
        do {
            if (pageHeader.getType() == PageType.DICTIONARY_PAGE) {
                readDictionaryPageData(readStatus, parentColumnReader);
                // get the result of execution
                waitForExecutionResult();
                synchronized (pageQueueSyncronize) {
                    boolean pageQueueFull = pageQueue.remainingCapacity() == 0;
                    // get the data if no exception has been thrown
                    readStatus = pageQueue.take();
                    if (readStatus.pageData == null || readStatus == ReadStatus.EMPTY) {
                        break;
                    }
                    // if the queue was full before we took this page, then there may
                    // have been no new read tasks scheduled. In that case, schedule a new read.
                    if (!parentColumnReader.isShuttingDown && pageQueueFull) {
                        asyncPageRead.offer(threadPool.submit(new AsyncPageReaderTask(debugName, pageQueue)));
                    }
                }
                pageHeader = readStatus.getPageHeader();
            }
        } while (pageHeader.getType() == PageType.DICTIONARY_PAGE);
        pageHeader = readStatus.getPageHeader();
        pageData = getDecompressedPageData(readStatus);
        assert (pageData != null);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    } catch (RuntimeException e) {
        // Catch this explicitly to satisfy findbugs
        handleAndThrowException(e, "Error reading page data");
    } catch (Exception e) {
        handleAndThrowException(e, "Error reading page data");
    }
}
Also used : DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) Stopwatch(com.google.common.base.Stopwatch) UserException(org.apache.drill.common.exceptions.UserException) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) IOException(java.io.IOException) EOFException(java.io.EOFException) ExecutionException(java.util.concurrent.ExecutionException)
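
The take()-then-refill logic above is easier to follow in isolation. The sketch below is a simplified stand-alone version of the same pattern using only java.util.concurrent; every name in it is illustrative rather than Drill's, and poll() with a timeout stands in for take() so the demo cannot hang.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class RefillOnFullDemo {

    static final LinkedBlockingQueue<String> pageQueue = new LinkedBlockingQueue<>(2);
    static final ExecutorService threadPool = Executors.newSingleThreadExecutor();
    static final Object pageQueueLock = new Object();

    static void scheduleRead(int id) {
        threadPool.submit(() -> pageQueue.offer("page-" + id));
    }

    public static void main(String[] args) throws InterruptedException {
        scheduleRead(0);
        scheduleRead(1);
        Thread.sleep(100);  // let the producer fill the queue
        int nextId = 2;
        for (int i = 0; i < 4; i++) {
            String page;
            synchronized (pageQueueLock) {
                // if the queue was full before we take a page, the producer scheduled
                // no further reads, so the consumer must schedule the next one itself
                boolean pageQueueFull = pageQueue.remainingCapacity() == 0;
                page = pageQueue.poll(1, TimeUnit.SECONDS);  // the original uses take()
                if (page != null && pageQueueFull) {
                    scheduleRead(nextId++);
                }
            }
            System.out.println("consumed " + page);
            Thread.sleep(100);  // give the refill task time to land
        }
        threadPool.shutdownNow();
    }
}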

Example 88 with DrillRuntimeException

Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by axbaretto.

The class DrillParquetGroupConverter, method getConverterForType.

@SuppressWarnings("resource")
private PrimitiveConverter getConverterForType(String name, PrimitiveType type) {
    switch(type.getPrimitiveTypeName()) {
        case INT32:
            {
                if (type.getOriginalType() == null) {
                    IntWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).integer() : mapWriter.integer(name);
                    return new DrillIntConverter(writer);
                }
                switch(type.getOriginalType()) {
                    case UINT_8:
                    case UINT_16:
                    case UINT_32:
                    case INT_8:
                    case INT_16:
                    case INT_32:
                        {
                            IntWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).integer() : mapWriter.integer(name);
                            return new DrillIntConverter(writer);
                        }
                    case DECIMAL:
                        {
                            ParquetReaderUtility.checkDecimalTypeEnabled(options);
                            Decimal9Writer writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal9() : mapWriter.decimal9(name, type.getDecimalMetadata().getScale(), type.getDecimalMetadata().getPrecision());
                            return new DrillDecimal9Converter(writer, type.getDecimalMetadata().getPrecision(), type.getDecimalMetadata().getScale());
                        }
                    case DATE:
                        {
                            DateWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).date() : mapWriter.date(name);
                            switch(containsCorruptedDates) {
                                case META_SHOWS_CORRUPTION:
                                    return new DrillCorruptedDateConverter(writer);
                                case META_SHOWS_NO_CORRUPTION:
                                    return new DrillDateConverter(writer);
                                case META_UNCLEAR_TEST_VALUES:
                                    return new CorruptionDetectingDateConverter(writer);
                                default:
                                    throw new DrillRuntimeException(String.format("Issue setting up parquet reader for date type, " + "unrecognized date corruption status %s. See DRILL-4203 for more info.", containsCorruptedDates));
                            }
                        }
                    case TIME_MILLIS:
                        {
                            TimeWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).time() : mapWriter.time(name);
                            return new DrillTimeConverter(writer);
                        }
                    default:
                        {
                            throw new UnsupportedOperationException("Unsupported type: " + type.getOriginalType());
                        }
                }
            }
        case INT64:
            {
                if (type.getOriginalType() == null) {
                    BigIntWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).bigInt() : mapWriter.bigInt(name);
                    return new DrillBigIntConverter(writer);
                }
                switch(type.getOriginalType()) {
                    case UINT_64:
                    case INT_64:
                        {
                            BigIntWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).bigInt() : mapWriter.bigInt(name);
                            return new DrillBigIntConverter(writer);
                        }
                    case DECIMAL:
                        {
                            ParquetReaderUtility.checkDecimalTypeEnabled(options);
                            Decimal18Writer writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal18() : mapWriter.decimal18(name, type.getDecimalMetadata().getScale(), type.getDecimalMetadata().getPrecision());
                            return new DrillDecimal18Converter(writer, type.getDecimalMetadata().getPrecision(), type.getDecimalMetadata().getScale());
                        }
                    case TIMESTAMP_MILLIS:
                        {
                            TimeStampWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).timeStamp() : mapWriter.timeStamp(name);
                            return new DrillTimeStampConverter(writer);
                        }
                    default:
                        {
                            throw new UnsupportedOperationException("Unsupported type " + type.getOriginalType());
                        }
                }
            }
        case INT96:
            {
                // TODO: replace null with TIMESTAMP_NANOS once parquet support such type annotation.
                if (type.getOriginalType() == null) {
                    if (options.getOption(ExecConstants.PARQUET_READER_INT96_AS_TIMESTAMP).bool_val) {
                        TimeStampWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).timeStamp() : mapWriter.timeStamp(name);
                        return new DrillFixedBinaryToTimeStampConverter(writer);
                    } else {
                        VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
                        return new DrillFixedBinaryToVarbinaryConverter(writer, ParquetColumnMetadata.getTypeLengthInBits(type.getPrimitiveTypeName()) / 8, mutator.getManagedBuffer());
                    }
                }
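                // note: when OriginalType is non-null this case falls through to FLOAT (no break)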
            }
        case FLOAT:
            {
                Float4Writer writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).float4() : mapWriter.float4(name);
                return new DrillFloat4Converter(writer);
            }
        case DOUBLE:
            {
                Float8Writer writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).float8() : mapWriter.float8(name);
                return new DrillFloat8Converter(writer);
            }
        case BOOLEAN:
            {
                BitWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).bit() : mapWriter.bit(name);
                return new DrillBoolConverter(writer);
            }
        case BINARY:
            {
                if (type.getOriginalType() == null) {
                    VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
                    return new DrillVarBinaryConverter(writer, mutator.getManagedBuffer());
                }
                switch(type.getOriginalType()) {
                    case UTF8:
                        {
                            VarCharWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varChar() : mapWriter.varChar(name);
                            return new DrillVarCharConverter(writer, mutator.getManagedBuffer());
                        }
                    case ENUM:
                        {
                            VarCharWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varChar() : mapWriter.varChar(name);
                            return new DrillVarCharConverter(writer, mutator.getManagedBuffer());
                        }
                    // TODO not sure if BINARY/DECIMAL is actually supported
                    case DECIMAL:
                        {
                            ParquetReaderUtility.checkDecimalTypeEnabled(options);
                            DecimalMetadata metadata = type.getDecimalMetadata();
                            if (metadata.getPrecision() <= 28) {
                                Decimal28SparseWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal28Sparse() : mapWriter.decimal28Sparse(name, metadata.getScale(), metadata.getPrecision());
                                return new DrillBinaryToDecimal28Converter(writer, metadata.getPrecision(), metadata.getScale(), mutator.getManagedBuffer());
                            } else {
                                Decimal38SparseWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal38Sparse() : mapWriter.decimal38Sparse(name, metadata.getScale(), metadata.getPrecision());
                                return new DrillBinaryToDecimal38Converter(writer, metadata.getPrecision(), metadata.getScale(), mutator.getManagedBuffer());
                            }
                        }
                    default:
                        {
                            throw new UnsupportedOperationException("Unsupported type " + type.getOriginalType());
                        }
                }
            }
        case FIXED_LEN_BYTE_ARRAY:
            if (type.getOriginalType() == OriginalType.DECIMAL) {
                ParquetReaderUtility.checkDecimalTypeEnabled(options);
                DecimalMetadata metadata = type.getDecimalMetadata();
                if (metadata.getPrecision() <= 28) {
                    Decimal28SparseWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal28Sparse() : mapWriter.decimal28Sparse(name, metadata.getScale(), metadata.getPrecision());
                    return new DrillBinaryToDecimal28Converter(writer, metadata.getPrecision(), metadata.getScale(), mutator.getManagedBuffer());
                } else {
                    Decimal38SparseWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal38Sparse() : mapWriter.decimal38Sparse(name, metadata.getScale(), metadata.getPrecision());
                    return new DrillBinaryToDecimal38Converter(writer, metadata.getPrecision(), metadata.getScale(), mutator.getManagedBuffer());
                }
            } else if (type.getOriginalType() == OriginalType.INTERVAL) {
                IntervalWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).interval() : mapWriter.interval(name);
                return new DrillFixedLengthByteArrayToInterval(writer);
            } else {
                VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
                return new DrillFixedBinaryToVarbinaryConverter(writer, type.getTypeLength(), mutator.getManagedBuffer());
            }
        default:
            throw new UnsupportedOperationException("Unsupported type: " + type.getPrimitiveTypeName());
    }
}
Also used : BitWriter(org.apache.drill.exec.vector.complex.writer.BitWriter) Decimal9Writer(org.apache.drill.exec.vector.complex.writer.Decimal9Writer) Float4Writer(org.apache.drill.exec.vector.complex.writer.Float4Writer) TimeWriter(org.apache.drill.exec.vector.complex.writer.TimeWriter) VarBinaryWriter(org.apache.drill.exec.vector.complex.writer.VarBinaryWriter) TimeStampWriter(org.apache.drill.exec.vector.complex.writer.TimeStampWriter) DateWriter(org.apache.drill.exec.vector.complex.writer.DateWriter) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) IntervalWriter(org.apache.drill.exec.vector.complex.writer.IntervalWriter) Decimal18Writer(org.apache.drill.exec.vector.complex.writer.Decimal18Writer) BigIntWriter(org.apache.drill.exec.vector.complex.writer.BigIntWriter) IntWriter(org.apache.drill.exec.vector.complex.writer.IntWriter) VarCharWriter(org.apache.drill.exec.vector.complex.writer.VarCharWriter) Float8Writer(org.apache.drill.exec.vector.complex.writer.Float8Writer) DecimalMetadata(org.apache.parquet.schema.DecimalMetadata) Decimal28SparseWriter(org.apache.drill.exec.vector.complex.writer.Decimal28SparseWriter) Decimal38SparseWriter(org.apache.drill.exec.vector.complex.writer.Decimal38SparseWriter)
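
The DATE branch above shows the pattern this converter uses for enum dispatch: cover every known status and make the default branch throw rather than guess. Below is a hypothetical reduction of that branch, returning converter names as strings instead of real converter instances; the enum is a local copy for illustration (in Drill it lives inside ParquetReaderUtility).

import org.apache.drill.common.exceptions.DrillRuntimeException;

public class CorruptionDispatchDemo {

    // hypothetical copy of the corruption-status enum, for a self-contained demo
    enum DateCorruptionStatus { META_SHOWS_CORRUPTION, META_SHOWS_NO_CORRUPTION, META_UNCLEAR_TEST_VALUES }

    static String converterFor(DateCorruptionStatus status) {
        switch (status) {
            case META_SHOWS_CORRUPTION:
                return "DrillCorruptedDateConverter";
            case META_SHOWS_NO_CORRUPTION:
                return "DrillDateConverter";
            case META_UNCLEAR_TEST_VALUES:
                return "CorruptionDetectingDateConverter";
            default:
                // reachable only if the enum grows; refuse to guess and name the status
                throw new DrillRuntimeException(String.format(
                    "unrecognized date corruption status %s. See DRILL-4203 for more info.", status));
        }
    }

    public static void main(String[] args) {
        System.out.println(converterFor(DateCorruptionStatus.META_SHOWS_NO_CORRUPTION));
    }
}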

Example 89 with DrillRuntimeException

Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by axbaretto.

The class ZookeeperPersistentStore, method put.

@Override
public void put(final String key, final V value, final DataChangeVersion version) {
    final InstanceSerializer<V> serializer = config.getSerializer();
    try {
        final byte[] bytes = serializer.serialize(value);
        client.put(key, bytes, version);
    } catch (final IOException e) {
        throw new DrillRuntimeException(String.format("unable to de/serialize value of type %s", value.getClass()), e);
    }
}
Also used : IOException(java.io.IOException) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException)
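
A minimal sketch of the wrap-and-rethrow idiom in put(), assuming only drill-common on the classpath. The JDK's ObjectOutputStream stands in for Drill's InstanceSerializer, which is not shown here; SerializeWrapDemo is a hypothetical name.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable;

import org.apache.drill.common.exceptions.DrillRuntimeException;

public class SerializeWrapDemo {

    static byte[] serializeOrThrow(Serializable value) {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
             ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(value);
            oos.flush();
            return bos.toByteArray();
        } catch (IOException e) {
            // wrap the checked exception, keeping it as the cause so the trace survives
            throw new DrillRuntimeException(
                String.format("unable to serialize value of type %s", value.getClass()), e);
        }
    }

    public static void main(String[] args) {
        System.out.println(serializeOrThrow("hello").length + " bytes");
    }
}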

Example 90 with DrillRuntimeException

Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by axbaretto.

The class AssignmentCreator, method getMappings.

/**
 * Does the work of creating the mappings for this AssignmentCreator
 * @return the minor fragment id to work units mapping
 */
private ListMultimap<Integer, T> getMappings() {
    Stopwatch watch = Stopwatch.createStarted();
    maxWork = (int) Math.ceil(units.size() / ((float) incomingEndpoints.size()));
    LinkedList<WorkEndpointListPair<T>> workList = getWorkList();
    LinkedList<WorkEndpointListPair<T>> unassignedWorkList;
    Map<DrillbitEndpoint, FragIteratorWrapper> endpointIterators = getEndpointIterators();
    // Assign up to maxCount per node based on locality.
    unassignedWorkList = assign(workList, endpointIterators, false);
    // Assign up to minCount per node in a round-robin fashion.
    assignLeftovers(unassignedWorkList, endpointIterators, true);
    // Assign up to maxCount + leftovers per node based on locality.
    unassignedWorkList = assign(unassignedWorkList, endpointIterators, true);
    // Assign up to maxCount + leftovers per node in a round-robin fashion.
    assignLeftovers(unassignedWorkList, endpointIterators, false);
    if (unassignedWorkList.size() != 0) {
        throw new DrillRuntimeException("There are still unassigned work units");
    }
    logger.debug("Took {} ms to assign {} work units to {} fragments", watch.elapsed(TimeUnit.MILLISECONDS), units.size(), incomingEndpoints.size());
    return mappings;
}
Also used : DrillbitEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) Stopwatch(com.google.common.base.Stopwatch) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException)
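
The final invariant check is the interesting part of getMappings(): after all assignment passes, any leftover work unit is a hard error rather than silently dropped. Below is a hypothetical single-pass assigner with the same check; the names and structure are illustrative, not AssignmentCreator's.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Deque;
import java.util.List;

import org.apache.drill.common.exceptions.DrillRuntimeException;

public class RoundRobinAssignDemo {

    static List<List<String>> assign(List<String> units, int fragments, int maxPerFragment) {
        List<List<String>> mappings = new ArrayList<>();
        for (int i = 0; i < fragments; i++) {
            mappings.add(new ArrayList<>());
        }
        Deque<String> unassigned = new ArrayDeque<>(units);
        // one bounded pass: each fragment takes at most maxPerFragment units
        for (int i = 0; i < fragments && !unassigned.isEmpty(); i++) {
            while (mappings.get(i).size() < maxPerFragment && !unassigned.isEmpty()) {
                mappings.get(i).add(unassigned.poll());
            }
        }
        // final invariant: total capacity must have covered every unit, else fail loudly
        if (!unassigned.isEmpty()) {
            throw new DrillRuntimeException("There are still unassigned work units");
        }
        return mappings;
    }

    public static void main(String[] args) {
        // 2 fragments x 3 slots covers 5 units; assign(..., 2, 2) would throw instead
        System.out.println(assign(Arrays.asList("u1", "u2", "u3", "u4", "u5"), 2, 3));
    }
}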

Aggregations

DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException): 184
IOException (java.io.IOException): 76
VersionMismatchException (org.apache.drill.exec.exception.VersionMismatchException): 18
ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException): 15
UserException (org.apache.drill.common.exceptions.UserException): 13
Path (org.apache.hadoop.fs.Path): 13
KeeperException (org.apache.zookeeper.KeeperException): 12
NodeExistsException (org.apache.zookeeper.KeeperException.NodeExistsException): 12
NoSuchElementException (java.util.NoSuchElementException): 11
Stopwatch (com.google.common.base.Stopwatch): 10
TypeProtos (org.apache.drill.common.types.TypeProtos): 9
Bson (org.bson.conversions.Bson): 9
MaterializedField (org.apache.drill.exec.record.MaterializedField): 8
List (java.util.List): 7
RexNode (org.apache.calcite.rex.RexNode): 7
MajorType (org.apache.drill.common.types.TypeProtos.MajorType): 7
ValueHolder (org.apache.drill.exec.expr.holders.ValueHolder): 6
VarCharHolder (org.apache.drill.exec.expr.holders.VarCharHolder): 6
Registry (org.apache.drill.exec.proto.UserBitShared.Registry): 6
Admin (org.apache.hadoop.hbase.client.Admin): 6