
Example 86 with ExecutionSetupException

Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.

The class HiveDefaultRecordReader, method initNextReader.

/**
 * Closes the previous mapredReader, if any, then initializes a mapredReader for the next
 * available InputSplit, or returns false when there are no more splits.
 *
 * @param job map/reduce job configuration
 * @return true if a new mapredReader was initialized
 * @throws ExecutionSetupException if the record mapredReader could not be created
 */
@SuppressWarnings("unchecked")
private boolean initNextReader(JobConf job) throws ExecutionSetupException {
    if (inputSplitsIterator.hasNext()) {
        closeMapredReader();
        InputSplit inputSplit = inputSplitsIterator.next();
        try {
            mapredReader = job.getInputFormat().getRecordReader(inputSplit, job, Reporter.NULL);
            logger.trace("hive mapredReader created: {} for inputSplit {}", mapredReader.getClass().getName(), inputSplit);
            return true;
        } catch (Exception e) {
            throw new ExecutionSetupException("Failed to get o.a.hadoop.mapred.RecordReader from Hive InputFormat", e);
        }
    }
    return false;
}
Also used: ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException), InputSplit (org.apache.hadoop.mapred.InputSplit), DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), SerDeException (org.apache.hadoop.hive.serde2.SerDeException)
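
For context, a caller typically drives initNextReader from the reader's fetch loop, falling through to the next split whenever the current one is exhausted. The sketch below is illustrative only: readNextPair is a hypothetical helper, and the key/value holder fields are assumptions, not the actual HiveDefaultRecordReader code.

private boolean readNextPair(JobConf job) throws ExecutionSetupException, IOException {
    while (true) {
        // Try the current reader first; next() returns false at end of split.
        if (mapredReader != null && mapredReader.next(key, value)) {
            return true;
        }
        // Current split exhausted (or no reader yet): advance to the next split.
        if (!initNextReader(job)) {
            return false; // no more splits remain
        }
        // A new reader was installed: refresh the key/value holders for its types.
        key = mapredReader.createKey();
        value = mapredReader.createValue();
    }
}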

Example 87 with ExecutionSetupException

Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.

The class SequenceFileBatchReader, method open.

@Override
public boolean open(FileSchemaNegotiator negotiator) {
    negotiator.tableSchema(defineMetadata(), true);
    logger.debug("The config is {}, root is {}, columns are {}", config, scan.getSelectionRoot(), scan.getColumns());
    // Open the SequenceFile and initialize the underlying mapred reader.
    try {
        processReader(negotiator);
    } catch (ExecutionSetupException e) {
        throw UserException.dataReadError(e).message("Failed to initialize the SequenceFile reader. " + e.getMessage()).addContext(errorContext).build(logger);
    }
    }
    ResultSetLoader setLoader = negotiator.build();
    loader = setLoader.writer();
    keyWriter = loader.scalar(KEY_SCHEMA);
    valueWriter = loader.scalar(VALUE_SCHEMA);
    return true;
}
Also used: ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException), ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader)
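
The call to negotiator.tableSchema(defineMetadata(), true) declares a fixed schema up front, so the ResultSetLoader can hand back typed column writers for the key and value. A minimal sketch of what defineMetadata() could return follows, assuming the reader exposes the record as two VARBINARY columns named by the KEY_SCHEMA and VALUE_SCHEMA constants used above; the column types are an assumption, not quoted from the Drill source.

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;

// Assumed shape: two VARBINARY columns matching the scalar writers above.
private TupleMetadata defineMetadata() {
    return new SchemaBuilder()
        .add(KEY_SCHEMA, MinorType.VARBINARY)
        .add(VALUE_SCHEMA, MinorType.VARBINARY)
        .buildSchema();
}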

Example 88 with ExecutionSetupException

Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.

The class HBaseRecordReader, method setup.

@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
    this.operatorContext = context;
    this.outputMutator = output;
    familyVectorMap = new HashMap<>();
    try {
        hTable = connection.getTable(hbaseTableName);
        // Add top-level column-family map vectors to the output in the order specified
        // when creating the reader (order of first appearance in the query).
        for (SchemaPath column : getColumns()) {
            if (column.equals(ROW_KEY_PATH)) {
                MaterializedField field = MaterializedField.create(column.getAsNamePart().getName(), ROW_KEY_TYPE);
                rowKeyVector = outputMutator.addField(field, VarBinaryVector.class);
            } else {
                getOrCreateFamilyVector(column.getRootSegment().getPath(), false);
            }
        }
        // Add map and child vectors for any HBase columns that are requested (in
        // order to avoid later creation of dummy NullableIntVectors for them).
        final Set<Map.Entry<byte[], NavigableSet<byte[]>>> familiesEntries = hbaseScanColumnsOnly.getFamilyMap().entrySet();
        for (Map.Entry<byte[], NavigableSet<byte[]>> familyEntry : familiesEntries) {
            final String familyName = new String(familyEntry.getKey(), StandardCharsets.UTF_8);
            final MapVector familyVector = getOrCreateFamilyVector(familyName, false);
            final Set<byte[]> children = familyEntry.getValue();
            if (null != children) {
                for (byte[] childNameBytes : children) {
                    final String childName = new String(childNameBytes, StandardCharsets.UTF_8);
                    getOrCreateColumnVector(familyVector, childName);
                }
            }
        }
        // Add map vectors for any HBase column families that are requested.
        for (String familyName : completeFamilies) {
            getOrCreateFamilyVector(familyName, false);
        }
        resultScanner = hTable.getScanner(hbaseScan);
    } catch (SchemaChangeException | IOException e) {
        throw new ExecutionSetupException(e);
    }
}
Also used: ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException), NavigableSet (java.util.NavigableSet), MaterializedField (org.apache.drill.exec.record.MaterializedField), IOException (java.io.IOException), VarBinaryVector (org.apache.drill.exec.vector.VarBinaryVector), NullableVarBinaryVector (org.apache.drill.exec.vector.NullableVarBinaryVector), SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException), SchemaPath (org.apache.drill.common.expression.SchemaPath), HashMap (java.util.HashMap), Map (java.util.Map), MapVector (org.apache.drill.exec.vector.complex.MapVector)
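
The setup above leans on getOrCreateFamilyVector to make vector creation idempotent per column family. A plausible shape for it, inferred from the familyVectorMap field and the catch of SchemaChangeException, is sketched below; COLUMN_FAMILY_TYPE (a MajorType describing a map column) is an assumed constant, not quoted from the Drill source.

// Return the cached MapVector for a family, creating and registering it on first use.
private MapVector getOrCreateFamilyVector(String familyName, boolean allocateOnCreate) throws SchemaChangeException {
    MapVector familyVector = familyVectorMap.get(familyName);
    if (familyVector == null) {
        MaterializedField field = MaterializedField.create(familyName, COLUMN_FAMILY_TYPE);
        familyVector = outputMutator.addField(field, MapVector.class);
        if (allocateOnCreate) {
            familyVector.allocateNew();
        }
        familyVectorMap.put(familyName, familyVector);
    }
    return familyVector;
}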

Example 89 with ExecutionSetupException

Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.

The class MongoScanBatchCreator, method getBatch.

@Override
public ScanBatch getBatch(ExecutorFragmentContext context, MongoSubScan subScan, List<RecordBatch> children) throws ExecutionSetupException {
    Preconditions.checkArgument(children.isEmpty());
    List<RecordReader> readers = new LinkedList<>();
    for (BaseMongoSubScanSpec scanSpec : subScan.getChunkScanSpecList()) {
        try {
            List<SchemaPath> columns = subScan.getColumns();
            if (columns == null) {
                columns = GroupScan.ALL_COLUMNS;
            }
            readers.add(new MongoRecordReader(scanSpec, columns, context, subScan.getMongoStoragePlugin()));
        } catch (Exception e) {
            logger.error("MongoRecordReader creation failed for subScan: {}.", subScan);
            logger.error(e.getMessage(), e);
            throw new ExecutionSetupException(e);
        }
    }
    logger.info("Number of record readers initialized: {}", readers.size());
    return new ScanBatch(subScan, context, readers);
}
Also used: ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException), SchemaPath (org.apache.drill.common.expression.SchemaPath), RecordReader (org.apache.drill.exec.store.RecordReader), ScanBatch (org.apache.drill.exec.physical.impl.ScanBatch), LinkedList (java.util.LinkedList)
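
The getBatch signature comes from Drill's BatchCreator contract: validate the incoming children, build one RecordReader per scan spec, and wrap any failure in ExecutionSetupException. A minimal sketch of a custom creator following the same pattern is shown below; DummySubScan and DummyRecordReader are hypothetical names used purely for illustration.

public class DummyScanBatchCreator implements BatchCreator<DummySubScan> {
    @Override
    public ScanBatch getBatch(ExecutorFragmentContext context, DummySubScan subScan,
                              List<RecordBatch> children) throws ExecutionSetupException {
        // A scan is a leaf of the operator tree, so no incoming batches are expected.
        Preconditions.checkArgument(children.isEmpty());
        try {
            RecordReader reader = new DummyRecordReader(subScan.getColumns());
            return new ScanBatch(subScan, context, Collections.singletonList(reader));
        } catch (Exception e) {
            // Surface construction failures as the checked setup exception.
            throw new ExecutionSetupException("Failed to create DummyRecordReader", e);
        }
    }
}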

Example 90 with ExecutionSetupException

Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.

The class ImplCreator, method getExec.

/**
 * Creates and returns the fragment RootExec for the given FragmentRoot. The RootExec has
 * one or more RecordBatches as children (which may themselves contain child
 * RecordBatches, and so on).
 *
 * @param context the FragmentContext
 * @param root the FragmentRoot
 * @return the RootExec of the fragment
 * @throws ExecutionSetupException if the fragment's operator tree cannot be constructed
 */
public static RootExec getExec(ExecutorFragmentContext context, FragmentRoot root) throws ExecutionSetupException {
    Preconditions.checkNotNull(root);
    Preconditions.checkNotNull(context);
    if (AssertionUtil.isAssertionsEnabled() || context.getOptions().getOption(ExecConstants.ENABLE_ITERATOR_VALIDATOR) || context.getConfig().getBoolean(ExecConstants.ENABLE_ITERATOR_VALIDATION)) {
        root = IteratorValidatorInjector.rewritePlanWithIteratorValidator(context, root);
    }
    final ImplCreator creator = new ImplCreator();
    Stopwatch watch = Stopwatch.createStarted();
    try {
        final RootExec rootExec = creator.getRootExec(root, context);
        // skip over this for SimpleRootExec (testing)
        if (rootExec instanceof BaseRootExec) {
            ((BaseRootExec) rootExec).setOperators(creator.getOperators());
        }
        logger.debug("Took {} ms to create RecordBatch tree", watch.elapsed(TimeUnit.MILLISECONDS));
        if (rootExec == null) {
            throw new ExecutionSetupException("The provided fragment did not have a root node that correctly created a RootExec value.");
        }
        return rootExec;
    } catch (Exception e) {
        AutoCloseables.close(e, creator.getOperators());
        context.getExecutorState().fail(e);
    }
    return null;
}
Also used: ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException), Stopwatch (org.apache.drill.shaded.guava.com.google.common.base.Stopwatch), IOException (java.io.IOException)
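
On the calling side, the ExecutionSetupException thrown by getExec separates setup failures from runtime failures. A hedged sketch of a call site follows; in Drill itself this role is played by the fragment executor, so the loop below is illustrative rather than the actual code.

try {
    RootExec rootExec = ImplCreator.getExec(context, rootOperator);
    if (rootExec != null) {
        // Pump the operator tree; each next() call processes one batch.
        while (rootExec.next()) {
            // continue until the fragment reports no more work
        }
    }
} catch (ExecutionSetupException e) {
    logger.error("Fragment setup failed", e);
}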

Aggregations

Usage counts across the indexed sources:

ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException): 94
IOException (java.io.IOException): 43
ScanBatch (org.apache.drill.exec.physical.impl.ScanBatch): 26
SchemaPath (org.apache.drill.common.expression.SchemaPath): 25
RecordReader (org.apache.drill.exec.store.RecordReader): 24
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException): 22
LinkedList (java.util.LinkedList): 16
Map (java.util.Map): 14
MaterializedField (org.apache.drill.exec.record.MaterializedField): 13
ExecutionException (java.util.concurrent.ExecutionException): 10
DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException): 10
OperatorContext (org.apache.drill.exec.ops.OperatorContext): 8
UserException (org.apache.drill.common.exceptions.UserException): 7
MajorType (org.apache.drill.common.types.TypeProtos.MajorType): 7
JobConf (org.apache.hadoop.mapred.JobConf): 7
HashMap (java.util.HashMap): 6
List (java.util.List): 6
OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException): 6
VectorContainerWriter (org.apache.drill.exec.vector.complex.impl.VectorContainerWriter): 6
Path (org.apache.hadoop.fs.Path): 6