Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
Class HiveDefaultRecordReader, method initNextReader.
/**
 * Closes the previous mapredReader if any, then initializes a mapredReader for the next
 * InputSplit, or returns false when there are no more splits.
 *
 * @param job map/reduce job configuration
 * @return true if a new mapredReader was initialized
 * @throws ExecutionSetupException if the record mapredReader could not be initialized
 */
@SuppressWarnings("unchecked")
private boolean initNextReader(JobConf job) throws ExecutionSetupException {
  if (inputSplitsIterator.hasNext()) {
    closeMapredReader();
    InputSplit inputSplit = inputSplitsIterator.next();
    try {
      mapredReader = job.getInputFormat().getRecordReader(inputSplit, job, Reporter.NULL);
      logger.trace("hive mapredReader created: {} for inputSplit {}",
          mapredReader.getClass().getName(), inputSplit.toString());
      return true;
    } catch (Exception e) {
      throw new ExecutionSetupException("Failed to get o.a.hadoop.mapred.RecordReader from Hive InputFormat", e);
    }
  }
  return false;
}
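A reader built around initNextReader is typically driven by a loop that falls through to the next split once the current mapredReader is exhausted. The following is a minimal, hypothetical sketch of such a consuming loop (not the actual Drill method); it assumes the class keeps the JobConf in a job field and that reusable key/value holders are passed in:

// Hypothetical sketch: pull a record from the current mapredReader, or advance to the
// next split when the current one is exhausted. Returns false once all splits are read.
private boolean hasNextValue(Object key, Object value) throws IOException, ExecutionSetupException {
  while (true) {
    if (mapredReader.next(key, value)) {
      return true;   // got a record from the current split
    }
    if (!initNextReader(job)) {
      return false;  // no more splits left
    }
    // a fresh mapredReader was opened for the next split; loop and read from it
  }
}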
Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
Class SequenceFileBatchReader, method open.
@Override
public boolean open(FileSchemaNegotiator negotiator) {
  negotiator.tableSchema(defineMetadata(), true);
  logger.debug("The config is {}, root is {}, columns has {}", config, scan.getSelectionRoot(), scan.getColumns());
  // open Sequencefile
  try {
    processReader(negotiator);
  } catch (ExecutionSetupException e) {
    throw UserException.dataReadError(e)
        .message("Failure in initial sequencefile reader. " + e.getMessage())
        .addContext(errorContext)
        .build(logger);
  }
  ResultSetLoader setLoader = negotiator.build();
  loader = setLoader.writer();
  keyWriter = loader.scalar(KEY_SCHEMA);
  valueWriter = loader.scalar(VALUE_SCHEMA);
  return true;
}
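The negotiator.tableSchema(defineMetadata(), true) call registers a fixed schema before the loader is built, which is what makes the later loader.scalar(KEY_SCHEMA) and loader.scalar(VALUE_SCHEMA) lookups possible. A sketch of what such a defineMetadata() helper could look like, assuming the key and value are exposed as two nullable VARBINARY columns (an assumption, not the actual Drill source):

// Hypothetical sketch: builds the two-column key/value schema the writers above rely on.
private TupleMetadata defineMetadata() {
  return new SchemaBuilder()
      .addNullable(KEY_SCHEMA, TypeProtos.MinorType.VARBINARY)    // backs keyWriter
      .addNullable(VALUE_SCHEMA, TypeProtos.MinorType.VARBINARY)  // backs valueWriter
      .buildSchema();
}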
Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
Class HBaseRecordReader, method setup.
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  this.operatorContext = context;
  this.outputMutator = output;
  familyVectorMap = new HashMap<>();
  try {
    hTable = connection.getTable(hbaseTableName);
    // Add vectors to the output in the order specified when creating the reader
    // (order of first appearance in the query).
    for (SchemaPath column : getColumns()) {
      if (column.equals(ROW_KEY_PATH)) {
        MaterializedField field = MaterializedField.create(column.getAsNamePart().getName(), ROW_KEY_TYPE);
        rowKeyVector = outputMutator.addField(field, VarBinaryVector.class);
      } else {
        getOrCreateFamilyVector(column.getRootSegment().getPath(), false);
      }
    }
    // Add map and child vectors for any HBase columns that are requested (in
    // order to avoid later creation of dummy NullableIntVectors for them).
    final Set<Map.Entry<byte[], NavigableSet<byte[]>>> familiesEntries = hbaseScanColumnsOnly.getFamilyMap().entrySet();
    for (Map.Entry<byte[], NavigableSet<byte[]>> familyEntry : familiesEntries) {
      final String familyName = new String(familyEntry.getKey(), StandardCharsets.UTF_8);
      final MapVector familyVector = getOrCreateFamilyVector(familyName, false);
      final Set<byte[]> children = familyEntry.getValue();
      if (null != children) {
        for (byte[] childNameBytes : children) {
          final String childName = new String(childNameBytes, StandardCharsets.UTF_8);
          getOrCreateColumnVector(familyVector, childName);
        }
      }
    }
    // Add map vectors for any HBase column families that are requested.
    for (String familyName : completeFamilies) {
      getOrCreateFamilyVector(familyName, false);
    }
    resultScanner = hTable.getScanner(hbaseScan);
  } catch (SchemaChangeException | IOException e) {
    throw new ExecutionSetupException(e);
  }
}
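Both loops above funnel through getOrCreateFamilyVector, which lazily registers one top-level MAP vector per HBase column family and caches it in familyVectorMap so that repeated references reuse the same vector. A plausible sketch of such a helper (not the actual Drill source; the exact MAP type construction and the handling of the allocation flag are assumptions):

// Hypothetical sketch, consistent with the SchemaChangeException handling in setup() above.
private MapVector getOrCreateFamilyVector(String familyName, boolean allocateOnCreate)
    throws SchemaChangeException {
  MapVector vector = familyVectorMap.get(familyName);
  if (vector == null) {
    // One top-level MAP column per HBase column family; qualifiers become its children.
    MaterializedField field = MaterializedField.create(familyName, Types.required(TypeProtos.MinorType.MAP));
    vector = outputMutator.addField(field, MapVector.class);   // may throw SchemaChangeException
    if (allocateOnCreate) {
      vector.allocateNew();
    }
    familyVectorMap.put(familyName, vector);
  }
  return vector;
}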
Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
Class MongoScanBatchCreator, method getBatch.
@Override
public ScanBatch getBatch(ExecutorFragmentContext context, MongoSubScan subScan, List<RecordBatch> children) throws ExecutionSetupException {
  Preconditions.checkArgument(children.isEmpty());
  List<RecordReader> readers = new LinkedList<>();
  for (BaseMongoSubScanSpec scanSpec : subScan.getChunkScanSpecList()) {
    try {
      List<SchemaPath> columns = subScan.getColumns();
      if (columns == null) {
        columns = GroupScan.ALL_COLUMNS;
      }
      readers.add(new MongoRecordReader(scanSpec, columns, context, subScan.getMongoStoragePlugin()));
    } catch (Exception e) {
      logger.error("MongoRecordReader creation failed for subScan: {}.", subScan);
      logger.error(e.getMessage(), e);
      throw new ExecutionSetupException(e);
    }
  }
  logger.info("Number of record readers initialized : {}", readers.size());
  return new ScanBatch(subScan, context, readers);
}
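The column defaulting inside the loop does not depend on the loop variable, so the same logic could be computed once up front; a minimal equivalent sketch (error logging omitted):

// Equivalent sketch with the column defaulting hoisted out of the per-chunk loop.
List<SchemaPath> columns = subScan.getColumns() == null ? GroupScan.ALL_COLUMNS : subScan.getColumns();
for (BaseMongoSubScanSpec scanSpec : subScan.getChunkScanSpecList()) {
  try {
    readers.add(new MongoRecordReader(scanSpec, columns, context, subScan.getMongoStoragePlugin()));
  } catch (Exception e) {
    throw new ExecutionSetupException(e);
  }
}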
Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
Class ImplCreator, method getExec.
/**
 * Create and return fragment RootExec for given FragmentRoot. RootExec has
 * one or more RecordBatches as children (which may contain child
 * RecordBatches and so on).
 *
 * @param context FragmentContext.
 * @param root FragmentRoot.
 * @return RootExec of fragment.
 * @throws ExecutionSetupException
 */
public static RootExec getExec(ExecutorFragmentContext context, FragmentRoot root) throws ExecutionSetupException {
  Preconditions.checkNotNull(root);
  Preconditions.checkNotNull(context);
  if (AssertionUtil.isAssertionsEnabled()
      || context.getOptions().getOption(ExecConstants.ENABLE_ITERATOR_VALIDATOR)
      || context.getConfig().getBoolean(ExecConstants.ENABLE_ITERATOR_VALIDATION)) {
    root = IteratorValidatorInjector.rewritePlanWithIteratorValidator(context, root);
  }
  final ImplCreator creator = new ImplCreator();
  Stopwatch watch = Stopwatch.createStarted();
  try {
    final RootExec rootExec = creator.getRootExec(root, context);
    // skip over this for SimpleRootExec (testing)
    if (rootExec instanceof BaseRootExec) {
      ((BaseRootExec) rootExec).setOperators(creator.getOperators());
    }
    logger.debug("Took {} ms to create RecordBatch tree", watch.elapsed(TimeUnit.MILLISECONDS));
    if (rootExec == null) {
      throw new ExecutionSetupException("The provided fragment did not have a root node that correctly created a RootExec value.");
    }
    return rootExec;
  } catch (Exception e) {
    AutoCloseables.close(e, creator.getOperators());
    context.getExecutorState().fail(e);
  }
  return null;
}
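Note that getExec can return null when setup fails, because the failure is reported through context.getExecutorState().fail(e) rather than rethrown, so callers have to check the result. A simplified sketch of how the returned RootExec might be driven, using only its boolean next() method; the surrounding variable names are illustrative:

// Simplified, hypothetical driving loop; error handling and cleanup are omitted.
RootExec rootExec = ImplCreator.getExec(context, fragmentRoot);
if (rootExec != null) {
  while (rootExec.next()) {
    // each call to next() pumps one batch through the operator tree
  }
}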