use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class PersistentStoreRegistry method newPStoreProvider.
@SuppressWarnings("unchecked")
public PersistentStoreProvider newPStoreProvider() throws ExecutionSetupException {
  try {
    String storeProviderClassName = config.getString(ExecConstants.SYS_STORE_PROVIDER_CLASS);
    logger.info("Using the configured PStoreProvider class: '{}'.", storeProviderClassName);
    Class<? extends PersistentStoreProvider> storeProviderClass =
        (Class<? extends PersistentStoreProvider>) Class.forName(storeProviderClassName);
    Constructor<? extends PersistentStoreProvider> c = storeProviderClass.getConstructor(PersistentStoreRegistry.class);
    return new CachingPersistentStoreProvider(c.newInstance(this));
  } catch (ConfigException.Missing | ClassNotFoundException | NoSuchMethodException | SecurityException
      | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
    logger.error(e.getMessage(), e);
    throw new ExecutionSetupException("A System Table provider was either not specified or could not be found or instantiated", e);
  }
}
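The reflective lookup above only succeeds if the configured class exposes a public constructor taking the registry. A minimal sketch of that contract (the class name and body are illustrative, not part of Drill; the import path is assumed from the Drill source tree):

import org.apache.drill.exec.store.sys.PersistentStoreRegistry;

// Hypothetical provider class: only the constructor signature matters to
// newPStoreProvider(). getConstructor(PersistentStoreRegistry.class) finds it,
// and c.newInstance(this) passes the registry in. A real provider must also
// implement org.apache.drill.exec.store.sys.PersistentStoreProvider; those
// methods are elided here.
public class NoOpStoreProvider /* implements PersistentStoreProvider */ {
  public NoOpStoreProvider(PersistentStoreRegistry registry) {
    // Typically stash the registry here to reach the Drill config and
    // cluster coordinator later.
  }
}

If the class named by ExecConstants.SYS_STORE_PROVIDER_CLASS lacks this constructor, getConstructor throws NoSuchMethodException and the method fails with the ExecutionSetupException shown above.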
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class TestScanOperExecBasics method testNoReader.
/**
 * Pathological case in which a scan operator is provided no readers.
 * It will throw a user exception because the downstream operators
 * can't handle this case, so we choose to stop the show early to
 * avoid getting into a strange state.
 */
@Test
public void testNoReader() {
  // Create the scan operator
  ScanFixture scanFixture = simpleFixture();
  ScanOperatorExec scan = scanFixture.scanOp;
  try {
    scan.buildSchema();
    fail("Expected a UserException when no readers are provided");
  } catch (UserException e) {
    // Expected
    assertTrue(e.getCause() instanceof ExecutionSetupException);
  }
  // Must close the DAG (context and scan operator) even on failures
  scanFixture.close();
}
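The same expectation can be stated more compactly with assertThrows, which fails the test on its own when no exception is raised. A minimal sketch, assuming the project is on JUnit 4.13 or later (where org.junit.Assert.assertThrows exists) and reusing the same simpleFixture() helper:

import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

@Test
public void testNoReaderWithAssertThrows() {
  ScanFixture scanFixture = simpleFixture();
  try {
    // assertThrows fails automatically if buildSchema() does not throw.
    UserException e = assertThrows(UserException.class,
        () -> scanFixture.scanOp.buildSchema());
    assertTrue(e.getCause() instanceof ExecutionSetupException);
  } finally {
    // Close the DAG even when the assertions fail.
    scanFixture.close();
  }
}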
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class ParquetRecordReader method setup.
/**
 * Prepare the Parquet reader. First determine the set of columns to read (the schema
 * for this read). Then create a state object to track the read across calls to
 * the reader's <tt>next()</tt> method. Finally, create one of three readers to
 * read batches depending on whether this scan is for only fixed-width fields,
 * contains at least one variable-width field, or is a "mock" scan consisting
 * only of null fields (fields in the SELECT clause but not in the Parquet file).
 */
@Override
public void setup(OperatorContext operatorContext, OutputMutator output) throws ExecutionSetupException {
  this.operatorContext = operatorContext;
  ParquetSchema schema = new ParquetSchema(fragmentContext.getOptions(), rowGroupIndex, footer,
      isStarQuery() ? null : getColumns());
  batchSizerMgr = new RecordBatchSizerManager(fragmentContext.getOptions(), schema, numRecordsToRead,
      new RecordBatchStatsContext(fragmentContext, operatorContext));
  logger.debug("Reading {} records from row group({}) in file {}.", numRecordsToRead, rowGroupIndex,
      hadoopPath.toUri().getPath());
  try {
    schema.buildSchema();
    batchSizerMgr.setup();
    readState = new ReadState(schema, batchSizerMgr, parquetReaderStats, numRecordsToRead, useAsyncColReader);
    readState.buildReader(this, output);
  } catch (Exception e) {
    throw handleAndRaise("Failure in setting up reader", e);
  }
  ColumnReader<?> firstColumnStatus = readState.getFirstColumnReader();
  if (firstColumnStatus == null) {
    batchReader = new BatchReader.MockBatchReader(readState);
  } else if (schema.allFieldsFixedLength()) {
    batchReader = new BatchReader.FixedWidthReader(readState);
  } else {
    batchReader = new BatchReader.VariableWidthReader(readState);
  }
}
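The reader choice at the end reduces to the two schema properties named in the javadoc. A condensed restatement of that dispatch (the method name is illustrative; the BatchReader classes are those used above):

// Condensed view of the three-way dispatch in setup(): a "mock" scan when no
// column reader materialized from the file, a fixed-width fast path when every
// field has a fixed length, and the general variable-width path otherwise.
private BatchReader selectBatchReader(ReadState readState, ParquetSchema schema) {
  if (readState.getFirstColumnReader() == null) {
    return new BatchReader.MockBatchReader(readState);      // only null fields
  } else if (schema.allFieldsFixedLength()) {
    return new BatchReader.FixedWidthReader(readState);     // all fixed-width
  } else {
    return new BatchReader.VariableWidthReader(readState);  // mixed widths
  }
}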
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class MockRecordReader method setup.
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  try {
    // Nothing to materialize if the mock config declares no columns. Check
    // before estimating the row size, which would otherwise dereference null.
    if (config.getTypes() == null) {
      return;
    }
    final int estimateRowSize = getEstimatedRecordSize(config.getTypes());
    valueVectors = new ValueVector[config.getTypes().length];
    // Size each batch to a fixed 250,000-byte budget.
    batchRecordCount = 250000 / estimateRowSize;
    for (int i = 0; i < config.getTypes().length; i++) {
      final MajorType type = config.getTypes()[i].getMajorType();
      final MaterializedField field = getVector(config.getTypes()[i].getName(), type);
      final Class<? extends ValueVector> vvClass = TypeHelper.getValueVectorClass(field.getType().getMinorType(), field.getDataMode());
      valueVectors[i] = output.addField(field, vvClass);
    }
  } catch (SchemaChangeException e) {
    throw new ExecutionSetupException("Failure while setting up fields", e);
  }
}
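The fixed 250,000-byte budget means the batch row count scales inversely with the estimated row size. A worked example with illustrative numbers (not taken from any Drill config):

// Illustrative numbers only: a mock schema whose columns sum to an estimated
// 100 bytes per record yields 250000 / 100 = 2500 records per batch, while a
// 1000-byte record would yield only 250 records per batch.
int estimateRowSize = 100;                       // bytes per record (assumed)
int batchRecordCount = 250000 / estimateRowSize; // -> 2500 records per batch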
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class ControlMessageHandler method startNewFragment.
/**
 * Start a new fragment on this node. These fragments can be leaf or intermediate
 * fragments and are scheduled by a remote or local Foreman node.
 * @param fragment the plan fragment to start
 * @param drillbitContext the context of the Drillbit on which the fragment will run
 * @throws UserRpcException if the fragment context cannot be created or the fragment cannot be started
 */
public void startNewFragment(final PlanFragment fragment, final DrillbitContext drillbitContext) throws UserRpcException {
  logger.debug("Received remote fragment start instruction: {}", fragment);
  try {
    final FragmentContextImpl fragmentContext = new FragmentContextImpl(drillbitContext, fragment,
        drillbitContext.getFunctionImplementationRegistry());
    final FragmentStatusReporter statusReporter = new FragmentStatusReporter(fragmentContext);
    final FragmentExecutor fragmentExecutor = new FragmentExecutor(fragmentContext, fragment, statusReporter);
    // We either need to start the fragment if it is a leaf fragment, or set up
    // a fragment manager if it is non-leaf.
    if (fragment.getLeafFragment()) {
      bee.addFragmentRunner(fragmentExecutor);
    } else {
      // Intermediate fragment: register a manager to await incoming data.
      final NonRootFragmentManager manager = new NonRootFragmentManager(fragment, fragmentExecutor, statusReporter);
      drillbitContext.getWorkBus().addFragmentManager(manager);
    }
  } catch (final ExecutionSetupException ex) {
    throw new UserRpcException(drillbitContext.getEndpoint(), "Failed to create fragment context", ex);
  } catch (final Exception e) {
    throw new UserRpcException(drillbitContext.getEndpoint(), "Failure while trying to start remote fragment", e);
  } catch (final OutOfMemoryError t) {
    if (t.getMessage().startsWith("Direct buffer")) {
      throw new UserRpcException(drillbitContext.getEndpoint(), "Out of direct memory while trying to start remote fragment", t);
    } else {
      throw t;
    }
  }
}
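The final catch narrows the OutOfMemoryError by message prefix: direct-memory exhaustion is reported back over RPC, while any other OOM means the JVM itself is unhealthy and must propagate. A null-safe restatement of that test (the helper name is illustrative; note that OutOfMemoryError.getMessage() can be null for some instances):

// "Direct buffer memory" OOM indicates an off-heap allocation failed, which is
// safe to report to the remote caller. Anything else is rethrown untouched.
private static boolean isDirectMemoryOom(OutOfMemoryError e) {
  String message = e.getMessage();
  return message != null && message.startsWith("Direct buffer");
}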