Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by axbaretto: class HiveAbstractReader, method setup().
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  // initializes "reader"
  final Callable<Void> readerInitializer = new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      init();
      return null;
    }
  };
  final ListenableFuture<Void> result = context.runCallableAs(proxyUgi, readerInitializer);
  try {
    result.get();
  } catch (InterruptedException e) {
    result.cancel(true);
    // Preserve evidence that the interruption occurred so that code higher up
    // on the call stack can learn of the interruption and respond to it if it wants to.
    Thread.currentThread().interrupt();
  } catch (ExecutionException e) {
    throw ExecutionSetupException.fromThrowable(e.getMessage(), e);
  }
  try {
    final OptionManager options = fragmentContext.getOptions();
    for (int i = 0; i < selectedColumnNames.size(); i++) {
      MajorType type = HiveUtilities.getMajorTypeFromHiveTypeInfo(selectedColumnTypes.get(i), options);
      MaterializedField field = MaterializedField.create(selectedColumnNames.get(i), type);
      Class<? extends ValueVector> vvClass = TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode());
      vectors.add(output.addField(field, vvClass));
    }
    for (int i = 0; i < selectedPartitionNames.size(); i++) {
      MajorType type = HiveUtilities.getMajorTypeFromHiveTypeInfo(selectedPartitionTypes.get(i), options);
      MaterializedField field = MaterializedField.create(selectedPartitionNames.get(i), type);
      Class<? extends ValueVector> vvClass = TypeHelper.getValueVectorClass(field.getType().getMinorType(), field.getDataMode());
      pVectors.add(output.addField(field, vvClass));
    }
  } catch (SchemaChangeException e) {
    throw new ExecutionSetupException(e);
  }
}
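The pattern worth noting here is the two-branch handling of Future.get(): an interruption cancels the task and restores the thread's interrupt flag without failing setup, while an ExecutionException (the initializer itself failed) is unwrapped and rethrown as a setup error. Below is a minimal, self-contained sketch of that shape using a plain ExecutorService in place of Drill's context.runCallableAs(proxyUgi, ...); the SetupException class is a hypothetical stand-in for ExecutionSetupException.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ReaderSetupSketch {
  // Hypothetical stand-in for Drill's ExecutionSetupException.
  static class SetupException extends Exception {
    SetupException(String message, Throwable cause) { super(message, cause); }
  }

  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  public void setup(Callable<Void> readerInitializer) throws SetupException {
    Future<Void> result = executor.submit(readerInitializer);
    try {
      result.get();
    } catch (InterruptedException e) {
      result.cancel(true);
      // Restore the interrupt flag so code higher up the call stack can
      // observe the interruption and react to it.
      Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
      // The initializer itself failed: unwrap its cause and rethrow as a
      // setup error, mirroring what ExecutionSetupException.fromThrowable does above.
      throw new SetupException(e.getCause().getMessage(), e.getCause());
    }
  }
}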
Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by axbaretto: class ImplCreator, method getExec().
/**
 * Creates and returns the fragment RootExec for the given FragmentRoot. A RootExec has one or more
 * RecordBatches as children, which may in turn contain child RecordBatches, and so on.
 *
 * @param context the FragmentContext
 * @param root the FragmentRoot
 * @return the RootExec of the fragment
 * @throws ExecutionSetupException
 */
public static RootExec getExec(ExecutorFragmentContext context, FragmentRoot root) throws ExecutionSetupException {
  Preconditions.checkNotNull(root);
  Preconditions.checkNotNull(context);
  if (AssertionUtil.isAssertionsEnabled()
      || context.getOptions().getOption(ExecConstants.ENABLE_ITERATOR_VALIDATOR)
      || context.getConfig().getBoolean(ExecConstants.ENABLE_ITERATOR_VALIDATION)) {
    root = IteratorValidatorInjector.rewritePlanWithIteratorValidator(context, root);
  }
  final ImplCreator creator = new ImplCreator();
  Stopwatch watch = Stopwatch.createStarted();
  try {
    final RootExec rootExec = creator.getRootExec(root, context);
    // skip over this for SimpleRootExec (testing)
    if (rootExec instanceof BaseRootExec) {
      ((BaseRootExec) rootExec).setOperators(creator.getOperators());
    }
    logger.debug("Took {} ms to create RecordBatch tree", watch.elapsed(TimeUnit.MILLISECONDS));
    if (rootExec == null) {
      throw new ExecutionSetupException("The provided fragment did not have a root node that correctly created a RootExec value.");
    }
    return rootExec;
  } catch (Exception e) {
    AutoCloseables.close(e, creator.getOperators());
    context.getExecutorState().fail(e);
  }
  return null;
}
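Two things happen in the catch block above: the partially built operator tree is closed, and the failure is reported to the executor state. Closing must not mask the original exception, so close failures are attached to it instead of replacing it. The sketch below reproduces that idiom in plain Java; it mirrors what AutoCloseables.close(Throwable, ...) is being used for here (attaching close failures as suppressed exceptions), which is assumed behavior rather than Drill's verified contract.

import java.util.Arrays;
import java.util.List;

public final class CloseAll {
  // Closes each resource; exceptions thrown while closing are added to
  // "primary" as suppressed exceptions rather than masking it.
  public static void close(Throwable primary, List<? extends AutoCloseable> resources) {
    for (AutoCloseable c : resources) {
      if (c == null) continue;
      try {
        c.close();
      } catch (Exception e) {
        primary.addSuppressed(e);
      }
    }
  }

  public static void main(String[] args) {
    AutoCloseable ok = () -> System.out.println("closed");
    AutoCloseable bad = () -> { throw new IllegalStateException("close failed"); };
    Exception primary = new Exception("original failure");
    close(primary, Arrays.asList(ok, bad));
    System.out.println("suppressed: " + primary.getSuppressed().length);  // prints 1
  }
}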
Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by axbaretto: class DescribeSchemaHandler, method getPlan().
@Override
public PhysicalPlan getPlan(SqlNode sqlNode) {
  SqlIdentifier schema = ((SqlDescribeSchema) sqlNode).getSchema();
  SchemaPlus drillSchema = SchemaUtilites.findSchema(config.getConverter().getDefaultSchema(), schema.names);
  if (drillSchema != null) {
    StoragePlugin storagePlugin;
    try {
      storagePlugin = context.getStorage().getPlugin(schema.names.get(0));
    } catch (ExecutionSetupException e) {
      throw new DrillRuntimeException("Failure while retrieving storage plugin", e);
    }
    String properties;
    try {
      final Map configMap = mapper.convertValue(storagePlugin.getConfig(), Map.class);
      if (storagePlugin instanceof FileSystemPlugin) {
        transformWorkspaces(schema.names, configMap);
      }
      properties = mapper.writeValueAsString(configMap);
    } catch (JsonProcessingException e) {
      throw new DrillRuntimeException("Error while trying to convert storage config to json string", e);
    }
    return DirectPlan.createDirectPlan(context, new DescribeSchemaResult(Joiner.on(".").join(schema.names), properties));
  }
  throw UserException.validationError()
      .message(String.format("Invalid schema name [%s]", Joiner.on(".").join(schema.names)))
      .build(logger);
}
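The properties string is produced by a standard Jackson round trip: convertValue turns the plugin's config POJO into a Map so entries can be rewritten in place (as transformWorkspaces does for file-system workspaces), and writeValueAsString serializes the adjusted Map. A self-contained sketch, where PluginConfig is a hypothetical stand-in for a real StoragePluginConfig:

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Map;

public class ConfigToJsonSketch {
  // Hypothetical config POJO; the real code passes storagePlugin.getConfig().
  static class PluginConfig {
    public String connection = "file:///";
    public boolean enabled = true;
  }

  public static void main(String[] args) throws JsonProcessingException {
    ObjectMapper mapper = new ObjectMapper();
    // POJO -> Map, so individual entries can be inspected or rewritten.
    Map<String, Object> configMap =
        mapper.convertValue(new PluginConfig(), new TypeReference<Map<String, Object>>() {});
    configMap.put("enabled", false);  // example of an in-place adjustment
    // Map -> JSON string.
    System.out.println(mapper.writeValueAsString(configMap));
  }
}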
Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by axbaretto: class StoragePluginRegistryImpl, method getFormatPlugin().
@SuppressWarnings("resource")
@Override
public FormatPlugin getFormatPlugin(StoragePluginConfig storageConfig, FormatPluginConfig formatConfig) throws ExecutionSetupException {
  StoragePlugin p = getPlugin(storageConfig);
  if (!(p instanceof FileSystemPlugin)) {
    throw new ExecutionSetupException(String.format(
        "You tried to request a format plugin for a storage plugin that wasn't of type FileSystemPlugin. The actual type of plugin was %s.",
        p.getClass().getName()));
  }
  FileSystemPlugin storage = (FileSystemPlugin) p;
  return storage.getFormatPlugin(formatConfig);
}
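This is a simple guard-then-cast: the method refuses to produce a format plugin unless the storage plugin is really a FileSystemPlugin, and the error names the actual runtime type so the failure is diagnosable. A minimal sketch of the same shape, with hypothetical stand-in types rather than Drill's plugin classes; on Java 16+ the cast can be folded into the test with a pattern variable (p instanceof FileSystemPlugin fs).

public class PluginCastSketch {
  interface StoragePlugin {}
  interface FormatPlugin {}

  static class FileSystemPlugin implements StoragePlugin {
    FormatPlugin getFormatPlugin(String formatName) {
      return new FormatPlugin() {};
    }
  }

  static class OtherPlugin implements StoragePlugin {}

  static FormatPlugin getFormatPlugin(StoragePlugin p, String formatName) {
    if (!(p instanceof FileSystemPlugin)) {
      // Name the actual runtime type so the failure is diagnosable.
      throw new IllegalArgumentException(String.format(
          "Format plugins require a FileSystemPlugin; got %s.", p.getClass().getName()));
    }
    return ((FileSystemPlugin) p).getFormatPlugin(formatName);
  }

  public static void main(String[] args) {
    System.out.println(getFormatPlugin(new FileSystemPlugin(), "csv"));  // succeeds
    try {
      getFormatPlugin(new OtherPlugin(), "csv");
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());  // names OtherPlugin
    }
  }
}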
Use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by axbaretto: class AvroRecordReader, method setup().
@Override
public void setup(final OperatorContext context, final OutputMutator output) throws ExecutionSetupException {
  writer = new VectorContainerWriter(output);
  try {
    reader = getReader(hadoop, fs);
    logger.debug("Processing file : {}, start position : {}, end position : {} ", hadoop, start, end);
    reader.sync(this.start);
  } catch (IOException e) {
    throw new ExecutionSetupException(e);
  }
}
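The setup wires the output writer, opens the Avro file, and seeks to the split's start offset; any I/O failure is wrapped in ExecutionSetupException. Below is a self-contained sketch of the open-and-sync step using Avro's local-file API instead of the Hadoop FileSystem path the original reads through; that substitution, and the SetupException stand-in, are assumptions made to keep the example compilable.

import java.io.File;
import java.io.IOException;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;

public class AvroSetupSketch {
  // Hypothetical stand-in for Drill's ExecutionSetupException.
  static class SetupException extends Exception {
    SetupException(Throwable cause) { super(cause); }
  }

  public static DataFileReader<GenericRecord> open(File file, long start) throws SetupException {
    try {
      DataFileReader<GenericRecord> reader =
          new DataFileReader<>(file, new GenericDatumReader<GenericRecord>());
      // Position at the first sync marker at or after "start", so a file
      // split begins reading on a whole-record boundary.
      reader.sync(start);
      return reader;
    } catch (IOException e) {
      // Mirror the original: wrap any I/O failure in the setup exception.
      throw new SetupException(e);
    }
  }
}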