use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class Foreman method runFragment.
/**
 * Helper method to run a query based on a list of {@link PlanFragment}s that were
 * planned at some earlier point in time.
 * @param fragmentsList fragment list
 * @throws ExecutionSetupException if the root fragment cannot be parsed
 */
private void runFragment(List<PlanFragment> fragmentsList) throws ExecutionSetupException {
  // Need to set the QueryId and MinorFragment for incoming fragments.
  PlanFragment rootFragment = null;
  boolean isFirst = true;
  final List<PlanFragment> planFragments = Lists.newArrayList();
  for (PlanFragment myFragment : fragmentsList) {
    final FragmentHandle handle = myFragment.getHandle();
    // Although FragmentHandle has a new field, parentQueryId, it cannot be used until
    // every piece of code that creates a handle sets it; otherwise comparisons on the
    // handle fail and cause fragment runtime failures.
    final FragmentHandle newFragmentHandle = FragmentHandle.newBuilder()
        .setMajorFragmentId(handle.getMajorFragmentId())
        .setMinorFragmentId(handle.getMinorFragmentId())
        .setQueryId(queryId)
        .build();
    final PlanFragment newFragment = PlanFragment.newBuilder(myFragment).setHandle(newFragmentHandle).build();
    if (isFirst) {
      rootFragment = newFragment;
      isFirst = false;
    } else {
      planFragments.add(newFragment);
    }
  }
  assert rootFragment != null;
  final FragmentRoot rootOperator;
  try {
    rootOperator = drillbitContext.getPlanReader().readFragmentRoot(rootFragment.getFragmentJson());
  } catch (IOException e) {
    throw new ExecutionSetupException(String.format("Unable to parse FragmentRoot from fragment: %s", rootFragment.getFragmentJson()));
  }
  queryRM.setCost(rootOperator.getCost().getOutputRowCount());
  fragmentsRunner.setFragmentsInfo(planFragments, rootFragment, rootOperator);
  startQueryProcessing();
}
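For context, runFragment declares ExecutionSetupException as a checked exception, so any caller must handle or propagate it. A minimal, hypothetical call-site sketch (not taken from the Drill source; the handling shown is illustrative only):

try {
  runFragment(fragmentsList);
} catch (ExecutionSetupException e) {
  // Illustrative handling only: log and fail the query through whatever
  // mechanism the caller uses to report setup failures.
  logger.error("Unable to set up fragments for execution", e);
}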
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class MaprDBJsonRecordReader method setup.
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  this.vectorWriter = new VectorContainerWriter(output, unionEnabled);
  this.vectorWriterMutator = output;
  this.operatorContext = context;
  try {
    table.setOption(TableOption.EXCLUDEID, !includeId);
    documentStream = table.find(condition, scannedFields);
    documentIterator = documentStream.iterator();
    setupWriter();
  } catch (DBException ex) {
    throw new ExecutionSetupException(ex);
  }
}
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class RestrictedJsonRecordReader method setup.
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  RestrictedMapRDBSubScanSpec rss = ((RestrictedMapRDBSubScanSpec) this.subScanSpec);
  RowKeyJoin rjBatch = rss.getJoinForSubScan();
  if (rjBatch == null) {
    throw new ExecutionSetupException("RowKeyJoin Batch is not setup for Restricted MapRDB Subscan");
  }
  AbstractRecordBatch.BatchState state = rjBatch.getBatchState();
  if (state == AbstractRecordBatch.BatchState.BUILD_SCHEMA || state == AbstractRecordBatch.BatchState.FIRST) {
    super.setup(context, output);
  }
  return;
}
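The two reader snippets above exercise the two most common ExecutionSetupException constructor forms: wrapping an underlying cause, and failing a precondition with a message. A minimal, self-contained sketch of both (the class and field names below are invented for illustration, not from Drill):

import org.apache.drill.common.exceptions.ExecutionSetupException;

class ReaderSetupSketch {
  private Object requiredDependency;   // hypothetical collaborator

  void setup() throws ExecutionSetupException {
    // Form 1: fail fast with a message when a precondition is not met.
    if (requiredDependency == null) {
      throw new ExecutionSetupException("Required dependency is not set up for this reader");
    }
    try {
      openUnderlyingResource();
    } catch (Exception ex) {
      // Form 2: wrap the underlying failure so callers see a single checked type.
      throw new ExecutionSetupException(ex);
    }
  }

  private void openUnderlyingResource() throws Exception {
    // placeholder for storage-specific setup
  }
}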
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class HiveStoragePlugin method getPhysicalScan.
@Override
public HiveScan getPhysicalScan(String userName, JSONOptions selection, List<SchemaPath> columns, SessionOptionManager options) throws IOException {
  HiveReadEntry hiveReadEntry = selection.getListWith(new ObjectMapper(), new TypeReference<HiveReadEntry>() {
  });
  try {
    Map<String, String> confProperties = new HashMap<>();
    if (options != null) {
      String value = StringEscapeUtils.unescapeJava(options.getString(ExecConstants.HIVE_CONF_PROPERTIES));
      logger.trace("[{}] is set to {}.", ExecConstants.HIVE_CONF_PROPERTIES, value);
      try {
        Properties properties = new Properties();
        properties.load(new StringReader(value));
        confProperties = properties.stringPropertyNames().stream()
            .collect(Collectors.toMap(Function.identity(), properties::getProperty, (o, n) -> n));
      } catch (IOException e) {
        logger.warn("Unable to parse Hive conf properties {}, ignoring them.", value);
      }
    }
    return new HiveScan(userName, hiveReadEntry, this, columns, null, confProperties);
  } catch (ExecutionSetupException e) {
    throw new IOException(e);
  }
}
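The conf-properties parsing above relies only on java.util.Properties and the streams API, so it can be exercised in isolation. A minimal, runnable sketch (the sample property string is invented for illustration; real values come from the ExecConstants.HIVE_CONF_PROPERTIES session option):

import java.io.IOException;
import java.io.StringReader;
import java.util.Map;
import java.util.Properties;
import java.util.function.Function;
import java.util.stream.Collectors;

public class HiveConfPropertiesDemo {
  public static void main(String[] args) throws IOException {
    // Invented sample value: newline-separated key=value pairs.
    String value = "hive.mapred.supports.subdirectories=true\nmapred.input.dir.recursive=true";
    Properties properties = new Properties();
    properties.load(new StringReader(value));
    Map<String, String> confProperties = properties.stringPropertyNames().stream()
        .collect(Collectors.toMap(Function.identity(), properties::getProperty, (o, n) -> n));
    // Prints both parsed entries (iteration order may vary).
    System.out.println(confProperties);
  }
}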
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class HiveStoragePlugin method registerSchemas.
// Forced to synchronize this method to allow error recovery
// in the multi-threaded case. Can remove synchronized only
// by restructuring connections and cache to allow better
// recovery from failed secure connections.
@Override
public synchronized void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
  try {
    schemaFactory.registerSchemas(schemaConfig, parent);
    return;
    // Hack. We may need to retry the connection. But, we can't because
    // the retry logic is implemented in the very connection we need to
    // discard and rebuild. To work around, we discard the entire schema
    // factory, and all its invalid connections. Very crude, but the
    // easiest short-term solution until we refactor the code to do the
    // job properly. See DRILL-5510.
  } catch (Throwable e) {
    // Unwrap exception
    Throwable ex = e;
    while (true) {
      // Case for failing on an invalid cached connection
      if (ex instanceof MetaException ||
          // Case for a timed-out impersonated connection or an invalid
          // non-secure connection used to get security tokens.
          ex instanceof TTransportException) {
        break;
      }
      if (ex.getCause() == null || ex.getCause() == ex) {
        logger.error("Hive metastore register schemas failed", e);
        throw new DrillRuntimeException("Unknown Hive error", e);
      }
      ex = ex.getCause();
    }
  }
  try {
    schemaFactory.close();
  } catch (Throwable t) {
    // Ignore, we're in a bad state.
    logger.warn("Schema factory forced close failed, error ignored", t);
  }
  try {
    schemaFactory = new HiveSchemaFactory(this, getName(), hiveConf);
  } catch (ExecutionSetupException e) {
    throw new DrillRuntimeException(e);
  }
  // Try the schemas again. If this fails, just give up.
  schemaFactory.registerSchemas(schemaConfig, parent);
  logger.debug("Successfully recovered from a Hive metastore connection failure.");
}
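The unwrap loop in the catch block above is a general pattern: walk the cause chain until a recoverable exception type is found or the chain ends. A standalone sketch of that pattern, with the target type passed as a parameter instead of the hard-coded MetaException/TTransportException checks (helper name is invented for illustration):

static boolean causedBy(Throwable e, Class<? extends Throwable> target) {
  Throwable ex = e;
  while (ex != null) {
    if (target.isInstance(ex)) {
      return true;
    }
    if (ex.getCause() == ex) {
      break;   // guard against self-referential cause chains
    }
    ex = ex.getCause();
  }
  return false;
}

With such a helper, the recovery test in registerSchemas would reduce to causedBy(e, MetaException.class) || causedBy(e, TTransportException.class).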