use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
the class MongoGroupScan method getScanStats.
@Override
public ScanStats getScanStats() {
  try {
    MongoClient client = storagePlugin.getClient();
    MongoDatabase db = client.getDatabase(scanSpec.getDbName());
    MongoCollection<Document> collection = db.getCollection(scanSpec.getCollectionName());
    long numDocs = collection.count();
    float approxDiskCost = 0;
    if (numDocs != 0) {
      // toJson should use the client's codec; otherwise toJson could fail on
      // some types not known to DocumentCodec, e.g. DBRef.
      final DocumentCodec codec =
          new DocumentCodec(client.getMongoClientOptions().getCodecRegistry(), new BsonTypeClassMap());
      String json = collection.find().first().toJson(codec);
      approxDiskCost = json.getBytes().length * numDocs;
    }
    return new ScanStats(GroupScanProperty.EXACT_ROW_COUNT, numDocs, 1, approxDiskCost);
  } catch (Exception e) {
    throw new DrillRuntimeException(e.getMessage(), e);
  }
}
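The heuristic above estimates on-disk size as the JSON byte length of the first document times the document count. Below is a minimal standalone sketch of that same heuristic, assuming the legacy MongoDB Java driver 3.x that this snippet targets; the class and helper names are hypothetical, not part of Drill:

import org.bson.Document;
import org.bson.codecs.BsonTypeClassMap;
import org.bson.codecs.DocumentCodec;
import com.mongodb.MongoClient;
import com.mongodb.client.MongoCollection;

public class CollectionSizeEstimate {
  // Hypothetical helper replicating the heuristic above: JSON byte size of
  // the first document multiplied by the total document count.
  static long estimateCollectionBytes(MongoClient client, String dbName, String collName) {
    MongoCollection<Document> collection = client.getDatabase(dbName).getCollection(collName);
    long numDocs = collection.count(); // deprecated in driver 3.8+, where countDocuments() replaces it
    if (numDocs == 0) {
      return 0;
    }
    // Use the client's codec registry so types such as DBRef serialize cleanly.
    DocumentCodec codec = new DocumentCodec(
        client.getMongoClientOptions().getCodecRegistry(), new BsonTypeClassMap());
    Document first = collection.find().first();
    return (long) first.toJson(codec).getBytes().length * numDocs;
  }
}

Sampling a single document keeps the planner-time call cheap, at the cost of accuracy for collections whose document sizes vary widely.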
use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
the class AsyncPageReader method nextInternal.
@Override
protected void nextInternal() throws IOException {
  ReadStatus readStatus = null;
  String name = parentColumnReader.columnChunkMetaData.toString();
  try {
    Stopwatch timer = Stopwatch.createStarted();
    parentColumnReader.parentReader.getOperatorContext().getStats().startWait();
    // get the result of execution
    asyncPageRead.poll().get();
    synchronized (pageQueue) {
      boolean pageQueueFull = pageQueue.remainingCapacity() == 0;
      // get the data if no exception has been thrown
      readStatus = pageQueue.take();
      if (readStatus.pageData == null || readStatus == ReadStatus.EMPTY) {
        throw new DrillRuntimeException("Unexpected end of data");
      }
      // if the queue was full before we took a page out, then there would
      // have been no new read tasks scheduled. In that case, schedule a new read.
      if (pageQueueFull) {
        asyncPageRead.offer(threadPool.submit(new AsyncPageReaderTask(debugName, pageQueue)));
      }
    }
    long timeBlocked = timer.elapsed(TimeUnit.NANOSECONDS);
    parentColumnReader.parentReader.getOperatorContext().getStats().stopWait();
    stats.timeDiskScanWait.addAndGet(timeBlocked);
    stats.timeDiskScan.addAndGet(readStatus.getDiskScanTime());
    if (readStatus.isDictionaryPage) {
      stats.numDictPageLoads.incrementAndGet();
      stats.timeDictPageLoads.addAndGet(timeBlocked + readStatus.getDiskScanTime());
    } else {
      stats.numDataPageLoads.incrementAndGet();
      stats.timeDataPageLoads.addAndGet(timeBlocked + readStatus.getDiskScanTime());
    }
    pageHeader = readStatus.getPageHeader();
    do {
      if (pageHeader.getType() == PageType.DICTIONARY_PAGE) {
        readDictionaryPageData(readStatus, parentColumnReader);
        // get the result of execution
        asyncPageRead.poll().get();
        synchronized (pageQueue) {
          boolean pageQueueFull = pageQueue.remainingCapacity() == 0;
          // get the data if no exception has been thrown
          readStatus = pageQueue.take();
          if (readStatus.pageData == null || readStatus == ReadStatus.EMPTY) {
            break;
          }
          // if the queue was full before we took a page out, then there would
          // have been no new read tasks scheduled. In that case, schedule a new read.
          if (pageQueueFull) {
            asyncPageRead.offer(threadPool.submit(new AsyncPageReaderTask(debugName, pageQueue)));
          }
        }
        assert (readStatus.pageData != null);
        pageHeader = readStatus.getPageHeader();
      }
    } while (pageHeader.getType() == PageType.DICTIONARY_PAGE);
    pageHeader = readStatus.getPageHeader();
    pageData = getDecompressedPageData(readStatus);
    assert (pageData != null);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
  } catch (Exception e) {
    handleAndThrowException(e, "Error reading page data");
  }
}
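The queue-refill logic here rests on an invariant: the producer task stops scheduling follow-up reads once pageQueue fills, so the consumer must submit a new AsyncPageReaderTask whenever it frees a slot in a previously full queue. A minimal sketch of that pattern follows, with hypothetical names (PrefetchSketch, readOnePage) standing in for Drill's reader and task classes, and a self-perpetuating producer chain that a full queue pauses:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;

public class PrefetchSketch {
  private final ExecutorService threadPool = Executors.newFixedThreadPool(2);
  private final BlockingQueue<byte[]> pageQueue = new LinkedBlockingQueue<>(4);
  private final ConcurrentLinkedQueue<Future<Void>> inFlight = new ConcurrentLinkedQueue<>();

  void start() {
    // Seed the pipeline so nextPage() always has a future to wait on.
    inFlight.offer(threadPool.submit(this::readOnePage));
  }

  byte[] nextPage() throws Exception {
    inFlight.poll().get(); // wait for a read; rethrows any producer exception
    byte[] page;
    synchronized (pageQueue) {
      boolean wasFull = pageQueue.remainingCapacity() == 0;
      page = pageQueue.take(); // consume one buffered page
      if (wasFull) {
        // The producer chain paused while the queue was full; we just freed
        // a slot, so restart it ourselves.
        inFlight.offer(threadPool.submit(this::readOnePage));
      }
    }
    return page;
  }

  private Void readOnePage() throws InterruptedException {
    pageQueue.put(new byte[4096]); // stand-in for a real disk read
    synchronized (pageQueue) {
      // Keep reading ahead until the queue fills; a full queue pauses the
      // chain, and nextPage() resumes it when it frees a slot.
      if (pageQueue.remainingCapacity() > 0) {
        inFlight.offer(threadPool.submit(this::readOnePage));
      }
    }
    return null;
  }
}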
use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
the class ZookeeperPersistentStore method putIfAbsent.
@Override
public boolean putIfAbsent(final String key, final V value) {
  try {
    final byte[] bytes = config.getSerializer().serialize(value);
    final byte[] data = client.putIfAbsent(key, bytes);
    return data == null;
  } catch (final IOException e) {
    throw new DrillRuntimeException(String.format("unable to serialize value of type %s", value.getClass()), e);
  }
}
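Serialization happens before the ZooKeeper call, so the IOException caught here can only come from the serializer, and the store rethrows it unchecked as a DrillRuntimeException. A sketch of the same shape, using Jackson and an in-memory map as hypothetical stand-ins for Drill's serializer and ZooKeeper client:

import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
import com.fasterxml.jackson.databind.ObjectMapper;

public class KeyValueStoreSketch<V> {
  private final ObjectMapper mapper = new ObjectMapper();
  // In-memory stand-in for the ZooKeeper-backed client in the snippet above.
  private final ConcurrentHashMap<String, byte[]> store = new ConcurrentHashMap<>();

  public boolean putIfAbsent(final String key, final V value) {
    final byte[] bytes;
    try {
      bytes = mapper.writeValueAsBytes(value);
    } catch (IOException e) {
      // Mirror the snippet: a checked serialization failure becomes unchecked.
      throw new RuntimeException(
          String.format("unable to serialize value of type %s", value.getClass()), e);
    }
    // putIfAbsent returns null when the key was absent and the entry was created.
    return store.putIfAbsent(key, bytes) == null;
  }
}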
use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
the class HiveStoragePlugin method registerSchemas.
// Forced to synchronize this method to allow error recovery
// in the multi-threaded case. Can remove synchronized only
// by restructuring connections and cache to allow better
// recovery from failed secure connections.
@Override
public synchronized void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
  try {
    schemaFactory.registerSchemas(schemaConfig, parent);
    return;
  // Hack. We may need to retry the connection. But, we can't because
  // the retry logic is implemented in the very connection we need to
  // discard and rebuild. To work around, we discard the entire schema
  // factory, and all its invalid connections. Very crude, but the
  // easiest short-term solution until we refactor the code to do the
  // job properly. See DRILL-5510.
  } catch (Throwable e) {
    // Unwrap exception
    Throwable ex = e;
    for (;;) {
      // Case for failing on an invalid cached connection
      if (ex instanceof MetaException ||
          // Case for a timed-out impersonated connection, and an invalid
          // non-secure connection used to get security tokens.
          ex instanceof TTransportException) {
        break;
      }
      if (ex.getCause() == null || ex.getCause() == ex) {
        logger.error("Hive metastore register schemas failed", e);
        throw new DrillRuntimeException("Unknown Hive error", e);
      }
      ex = ex.getCause();
    }
  }
  try {
    schemaFactory.close();
  } catch (Throwable t) {
    // Ignore, we're in a bad state.
    logger.warn("Schema factory forced close failed, error ignored", t);
  }
  try {
    schemaFactory = new HiveSchemaFactory(this, name, hiveConf);
  } catch (ExecutionSetupException e) {
    throw new DrillRuntimeException(e);
  }
  // Try the schemas again. If this fails, just give up.
  schemaFactory.registerSchemas(schemaConfig, parent);
  logger.debug("Successfully recovered from a Hive metastore connection failure.");
}
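The unwrap loop is the reusable part: it walks the cause chain for the two exception types that signal a stale metastore connection, guarding against both a null cause and a self-referencing cause. Here is the same loop extracted into a standalone predicate; isRetryableHiveFailure is a hypothetical name, not Drill API:

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.thrift.transport.TTransportException;

public class HiveFailureClassifier {
  // Hypothetical helper: walk the cause chain looking for the exception
  // types that indicate an invalid cached connection, so the caller can
  // rebuild the schema factory and retry.
  static boolean isRetryableHiveFailure(Throwable e) {
    for (Throwable ex = e; ; ex = ex.getCause()) {
      if (ex instanceof MetaException || ex instanceof TTransportException) {
        return true; // stale or failed connection: safe to rebuild and retry
      }
      if (ex.getCause() == null || ex.getCause() == ex) {
        return false; // exhausted the chain: treat as an unknown Hive error
      }
    }
  }
}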
use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
the class DrillHiveMetaStoreClient method createClientWithAuthz.
/**
 * Create a DrillHiveMetaStoreClient for cases where:
 *   1. Drill impersonation is enabled and
 *   2. either storage (in remote HiveMetaStore server) or SQL standard based authorization
 *      (in Hive storage plugin) is enabled
 * @param processUserMetaStoreClient MetaStoreClient of process user. Useful for generating the delegation
 *                                   tokens when SASL (KERBEROS or custom SASL implementations) is enabled.
 * @param hiveConf Conf including authorization configuration
 * @param userName User who is trying to access the Hive metadata
 * @return DrillHiveMetaStoreClient that accesses the metastore as the given user
 * @throws MetaException
 */
public static DrillHiveMetaStoreClient createClientWithAuthz(final DrillHiveMetaStoreClient processUserMetaStoreClient,
    final HiveConf hiveConf, final String userName) throws MetaException {
  try {
    boolean delegationTokenGenerated = false;
    // UGI credentials to use for RPC communication with Hive MetaStore server
    final UserGroupInformation ugiForRpc;
    if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
      // If user impersonation is disabled in the Hive storage plugin (not Drill impersonation), use the
      // process user UGI credentials.
      ugiForRpc = ImpersonationUtil.getProcessUserUGI();
    } else {
      ugiForRpc = ImpersonationUtil.createProxyUgi(userName);
      if (hiveConf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL)) {
        // When SASL is enabled for the proxy user, create a delegation token. Currently HiveMetaStoreClient
        // can create a client transport for proxy users only when the authentication mechanism is DIGEST
        // (through the use of delegation tokens).
        String delegationToken = processUserMetaStoreClient.getDelegationToken(userName, userName);
        try {
          Utils.setTokenStr(ugiForRpc, delegationToken, HiveClientWithAuthzWithCaching.DRILL2HMS_TOKEN);
        } catch (IOException e) {
          throw new DrillRuntimeException("Couldn't setup delegation token in the UGI for Hive MetaStoreClient", e);
        }
        delegationTokenGenerated = true;
      }
    }
    final HiveConf hiveConfForClient;
    if (delegationTokenGenerated) {
      hiveConfForClient = new HiveConf(hiveConf);
      hiveConfForClient.set("hive.metastore.token.signature", HiveClientWithAuthzWithCaching.DRILL2HMS_TOKEN);
    } else {
      hiveConfForClient = hiveConf;
    }
    return ugiForRpc.doAs(new PrivilegedExceptionAction<DrillHiveMetaStoreClient>() {
      @Override
      public DrillHiveMetaStoreClient run() throws Exception {
        return new HiveClientWithAuthzWithCaching(hiveConfForClient, ugiForRpc, userName);
      }
    });
  } catch (final Exception e) {
    throw new DrillRuntimeException("Failure setting up HiveMetaStore client.", e);
  }
}
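The final doAs call is the standard Hadoop pattern for executing code under another user's credentials. A minimal sketch of just that pattern, assuming a Hadoop login is already established; ProxyUgiSketch and effectiveUser are hypothetical names, and a real caller would construct the metastore client inside run():

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUgiSketch {
  // Hypothetical helper: run an action as a proxy of the query user and
  // report the effective user name observed inside doAs().
  static String effectiveUser(String userName) throws Exception {
    UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(
        userName, UserGroupInformation.getLoginUser());
    return proxyUgi.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws Exception {
        // Anything here (e.g. constructing a metastore client) executes
        // with the proxy user's credentials.
        return UserGroupInformation.getCurrentUser().getUserName();
      }
    });
  }
}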