Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class BaseHiveExploreService, method getTables.
@Override
public QueryHandle getTables(String catalog, String schemaPattern, String tableNamePattern,
                             List<String> tableTypes) throws ExploreException, SQLException {
  startAndWait();
  try {
    SessionHandle sessionHandle = null;
    OperationHandle operationHandle = null;
    Map<String, String> sessionConf = startSession();
    String database = getHiveDatabase(schemaPattern);
    try {
      sessionHandle = openHiveSession(sessionConf);
      operationHandle = cliService.getTables(sessionHandle, catalog, database, tableNamePattern, tableTypes);
      QueryHandle handle = saveReadOnlyOperation(operationHandle, sessionHandle, sessionConf, "", database);
      LOG.trace("Retrieving tables: catalog {}, database {}, tableNamePattern {}, tableTypes {}",
                catalog, database, tableNamePattern, tableTypes);
      return handle;
    } catch (Throwable e) {
      // Clean up the session and operation if the handle could not be registered.
      closeInternal(getQueryHandle(sessionConf),
                    new ReadOnlyOperationInfo(sessionHandle, operationHandle, sessionConf, "", database));
      throw e;
    }
  } catch (HiveSQLException e) {
    throw getSqlException(e);
  } catch (Throwable e) {
    throw new ExploreException(e);
  }
}
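The pattern here is worth noting: the session and operation are acquired first, and if registering the read-only operation fails, the partially created resources are closed before the exception is rethrown. Below is a minimal standalone sketch of that acquire-or-clean-up shape in plain Java; the class and method names are hypothetical, not CDAP API.

import java.io.Closeable;
import java.io.IOException;

// Sketch of the acquire-or-clean-up pattern used by getTables(): if the
// operation cannot be registered after the session is opened, release the
// session before rethrowing so nothing leaks. All names are hypothetical.
public final class AcquireOrCleanUp {

  static String registerOperation(Closeable session) throws IOException {
    // Stand-in for cliService.getTables(...) followed by saveReadOnlyOperation(...).
    return "handle-1";
  }

  public static String run(Closeable session) throws IOException {
    try {
      return registerOperation(session);
    } catch (Throwable t) {
      session.close(); // mirrors closeInternal(...) in the snippet above
      throw t;        // precise rethrow keeps the original exception type
    }
  }
}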
Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class BaseHiveExploreService, method previewResults.
@Override
public List<QueryResult> previewResults(QueryHandle handle)
    throws ExploreException, HandleNotFoundException, SQLException {
  startAndWait();
  if (inactiveHandleCache.getIfPresent(handle) != null) {
    throw new HandleNotFoundException("Query is inactive.", true);
  }
  OperationInfo operationInfo = getActiveOperationInfo(handle);
  Lock previewLock = operationInfo.getPreviewLock();
  previewLock.lock();
  try {
    File previewFile = operationInfo.getPreviewFile();
    if (previewFile != null) {
      // A preview file already exists; serve the cached results from it.
      try {
        Reader reader = com.google.common.io.Files.newReader(previewFile, Charsets.UTF_8);
        try {
          return GSON.fromJson(reader, new TypeToken<List<QueryResult>>() { }.getType());
        } finally {
          Closeables.closeQuietly(reader);
        }
      } catch (FileNotFoundException e) {
        LOG.error("Could not retrieve preview result file {}", previewFile, e);
        throw new ExploreException(e);
      }
    }
    try {
      // Create preview results for the query and cache them in a file.
      previewFile = new File(previewsDir, handle.getHandle());
      try (FileWriter fileWriter = new FileWriter(previewFile)) {
        List<QueryResult> results = fetchNextResults(handle, PREVIEW_COUNT);
        GSON.toJson(results, fileWriter);
        operationInfo.setPreviewFile(previewFile);
        return results;
      }
    } catch (IOException e) {
      LOG.error("Could not write preview results into file", e);
      throw new ExploreException(e);
    }
  } finally {
    previewLock.unlock();
  }
}
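previewResults is a lock-guarded, file-backed cache: the first call fetches up to PREVIEW_COUNT rows and persists them as JSON, and later calls for the same handle replay the file. A self-contained sketch of that caching pattern, using Gson as the snippet does but with otherwise hypothetical names, might look like this:

import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Standalone sketch (not CDAP API) of the previewResults() pattern: compute a
// preview once, persist it as JSON, and serve the cached file on later calls,
// all under a lock so concurrent callers cannot race.
public final class PreviewCache {
  private static final Gson GSON = new Gson();

  private final Lock lock = new ReentrantLock();
  private final File previewFile;

  PreviewCache(File previewFile) {
    this.previewFile = previewFile;
  }

  List<String> preview() throws IOException {
    lock.lock();
    try {
      if (previewFile.exists()) {
        // Cached preview: replay the JSON file instead of fetching again.
        try (Reader reader = Files.newBufferedReader(previewFile.toPath(), StandardCharsets.UTF_8)) {
          return GSON.fromJson(reader, new TypeToken<List<String>>() { }.getType());
        }
      }
      List<String> results = Arrays.asList("row1", "row2"); // stand-in for fetchNextResults(...)
      try (Writer writer = Files.newBufferedWriter(previewFile.toPath(), StandardCharsets.UTF_8)) {
        GSON.toJson(results, writer);
      }
      return results;
    } finally {
      lock.unlock();
    }
  }
}

Holding the lock across both the read and the write path matters here: fetching the next batch of results consumes them from the query cursor, so two concurrent first calls without the lock could each fetch and return different rows.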
Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class BaseHiveExploreService, method getTableInfo.
@Override
public TableInfo getTableInfo(String namespace, @Nullable String databaseName, String table)
    throws ExploreException, TableNotFoundException {
  startAndWait();
  // TODO check if the database user is allowed to access if security is enabled
  try {
    String db = databaseName != null ? databaseName : getHiveDatabase(namespace);
    Table tableInfo = getMetaStoreClient().getTable(db, table);
    List<FieldSchema> tableFields = tableInfo.getSd().getCols();
    // For some tables, the columns are not present in the storage descriptor.
    // If columns are missing, do a separate call for schema.
    if (tableFields == null || tableFields.isEmpty()) {
      // don't call .getSchema()... class not found exception if we do in the thrift code...
      tableFields = getMetaStoreClient().getFields(db, table);
    }
    ImmutableList.Builder<TableInfo.ColumnInfo> schemaBuilder = ImmutableList.builder();
    Set<String> fieldNames = Sets.newHashSet();
    for (FieldSchema column : tableFields) {
      schemaBuilder.add(new TableInfo.ColumnInfo(column.getName(), column.getType(), column.getComment()));
      fieldNames.add(column.getName());
    }
    ImmutableList.Builder<TableInfo.ColumnInfo> partitionKeysBuilder = ImmutableList.builder();
    for (FieldSchema column : tableInfo.getPartitionKeys()) {
      TableInfo.ColumnInfo columnInfo =
        new TableInfo.ColumnInfo(column.getName(), column.getType(), column.getComment());
      partitionKeysBuilder.add(columnInfo);
      // Add partition keys to the schema if they are not already present,
      // since they show up when you do a 'describe <table>' command.
      if (!fieldNames.contains(column.getName())) {
        schemaBuilder.add(columnInfo);
      }
    }
    // It's a CDAP-generated table if it uses our storage handler, or if a property is set on the table.
    String cdapName = null;
    Map<String, String> tableParameters = tableInfo.getParameters();
    if (tableParameters != null) {
      cdapName = tableParameters.get(Constants.Explore.CDAP_NAME);
    }
    // Tables created after CDAP 2.6 should set the "cdap.name" property, but older ones
    // do not. So also check if it uses a cdap storage handler.
    String storageHandler = tableInfo.getParameters().get("storage_handler");
    boolean isDatasetTable = cdapName != null || DatasetStorageHandler.class.getName().equals(storageHandler);
    return new TableInfo(tableInfo.getTableName(), tableInfo.getDbName(), tableInfo.getOwner(),
                         (long) tableInfo.getCreateTime() * 1000, (long) tableInfo.getLastAccessTime() * 1000,
                         tableInfo.getRetention(), partitionKeysBuilder.build(), tableInfo.getParameters(),
                         tableInfo.getTableType(), schemaBuilder.build(), tableInfo.getSd().getLocation(),
                         tableInfo.getSd().getInputFormat(), tableInfo.getSd().getOutputFormat(),
                         tableInfo.getSd().isCompressed(), tableInfo.getSd().getNumBuckets(),
                         tableInfo.getSd().getSerdeInfo().getSerializationLib(),
                         tableInfo.getSd().getSerdeInfo().getParameters(), isDatasetTable);
  } catch (NoSuchObjectException e) {
    throw new TableNotFoundException(e);
  } catch (TException e) {
    throw new ExploreException(e);
  }
}
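The schema assembly above merges storage-descriptor columns with partition keys, skipping any partition key whose name already appears as a column. A standalone sketch of just that merge step, with hypothetical names and plain strings in place of FieldSchema:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Sketch of the schema-merge step in getTableInfo(): partition keys are
// appended to the column list only when a column of the same name is not
// already present, mirroring what 'describe <table>' reports in Hive.
public final class SchemaMerge {

  public static List<String> merge(List<String> columns, List<String> partitionKeys) {
    Set<String> seen = new HashSet<>(columns);
    List<String> schema = new ArrayList<>(columns);
    for (String key : partitionKeys) {
      if (seen.add(key)) { // add() is false when the name was already seen
        schema.add(key);
      }
    }
    return schema;
  }

  public static void main(String[] args) {
    System.out.println(merge(Arrays.asList("id", "ts"), Arrays.asList("ts", "region")));
    // prints [id, ts, region]
  }
}

Also note the timestamp handling in the return statement: the metastore reports create and last-access times in seconds, so they are widened to long and multiplied by 1000 to get milliseconds; the cast must happen before the multiplication to avoid int overflow.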
Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class BaseHiveExploreService, method doStartSession.
private Map<String, String> doStartSession(@Nullable NamespaceId namespace,
                                           @Nullable Map<String, String> additionalSessionConf)
    throws ExploreException, IOException, NamespaceNotFoundException {
  Map<String, String> sessionConf = new HashMap<>();
  QueryHandle queryHandle = QueryHandle.generate();
  sessionConf.put(Constants.Explore.QUERY_ID, queryHandle.getHandle());
  NamespaceMeta namespaceMeta = null;
  if (namespace != null) {
    try {
      namespaceMeta = namespaceQueryAdmin.get(namespace);
    } catch (NamespaceNotFoundException e) {
      throw e;
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
  String schedulerQueue = namespaceMeta != null
    ? schedulerQueueResolver.getQueue(namespaceMeta)
    : schedulerQueueResolver.getDefaultQueue();
  if (schedulerQueue != null && !schedulerQueue.isEmpty()) {
    sessionConf.put(JobContext.QUEUE_NAME, schedulerQueue);
  }
  Transaction tx = startTransaction();
  ConfigurationUtil.set(sessionConf, Constants.Explore.TX_QUERY_KEY, TxnCodec.INSTANCE, tx);
  ConfigurationUtil.set(sessionConf, Constants.Explore.CCONF_KEY, CConfCodec.INSTANCE, cConf);
  ConfigurationUtil.set(sessionConf, Constants.Explore.HCONF_KEY, HConfCodec.INSTANCE, hConf);
  HiveConf hiveConf = createHiveConf();
  if (ExploreServiceUtils.isSparkEngine(hiveConf, additionalSessionConf)) {
    sessionConf.putAll(sparkConf);
  }
  if (UserGroupInformation.isSecurityEnabled()) {
    // make sure RM does not cancel delegation tokens after the query is run
    sessionConf.put("mapreduce.job.complete.cancel.delegation.tokens", "false");
    sessionConf.put("spark.hadoop.mapreduce.job.complete.cancel.delegation.tokens", "false");
    // write the user's credentials to a file, to be used for the query
    File credentialsFile = writeCredentialsFile(queryHandle);
    String credentialsFilePath = credentialsFile.getAbsolutePath();
    // mapreduce.job.credentials.binary is added by Hive only if Kerberos credentials are present
    // and impersonation is enabled. However, in our case we don't have Kerberos credentials for
    // the Explore service. Hence it will not be automatically added by Hive; instead we have to
    // add it ourselves.
    sessionConf.put(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, credentialsFilePath);
    // CDAP-8367 We need to set this back to Kerberos if security is enabled. We override it in HiveConf.
    sessionConf.put(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.KERBEROS.name());
    sessionConf.put(Constants.Explore.SUBMITLOCALTASKVIACHILD, Boolean.FALSE.toString());
    sessionConf.put(Constants.Explore.SUBMITVIACHILD, Boolean.FALSE.toString());
    if (ExploreServiceUtils.isTezEngine(hiveConf, additionalSessionConf)) {
      // Add the token file location property for Tez if the engine is Tez.
      sessionConf.put("tez.credentials.path", credentialsFilePath);
    }
  }
  if (additionalSessionConf != null) {
    sessionConf.putAll(additionalSessionConf);
  }
  return sessionConf;
}
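Note the layering order: base entries and engine- or security-specific entries are written first, and additionalSessionConf is applied last, so caller-supplied settings always override the defaults. A minimal sketch of that layering, with illustrative placeholder keys rather than the real CDAP constants:

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch of the conf-layering order in doStartSession(): defaults and
// security-specific entries first, caller overrides last. Keys and names
// here are hypothetical stand-ins, not CDAP API.
public final class SessionConfLayering {

  public static Map<String, String> build(boolean securityEnabled, Map<String, String> overrides) {
    Map<String, String> conf = new LinkedHashMap<>();
    conf.put("explore.query.id", "query-1"); // stand-in for Constants.Explore.QUERY_ID
    if (securityEnabled) {
      // mirrors the delegation-token setting from the snippet above
      conf.put("mapreduce.job.complete.cancel.delegation.tokens", "false");
    }
    if (overrides != null) {
      conf.putAll(overrides); // the equivalent of additionalSessionConf always wins
    }
    return conf;
  }
}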
Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class BaseHiveExploreService, method fetchStatus.
protected QueryStatus fetchStatus(OperationInfo operationInfo)
    throws ExploreException, HandleNotFoundException, HiveSQLException {
  QueryStatus queryStatus;
  try {
    queryStatus = doFetchStatus(operationInfo.getOperationHandle());
    if (QueryStatus.OpStatus.ERROR.equals(queryStatus.getStatus()) && queryStatus.getErrorMessage() == null) {
      queryStatus = new QueryStatus("Operation failed. See the log for more details.", null);
    }
  } catch (HiveSQLException e) {
    // If the exception carries a SQL state, it means that query execution failed,
    // but we can still successfully retrieve the status.
    if (e.getSQLState() != null) {
      queryStatus = new QueryStatus(e.getMessage(), e.getSQLState());
    } else {
      // This is an internal error - we are not able to retrieve the status.
      throw new ExploreException(e.getMessage(), e);
    }
  }
  operationInfo.setStatus(queryStatus);
  return queryStatus;
}
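The catch block encodes a simple rule: an exception that carries a SQL state describes a failed query and is converted into an error status, while one without a SQL state is treated as an internal failure and rethrown. A standalone sketch of that translation rule using java.sql.SQLException and hypothetical names:

import java.sql.SQLException;

// Sketch of the error-translation rule in fetchStatus(): a SQLException that
// carries a SQL state describes a failed query and becomes an error status;
// one without a SQL state is an internal error and is rethrown.
public final class StatusTranslation {

  static String toErrorStatus(SQLException e) {
    if (e.getSQLState() != null) {
      return "ERROR: " + e.getMessage() + " (SQLState " + e.getSQLState() + ")";
    }
    throw new IllegalStateException("Unable to retrieve query status", e);
  }

  public static void main(String[] args) {
    System.out.println(toErrorStatus(new SQLException("table not found", "42S02")));
  }
}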