Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class BaseHiveExploreService, method createNamespace.
@Override
public QueryHandle createNamespace(NamespaceMeta namespaceMeta) throws ExploreException, SQLException {
  startAndWait();
  try {
    // This check prevents the extra warn log.
    if (NamespaceId.DEFAULT.equals(namespaceMeta.getNamespaceId())) {
      return QueryHandle.NO_OP;
    }
    Map<String, String> sessionConf = startSession();
    SessionHandle sessionHandle = null;
    OperationHandle operationHandle = null;
    try {
      sessionHandle = openHiveSession(sessionConf);
      QueryHandle handle;
      if (Strings.isNullOrEmpty(namespaceMeta.getConfig().getHiveDatabase())) {
        // If no custom Hive database was provided, derive the database name using the CDAP format and create it
        // if it does not already exist, since CDAP is responsible for managing the lifecycle of such databases.
        String database = createHiveDBName(namespaceMeta.getName());
        // "IF NOT EXISTS" so that this operation is idempotent.
        String statement = String.format("CREATE DATABASE IF NOT EXISTS %s", database);
        operationHandle = executeAsync(sessionHandle, statement);
        handle = saveReadOnlyOperation(operationHandle, sessionHandle, sessionConf, statement, database);
        LOG.info("Creating database {} with handle {}", database, handle);
      } else {
        // A custom database name was provided, so check that it exists.
        // There is no way to check whether a Hive database exists other than trying to use it and seeing whether
        // that fails. So, run a "USE databaseName" command and see if it throws an exception.
        // Another option would be to list all databases and check for the database, but we run USE to
        // make sure that the user can actually use the database once we have impersonation.
        String statement = String.format("USE %s", namespaceMeta.getConfig().getHiveDatabase());
        // If the database does not exist, the call below throws an exception from Hive.
        try {
          operationHandle = executeAsync(sessionHandle, statement);
        } catch (HiveSQLException e) {
          // Hive reports error code 10072 (database not found) in this case.
          if (e.toTStatus().getErrorCode() == ErrorMsg.DATABASE_NOT_EXISTS.getErrorCode()) {
            // TODO: Add username here
            throw new ExploreException(String.format("A custom Hive database %s was provided for namespace %s " +
                                                       "which does not exist. Please create the database in Hive " +
                                                       "for the user and try creating the namespace again.",
                                                     namespaceMeta.getConfig().getHiveDatabase(),
                                                     namespaceMeta.getName()), e);
          } else {
            // Some other exception occurred while checking the existence of the database.
            throw new ExploreException(String.format("Failed to check existence of given custom Hive database " +
                                                       "%s for namespace %s",
                                                     namespaceMeta.getConfig().getHiveDatabase(),
                                                     namespaceMeta.getName()), e);
          }
        }
        // If the line above did not throw, we know that the database exists.
        handle = saveReadOnlyOperation(operationHandle, sessionHandle, sessionConf, statement,
                                       namespaceMeta.getConfig().getHiveDatabase());
        LOG.debug("Custom database {} existence verified with handle {}",
                  namespaceMeta.getConfig().getHiveDatabase(), handle);
      }
      return handle;
    } catch (Throwable e) {
      closeInternal(getQueryHandle(sessionConf),
                    new ReadOnlyOperationInfo(sessionHandle, operationHandle, sessionConf, "", ""));
      throw e;
    }
  } catch (HiveSQLException e) {
    throw getSqlException(e);
  } catch (Throwable e) {
    throw new ExploreException(e);
  }
}
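The two branches above boil down to a simple pattern: an idempotent CREATE DATABASE IF NOT EXISTS for databases whose lifecycle CDAP owns, and a USE statement to verify (and exercise access to) a user-supplied database. The following minimal sketch reproduces that pattern over plain Hive JDBC, outside of the Explore service; the connection URL, database names, and the HiveDatabaseCheck helper class are illustrative assumptions, not part of the CDAP code.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

// Minimal sketch (not CDAP code): create or verify a Hive database over JDBC.
public final class HiveDatabaseCheck {

  // Assumed HiveServer2 URL; replace with the real host/port.
  private static final String HIVE_URL = "jdbc:hive2://localhost:10000/default";

  // Idempotent creation of a managed database.
  static void createIfNotExists(String database) throws SQLException {
    try (Connection conn = DriverManager.getConnection(HIVE_URL);
         Statement stmt = conn.createStatement()) {
      // "IF NOT EXISTS" keeps the operation safe to retry.
      stmt.execute(String.format("CREATE DATABASE IF NOT EXISTS %s", database));
    }
  }

  // Verification of a user-provided database: USE fails if the database does not exist,
  // and also proves the current user is allowed to access it.
  static void verifyExists(String database) throws SQLException {
    try (Connection conn = DriverManager.getConnection(HIVE_URL);
         Statement stmt = conn.createStatement()) {
      stmt.execute(String.format("USE %s", database));
    }
  }
}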
Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class BaseHiveExploreService, method getMetaStoreClient.
private IMetaStoreClient getMetaStoreClient() throws ExploreException {
  if (metastoreClientLocal.get() == null) {
    try {
      IMetaStoreClient client = new HiveMetaStoreClient(createHiveConf());
      Supplier<IMetaStoreClient> supplier = Suppliers.ofInstance(client);
      metastoreClientLocal.set(supplier);
      // We use GC of the supplier as a signal for us to know that a thread is gone.
      // The supplier is set into the thread local, which will get GC'ed when the thread is gone.
      // Since we use a weak reference key to the supplier that points to the client
      // (in the metastoreClientReferences map), it won't block GC of the supplier instance.
      // We can use the weak reference, which is retrieved through polling the ReferenceQueue,
      // to get back the client and call close() on it.
      metastoreClientReferences.put(new WeakReference<>(supplier, metastoreClientReferenceQueue), client);
    } catch (MetaException e) {
      throw new ExploreException("Error initializing Hive Metastore client", e);
    }
  }
  return metastoreClientLocal.get().get();
}
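The interesting part of this method is the bookkeeping around the thread-local supplier: garbage collection of the supplier signals that its owning thread has exited, and the weak reference enqueued on the ReferenceQueue lets a cleanup task close the orphaned client. The sketch below shows that pattern in isolation with plain JDK types; the PerThreadClients class and the generic Closeable client stand in for CDAP's metastore client and are assumptions for illustration only.

import java.io.Closeable;
import java.io.IOException;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

// Minimal sketch (not CDAP code) of the "close the client when its thread dies" pattern.
public final class PerThreadClients<C extends Closeable> {

  private final ThreadLocal<Supplier<C>> clientLocal = new ThreadLocal<>();
  private final ReferenceQueue<Supplier<C>> referenceQueue = new ReferenceQueue<>();
  // Weak keys: the map does not keep the supplier (and thus the thread-local value) alive.
  private final Map<Reference<Supplier<C>>, C> references = new ConcurrentHashMap<>();

  private final Supplier<C> factory;

  public PerThreadClients(Supplier<C> factory) {
    this.factory = factory;
  }

  public C get() {
    if (clientLocal.get() == null) {
      C client = factory.get();
      Supplier<C> supplier = () -> client;
      clientLocal.set(supplier);
      // When the owning thread exits, its thread-local entry is cleared, the supplier becomes
      // unreachable, and this weak reference eventually shows up on the reference queue.
      references.put(new WeakReference<>(supplier, referenceQueue), client);
    }
    return clientLocal.get().get();
  }

  // Call periodically (e.g. from a scheduled task) to close clients whose threads are gone.
  public void cleanup() throws IOException {
    Reference<? extends Supplier<C>> ref;
    while ((ref = referenceQueue.poll()) != null) {
      C client = references.remove(ref);
      if (client != null) {
        client.close();
      }
    }
  }
}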
Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class ExploreExecutorHttpHandler, method updateDataset.
/**
 * Update ad-hoc exploration parameters of a dataset instance.
 */
@POST
@Path("datasets/{dataset}/update")
@AuditPolicy(AuditDetail.REQUEST_BODY)
public void updateDataset(FullHttpRequest request, HttpResponder responder,
                          @PathParam("namespace-id") String namespace,
                          @PathParam("dataset") String datasetName) throws BadRequestException {
  final DatasetId datasetId = new DatasetId(namespace, datasetName);
  try {
    UpdateExploreParameters params = readUpdateParameters(request);
    final DatasetSpecification oldSpec = params.getOldSpec();
    final DatasetSpecification datasetSpec = params.getNewSpec();
    QueryHandle handle;
    if (oldSpec.equals(datasetSpec)) {
      handle = QueryHandle.NO_OP;
    } else {
      handle = impersonator.doAs(datasetId, new Callable<QueryHandle>() {
        @Override
        public QueryHandle call() throws Exception {
          return exploreTableManager.updateDataset(datasetId, datasetSpec, oldSpec);
        }
      });
    }
    JsonObject json = new JsonObject();
    json.addProperty("handle", handle.getHandle());
    responder.sendJson(HttpResponseStatus.OK, json.toString());
  } catch (IllegalArgumentException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
  } catch (ExploreException e) {
    responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, "Error updating explore on dataset " + datasetId);
  } catch (SQLException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, "SQL exception while trying to update explore on dataset " + datasetId);
  } catch (UnsupportedTypeException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST,
                         "Schema for dataset " + datasetId + " is not supported for exploration: " + e.getMessage());
  } catch (Throwable e) {
    LOG.error("Got exception:", e);
    responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, e.getMessage());
  }
}
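The handler hands the actual table update to impersonator.doAs so that the work runs as the namespace's configured user rather than the CDAP system user. CDAP's Impersonator itself is not shown here; the sketch below only illustrates the plain-JDK JAAS equivalent of the same wrap-work-in-a-Callable idiom, and the CallableRunner class and runAs helper are illustrative assumptions.

import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.Callable;
import javax.security.auth.Subject;

// Minimal sketch (not CDAP's Impersonator): run a Callable as another principal via JAAS.
public final class CallableRunner {

  static <T> T runAs(Subject subject, Callable<T> callable) throws Exception {
    try {
      // Subject.doAs executes the action with the given subject bound to the access-control context.
      return Subject.doAs(subject, (PrivilegedExceptionAction<T>) callable::call);
    } catch (PrivilegedActionException e) {
      // Unwrap to surface the original checked exception thrown by the callable.
      throw e.getException();
    }
  }
}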
Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class DistributedStorageProviderNamespaceAdmin, method delete.
@SuppressWarnings("ConstantConditions")
@Override
public void delete(NamespaceId namespaceId) throws IOException, ExploreException, SQLException {
  // Delete the namespace directory from the filesystem.
  super.delete(namespaceId);
  if (NamespaceId.DEFAULT.equals(namespaceId)) {
    return;
  }
  // Look up the namespace configuration to see whether a custom HBase namespace mapping is set.
  NamespaceConfig namespaceConfig;
  try {
    namespaceConfig = namespaceQueryAdmin.get(namespaceId).getConfig();
  } catch (Exception ex) {
    throw new IOException("Could not fetch custom HBase mapping.", ex);
  }
  if (!Strings.isNullOrEmpty(namespaceConfig.getHbaseNamespace())) {
    // A custom namespace mapping is set for HBase, so don't do anything during delete: the lifecycle of that
    // namespace is managed by the user.
    LOG.debug("Custom HBase mapping {} was found while deleting {}. Hence skipping deletion of HBase namespace",
              namespaceConfig.getHbaseNamespace(), namespaceId);
    return;
  }
  // Delete the HBase namespace.
  String namespace = tableUtil.getHBaseNamespace(namespaceId);
  try (HBaseDDLExecutor executor = hBaseDDLExecutorFactory.get()) {
    executor.deleteNamespaceIfExists(namespace);
  }
}
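HBaseDDLExecutor.deleteNamespaceIfExists hides the underlying HBase admin calls. For reference, a minimal sketch of the equivalent operation with the stock HBase client API might look like the following; the configuration setup and the HBaseNamespaceCleanup helper class are illustrative assumptions, not the CDAP implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch (not CDAP code): delete an HBase namespace only if it exists.
public final class HBaseNamespaceCleanup {

  static void deleteNamespaceIfExists(String namespace) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      for (NamespaceDescriptor descriptor : admin.listNamespaceDescriptors()) {
        if (descriptor.getName().equals(namespace)) {
          // The namespace must contain no tables before it can be deleted.
          admin.deleteNamespace(namespace);
          return;
        }
      }
    }
  }
}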
Use of io.cdap.cdap.explore.service.ExploreException in project cdap by caskdata.
The class DistributedStorageProviderNamespaceAdmin, method create.
@Override
public void create(NamespaceMeta namespaceMeta) throws IOException, ExploreException, SQLException {
  // Create the filesystem directory.
  super.create(namespaceMeta);
  // Skip namespace creation in HBase for the default namespace.
  if (NamespaceId.DEFAULT.equals(namespaceMeta.getNamespaceId())) {
    return;
  }
  // Create the HBase namespace and set the group C(reate) permission if a group is configured.
  String hbaseNamespace = tableUtil.getHBaseNamespace(namespaceMeta);
  if (Strings.isNullOrEmpty(namespaceMeta.getConfig().getHbaseNamespace())) {
    try (HBaseDDLExecutor executor = hBaseDDLExecutorFactory.get()) {
      boolean created = executor.createNamespaceIfNotExists(hbaseNamespace);
      if (namespaceMeta.getConfig().getGroupName() != null) {
        try {
          executor.grantPermissions(hbaseNamespace, null,
                                    ImmutableMap.of("@" + namespaceMeta.getConfig().getGroupName(), "C"));
        } catch (IOException | RuntimeException e) {
          // Don't leave a partial state behind: since this fails the create(), the namespace should be removed.
          if (created) {
            try {
              executor.deleteNamespaceIfExists(hbaseNamespace);
            } catch (Throwable t) {
              e.addSuppressed(t);
            }
          }
          throw e;
        }
      }
    } catch (Throwable t) {
      try {
        // If we failed to create the namespace in HBase, clean up what was created above.
        super.delete(namespaceMeta.getNamespaceId());
      } catch (Exception e) {
        t.addSuppressed(e);
      }
      throw t;
    }
  }
  try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
    if (!tableUtil.hasNamespace(admin, hbaseNamespace)) {
      throw new IOException(String.format("HBase namespace '%s' specified for new namespace '%s' does not" +
                                            " exist. Please specify an existing HBase namespace.",
                                          hbaseNamespace, namespaceMeta.getName()));
    }
  }
}
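The nested catch blocks above form a rollback chain: if granting permissions fails, the just-created HBase namespace is removed; if anything in the HBase step fails, the filesystem directory created by super.create() is removed as well, and cleanup failures are attached via addSuppressed so the original error is not lost. A compact, self-contained sketch of that idiom follows; createResource, grant and deleteResource are hypothetical operations standing in for the HBase calls above.

// Minimal sketch (not CDAP code) of create-then-rollback with suppressed cleanup failures.
public final class RollbackExample {

  interface Resource {
    boolean createResource(String name) throws Exception;   // returns true if newly created
    void grant(String name, String group) throws Exception;
    void deleteResource(String name) throws Exception;
  }

  static void createWithGroup(Resource resource, String name, String group) throws Exception {
    boolean created = resource.createResource(name);
    try {
      resource.grant(name, group);
    } catch (Exception e) {
      // Roll back only what this call created; keep the original failure as the primary exception.
      if (created) {
        try {
          resource.deleteResource(name);
        } catch (Throwable t) {
          e.addSuppressed(t);
        }
      }
      throw e;
    }
  }
}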