Use of org.apache.hyracks.algebricks.common.exceptions.AlgebricksException in project asterixdb by apache.
The class ResultUtil, method apiErrorHandler.
public static void apiErrorHandler(PrintWriter out, Exception e) {
    int errorCode = 99;
    if (e instanceof ParseException) {
        errorCode = 2;
    } else if (e instanceof AlgebricksException) {
        errorCode = 3;
    } else if (e instanceof HyracksDataException) {
        errorCode = 4;
    }
    ObjectNode errorResp = ResultUtil.getErrorResponse(errorCode, extractErrorMessage(e), extractErrorSummary(e),
            extractFullStackTrace(e));
    out.write(errorResp.toString());
}
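For callers outside ResultUtil, the response is just a Jackson ObjectNode. Below is a minimal, self-contained sketch of the kind of payload the handler writes; the field names ("error-code", "summary", "stacktrace") are illustrative assumptions, not the exact schema produced by ResultUtil.getErrorResponse.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.io.PrintWriter;
import java.io.StringWriter;

public final class ErrorResponseSketch {
    public static void main(String[] args) {
        Exception e = new IllegalArgumentException("boom");
        ObjectNode errorResp = new ObjectMapper().createObjectNode();
        errorResp.put("error-code", 99);               // the fallback code used by apiErrorHandler
        errorResp.put("summary", e.toString());        // stands in for extractErrorSummary(e)
        StringWriter trace = new StringWriter();
        e.printStackTrace(new PrintWriter(trace, true));
        errorResp.put("stacktrace", trace.toString()); // stands in for extractFullStackTrace(e)
        System.out.println(errorResp.toString());
    }
}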
Use of org.apache.hyracks.algebricks.common.exceptions.AlgebricksException in project asterixdb by apache.
The class QueryTranslator, method executeExternalShellProgram.
// Executes an external shell command and scans its output for known failure patterns.
protected int executeExternalShellProgram(ProcessBuilder pb)
        throws IOException, AlgebricksException, InterruptedException {
    Process process = pb.start();
    try (BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
        String line;
        while ((line = in.readLine()) != null) {
            LOGGER.info(line);
            if (line.contains("Exception") || line.contains("Error")) {
                LOGGER.severe(line);
                if (line.contains("Connection refused")) {
                    throw new AlgebricksException("The connection to your Pregelix cluster was refused. "
                            + "Is it running? Is the port in the query correct?");
                }
                if (line.contains("Could not find or load main class")) {
                    throw new AlgebricksException("The main class of your Pregelix query was not found. "
                            + "Is the path to your .jar file correct?");
                }
                if (line.contains("ClassNotFoundException")) {
                    throw new AlgebricksException("The vertex class of your Pregelix query was not found. "
                            + "Does it exist? Is the spelling correct?");
                }
            }
        }
        process.waitFor();
    }
    // Return the exit value of the program.
    return process.exitValue();
}
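One caveat when calling this method: it only drains the child's stdout, so a process that writes heavily to stderr can fill its pipe buffer and stall. A minimal standalone sketch of a safer invocation, assuming the caller merges the streams with redirectErrorStream(true); the `java -version` command is just an example (it happens to print to stderr, so the merge is visible):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

public final class ShellRunnerSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Merge stderr into stdout so one reader loop sees every line,
        // including the error lines the method above greps for.
        ProcessBuilder pb = new ProcessBuilder("java", "-version").redirectErrorStream(true);
        Process process = pb.start();
        try (BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);
            }
        }
        System.out.println("exit=" + process.waitFor());
    }
}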
Use of org.apache.hyracks.algebricks.common.exceptions.AlgebricksException in project asterixdb by apache.
The class QueryTranslator, method handleDataverseDropStatement.
protected void handleDataverseDropStatement(MetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    DataverseDropStatement stmtDelete = (DataverseDropStatement) stmt;
    String dataverseName = stmtDelete.getDataverseName().getValue();
    if (dataverseName.equals(MetadataBuiltinEntities.DEFAULT_DATAVERSE_NAME)) {
        throw new HyracksDataException(
                MetadataBuiltinEntities.DEFAULT_DATAVERSE_NAME + " dataverse can't be dropped");
    }
    ProgressState progress = ProgressState.NO_PROGRESS;
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    List<JobSpecification> jobsToExecute = new ArrayList<>();
    MetadataLockManager.INSTANCE.acquireDataverseWriteLock(metadataProvider.getLocks(), dataverseName);
    try {
        Dataverse dv = MetadataManager.INSTANCE.getDataverse(mdTxnCtx, dataverseName);
        if (dv == null) {
            if (stmtDelete.getIfExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("There is no dataverse with this name " + dataverseName + ".");
            }
        }
        // #. disconnect all feeds from any datasets in the dataverse.
        ActiveLifecycleListener activeListener = (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
        ActiveJobNotificationHandler activeEventHandler = activeListener.getNotificationHandler();
        IActiveEntityEventsListener[] activeListeners = activeEventHandler.getEventListeners();
        Identifier dvId = new Identifier(dataverseName);
        MetadataProvider tempMdProvider = new MetadataProvider(appCtx, metadataProvider.getDefaultDataverse(),
                metadataProvider.getStorageComponentProvider());
        tempMdProvider.setConfig(metadataProvider.getConfig());
        for (IActiveEntityEventsListener listener : activeListeners) {
            EntityId activeEntityId = listener.getEntityId();
            if (activeEntityId.getExtensionName().equals(Feed.EXTENSION_NAME)
                    && activeEntityId.getDataverse().equals(dataverseName)) {
                tempMdProvider.getLocks().reset();
                stopFeedBeforeDelete(new Pair<>(dvId, new Identifier(activeEntityId.getEntityName())),
                        tempMdProvider);
                // prepare a job to remove the feed log storage
                jobsToExecute.add(FeedOperations.buildRemoveFeedStorageJob(metadataProvider,
                        MetadataManager.INSTANCE.getFeed(mdTxnCtx, dataverseName, activeEntityId.getEntityName())));
            }
        }
        // #. prepare jobs which will drop the corresponding datasets with their indexes.
        List<Dataset> datasets = MetadataManager.INSTANCE.getDataverseDatasets(mdTxnCtx, dataverseName);
        for (Dataset dataset : datasets) {
            String datasetName = dataset.getDatasetName();
            DatasetType dsType = dataset.getDatasetType();
            if (dsType == DatasetType.INTERNAL) {
                List<Index> indexes =
                        MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
                for (Index index : indexes) {
                    jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(index, metadataProvider, dataset));
                }
            } else {
                // external dataset
                List<Index> indexes =
                        MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
                for (int k = 0; k < indexes.size(); k++) {
                    if (ExternalIndexingOperations.isFileIndex(indexes.get(k))) {
                        jobsToExecute.add(
                                ExternalIndexingOperations.buildDropFilesIndexJobSpec(metadataProvider, dataset));
                    } else {
                        jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(indexes.get(k), metadataProvider, dataset));
                    }
                }
                ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(dataset);
            }
        }
        jobsToExecute.add(DataverseUtil.dropDataverseJobSpec(dv, metadataProvider));
        // #. mark PendingDropOp on the dataverse record by
        // first, deleting the dataverse record from the DATAVERSE_DATASET,
        // second, inserting the dataverse record with the PendingDropOp value into the DATAVERSE_DATASET.
        MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
        MetadataManager.INSTANCE.addDataverse(mdTxnCtx,
                new Dataverse(dataverseName, dv.getDataFormat(), MetadataUtil.PENDING_DROP_OP));
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        bActiveTxn = false;
        progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }
        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        bActiveTxn = true;
        metadataProvider.setMetadataTxnContext(mdTxnCtx);
        // #. finally, delete the dataverse.
        MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
        // drop all node groups that are no longer needed
        for (Dataset dataset : datasets) {
            String nodeGroup = dataset.getNodeGroupName();
            MetadataLockManager.INSTANCE.acquireNodeGroupWriteLock(metadataProvider.getLocks(), nodeGroup);
            if (MetadataManager.INSTANCE.getNodegroup(mdTxnCtx, nodeGroup) != null) {
                MetadataManager.INSTANCE.dropNodegroup(mdTxnCtx, nodeGroup, true);
            }
        }
        if (activeDataverse != null && activeDataverse.getDataverseName().equals(dataverseName)) {
            activeDataverse = null;
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }
        if (progress == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            if (activeDataverse != null && activeDataverse.getDataverseName().equals(dataverseName)) {
                activeDataverse = null;
            }
            // remove all the indexes in the NCs
            try {
                for (JobSpecification jobSpec : jobsToExecute) {
                    JobUtils.runJob(hcc, jobSpec, true);
                }
            } catch (Exception e2) {
                // do not throw; the metadata still needs to be compensated
                e.addSuppressed(e2);
            }
            // remove the pending record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            try {
                MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is in an inconsistent state: pending dataverse ("
                        + dataverseName + ") couldn't be removed from the metadata", e);
            }
        }
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
        ExternalDatasetsRegistry.INSTANCE.releaseAcquiredLocks(metadataProvider);
    }
}
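The try/catch above implements AsterixDB's pending-operation protocol: mark the metadata record PENDING_DROP_OP and commit, run the physical drop jobs outside any metadata transaction, then remove the record in a second transaction, compensating if anything fails in between. Below is a condensed, self-contained sketch of that protocol; the Txn, Job, and Metadata interfaces are hypothetical stand-ins for MetadataTransactionContext, JobSpecification, and MetadataManager, not real AsterixDB APIs.

import java.util.List;

public final class PendingDropSketch {
    interface Txn { void commit() throws Exception; }
    interface Job { void run() throws Exception; }
    interface Metadata {
        Txn begin() throws Exception;
        void markPendingDrop(Txn txn, String dataverse) throws Exception;
        void drop(Txn txn, String dataverse) throws Exception;
        void abort(Txn txn);
    }

    static void dropDataverse(Metadata md, String dataverse, List<Job> jobs) throws Exception {
        Txn txn = md.begin();
        boolean activeTxn = true;
        boolean pendingRecorded = false;
        try {
            md.markPendingDrop(txn, dataverse);    // txn 1: record the intent durably
            txn.commit();
            activeTxn = false;
            pendingRecorded = true;
            for (Job job : jobs) {                 // physical drops, outside any metadata txn
                job.run();
            }
            txn = md.begin();                      // txn 2: remove the pending record for good
            activeTxn = true;
            md.drop(txn, dataverse);
            txn.commit();
            activeTxn = false;
        } catch (Exception e) {
            if (activeTxn) {
                md.abort(txn);
            }
            if (pendingRecorded) {
                // compensate: a PENDING_DROP_OP record must never outlive the operation
                Txn comp = md.begin();
                try {
                    md.drop(comp, dataverse);
                    comp.commit();
                } catch (Exception e2) {
                    e.addSuppressed(e2);
                    md.abort(comp);
                }
            }
            throw e;
        }
    }
}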
Use of org.apache.hyracks.algebricks.common.exceptions.AlgebricksException in project asterixdb by apache.
The class QueryTranslator, method handleCreateFeedPolicyStatement.
protected void handleCreateFeedPolicyStatement(MetadataProvider metadataProvider, Statement stmt)
        throws AlgebricksException, HyracksDataException {
    String dataverse;
    String policy;
    FeedPolicyEntity newPolicy = null;
    MetadataTransactionContext mdTxnCtx = null;
    CreateFeedPolicyStatement cfps = (CreateFeedPolicyStatement) stmt;
    dataverse = getActiveDataverse(null);
    policy = cfps.getPolicyName();
    MetadataLockManager.INSTANCE.createFeedPolicyBegin(metadataProvider.getLocks(), dataverse,
            dataverse + "." + policy);
    try {
        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        metadataProvider.setMetadataTxnContext(mdTxnCtx);
        FeedPolicyEntity feedPolicy =
                MetadataManager.INSTANCE.getFeedPolicy(metadataProvider.getMetadataTxnContext(), dataverse, policy);
        if (feedPolicy != null) {
            if (cfps.getIfNotExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("A policy with this name " + policy + " already exists.");
            }
        }
        boolean extendingExisting = cfps.getSourcePolicyName() != null;
        String description = cfps.getDescription() == null ? "" : cfps.getDescription();
        if (extendingExisting) {
            FeedPolicyEntity sourceFeedPolicy = MetadataManager.INSTANCE
                    .getFeedPolicy(metadataProvider.getMetadataTxnContext(), dataverse, cfps.getSourcePolicyName());
            if (sourceFeedPolicy == null) {
                sourceFeedPolicy = MetadataManager.INSTANCE.getFeedPolicy(metadataProvider.getMetadataTxnContext(),
                        MetadataConstants.METADATA_DATAVERSE_NAME, cfps.getSourcePolicyName());
                if (sourceFeedPolicy == null) {
                    throw new AlgebricksException("Unknown policy " + cfps.getSourcePolicyName());
                }
            }
            Map<String, String> policyProperties = sourceFeedPolicy.getProperties();
            policyProperties.putAll(cfps.getProperties());
            newPolicy = new FeedPolicyEntity(dataverse, policy, description, policyProperties);
        } else {
            Properties prop = new Properties();
            // try-with-resources so the stream is closed even if load() fails
            try (InputStream stream = new FileInputStream(cfps.getSourcePolicyFile())) {
                prop.load(stream);
            } catch (Exception e) {
                throw new AlgebricksException("Unable to read policy file " + cfps.getSourcePolicyFile(), e);
            }
            Map<String, String> policyProperties = new HashMap<>();
            for (Entry<Object, Object> entry : prop.entrySet()) {
                policyProperties.put((String) entry.getKey(), (String) entry.getValue());
            }
            newPolicy = new FeedPolicyEntity(dataverse, policy, description, policyProperties);
        }
        MetadataManager.INSTANCE.addFeedPolicy(mdTxnCtx, newPolicy);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (RemoteException | ACIDException e) {
        abort(e, e, mdTxnCtx);
        throw new HyracksDataException(e);
    } finally {
        metadataProvider.getLocks().unlock();
    }
}
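The properties-file branch in the original source never closed its FileInputStream; the rewrite above uses try-with-resources. A standalone sketch of the same load-and-copy step, with an illustrative file path:

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public final class PolicyFileSketch {
    // Loads a .properties file into a String-to-String map, closing the stream on all paths.
    static Map<String, String> load(String path) throws IOException {
        Properties prop = new Properties();
        try (InputStream stream = new FileInputStream(path)) { // closed even when load() throws
            prop.load(stream);
        }
        Map<String, String> out = new HashMap<>();
        for (Map.Entry<Object, Object> entry : prop.entrySet()) {
            out.put((String) entry.getKey(), (String) entry.getValue());
        }
        return out;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(load("my-feed-policy.properties")); // hypothetical file name
    }
}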
Use of org.apache.hyracks.algebricks.common.exceptions.AlgebricksException in project asterixdb by apache.
The class QueryTranslator, method rewriteCompileInsertUpsert.
private JobSpecification rewriteCompileInsertUpsert(IClusterInfoCollector clusterInfoCollector,
        MetadataProvider metadataProvider, InsertStatement insertUpsert)
        throws RemoteException, AlgebricksException, ACIDException {
    // Insert/upsert statement rewriting (happens under the same ongoing metadata transaction).
    Pair<IReturningStatement, Integer> rewrittenResult =
            apiFramework.reWriteQuery(declaredFunctions, metadataProvider, insertUpsert, sessionOutput);
    InsertStatement rewrittenInsertUpsert = (InsertStatement) rewrittenResult.first;
    String dataverseName = getActiveDataverse(rewrittenInsertUpsert.getDataverseName());
    String datasetName = rewrittenInsertUpsert.getDatasetName().getValue();
    CompiledInsertStatement clfrqs;
    switch (insertUpsert.getKind()) {
        case Statement.Kind.INSERT:
            clfrqs = new CompiledInsertStatement(dataverseName, datasetName, rewrittenInsertUpsert.getQuery(),
                    rewrittenInsertUpsert.getVarCounter(), rewrittenInsertUpsert.getVar(),
                    rewrittenInsertUpsert.getReturnExpression());
            break;
        case Statement.Kind.UPSERT:
            clfrqs = new CompiledUpsertStatement(dataverseName, datasetName, rewrittenInsertUpsert.getQuery(),
                    rewrittenInsertUpsert.getVarCounter(), rewrittenInsertUpsert.getVar(),
                    rewrittenInsertUpsert.getReturnExpression());
            break;
        default:
            throw new AlgebricksException("Unsupported statement type " + rewrittenInsertUpsert.getKind());
    }
    // Insert/upsert statement compilation (happens under the same ongoing metadata transaction).
    return apiFramework.compileQuery(clusterInfoCollector, metadataProvider, rewrittenInsertUpsert.getQuery(),
            rewrittenResult.second, datasetName, sessionOutput, clfrqs);
}
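The qualified case labels compile because Statement.Kind.INSERT and Statement.Kind.UPSERT are static final int constants, and Java accepts qualified names of compile-time constants as int case labels. A standalone illustration of the same dispatch-with-default idiom; the Kind values here are illustrative, not AsterixDB's actual constants:

public final class KindDispatchSketch {
    static final class Kind {
        static final int INSERT = 0;
        static final int UPSERT = 1;
    }

    static String compile(int kind) {
        switch (kind) {
            case Kind.INSERT: // legal: qualified name of a compile-time int constant
                return "compiled insert";
            case Kind.UPSERT:
                return "compiled upsert";
            default:
                // mirror rewriteCompileInsertUpsert: unknown kinds fail fast
                throw new IllegalArgumentException("Unsupported statement type " + kind);
        }
    }

    public static void main(String[] args) {
        System.out.println(compile(Kind.UPSERT));
    }
}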