Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class FlushDatasetOperatorDescriptor, method createPushRuntime:
@Override
public IOperatorNodePushable createPushRuntime(final IHyracksTaskContext ctx, IRecordDescriptorProvider recordDescProvider, int partition, int nPartitions) throws HyracksDataException {
    return new AbstractUnaryInputSinkOperatorNodePushable() {

        @Override
        public void open() throws HyracksDataException {
            // No-op: this sink does all of its work in close().
        }

        @Override
        public void nextFrame(ByteBuffer buffer) throws HyracksDataException {
            // No-op: incoming frames are ignored; the operator only triggers a flush.
        }

        @Override
        public void fail() throws HyracksDataException {
            this.close();
        }

        @Override
        public void close() throws HyracksDataException {
            try {
                INcApplicationContext appCtx = (INcApplicationContext) ctx.getJobletContext().getServiceContext().getApplicationContext();
                IDatasetLifecycleManager datasetLifeCycleManager = appCtx.getDatasetLifecycleManager();
                ILockManager lockManager = appCtx.getTransactionSubsystem().getLockManager();
                ITransactionManager txnManager = appCtx.getTransactionSubsystem().getTransactionManager();
                // Get the local transaction.
                ITransactionContext txnCtx = txnManager.getTransactionContext(jobId, false);
                // Lock the dataset granule.
                lockManager.lock(datasetId, -1, LockMode.S, txnCtx);
                // Flush the dataset synchronously.
                datasetLifeCycleManager.flushDataset(datasetId.getId(), false);
            } catch (ACIDException e) {
                throw new HyracksDataException(e);
            }
        }
    };
}
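The heart of the operator is the three-step sequence in close(): look up the job's transaction, take a shared lock on the dataset, then flush, translating any ACIDException into the framework's exception type. Below is a minimal sketch of that lock-then-flush-then-translate pattern. Every type in it is an invented stand-in for illustration (AcidException, FrameworkException, LockManager, LifecycleManager); only the shape of the control flow mirrors the AsterixDB operator.

final class LockThenFlushSketch {

    /** Stand-in for ACIDException. */
    static class AcidException extends Exception {
        AcidException(String msg) { super(msg); }
    }

    /** Stand-in for HyracksDataException. */
    static class FrameworkException extends Exception {
        FrameworkException(Throwable cause) { super(cause); }
    }

    interface LockManager {
        void sharedLock(int datasetId) throws AcidException;
    }

    interface LifecycleManager {
        void flushDataset(int datasetId, boolean asyncFlush) throws AcidException;
    }

    static void closeLikeTheOperator(LockManager locks, LifecycleManager lifecycle, int datasetId)
            throws FrameworkException {
        try {
            // Take a shared lock so the flush does not race concurrent writers.
            locks.sharedLock(datasetId);
            // false = synchronous flush, mirroring flushDataset(datasetId.getId(), false) above.
            lifecycle.flushDataset(datasetId, false);
        } catch (AcidException e) {
            // Translate the transaction-subsystem exception into the framework type,
            // just as the operator wraps ACIDException in HyracksDataException.
            throw new FrameworkException(e);
        }
    }
}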
Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class MultiTransactionJobletEventListenerFactory, method createListener:
@Override
public IJobletEventListener createListener(final IHyracksJobletContext jobletContext) {
    return new IJobletEventListener() {

        @Override
        public void jobletFinish(JobStatus jobStatus) {
            try {
                ITransactionManager txnManager = ((INcApplicationContext) jobletContext.getServiceContext().getApplicationContext()).getTransactionSubsystem().getTransactionManager();
                for (JobId jobId : jobIds) {
                    ITransactionContext txnContext = txnManager.getTransactionContext(jobId, false);
                    txnContext.setWriteTxn(transactionalWrite);
                    // Commit every participating transaction unless the job failed.
                    txnManager.completedTransaction(txnContext, DatasetId.NULL, -1, !(jobStatus == JobStatus.FAILURE));
                }
            } catch (ACIDException e) {
                // The listener callbacks declare no checked exceptions, so escalate.
                throw new Error(e);
            }
        }

        @Override
        public void jobletStart() {
            try {
                // Create a transaction context for each participating job.
                for (JobId jobId : jobIds) {
                    ((INcApplicationContext) jobletContext.getServiceContext().getApplicationContext()).getTransactionSubsystem().getTransactionManager().getTransactionContext(jobId, true);
                }
            } catch (ACIDException e) {
                throw new Error(e);
            }
        }
    };
}
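The listener pairs the two halves of a transaction lifecycle across several jobs: jobletStart() begins one transaction per JobId (getTransactionContext with create = true), and jobletFinish() commits them all or aborts them all based on the job status. A compact sketch of that pairing follows; TxnManager, Txn, and Status are invented stand-ins, not Hyracks or AsterixDB APIs.

final class MultiTxnListenerSketch {

    enum Status { SUCCESS, FAILURE }

    interface Txn {
        void setWriteTxn(boolean isWriteTxn);
    }

    interface TxnManager {
        Txn getTransaction(long jobId, boolean createIfAbsent) throws Exception;
        void complete(Txn txn, boolean commit) throws Exception;
    }

    static void jobletStart(TxnManager mgr, long[] jobIds) {
        try {
            for (long jobId : jobIds) {
                mgr.getTransaction(jobId, true); // begin one transaction per job
            }
        } catch (Exception e) {
            throw new Error(e); // callbacks declare no checked exceptions
        }
    }

    static void jobletFinish(TxnManager mgr, long[] jobIds, boolean write, Status status) {
        try {
            for (long jobId : jobIds) {
                Txn txn = mgr.getTransaction(jobId, false);
                txn.setWriteTxn(write);
                mgr.complete(txn, status != Status.FAILURE); // commit unless the job failed
            }
        } catch (Exception e) {
            throw new Error(e);
        }
    }
}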
Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class MetadataNode, method dropDataset:
@Override
public void dropDataset(JobId jobId, String dataverseName, String datasetName) throws MetadataException, RemoteException {
    Dataset dataset = getDataset(jobId, dataverseName, datasetName);
    if (dataset == null) {
        throw new MetadataException("Cannot drop dataset '" + datasetName + "' because it doesn't exist.");
    }
    try {
        // Delete entry from the 'datasets' dataset.
        ITupleReference searchKey = createTuple(dataverseName, datasetName);
        // Searches the index for the tuple to be deleted. Acquires an S
        // lock on the 'dataset' dataset.
        ITupleReference datasetTuple = null;
        try {
            datasetTuple = getTupleToBeDeleted(jobId, MetadataPrimaryIndexes.DATASET_DATASET, searchKey);
            // Delete entry(s) from the 'indexes' dataset.
            List<Index> datasetIndexes = getDatasetIndexes(jobId, dataverseName, datasetName);
            if (datasetIndexes != null) {
                for (Index index : datasetIndexes) {
                    dropIndex(jobId, dataverseName, datasetName, index.getIndexName());
                }
            }
            if (dataset.getDatasetType() == DatasetType.EXTERNAL) {
                // Delete external files.
                // As a side effect, acquires an S lock on the 'ExternalFile' dataset
                // on behalf of txnId.
                List<ExternalFile> datasetFiles = getExternalFiles(jobId, dataset);
                if (datasetFiles != null && !datasetFiles.isEmpty()) {
                    // Drop all external files in this dataset.
                    for (ExternalFile file : datasetFiles) {
                        dropExternalFile(jobId, dataverseName, file.getDatasetName(), file.getFileNumber());
                    }
                }
            }
        } catch (HyracksDataException hde) {
            // Ignore a missing-key error and continue deleting the remaining artifacts.
            if (!hde.getComponent().equals(ErrorCode.HYRACKS) || hde.getErrorCode() != ErrorCode.UPDATE_OR_DELETE_NON_EXISTENT_KEY) {
                throw new MetadataException(hde);
            }
        } finally {
            deleteTupleFromIndex(jobId, MetadataPrimaryIndexes.DATASET_DATASET, datasetTuple);
        }
    } catch (HyracksDataException | ACIDException e) {
        throw new MetadataException(e);
    }
}
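The interesting part of the catch block is its error filtering: exactly one expected error code (a missing key, meaning the artifact was already gone) is swallowed so the cascade of drops can continue, and everything else is rethrown. A compact sketch of that idiom, with ComponentError as an invented stand-in for HyracksDataException and illustrative component/code values:

final class MissingKeyFilterSketch {

    /** Stand-in for HyracksDataException; component and code values are invented. */
    static class ComponentError extends Exception {
        final String component;
        final int errorCode;
        ComponentError(String component, int errorCode) {
            this.component = component;
            this.errorCode = errorCode;
        }
    }

    static final String HYRACKS = "HYR";
    static final int UPDATE_OR_DELETE_NON_EXISTENT_KEY = 33; // illustrative value

    /** Swallow exactly the expected "key not found" error; rethrow anything else. */
    static void ignoreMissingKeyOnly(ComponentError e) throws ComponentError {
        if (!HYRACKS.equals(e.component) || e.errorCode != UPDATE_OR_DELETE_NON_EXISTENT_KEY) {
            throw e;
        }
        // Otherwise: the artifact was already gone, so the drop keeps going.
    }
}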
Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class MetadataNode, method dropFeedPolicy:
@Override
public void dropFeedPolicy(JobId jobId, String dataverseName, String policyName) throws MetadataException, RemoteException {
    try {
        ITupleReference searchKey = createTuple(dataverseName, policyName);
        ITupleReference tuple = getTupleToBeDeleted(jobId, MetadataPrimaryIndexes.FEED_POLICY_DATASET, searchKey);
        deleteTupleFromIndex(jobId, MetadataPrimaryIndexes.FEED_POLICY_DATASET, tuple);
    } catch (HyracksDataException e) {
        if (e.getComponent().equals(ErrorCode.HYRACKS) && e.getErrorCode() == ErrorCode.UPDATE_OR_DELETE_NON_EXISTENT_KEY) {
            throw new MetadataException("Unknown feed policy " + policyName, e);
        } else {
            throw new MetadataException(e);
        }
    } catch (ACIDException e) {
        throw new MetadataException(e);
    }
}
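The body of dropFeedPolicy is an instance of a two-step that recurs throughout MetadataNode: search for the tuple (which acquires an S lock on the metadata dataset), then delete it. A hedged sketch of factoring that pairing into a reusable helper; MetadataStoreSketch, Tuple, and the method names here are invented for illustration, not the actual MetadataNode API.

interface MetadataStoreSketch {

    interface Tuple { }

    Tuple getTupleToBeDeleted(long jobId, String indexName, Tuple searchKey) throws Exception;

    void deleteTupleFromIndex(long jobId, String indexName, Tuple tuple) throws Exception;

    /** Search for the tuple (taking an S lock), then delete it. */
    default void dropByKey(long jobId, String indexName, Tuple searchKey) throws Exception {
        deleteTupleFromIndex(jobId, indexName, getTupleToBeDeleted(jobId, indexName, searchKey));
    }
}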
Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class MetadataNode, method dropNodegroup:
@Override
public boolean dropNodegroup(JobId jobId, String nodeGroupName, boolean failSilently) throws MetadataException, RemoteException {
    List<String> datasetNames = getDatasetNamesPartitionedOnThisNodeGroup(jobId, nodeGroupName);
    if (!datasetNames.isEmpty()) {
        if (failSilently) {
            return false;
        }
        StringBuilder sb = new StringBuilder();
        sb.append("Nodegroup '" + nodeGroupName + "' cannot be dropped; it was used for partitioning these datasets:");
        for (int i = 0; i < datasetNames.size(); i++) {
            sb.append("\n" + (i + 1) + "- " + datasetNames.get(i) + ".");
        }
        throw new MetadataException(sb.toString());
    }
    try {
        ITupleReference searchKey = createTuple(nodeGroupName);
        // Searches the index for the tuple to be deleted. Acquires an S
        // lock on the 'nodegroup' dataset.
        ITupleReference tuple = getTupleToBeDeleted(jobId, MetadataPrimaryIndexes.NODEGROUP_DATASET, searchKey);
        deleteTupleFromIndex(jobId, MetadataPrimaryIndexes.NODEGROUP_DATASET, tuple);
        // A missing tuple surfaces as UPDATE_OR_DELETE_NON_EXISTENT_KEY
        // (formerly BTreeKeyDoesNotExistException) and is handled below.
        return true;
    } catch (HyracksDataException e) {
        if (e.getComponent().equals(ErrorCode.HYRACKS) && e.getErrorCode() == ErrorCode.UPDATE_OR_DELETE_NON_EXISTENT_KEY) {
            throw new MetadataException("Cannot drop nodegroup '" + nodeGroupName + "' because it doesn't exist", e);
        } else {
            throw new MetadataException(e);
        }
    } catch (ACIDException e) {
        throw new MetadataException(e);
    }
}
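What distinguishes dropNodegroup from the other drops is the failSilently flag: the same precondition failure (the nodegroup still partitions datasets) either returns false quietly or throws with a message enumerating every dependent. A sketch of that one-precondition, two-reporting-modes pattern; the class and method names are invented, and IllegalStateException stands in for MetadataException.

final class DropGuardSketch {

    /** Returns false (silent mode) or throws (loud mode) when dependents still exist. */
    static boolean dropIfUnused(java.util.List<String> dependents, String name, boolean failSilently) {
        if (!dependents.isEmpty()) {
            if (failSilently) {
                return false; // caller treats "still in use" as a no-op
            }
            StringBuilder sb = new StringBuilder("Nodegroup '").append(name)
                    .append("' cannot be dropped; it was used for partitioning these datasets:");
            for (int i = 0; i < dependents.size(); i++) {
                sb.append('\n').append(i + 1).append("- ").append(dependents.get(i)).append('.');
            }
            throw new IllegalStateException(sb.toString());
        }
        // ... the actual delete would go here ...
        return true;
    }
}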