Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class MetadataNode, method updateDataset.
@Override
public void updateDataset(JobId jobId, Dataset dataset) throws MetadataException, RemoteException {
    try {
        // This method will delete the previous entry of the dataset and insert the new one.
        // Delete entry from the 'datasets' dataset.
        ITupleReference searchKey = createTuple(dataset.getDataverseName(), dataset.getDatasetName());
        // Searches the index for the tuple to be deleted. Acquires an S
        // lock on the 'dataset' dataset.
        ITupleReference datasetTuple =
                getTupleToBeDeleted(jobId, MetadataPrimaryIndexes.DATASET_DATASET, searchKey);
        deleteTupleFromIndex(jobId, MetadataPrimaryIndexes.DATASET_DATASET, datasetTuple);
        // The previous tuple has been deleted.
        // Insert the new tuple into the 'dataset' dataset.
        DatasetTupleTranslator tupleReaderWriter = tupleTranslatorProvider.getDatasetTupleTranslator(true);
        datasetTuple = tupleReaderWriter.getTupleFromMetadataEntity(dataset);
        insertTupleIntoIndex(jobId, MetadataPrimaryIndexes.DATASET_DATASET, datasetTuple);
    } catch (HyracksDataException | ACIDException e) {
        throw new MetadataException(e);
    }
}
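For readers unfamiliar with the idiom, the following is a minimal, self-contained sketch (hypothetical names, not the AsterixDB API) of what updateDataset does: an update against a keyed store implemented as a delete of the old entry followed by an insert of the new one, with lower-level checked failures wrapped into a single domain exception.

import java.util.HashMap;
import java.util.Map;

final class KeyedStore {
    // Stands in for the domain exception (MetadataException in the original).
    static final class StoreException extends Exception {
        StoreException(Throwable cause) { super(cause); }
    }

    // Stands in for the 'dataset' B-tree index.
    private final Map<String, String> index = new HashMap<>();

    // Update = delete the previous entry, then insert the new one.
    void update(String key, String newValue) throws StoreException {
        try {
            index.remove(key);         // delete the previous entry, if any
            index.put(key, newValue);  // insert the new entry
        } catch (RuntimeException e) { // stands in for HyracksDataException | ACIDException
            throw new StoreException(e);
        }
    }
}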
Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class MetadataBootstrap, method startUniverse.
/**
 * Bootstraps the metadata.
 *
 * @param ncServiceContext the NC service context
 * @param isNewUniverse true if a new metadata universe is being created
 * @throws ACIDException
 * @throws RemoteException
 * @throws MetadataException
 */
public static void startUniverse(INCServiceContext ncServiceContext, boolean isNewUniverse)
        throws RemoteException, ACIDException, MetadataException {
    MetadataBootstrap.setNewUniverse(isNewUniverse);
    appContext = (INcApplicationContext) ncServiceContext.getApplicationContext();
    MetadataProperties metadataProperties = appContext.getMetadataProperties();
    metadataNodeName = metadataProperties.getMetadataNodeName();
    nodeNames = metadataProperties.getNodeNames();
    localResourceRepository = appContext.getLocalResourceRepository();
    ioManager = ncServiceContext.getIoManager();
    // Begin a transaction against the metadata.
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    try {
        // Lock the metadata in X mode.
        MetadataManager.INSTANCE.lock(mdTxnCtx, LockMode.X);
        for (int i = 0; i < PRIMARY_INDEXES.length; i++) {
            enlistMetadataDataset(ncServiceContext, PRIMARY_INDEXES[i]);
        }
        if (LOGGER.isLoggable(Level.INFO)) {
            LOGGER.info("Finished enlistment of metadata B-trees in " + (isNewUniverse ? "new" : "old")
                    + " universe");
        }
        if (isNewUniverse) {
            insertInitialDataverses(mdTxnCtx);
            insertMetadataDatasets(mdTxnCtx, PRIMARY_INDEXES);
            insertMetadataDatatypes(mdTxnCtx);
            insertNodes(mdTxnCtx);
            insertInitialGroups(mdTxnCtx);
            insertInitialAdapters(mdTxnCtx);
            insertInitialFeedPolicies(mdTxnCtx);
            insertInitialCompactionPolicies(mdTxnCtx);
            if (LOGGER.isLoggable(Level.INFO)) {
                LOGGER.info("Finished creating metadata B-trees.");
            }
        }
        // Initialize the datasetIdFactory.
        MetadataManager.INSTANCE.initializeDatasetIdFactory(mdTxnCtx);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        try {
            if (IS_DEBUG_MODE) {
                LOGGER.log(Level.SEVERE, "Failure during metadata bootstrap", e);
            }
            MetadataManager.INSTANCE.abortTransaction(mdTxnCtx);
        } catch (Exception e2) {
            e.addSuppressed(e2);
            // TODO change the exception type to AbortFailureException
            throw new MetadataException(e);
        }
        throw new MetadataException(e);
    }
}
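The catch block above follows a general abort-on-failure idiom worth isolating: if the transactional work fails, try to abort; if the abort itself also fails, attach that secondary failure via addSuppressed so neither error is lost. A minimal sketch, assuming a hypothetical Txn interface (not the AsterixDB API):

interface Txn {
    void commit() throws Exception;
    void abort() throws Exception;
}

final class TxnRunner {
    // Runs 'work' in a transaction; aborts on failure and preserves both errors.
    static void runInTxn(Txn txn, Runnable work) throws Exception {
        try {
            work.run();
            txn.commit();
        } catch (Exception e) {
            try {
                txn.abort();
            } catch (Exception abortFailure) {
                e.addSuppressed(abortFailure); // keep the abort failure attached
            }
            throw e; // the original failure remains the primary exception
        }
    }
}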
Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class JobEventListenerFactory, method createListener.
@Override
public IJobletEventListener createListener(final IHyracksJobletContext jobletContext) {
    return new IJobletEventListener() {
        @Override
        public void jobletFinish(JobStatus jobStatus) {
            try {
                ITransactionManager txnManager =
                        ((INcApplicationContext) jobletContext.getServiceContext().getApplicationContext())
                                .getTransactionSubsystem().getTransactionManager();
                ITransactionContext txnContext = txnManager.getTransactionContext(jobId, false);
                txnContext.setWriteTxn(transactionalWrite);
                txnManager.completedTransaction(txnContext, DatasetId.NULL, -1,
                        !(jobStatus == JobStatus.FAILURE));
            } catch (ACIDException e) {
                throw new Error(e);
            }
        }

        @Override
        public void jobletStart() {
            try {
                ((INcApplicationContext) jobletContext.getServiceContext().getApplicationContext())
                        .getTransactionSubsystem().getTransactionManager().getTransactionContext(jobId, true);
            } catch (ACIDException e) {
                throw new Error(e);
            }
        }
    };
}
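Note the throw new Error(e) in both callbacks: the IJobletEventListener methods declare no checked exceptions, so the checked ACIDException must be rethrown unchecked. A stripped-down sketch of the same idiom, using a hypothetical Listener interface:

final class Listeners {
    interface Listener {
        void onEvent(); // declares no checked exceptions
    }

    @FunctionalInterface
    interface CheckedAction {
        void run() throws Exception;
    }

    static Listener wrapping(CheckedAction action) {
        return () -> {
            try {
                action.run();
            } catch (Exception e) { // stands in for ACIDException
                // A checked exception cannot propagate through the interface,
                // so wrap it in an unchecked throwable, as the listener above does.
                throw new Error(e);
            }
        };
    }
}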
Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class DatasetLifecycleManager, method flushDatasetOpenIndexes.
/*
 * This method can be called asynchronously safely only if we are sure that no modify operation
 * will take place until the flush is scheduled.
 */
private void flushDatasetOpenIndexes(DatasetInfo dsInfo, boolean asyncFlush) throws HyracksDataException {
    if (!dsInfo.isExternal() && dsInfo.isDurable()) {
        synchronized (logRecord) {
            TransactionUtil.formFlushLogRecord(logRecord, dsInfo.getDatasetID(), null, logManager.getNodeId(),
                    dsInfo.getIndexes().size());
            try {
                logManager.log(logRecord);
            } catch (ACIDException e) {
                throw new HyracksDataException("could not write flush log while closing dataset", e);
            }
            try {
                // The notification will come from the LogPage class (notifyFlushTerminator).
                logRecord.wait();
            } catch (InterruptedException e) {
                throw new HyracksDataException(e);
            }
        }
        for (IndexInfo iInfo : dsInfo.getIndexes().values()) {
            // Update the resource LSN.
            AbstractLSMIOOperationCallback ioOpCallback =
                    (AbstractLSMIOOperationCallback) iInfo.getIndex().getIOOperationCallback();
            ioOpCallback.updateLastLSN(logRecord.getLSN());
        }
    }
    if (asyncFlush) {
        for (IndexInfo iInfo : dsInfo.getIndexes().values()) {
            ILSMIndexAccessor accessor =
                    iInfo.getIndex().createAccessor(NoOpOperationCallback.INSTANCE, NoOpOperationCallback.INSTANCE);
            accessor.scheduleFlush(iInfo.getIndex().getIOOperationCallback());
        }
    } else {
        for (IndexInfo iInfo : dsInfo.getIndexes().values()) {
            // TODO: This is not efficient since we flush the indexes sequentially.
            // Think of a way to allow submitting the flush requests concurrently. We don't do them
            // concurrently because this may lead to a deadlock between the DatasetLifecycleManager
            // and the PrimaryIndexOperationTracker.
            flushAndWaitForIO(dsInfo, iInfo);
        }
    }
}
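The synchronized/wait on logRecord above is a classic monitor handshake: the caller blocks until the log flusher, having written the FLUSH record to disk, notifies waiters on the same object. A minimal sketch of that handshake (hypothetical class; the loop guards against spurious wakeups, which the snippet above instead relies on the notifier's protocol to rule out):

final class FlushLatch {
    private boolean flushed = false;

    // Called by the thread that scheduled the flush log.
    synchronized void awaitFlush() throws InterruptedException {
        while (!flushed) { // loop guards against spurious wakeups
            wait();
        }
    }

    // Called by the log flusher once the record is durable
    // (the role played by notifyFlushTerminator in the original).
    synchronized void markFlushed() {
        flushed = true;
        notifyAll();
    }
}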
Use of org.apache.asterix.common.exceptions.ACIDException in project asterixdb by apache.
The class PrimaryIndexOperationTracker, method flushIfRequested.
public void flushIfRequested() throws HyracksDataException {
    // If we need a flush and this is the last completing operation, schedule the flush;
    // likewise if a flush was requested by the checkpoint (flushOnExit).
    boolean needsFlush = false;
    Set<ILSMIndex> indexes = dsInfo.getDatasetIndexes();
    if (!flushOnExit) {
        for (ILSMIndex lsmIndex : indexes) {
            if (lsmIndex.hasFlushRequestForCurrentMutableComponent()) {
                needsFlush = true;
                break;
            }
        }
    }
    if (needsFlush || flushOnExit) {
        // Make the current mutable components READABLE_UNWRITABLE to stop incoming modify
        // operations from entering them until the current flush is scheduled.
        for (ILSMIndex lsmIndex : indexes) {
            ILSMOperationTracker opTracker = lsmIndex.getOperationTracker();
            synchronized (opTracker) {
                ILSMMemoryComponent memComponent = lsmIndex.getCurrentMemoryComponent();
                if (memComponent.getState() == ComponentState.READABLE_WRITABLE && memComponent.isModified()) {
                    memComponent.setState(ComponentState.READABLE_UNWRITABLE);
                }
            }
        }
        LogRecord logRecord = new LogRecord();
        flushOnExit = false;
        if (dsInfo.isDurable()) {
            // Generate a FLUSH log. The flush will be triggered when the log is written
            // to disk by the LogFlusher.
            TransactionUtil.formFlushLogRecord(logRecord, datasetID, this, logManager.getNodeId(),
                    dsInfo.getDatasetIndexes().size());
            try {
                logManager.log(logRecord);
            } catch (ACIDException e) {
                throw new HyracksDataException("could not write flush log", e);
            }
            flushLogCreated = true;
        } else {
            // Trigger the flush for temporary indexes without generating a FLUSH log.
            triggerScheduleFlush(logRecord);
        }
    }
}
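The state flip from READABLE_WRITABLE to READABLE_UNWRITABLE is what fences new writers out of a mutable component while its flush is being scheduled. A self-contained sketch of that gating, with hypothetical names:

final class MutableComponent {
    enum State { READABLE_WRITABLE, READABLE_UNWRITABLE }

    private State state = State.READABLE_WRITABLE;
    private boolean modified = false;

    // Modify operations may only enter a writable component.
    synchronized void modify() {
        if (state != State.READABLE_WRITABLE) {
            throw new IllegalStateException("component sealed for flush");
        }
        modified = true;
    }

    // Called under the operation tracker's lock before scheduling a flush.
    synchronized void sealIfModified() {
        if (state == State.READABLE_WRITABLE && modified) {
            state = State.READABLE_UNWRITABLE; // stop incoming modify operations
        }
    }
}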