Usage of org.apache.hyracks.storage.am.lsm.common.api.ILSMOperationTracker in the Apache AsterixDB project.
Class DatasetLifecycleManager, method closeDataset:
/**
 * Closes a dataset: waits for in-flight IO, flushes its open indexes, deactivates
 * each open index under its operation-tracker lock, and removes the dataset from
 * the cache.
 *
 * @param dsInfo the dataset to close
 * @throws HyracksDataException if interrupted while waiting, or if the flush fails
 */
private void closeDataset(DatasetInfo dsInfo) throws HyracksDataException {
    // First wait for any ongoing IO operations.
    synchronized (dsInfo) {
        while (dsInfo.getNumActiveIOOps() > 0) {
            try {
                // Notification is expected from DatasetInfo when an IO op completes.
                dsInfo.wait();
            } catch (InterruptedException e) {
                // Restore the interrupt status so callers up the stack can observe it.
                Thread.currentThread().interrupt();
                throw new HyracksDataException(e);
            }
        }
    }
    try {
        // Flush (non-async) before deactivating so no in-memory data is lost.
        flushDatasetOpenIndexes(dsInfo, false);
    } catch (Exception e) {
        throw new HyracksDataException(e);
    }
    for (IndexInfo iInfo : dsInfo.getIndexes().values()) {
        if (iInfo.isOpen()) {
            // Deactivation must be serialized with operations tracked on this index.
            ILSMOperationTracker opTracker = iInfo.getIndex().getOperationTracker();
            synchronized (opTracker) {
                iInfo.getIndex().deactivate(false);
            }
            iInfo.setOpen(false);
        }
    }
    removeDatasetFromCache(dsInfo.getDatasetID());
    dsInfo.setOpen(false);
}
Usage of org.apache.hyracks.storage.am.lsm.common.api.ILSMOperationTracker in the Apache AsterixDB project.
Class PrimaryIndexOperationTracker, method flushIfRequested:
/**
 * Schedules a flush of the dataset's mutable components if one is needed.
 * A flush is needed when any index has a flush request for its current mutable
 * component, or when flushOnExit was set (e.g. by a checkpoint).
 *
 * @throws HyracksDataException if the FLUSH log record cannot be written
 */
public void flushIfRequested() throws HyracksDataException {
// If we need a flush, and this is the last completing operation, then schedule the flush,
// or if there is a flush scheduled by the checkpoint (flushOnExit), then schedule it
boolean needsFlush = false;
Set<ILSMIndex> indexes = dsInfo.getDatasetIndexes();
// Only scan the indexes when flushOnExit is not already forcing a flush.
if (!flushOnExit) {
for (ILSMIndex lsmIndex : indexes) {
if (lsmIndex.hasFlushRequestForCurrentMutableComponent()) {
needsFlush = true;
break;
}
}
}
if (needsFlush || flushOnExit) {
//Make the current mutable components READABLE_UNWRITABLE to stop coming modify operations from entering them until the current flush is scheduled.
for (ILSMIndex lsmIndex : indexes) {
// The state transition must happen under the index's operation-tracker lock.
ILSMOperationTracker opTracker = lsmIndex.getOperationTracker();
synchronized (opTracker) {
ILSMMemoryComponent memComponent = lsmIndex.getCurrentMemoryComponent();
// Only transition components that are writable AND actually contain data.
if (memComponent.getState() == ComponentState.READABLE_WRITABLE && memComponent.isModified()) {
memComponent.setState(ComponentState.READABLE_UNWRITABLE);
}
}
}
LogRecord logRecord = new LogRecord();
// Reset the checkpoint-requested flag; this invocation is handling it.
flushOnExit = false;
if (dsInfo.isDurable()) {
/**
 * Generate a FLUSH log.
 * Flush will be triggered when the log is written to disk by LogFlusher.
 */
TransactionUtil.formFlushLogRecord(logRecord, datasetID, this, logManager.getNodeId(), dsInfo.getDatasetIndexes().size());
try {
logManager.log(logRecord);
} catch (ACIDException e) {
throw new HyracksDataException("could not write flush log", e);
}
// Remember that a FLUSH log is in flight; the actual flush is deferred to LogFlusher.
flushLogCreated = true;
} else {
//trigger flush for temporary indexes without generating a FLUSH log.
triggerScheduleFlush(logRecord);
}
}
}
Usage of org.apache.hyracks.storage.am.lsm.common.api.ILSMOperationTracker in the Apache AsterixDB project.
Class DatasetLifecycleManager, method open:
/**
 * Opens (activates) the index identified by the given resource path, touching
 * both the dataset and the index so LRU eviction sees them as recently used.
 *
 * @param resourcePath path identifying the dataset and index to open
 * @throws HyracksDataException if the dataset or index is not registered
 */
@Override
public synchronized void open(String resourcePath) throws HyracksDataException {
    validateDatasetLifecycleManagerState();
    int did = getDIDfromResourcePath(resourcePath);
    long resourceID = getResourceIDfromResourcePath(resourcePath);
    DatasetResource dsr = datasets.get(did);
    // Guard against an unknown dataset id: datasets.get may return null, which
    // previously caused an NPE on dsr.getDatasetInfo() instead of the intended error.
    DatasetInfo dsInfo = dsr == null ? null : dsr.getDatasetInfo();
    if (dsInfo == null || !dsInfo.isRegistered()) {
        throw new HyracksDataException(
                "Failed to open index with resource ID " + resourceID + " since it does not exist.");
    }
    IndexInfo iInfo = dsInfo.getIndexes().get(resourceID);
    if (iInfo == null) {
        throw new HyracksDataException(
                "Failed to open index with resource ID " + resourceID + " since it does not exist.");
    }
    dsr.open(true);
    dsr.touch();
    if (!iInfo.isOpen()) {
        // Activation must be serialized with operations tracked on this index.
        ILSMOperationTracker opTracker = iInfo.getIndex().getOperationTracker();
        synchronized (opTracker) {
            iInfo.getIndex().activate();
        }
        iInfo.setOpen(true);
    }
    iInfo.touch();
}
Usage of org.apache.hyracks.storage.am.lsm.common.api.ILSMOperationTracker in the Apache AsterixDB project.
Class DatasetLifecycleManager, method unregister:
/**
 * Unregisters (removes) the index identified by the given resource path.
 * Refuses to remove an index that is still referenced or has active operations,
 * waits for in-flight IO, deactivates the index if open, and drops the dataset
 * from the cache once it has no remaining indexes.
 *
 * @param resourcePath path identifying the dataset and index to remove
 * @throws HyracksDataException if the index does not exist, is still in use,
 *         or the wait for IO completion is interrupted
 */
@Override
public synchronized void unregister(String resourcePath) throws HyracksDataException {
    validateDatasetLifecycleManagerState();
    int did = getDIDfromResourcePath(resourcePath);
    long resourceID = getResourceIDfromResourcePath(resourcePath);
    DatasetResource dsr = datasets.get(did);
    IndexInfo iInfo = dsr == null ? null : dsr.getIndexInfo(resourceID);
    if (dsr == null || iInfo == null) {
        throw new HyracksDataException("Index with resource ID " + resourceID + " does not exist.");
    }
    PrimaryIndexOperationTracker opTracker = dsr.getOpTracker();
    if (iInfo.getReferenceCount() != 0 || (opTracker != null && opTracker.getNumActiveOperations() != 0)) {
        // Note: opTracker may be null here (the guard only dereferences it when non-null),
        // so the message must not call opTracker.getNumActiveOperations() unconditionally.
        throw new HyracksDataException("Cannot remove index while it is open. (Dataset reference count = "
                + iInfo.getReferenceCount() + ", Operation tracker number of active operations = "
                + (opTracker != null ? opTracker.getNumActiveOperations() : 0) + ")");
    }
    // TODO: use fine-grained counters, one for each index instead of a single counter per dataset.
    // First wait for any ongoing IO operations.
    DatasetInfo dsInfo = dsr.getDatasetInfo();
    synchronized (dsInfo) {
        while (dsInfo.getNumActiveIOOps() > 0) {
            try {
                //notification will come from DatasetInfo class (undeclareActiveIOOperation)
                dsInfo.wait();
            } catch (InterruptedException e) {
                // Restore the interrupt status so callers up the stack can observe it.
                Thread.currentThread().interrupt();
                throw new HyracksDataException(e);
            }
        }
    }
    if (iInfo.isOpen()) {
        // Deactivation must be serialized with operations tracked on this index.
        ILSMOperationTracker indexOpTracker = iInfo.getIndex().getOperationTracker();
        synchronized (indexOpTracker) {
            iInfo.getIndex().deactivate(false);
        }
    }
    dsInfo.getIndexes().remove(resourceID);
    // Drop the whole dataset from the cache once nothing references it and it has no indexes.
    if (dsInfo.getReferenceCount() == 0 && dsInfo.isOpen() && dsInfo.getIndexes().isEmpty()
            && !dsInfo.isExternal()) {
        removeDatasetFromCache(dsInfo.getDatasetID());
    }
}
Aggregations