Use of org.apache.commons.lang3.mutable.MutableBoolean in project neo4j by neo4j.
The class SeekCursorTest, method shouldCatchupRootWhenNodeHasTooNewGenerationWhileTraversingDownTree.
@Test
public void shouldCatchupRootWhenNodeHasTooNewGenerationWhileTraversingDownTree() throws Exception {
    // given
    long generation = node.generation(cursor);
    MutableBoolean triggered = new MutableBoolean(false);
    // a newer leaf
    long leftChild = cursor.getCurrentPageId();
    node.initializeLeaf(cursor, stableGeneration + 1, unstableGeneration + 1);
    cursor.next();
    // a root
    long rootId = cursor.getCurrentPageId();
    node.initializeInternal(cursor, stableGeneration, unstableGeneration);
    long keyInRoot = 10L;
    insertKey.setValue(keyInRoot);
    node.insertKeyAt(cursor, insertKey, 0, 0);
    node.setKeyCount(cursor, 1);
    // with old pointer to child (simulating reuse of child node)
    node.setChildAt(cursor, leftChild, 0, stableGeneration, unstableGeneration);
    // a root catchup that records usage
    Supplier<Root> rootCatchup = () -> {
        try {
            triggered.setTrue();
            // and set child generation to match pointer
            cursor.next(leftChild);
            cursor.zapPage();
            node.initializeLeaf(cursor, stableGeneration, unstableGeneration);
            cursor.next(rootId);
            return new Root(rootId, generation);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    };
    // when
    from.setValue(1L);
    to.setValue(2L);
    try (SeekCursor<MutableLong, MutableLong> seek = new SeekCursor<>(cursor, node, from, to, layout, stableGeneration, unstableGeneration, generationSupplier, rootCatchup, unstableGeneration)) {
        // do nothing
    }
    // then
    assertTrue(triggered.getValue());
}
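MutableBoolean is the standard workaround for Java's rule that a lambda may only capture effectively final locals: the rootCatchup supplier cannot reassign a plain boolean owned by the test, but it can mutate a captured holder. A minimal sketch of that idiom, stripped of the neo4j types (class and variable names are illustrative only):

import java.util.function.Supplier;
import org.apache.commons.lang3.mutable.MutableBoolean;

public class FlagFromLambdaSketch {
    public static void main(String[] args) {
        // a plain local boolean could not be reassigned inside the lambda,
        // because lambdas may only capture effectively final variables
        MutableBoolean triggered = new MutableBoolean(false);
        Supplier<String> rootCatchup = () -> {
            triggered.setTrue(); // record that the catchup was used
            return "root";
        };
        rootCatchup.get();
        System.out.println(triggered.booleanValue()); // prints: true
    }
}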
Use of org.apache.commons.lang3.mutable.MutableBoolean in project neo4j by neo4j.
The class BuiltInProceduresInteractionTestBase, method queryIsRunning.
private boolean queryIsRunning(String targetQuery) {
    String query = "CALL dbms.listQueries() YIELD query WITH query WHERE query = '" + targetQuery + "' RETURN 1";
    MutableBoolean resultIsNotEmpty = new MutableBoolean();
    neo.executeQuery(adminSubject, query, emptyMap(), itr -> resultIsNotEmpty.setValue(itr.hasNext()));
    return resultIsNotEmpty.booleanValue();
}
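The same capture trick turns a callback-style API into a function with a return value: the consumer writes into the MutableBoolean, and the caller reads it after executeQuery returns. A self-contained sketch, with the neo4j query machinery replaced by a hypothetical stand-in:

import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;
import org.apache.commons.lang3.mutable.MutableBoolean;

public class CallbackResultSketch {
    // stand-in for neo.executeQuery: hands the result iterator to a consumer
    static void executeQuery(Consumer<Iterator<String>> resultConsumer) {
        resultConsumer.accept(List.of("some row").iterator());
    }

    static boolean queryIsRunning() {
        MutableBoolean resultIsNotEmpty = new MutableBoolean(); // defaults to false
        executeQuery(itr -> resultIsNotEmpty.setValue(itr.hasNext()));
        return resultIsNotEmpty.booleanValue();
    }

    public static void main(String[] args) {
        System.out.println(queryIsRunning()); // prints: true
    }
}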
Use of org.apache.commons.lang3.mutable.MutableBoolean in project asterixdb by apache.
The class QueryTranslator, method doDropDataset.
public static void doDropDataset(String dataverseName, String datasetName, MetadataProvider metadataProvider, boolean ifExists, IHyracksClientConnection hcc, boolean dropCorrespondingNodeGroup) throws Exception {
    MutableObject<ProgressState> progress = new MutableObject<>(ProgressState.NO_PROGRESS);
    MutableObject<MetadataTransactionContext> mdTxnCtx = new MutableObject<>(MetadataManager.INSTANCE.beginTransaction());
    MutableBoolean bActiveTxn = new MutableBoolean(true);
    metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    List<JobSpecification> jobsToExecute = new ArrayList<>();
    try {
        Dataset ds = metadataProvider.findDataset(dataverseName, datasetName);
        if (ds == null) {
            if (ifExists) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
                return;
            } else {
                throw new AlgebricksException("There is no dataset with name " + datasetName + " in dataverse " + dataverseName + ".");
            }
        }
        ds.drop(metadataProvider, mdTxnCtx, jobsToExecute, bActiveTxn, progress, hcc, dropCorrespondingNodeGroup);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
    } catch (Exception e) {
        if (bActiveTxn.booleanValue()) {
            abort(e, e, mdTxnCtx.getValue());
        }
        if (progress.getValue() == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            // remove all the indexes in NC
            try {
                for (JobSpecification jobSpec : jobsToExecute) {
                    JobUtils.runJob(hcc, jobSpec, true);
                }
            } catch (Exception e2) {
                // do not throw the exception, since the metadata still needs to be compensated
                e.addSuppressed(e2);
            }
            // remove the record from the metadata
            mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
            metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
            try {
                MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx.getValue());
                throw new IllegalStateException("System is in an inconsistent state: pending dataset(" + dataverseName + "." + datasetName + ") couldn't be removed from the metadata", e);
            }
        }
        throw e;
    } finally {
        ExternalDatasetsRegistry.INSTANCE.releaseAcquiredLocks(metadataProvider);
    }
}
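Note how bActiveTxn serves as an in/out parameter: ds.drop commits and reopens metadata transactions internally and keeps the flag in sync, so the catch block above knows whether there is still a live transaction to abort. A compact sketch of that contract (the begin/commit machinery is elided; names are illustrative):

import org.apache.commons.lang3.mutable.MutableBoolean;

public class TxnFlagSketch {
    // the callee flips the flag around its own commit/begin cycle
    static void dropInternals(MutableBoolean bActiveTxn) {
        bActiveTxn.setValue(false); // committed: no live txn while jobs run
        bActiveTxn.setValue(true);  // reopened for the final metadata update
        throw new RuntimeException("failure after the txn was reopened");
    }

    public static void main(String[] args) {
        MutableBoolean bActiveTxn = new MutableBoolean(true);
        try {
            dropInternals(bActiveTxn);
        } catch (RuntimeException e) {
            if (bActiveTxn.booleanValue()) {
                System.out.println("abort the still-active txn"); // taken here
            }
        }
    }
}

A return value could not express this, because the flag must stay accurate even when the callee exits by throwing; the shared holder gives the caller the state as of the failure point.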
Use of org.apache.commons.lang3.mutable.MutableBoolean in project asterixdb by apache.
The class QueryTranslator, method deliverResult.
private void deliverResult(IHyracksClientConnection hcc, IHyracksDataset hdc, IStatementCompiler compiler, MetadataProvider metadataProvider, IMetadataLocker locker, ResultDelivery resultDelivery, ResultMetadata outMetadata, Stats stats, String clientContextId, IStatementExecutorContext ctx) throws Exception {
    final ResultSetId resultSetId = metadataProvider.getResultSetId();
    switch (resultDelivery) {
        case ASYNC:
            MutableBoolean printed = new MutableBoolean(false);
            executorService.submit(() -> asyncCreateAndRunJob(hcc, compiler, locker, resultDelivery, clientContextId, ctx, resultSetId, printed));
            synchronized (printed) {
                while (!printed.booleanValue()) {
                    printed.wait();
                }
            }
            break;
        case IMMEDIATE:
            createAndRunJob(hcc, null, compiler, locker, resultDelivery, id -> {
                final ResultReader resultReader = new ResultReader(hdc, id, resultSetId);
                ResultUtil.printResults(appCtx, resultReader, sessionOutput, stats, metadataProvider.findOutputRecordType());
            }, clientContextId, ctx);
            break;
        case DEFERRED:
            createAndRunJob(hcc, null, compiler, locker, resultDelivery, id -> {
                ResultUtil.printResultHandle(sessionOutput, new ResultHandle(id, resultSetId));
                if (outMetadata != null) {
                    outMetadata.getResultSets().add(Triple.of(id, resultSetId, metadataProvider.findOutputRecordType()));
                }
            }, clientContextId, ctx);
            break;
        default:
            break;
    }
}
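In the ASYNC branch the MutableBoolean doubles as both flag and monitor: the submitting thread waits on it until asyncCreateAndRunJob marks the handle as printed and calls notify on the same object. A self-contained sketch of that handshake:

import org.apache.commons.lang3.mutable.MutableBoolean;

public class WaitForFlagSketch {
    public static void main(String[] args) throws InterruptedException {
        MutableBoolean printed = new MutableBoolean(false);
        Thread asyncJob = new Thread(() -> {
            // ... compile, submit, and print the async result handle ...
            synchronized (printed) {
                printed.setTrue();
                printed.notify(); // wake the waiting submitter
            }
        });
        asyncJob.start();
        synchronized (printed) {
            while (!printed.booleanValue()) {
                printed.wait(); // the loop guards against spurious wakeups
            }
        }
        System.out.println("handle printed; safe to return to the client");
    }
}

Any object could serve as the monitor; using the flag itself keeps the value and its lock in one place.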
Use of org.apache.commons.lang3.mutable.MutableBoolean in project asterixdb by apache.
The class Dataset, method drop.
/**
 * Drop this dataset.
 *
 * @param metadataProvider
 *            metadata provider that can be used to get metadata info and runtimes
 * @param mdTxnCtx
 *            the metadata transaction context
 * @param jobsToExecute
 *            a list of jobs to be executed as part of the drop operation
 * @param bActiveTxn
 *            whether the metadata transaction is ongoing
 * @param progress
 *            a mutable progress state used for error handling during the drop operation
 * @param hcc
 *            a client connection to the hyracks master for job execution
 * @param dropCorrespondingNodeGroup
 *            whether to drop the dataset's node group if it is no longer used by any other dataset
 * @throws Exception
 *             if an error occurs during the drop process or if the dataset can't be dropped for any reason
 */
public void drop(MetadataProvider metadataProvider, MutableObject<MetadataTransactionContext> mdTxnCtx, List<JobSpecification> jobsToExecute, MutableBoolean bActiveTxn, MutableObject<ProgressState> progress, IHyracksClientConnection hcc, boolean dropCorrespondingNodeGroup) throws Exception {
    Map<FeedConnectionId, Pair<JobSpecification, Boolean>> disconnectJobList = new HashMap<>();
    if (getDatasetType() == DatasetType.INTERNAL) {
        // prepare job spec(s) that would disconnect any active feeds involving the dataset
        ActiveLifecycleListener activeListener = (ActiveLifecycleListener) metadataProvider.getApplicationContext().getActiveLifecycleListener();
        IActiveEntityEventsListener[] activeListeners = activeListener.getNotificationHandler().getEventListeners();
        for (IActiveEntityEventsListener listener : activeListeners) {
            if (listener.isEntityUsingDataset(this)) {
                throw new CompilationException(ErrorCode.COMPILATION_CANT_DROP_ACTIVE_DATASET, RecordUtil.toFullyQualifiedName(dataverseName, datasetName), listener.getEntityId().toString());
            }
        }
        // #. prepare jobs to drop the dataset and the indexes in NC
        List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx.getValue(), dataverseName, datasetName);
        for (int j = 0; j < indexes.size(); j++) {
            if (indexes.get(j).isSecondaryIndex()) {
                jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(indexes.get(j), metadataProvider, this));
            }
        }
        jobsToExecute.add(DatasetUtil.dropDatasetJobSpec(this, metadataProvider));
        // #. mark the existing dataset as PendingDropOp
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx.getValue(), new Dataset(dataverseName, datasetName, getItemTypeDataverseName(), getItemTypeName(), getMetaItemTypeDataverseName(), getMetaItemTypeName(), getNodeGroupName(), getCompactionPolicy(), getCompactionPolicyProperties(), getDatasetDetails(), getHints(), getDatasetType(), getDatasetId(), MetadataUtil.PENDING_DROP_OP));
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
        bActiveTxn.setValue(false);
        progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);
        // #. disconnect the feeds
        for (Pair<JobSpecification, Boolean> p : disconnectJobList.values()) {
            JobUtils.runJob(hcc, p.first, true);
        }
        // #. run the jobs
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }
        mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
        bActiveTxn.setValue(true);
        metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    } else {
        // External dataset
        ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(this);
        // #. prepare jobs to drop the dataset and the indexes in NC
        List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx.getValue(), dataverseName, datasetName);
        for (int j = 0; j < indexes.size(); j++) {
            if (ExternalIndexingOperations.isFileIndex(indexes.get(j))) {
                jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(indexes.get(j), metadataProvider, this));
            } else {
                jobsToExecute.add(DatasetUtil.buildDropFilesIndexJobSpec(metadataProvider, this));
            }
        }
        // #. mark the existing dataset as PendingDropOp
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx.getValue(), new Dataset(dataverseName, datasetName, getItemTypeDataverseName(), getItemTypeName(), getNodeGroupName(), getCompactionPolicy(), getCompactionPolicyProperties(), getDatasetDetails(), getHints(), getDatasetType(), getDatasetId(), MetadataUtil.PENDING_DROP_OP));
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
        bActiveTxn.setValue(false);
        progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);
        // #. run the jobs
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }
        if (!indexes.isEmpty()) {
            ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(this);
        }
        mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
        bActiveTxn.setValue(true);
        metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    }
    // #. finally, delete the dataset
    MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
    // drop the associated node group if it is no longer used by any other dataset
    if (dropCorrespondingNodeGroup) {
        MetadataLockManager.INSTANCE.acquireNodeGroupWriteLock(metadataProvider.getLocks(), nodeGroupName);
        MetadataManager.INSTANCE.dropNodegroup(mdTxnCtx.getValue(), nodeGroupName, true);
    }
}
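The progress marker is advanced only after the PENDING_DROP_OP record is committed, which is exactly the point past which doDropDataset's compensation path must run. A minimal sketch of that checkpoint idiom using MutableObject (states and names are illustrative):

import org.apache.commons.lang3.mutable.MutableObject;

public class ProgressCheckpointSketch {
    enum ProgressState { NO_PROGRESS, ADDED_PENDINGOP_RECORD_TO_METADATA }

    static void drop(MutableObject<ProgressState> progress) {
        // ... commit the PENDING_DROP_OP record ...
        progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);
        throw new RuntimeException("a runtime job failed");
    }

    public static void main(String[] args) {
        MutableObject<ProgressState> progress = new MutableObject<>(ProgressState.NO_PROGRESS);
        try {
            drop(progress);
        } catch (RuntimeException e) {
            if (progress.getValue() == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
                System.out.println("compensate: remove the pending record"); // taken here
            }
        }
    }
}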