Example 1 with ILogRecord

Use of org.apache.asterix.common.transactions.ILogRecord in project asterixdb by apache.

The class LogBuffer, method notifyToSyncCommitQWaiter:

public void notifyToSyncCommitQWaiter() {
    ILogRecord logRecord = null;
    while (logRecord == null) {
        try {
            logRecord = syncCommitQ.take();
        } catch (InterruptedException e) {
            // ignore the interrupt and retry taking from the queue
        }
    }
    synchronized (logRecord) {
        logRecord.isFlushed(true);
        logRecord.notifyAll();
    }
}
Also used: ILogRecord(org.apache.asterix.common.transactions.ILogRecord)
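
For context, here is a minimal sketch of the waiting side of this handshake: a hypothetical commit path parks the record on syncCommitQ and blocks on the record's monitor until the flusher thread above marks it flushed and calls notifyAll(). The class and method names are invented for illustration, and the sketch assumes ILogRecord exposes a boolean isFlushed() getter to pair with the isFlushed(boolean) setter used above.

import java.util.concurrent.LinkedBlockingQueue;

import org.apache.asterix.common.transactions.ILogRecord;

public class SyncCommitWaiterSketch {

    private final LinkedBlockingQueue<ILogRecord> syncCommitQ = new LinkedBlockingQueue<>();

    // Hypothetical commit path: hand the record to the flusher via syncCommitQ,
    // then wait on the record's monitor until notifyToSyncCommitQWaiter()
    // sets the flushed flag and wakes us up.
    public void awaitFlush(ILogRecord logRecord) throws InterruptedException {
        syncCommitQ.offer(logRecord);
        synchronized (logRecord) {
            // assumption: ILogRecord has a boolean isFlushed() getter
            while (!logRecord.isFlushed()) {
                logRecord.wait();
            }
        }
    }
}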

Example 2 with ILogRecord

Use of org.apache.asterix.common.transactions.ILogRecord in project asterixdb by apache.

The class LogMarkerTest, method testInsertWithSnapshot:

@Test
public void testInsertWithSnapshot() {
    try {
        TestNodeController nc = new TestNodeController(null, false);
        nc.init();
        StorageComponentProvider storageManager = new StorageComponentProvider();
        List<List<String>> partitioningKeys = new ArrayList<>();
        partitioningKeys.add(Collections.singletonList("key"));
        Dataset dataset = new Dataset(DATAVERSE_NAME, DATASET_NAME, DATAVERSE_NAME, DATA_TYPE_NAME, NODE_GROUP_NAME, null, null, new InternalDatasetDetails(null, PartitioningStrategy.HASH, partitioningKeys, null, null, null, false, null, false), null, DatasetType.INTERNAL, DATASET_ID, 0);
        try {
            nc.createPrimaryIndex(dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null, storageManager, KEY_INDEXES, KEY_INDICATORS_LIST);
            IHyracksTaskContext ctx = nc.createTestContext(true);
            nc.newJobId();
            ITransactionContext txnCtx = nc.getTransactionManager().getTransactionContext(nc.getTxnJobId(), true);
            LSMInsertDeleteOperatorNodePushable insertOp = nc.getInsertPipeline(ctx, dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null, KEY_INDEXES, KEY_INDICATORS_LIST, storageManager).getLeft();
            insertOp.open();
            TupleGenerator tupleGenerator = new TupleGenerator(RECORD_TYPE, META_TYPE, KEY_INDEXES, KEY_INDICATORS, RECORD_GEN_FUNCTION, UNIQUE_RECORD_FIELDS, META_GEN_FUNCTION, UNIQUE_META_FIELDS);
            VSizeFrame frame = new VSizeFrame(ctx);
            VSizeFrame marker = new VSizeFrame(ctx);
            FrameTupleAppender tupleAppender = new FrameTupleAppender(frame);
            long markerId = 0L;
            for (int j = 0; j < NUM_OF_RECORDS; j++) {
                if (j % SNAPSHOT_SIZE == 0) {
                    marker.reset();
                    marker.getBuffer().put(MessagingFrameTupleAppender.MARKER_MESSAGE);
                    marker.getBuffer().putLong(markerId);
                    marker.getBuffer().flip();
                    markerId++;
                    TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, marker, ctx);
                    tupleAppender.flush(insertOp);
                }
                ITupleReference tuple = tupleGenerator.next();
                DataflowUtils.addTupleToFrame(tupleAppender, tuple, insertOp);
            }
            if (tupleAppender.getTupleCount() > 0) {
                tupleAppender.write(insertOp, true);
            }
            insertOp.close();
            nc.getTransactionManager().completedTransaction(txnCtx, DatasetId.NULL, -1, true);
            IIndexDataflowHelper dataflowHelper = nc.getPrimaryIndexDataflowHelper(dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null, storageManager, KEY_INDEXES, KEY_INDICATORS_LIST);
            dataflowHelper.open();
            LSMBTree btree = (LSMBTree) dataflowHelper.getIndexInstance();
            LongPointable longPointable = LongPointable.FACTORY.createPointable();
            ComponentMetadataUtil.get(btree, ComponentMetadataUtil.MARKER_LSN_KEY, longPointable);
            long lsn = longPointable.getLong();
            int numOfMarkers = 0;
            LogReader logReader = (LogReader) nc.getTransactionSubsystem().getLogManager().getLogReader(false);
            long expectedMarkerId = markerId - 1;
            while (lsn >= 0) {
                numOfMarkers++;
                ILogRecord logRecord = logReader.read(lsn);
                lsn = logRecord.getPreviousMarkerLSN();
                long logMarkerId = logRecord.getMarker().getLong();
                Assert.assertEquals(expectedMarkerId, logMarkerId);
                expectedMarkerId--;
            }
            logReader.close();
            dataflowHelper.close();
            Assert.assertEquals(markerId, numOfMarkers);
            nc.newJobId();
            TestTupleCounterFrameWriter countOp = create(nc.getSearchOutputDesc(KEY_TYPES, RECORD_TYPE, META_TYPE), Collections.emptyList(), Collections.emptyList(), false);
            IPushRuntime emptyTupleOp = nc.getFullScanPipeline(countOp, ctx, dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null, KEY_INDEXES, KEY_INDICATORS_LIST, storageManager);
            emptyTupleOp.open();
            emptyTupleOp.close();
            Assert.assertEquals(NUM_OF_RECORDS, countOp.getCount());
        } finally {
            nc.deInit();
        }
    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
}
Also used: LSMInsertDeleteOperatorNodePushable(org.apache.asterix.common.dataflow.LSMInsertDeleteOperatorNodePushable) IIndexDataflowHelper(org.apache.hyracks.storage.am.common.api.IIndexDataflowHelper) IPushRuntime(org.apache.hyracks.algebricks.runtime.base.IPushRuntime) ArrayList(java.util.ArrayList) TestTupleCounterFrameWriter(org.apache.asterix.app.data.gen.TestTupleCounterFrameWriter) NoMergePolicyFactory(org.apache.hyracks.storage.am.lsm.common.impls.NoMergePolicyFactory) MessagingFrameTupleAppender(org.apache.hyracks.dataflow.common.io.MessagingFrameTupleAppender) FrameTupleAppender(org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender) LongPointable(org.apache.hyracks.data.std.primitive.LongPointable) List(java.util.List) ILogRecord(org.apache.asterix.common.transactions.ILogRecord) Dataset(org.apache.asterix.metadata.entities.Dataset) InternalDatasetDetails(org.apache.asterix.metadata.entities.InternalDatasetDetails) ITransactionContext(org.apache.asterix.common.transactions.ITransactionContext) TupleGenerator(org.apache.asterix.app.data.gen.TupleGenerator) StorageComponentProvider(org.apache.asterix.file.StorageComponentProvider) VSizeFrame(org.apache.hyracks.api.comm.VSizeFrame) IHyracksTaskContext(org.apache.hyracks.api.context.IHyracksTaskContext) LSMBTree(org.apache.hyracks.storage.am.lsm.btree.impls.LSMBTree) TestNodeController(org.apache.asterix.app.bootstrap.TestNodeController) ITupleReference(org.apache.hyracks.dataflow.common.data.accessors.ITupleReference) LogReader(org.apache.asterix.transaction.management.service.logging.LogReader) Test(org.junit.Test)
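
The marker payload this test builds is simply a message-type byte followed by a long marker id. Below is a self-contained sketch of that byte layout using a plain ByteBuffer; the MARKER constant is a stand-in for MessagingFrameTupleAppender.MARKER_MESSAGE, whose actual value is not shown in the snippet above.

import java.nio.ByteBuffer;

public class MarkerPayloadSketch {

    // stand-in value for MessagingFrameTupleAppender.MARKER_MESSAGE
    private static final byte MARKER = 0x01;

    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(Byte.BYTES + Long.BYTES);
        // Write side: message-type byte, then the marker id, then flip for reading,
        // mirroring the marker.getBuffer() calls in the test.
        buffer.put(MARKER);
        buffer.putLong(42L);
        buffer.flip();
        // Read side: consume the fields in the same order they were written.
        byte messageType = buffer.get();
        long markerId = buffer.getLong();
        System.out.println(messageType == MARKER ? "marker " + markerId : "not a marker");
    }
}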

Example 3 with ILogRecord

Use of org.apache.asterix.common.transactions.ILogRecord in project asterixdb by apache.

The class RecoveryManager, method startRecoveryRedoPhase:

private synchronized void startRecoveryRedoPhase(Set<Integer> partitions, ILogReader logReader, long lowWaterMarkLSN, Set<Integer> winnerJobSet) throws IOException, ACIDException {
    int redoCount = 0;
    int jobId = -1;
    long resourceId;
    long maxDiskLastLsn;
    long lsn = -1;
    ILSMIndex index = null;
    LocalResource localResource = null;
    DatasetLocalResource localResourceMetadata = null;
    boolean foundWinner = false;
    JobEntityCommits jobEntityWinners = null;
    IAppRuntimeContextProvider appRuntimeContext = txnSubsystem.getAsterixAppRuntimeContextProvider();
    IDatasetLifecycleManager datasetLifecycleManager = appRuntimeContext.getDatasetLifecycleManager();
    Map<Long, LocalResource> resourcesMap = localResourceRepository.loadAndGetAllResources();
    Map<Long, Long> resourceId2MaxLSNMap = new HashMap<>();
    TxnId tempKeyTxnId = new TxnId(-1, -1, -1, null, -1, false);
    ILogRecord logRecord = null;
    try {
        logReader.initializeScan(lowWaterMarkLSN);
        logRecord = logReader.next();
        while (logRecord != null) {
            if (IS_DEBUG_MODE) {
                LOGGER.info(logRecord.getLogRecordForDisplay());
            }
            lsn = logRecord.getLSN();
            jobId = logRecord.getJobId();
            foundWinner = false;
            switch(logRecord.getLogType()) {
                case LogType.UPDATE:
                    if (partitions.contains(logRecord.getResourcePartition())) {
                        if (winnerJobSet.contains(jobId)) {
                            foundWinner = true;
                        } else if (jobId2WinnerEntitiesMap.containsKey(jobId)) {
                            jobEntityWinners = jobId2WinnerEntitiesMap.get(jobId);
                            tempKeyTxnId.setTxnId(jobId, logRecord.getDatasetId(), logRecord.getPKHashValue(), logRecord.getPKValue(), logRecord.getPKValueSize());
                            if (jobEntityWinners.containsEntityCommitForTxnId(lsn, tempKeyTxnId)) {
                                foundWinner = true;
                            }
                        }
                        if (foundWinner) {
                            resourceId = logRecord.getResourceId();
                            localResource = resourcesMap.get(resourceId);
                            /*******************************************************************
                             * [Notice]
                             * -> Issue
                             * A dropped index can break redo: an update cannot be redone if the
                             * corresponding index no longer exists on the NC because of an
                             * index drop DDL operation.
                             * -> Approach
                             * Skip such updates during redo. The case is detected when the
                             * localResource of the corresponding index is retrieved and turns
                             * out to be null; if so, simply move on to the next log record.
                             *******************************************************************/
                            if (localResource == null) {
                                LOGGER.log(Level.WARNING, "resource was not found for resource id " + resourceId);
                                logRecord = logReader.next();
                                continue;
                            }
                            /*******************************************************************/
                            //get index instance from IndexLifeCycleManager
                            //if index is not registered into IndexLifeCycleManager,
                            //create the index using LocalMetadata stored in LocalResourceRepository
                            //get partition path in this node
                            localResourceMetadata = (DatasetLocalResource) localResource.getResource();
                            index = (ILSMIndex) datasetLifecycleManager.get(localResource.getPath());
                            if (index == null) {
                                //#. create index instance and register to indexLifeCycleManager
                                index = (ILSMIndex) localResourceMetadata.createInstance(serviceCtx);
                                datasetLifecycleManager.register(localResource.getPath(), index);
                                datasetLifecycleManager.open(localResource.getPath());
                                //#. get maxDiskLastLSN
                                ILSMIndex lsmIndex = index;
                                try {
                                    maxDiskLastLsn = ((AbstractLSMIOOperationCallback) lsmIndex.getIOOperationCallback()).getComponentLSN(lsmIndex.getImmutableComponents());
                                } catch (HyracksDataException e) {
                                    datasetLifecycleManager.close(localResource.getPath());
                                    throw e;
                                }
                                //#. set resourceId and maxDiskLastLSN to the map
                                resourceId2MaxLSNMap.put(resourceId, maxDiskLastLsn);
                            } else {
                                maxDiskLastLsn = resourceId2MaxLSNMap.get(resourceId);
                            }
                            if (lsn > maxDiskLastLsn) {
                                redo(logRecord, datasetLifecycleManager);
                                redoCount++;
                            }
                        }
                    }
                    break;
                case LogType.JOB_COMMIT:
                case LogType.ENTITY_COMMIT:
                case LogType.ABORT:
                case LogType.FLUSH:
                case LogType.WAIT:
                case LogType.MARKER:
                    //do nothing
                    break;
                default:
                    throw new ACIDException("Unsupported LogType: " + logRecord.getLogType());
            }
            logRecord = logReader.next();
        }
        LOGGER.info("Logs REDO phase completed. Redo logs count: " + redoCount);
    } finally {
        //close all indexes
        Set<Long> resourceIdList = resourceId2MaxLSNMap.keySet();
        for (long r : resourceIdList) {
            datasetLifecycleManager.close(resourcesMap.get(r).getPath());
        }
    }
}
Also used: HashMap(java.util.HashMap) ILSMIndex(org.apache.hyracks.storage.am.lsm.common.api.ILSMIndex) Checkpoint(org.apache.asterix.common.transactions.Checkpoint) HyracksDataException(org.apache.hyracks.api.exceptions.HyracksDataException) DatasetLocalResource(org.apache.asterix.common.dataflow.DatasetLocalResource) LocalResource(org.apache.hyracks.storage.common.LocalResource) ACIDException(org.apache.asterix.common.exceptions.ACIDException) IDatasetLifecycleManager(org.apache.asterix.common.api.IDatasetLifecycleManager) TxnId(org.apache.asterix.transaction.management.service.recovery.TxnId) IAppRuntimeContextProvider(org.apache.asterix.common.transactions.IAppRuntimeContextProvider) ILogRecord(org.apache.asterix.common.transactions.ILogRecord)
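
Stripped of the bookkeeping, the redo rule in this method is: replay an update only when its LSN is newer than everything the target resource already has on disk. Here is a minimal sketch of that comparison, with hypothetical names mirroring the resourceId2MaxLSNMap bookkeeping above:

import java.util.HashMap;
import java.util.Map;

public class RedoDecisionSketch {

    // Hypothetical cache of each resource's max LSN already persisted on disk,
    // in the spirit of resourceId2MaxLSNMap in the snippet above.
    private final Map<Long, Long> resourceId2MaxLSN = new HashMap<>();

    public void recordDiskLSN(long resourceId, long maxDiskLastLsn) {
        resourceId2MaxLSN.put(resourceId, maxDiskLastLsn);
    }

    // An update log record needs redo only if its LSN is newer than
    // everything the resource's disk components already contain.
    public boolean needsRedo(long resourceId, long logRecordLSN) {
        Long maxDiskLastLsn = resourceId2MaxLSN.get(resourceId);
        return maxDiskLastLsn != null && logRecordLSN > maxDiskLastLsn;
    }
}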

Example 4 with ILogRecord

Use of org.apache.asterix.common.transactions.ILogRecord in project asterixdb by apache.

The class RecoveryManager, method rollbackTransaction:

@Override
public void rollbackTransaction(ITransactionContext txnContext) throws ACIDException {
    int abortedJobId = txnContext.getJobId().getId();
    // Obtain the first/last log record LSNs written by the Job
    long firstLSN = txnContext.getFirstLSN();
    /**
     * The effect of any log record with LSN below minFirstLSN has already been written to disk and
     * will not be rolled back. Therefore, we will set the first LSN of the job to the maximum of
     * minFirstLSN and the job's first LSN.
     */
    try {
        long localMinFirstLSN = getLocalMinFirstLSN();
        firstLSN = Math.max(firstLSN, localMinFirstLSN);
    } catch (HyracksDataException e) {
        throw new ACIDException(e);
    }
    long lastLSN = txnContext.getLastLSN();
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("rollbacking transaction log records from " + firstLSN + " to " + lastLSN);
    }
    // check if the transaction actually wrote some logs.
    if (firstLSN == TransactionManagementConstants.LogManagerConstants.TERMINAL_LSN || firstLSN > lastLSN) {
        if (LOGGER.isLoggable(Level.INFO)) {
            LOGGER.info("no need to roll back as there were no operations by the job " + txnContext.getJobId());
        }
        return;
    }
    // While reading log records from firstLsn to lastLsn, collect uncommitted txn's Lsns
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("collecting loser transaction's LSNs from " + firstLSN + " to " + lastLSN);
    }
    Map<TxnId, List<Long>> jobLoserEntity2LSNsMap = new HashMap<>();
    TxnId tempKeyTxnId = new TxnId(-1, -1, -1, null, -1, false);
    int updateLogCount = 0;
    int entityCommitLogCount = 0;
    int logJobId = -1;
    long currentLSN = -1;
    TxnId loserEntity = null;
    List<Long> undoLSNSet = null;
    //get active partitions on this node
    Set<Integer> activePartitions = localResourceRepository.getActivePartitions();
    ILogReader logReader = logMgr.getLogReader(false);
    try {
        logReader.initializeScan(firstLSN);
        ILogRecord logRecord = null;
        while (currentLSN < lastLSN) {
            logRecord = logReader.next();
            if (logRecord == null) {
                break;
            } else {
                currentLSN = logRecord.getLSN();
                if (IS_DEBUG_MODE) {
                    LOGGER.info(logRecord.getLogRecordForDisplay());
                }
            }
            logJobId = logRecord.getJobId();
            if (logJobId != abortedJobId) {
                continue;
            }
            tempKeyTxnId.setTxnId(logJobId, logRecord.getDatasetId(), logRecord.getPKHashValue(), logRecord.getPKValue(), logRecord.getPKValueSize());
            switch(logRecord.getLogType()) {
                case LogType.UPDATE:
                    if (activePartitions.contains(logRecord.getResourcePartition())) {
                        undoLSNSet = jobLoserEntity2LSNsMap.get(tempKeyTxnId);
                        if (undoLSNSet == null) {
                            loserEntity = new TxnId(logJobId, logRecord.getDatasetId(), logRecord.getPKHashValue(), logRecord.getPKValue(), logRecord.getPKValueSize(), true);
                            undoLSNSet = new LinkedList<>();
                            jobLoserEntity2LSNsMap.put(loserEntity, undoLSNSet);
                        }
                        undoLSNSet.add(currentLSN);
                        updateLogCount++;
                        if (IS_DEBUG_MODE) {
                            LOGGER.info(Thread.currentThread().getId() + "======> update[" + currentLSN + "]:" + tempKeyTxnId);
                        }
                    }
                    break;
                case LogType.ENTITY_COMMIT:
                    if (activePartitions.contains(logRecord.getResourcePartition())) {
                        jobLoserEntity2LSNsMap.remove(tempKeyTxnId);
                        entityCommitLogCount++;
                        if (IS_DEBUG_MODE) {
                            LOGGER.info(Thread.currentThread().getId() + "======> entity_commit[" + currentLSN + "]" + tempKeyTxnId);
                        }
                    }
                    break;
                case LogType.JOB_COMMIT:
                    throw new ACIDException("Unexpected LogType(" + logRecord.getLogType() + ") during abort.");
                case LogType.ABORT:
                case LogType.FLUSH:
                case LogType.WAIT:
                case LogType.MARKER:
                    //ignore
                    break;
                default:
                    throw new ACIDException("Unsupported LogType: " + logRecord.getLogType());
            }
        }
        if (currentLSN != lastLSN) {
            throw new ACIDException("LastLSN mismatch: lastLSN(" + lastLSN + ") vs currentLSN(" + currentLSN + ") during abort( " + txnContext.getJobId() + ")");
        }
        //undo loserTxn's effect
        LOGGER.log(Level.INFO, "undoing loser transaction's effect");
        IDatasetLifecycleManager datasetLifecycleManager = txnSubsystem.getAsterixAppRuntimeContextProvider().getDatasetLifecycleManager();
        //TODO sort loser entities by smallest LSN to undo in one pass.
        Iterator<Entry<TxnId, List<Long>>> iter = jobLoserEntity2LSNsMap.entrySet().iterator();
        int undoCount = 0;
        while (iter.hasNext()) {
            Map.Entry<TxnId, List<Long>> loserEntity2LSNsMap = iter.next();
            undoLSNSet = loserEntity2LSNsMap.getValue();
            // The step below is important since the upsert operations must be done in reverse order.
            Collections.reverse(undoLSNSet);
            for (long undoLSN : undoLSNSet) {
                //here, all the log records are UPDATE type. So, we don't need to check the type again.
                //read the corresponding log record to be undone.
                logRecord = logReader.read(undoLSN);
                if (logRecord == null) {
                    throw new ACIDException("IllegalState exception during abort( " + txnContext.getJobId() + ")");
                }
                if (IS_DEBUG_MODE) {
                    LOGGER.info(logRecord.getLogRecordForDisplay());
                }
                undo(logRecord, datasetLifecycleManager);
                undoCount++;
            }
        }
        if (LOGGER.isLoggable(Level.INFO)) {
            LOGGER.info("undone loser transaction's effect");
            LOGGER.info("[RecoveryManager's rollback log count] update/entityCommit/undo:" + updateLogCount + "/" + entityCommitLogCount + "/" + undoCount);
        }
    } finally {
        logReader.close();
    }
}
Also used: HashMap(java.util.HashMap) Checkpoint(org.apache.asterix.common.transactions.Checkpoint) HyracksDataException(org.apache.hyracks.api.exceptions.HyracksDataException) ILogReader(org.apache.asterix.common.transactions.ILogReader) ACIDException(org.apache.asterix.common.exceptions.ACIDException) IDatasetLifecycleManager(org.apache.asterix.common.api.IDatasetLifecycleManager) TxnId(org.apache.asterix.transaction.management.service.recovery.TxnId) Entry(java.util.Map.Entry) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) ILogRecord(org.apache.asterix.common.transactions.ILogRecord) Map(java.util.Map)
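
The Collections.reverse call above is the crux of the undo pass: LSNs are collected oldest-first during the forward scan, but undo must be applied newest-first so each step reverses the most recent surviving effect. A toy illustration:

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;

public class UndoOrderSketch {

    public static void main(String[] args) {
        // LSNs arrive in forward scan order (oldest first)...
        List<Long> undoLSNs = new LinkedList<>(List.of(100L, 250L, 310L));
        // ...but undo is applied newest-first, as in the rollback loop above.
        Collections.reverse(undoLSNs);
        undoLSNs.forEach(lsn -> System.out.println("undo LSN " + lsn));
        // prints: undo LSN 310, then 250, then 100
    }
}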

Example 5 with ILogRecord

Use of org.apache.asterix.common.transactions.ILogRecord in project asterixdb by apache.

The class RecoveryManager, method startRecoverysAnalysisPhase:

private synchronized Set<Integer> startRecoverysAnalysisPhase(Set<Integer> partitions, ILogReader logReader, long lowWaterMarkLSN) throws IOException, ACIDException {
    int updateLogCount = 0;
    int entityCommitLogCount = 0;
    int jobCommitLogCount = 0;
    int abortLogCount = 0;
    Set<Integer> winnerJobSet = new HashSet<>();
    jobId2WinnerEntitiesMap = new HashMap<>();
    //set log reader to the lowWaterMarkLsn
    ILogRecord logRecord;
    logReader.initializeScan(lowWaterMarkLSN);
    logRecord = logReader.next();
    while (logRecord != null) {
        if (IS_DEBUG_MODE) {
            LOGGER.info(logRecord.getLogRecordForDisplay());
        }
        switch(logRecord.getLogType()) {
            case LogType.UPDATE:
                if (partitions.contains(logRecord.getResourcePartition())) {
                    updateLogCount++;
                }
                break;
            case LogType.JOB_COMMIT:
                winnerJobSet.add(logRecord.getJobId());
                cleanupJobCommits(logRecord.getJobId());
                jobCommitLogCount++;
                break;
            case LogType.ENTITY_COMMIT:
                if (partitions.contains(logRecord.getResourcePartition())) {
                    analyzeEntityCommitLog(logRecord);
                    entityCommitLogCount++;
                }
                break;
            case LogType.ABORT:
                abortLogCount++;
                break;
            case LogType.FLUSH:
            case LogType.WAIT:
            case LogType.MARKER:
                break;
            default:
                throw new ACIDException("Unsupported LogType: " + logRecord.getLogType());
        }
        logRecord = logReader.next();
    }
    //prepare winners for search after analysis is done to flush anything remaining in memory to disk.
    for (JobEntityCommits winners : jobId2WinnerEntitiesMap.values()) {
        winners.prepareForSearch();
    }
    LOGGER.info("Logs analysis phase completed.");
    LOGGER.info("Analysis log count update/entityCommit/jobCommit/abort = " + updateLogCount + "/" + entityCommitLogCount + "/" + jobCommitLogCount + "/" + abortLogCount);
    return winnerJobSet;
}
Also used: ILogRecord(org.apache.asterix.common.transactions.ILogRecord) Checkpoint(org.apache.asterix.common.transactions.Checkpoint) HashSet(java.util.HashSet) ACIDException(org.apache.asterix.common.exceptions.ACIDException)
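
Conceptually, the analysis phase boils down to a single forward scan that marks every job with a JOB_COMMIT record as a winner; the redo phase then only replays winners' updates. A minimal, self-contained sketch of that classification, where the reduced LogEntry record is invented for illustration:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class AnalysisPhaseSketch {

    // Hypothetical reduced log record: just a type and a job id.
    record LogEntry(String type, int jobId) {}

    // One forward pass over the log: any job with a JOB_COMMIT record
    // is a winner whose updates the redo phase may replay.
    static Set<Integer> findWinners(List<LogEntry> log) {
        Set<Integer> winners = new HashSet<>();
        for (LogEntry entry : log) {
            if ("JOB_COMMIT".equals(entry.type())) {
                winners.add(entry.jobId());
            }
        }
        return winners;
    }

    public static void main(String[] args) {
        List<LogEntry> log = List.of(
                new LogEntry("UPDATE", 1),
                new LogEntry("JOB_COMMIT", 1),
                new LogEntry("UPDATE", 2)); // job 2 never commits: a loser
        System.out.println(findWinners(log)); // prints [1]
    }
}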

Aggregations

ILogRecord (org.apache.asterix.common.transactions.ILogRecord) 6
ACIDException (org.apache.asterix.common.exceptions.ACIDException) 3
Checkpoint (org.apache.asterix.common.transactions.Checkpoint) 3
ArrayList (java.util.ArrayList) 2
HashMap (java.util.HashMap) 2
List (java.util.List) 2
IDatasetLifecycleManager (org.apache.asterix.common.api.IDatasetLifecycleManager) 2
TxnId (org.apache.asterix.transaction.management.service.recovery.TxnId) 2
HyracksDataException (org.apache.hyracks.api.exceptions.HyracksDataException) 2
HashSet (java.util.HashSet) 1
LinkedList (java.util.LinkedList) 1
Map (java.util.Map) 1
Entry (java.util.Map.Entry) 1
TestNodeController (org.apache.asterix.app.bootstrap.TestNodeController) 1
TestTupleCounterFrameWriter (org.apache.asterix.app.data.gen.TestTupleCounterFrameWriter) 1
TupleGenerator (org.apache.asterix.app.data.gen.TupleGenerator) 1
DatasetLocalResource (org.apache.asterix.common.dataflow.DatasetLocalResource) 1
LSMInsertDeleteOperatorNodePushable (org.apache.asterix.common.dataflow.LSMInsertDeleteOperatorNodePushable) 1
IAppRuntimeContextProvider (org.apache.asterix.common.transactions.IAppRuntimeContextProvider) 1
ILogReader (org.apache.asterix.common.transactions.ILogReader) 1