
Example 6 with CacheDataRow

use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.

the class GridCacheQueryManager method scanIterator.

/**
 * @param qry Query.
 * @param transformer Transformer.
 * @param locNode Local node.
 * @return Full-scan row iterator.
 * @throws IgniteCheckedException If failed to get iterator.
 */
@SuppressWarnings({ "unchecked" })
private GridCloseableIterator scanIterator(final GridCacheQueryAdapter<?> qry, IgniteClosure transformer, boolean locNode) throws IgniteCheckedException {
    assert !cctx.mvccEnabled() || qry.mvccSnapshot() != null;
    final IgniteBiPredicate<K, V> keyValFilter = qry.scanFilter();
    final InternalScanFilter<K, V> intFilter = keyValFilter != null ? new InternalScanFilter<>(keyValFilter) : null;
    try {
        if (keyValFilter instanceof PlatformCacheEntryFilter)
            ((PlatformCacheEntryFilter) keyValFilter).cacheContext(cctx);
        else
            injectResources(keyValFilter);
        Integer part = cctx.isLocal() ? null : qry.partition();
        if (part != null && (part < 0 || part >= cctx.affinity().partitions()))
            return new GridEmptyCloseableIterator() {

                @Override
                public void close() throws IgniteCheckedException {
                    if (intFilter != null)
                        intFilter.close();
                    super.close();
                }
            };
        AffinityTopologyVersion topVer = GridQueryProcessor.getRequestAffinityTopologyVersion();
        if (topVer == null)
            topVer = cctx.affinity().affinityTopologyVersion();
        final boolean backups = qry.includeBackups() || cctx.isReplicated();
        final GridDhtLocalPartition locPart;
        final GridIterator<CacheDataRow> it;
        if (part != null) {
            final GridDhtCacheAdapter dht = cctx.isNear() ? cctx.near().dht() : cctx.dht();
            GridDhtLocalPartition locPart0 = dht.topology().localPartition(part, topVer, false);
            if (locPart0 == null || locPart0.state() != OWNING || !locPart0.reserve()) {
                throw locPart0 != null && locPart0.state() == LOST ?
                    new CacheInvalidStateException("Failed to execute scan query because cache partition has been " +
                        "lost [cacheName=" + cctx.name() + ", part=" + part + "]") :
                    new GridDhtUnreservedPartitionException(part, cctx.affinity().affinityTopologyVersion(),
                        "Partition can not be reserved");
            }
            locPart = locPart0;
            it = cctx.offheap().cachePartitionIterator(cctx.cacheId(), part, qry.mvccSnapshot(), qry.isDataPageScanEnabled());
        } else {
            locPart = null;
            if (!cctx.isLocal()) {
                final GridDhtCacheAdapter dht = cctx.isNear() ? cctx.near().dht() : cctx.dht();
                Set<Integer> lostParts = dht.topology().lostPartitions();
                if (!lostParts.isEmpty()) {
                    throw new CacheInvalidStateException("Failed to execute scan query because cache partition " + "has been lost [cacheName=" + cctx.name() + ", part=" + lostParts.iterator().next() + "]");
                }
            }
            it = cctx.offheap().cacheIterator(cctx.cacheId(), true, backups, topVer, qry.mvccSnapshot(), qry.isDataPageScanEnabled());
        }
        ScanQueryIterator iter = new ScanQueryIterator(
            it,
            qry,
            topVer,
            locPart,
            SecurityUtils.sandboxedProxy(cctx.kernalContext(), IgniteBiPredicate.class, keyValFilter),
            SecurityUtils.sandboxedProxy(cctx.kernalContext(), IgniteClosure.class, transformer),
            locNode,
            locNode ? locIters : null,
            cctx,
            log);
        if (locNode) {
            ScanQueryIterator old = locIters.addx(iter);
            assert old == null;
        }
        return iter;
    } catch (IgniteCheckedException | RuntimeException e) {
        if (intFilter != null)
            intFilter.close();
        throw e;
    }
}
Also used : CacheDataRow(org.apache.ignite.internal.processors.cache.persistence.CacheDataRow) GridDhtCacheAdapter(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter) IgniteClosure(org.apache.ignite.lang.IgniteClosure) GridDhtUnreservedPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtUnreservedPartitionException) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) IgniteBiPredicate(org.apache.ignite.lang.IgniteBiPredicate) GridEmptyCloseableIterator(org.apache.ignite.internal.util.GridEmptyCloseableIterator) PlatformCacheEntryFilter(org.apache.ignite.internal.processors.platform.cache.PlatformCacheEntryFilter) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition) CacheInvalidStateException(org.apache.ignite.internal.processors.cache.CacheInvalidStateException)
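
For orientation, here is a minimal sketch of the public API that ultimately reaches this method: a ScanQuery whose IgniteBiPredicate becomes qry.scanFilter() above. The cache name, key/value types, and filter are illustrative assumptions, not taken from the source.

import javax.cache.Cache;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.ScanQuery;

public class ScanQuerySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Hypothetical cache; any key/value types work the same way.
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("myCache");

            for (int i = 0; i < 100; i++)
                cache.put(i, "value-" + i);

            // This predicate is what scanIterator() receives as qry.scanFilter().
            ScanQuery<Integer, String> qry = new ScanQuery<>((k, v) -> k % 2 == 0);

            // Restricting the scan to one partition takes the cachePartitionIterator()
            // branch above and requires the partition to be reserved.
            // qry.setPartition(0);

            try (QueryCursor<Cache.Entry<Integer, String>> cur = cache.query(qry)) {
                for (Cache.Entry<Integer, String> e : cur)
                    System.out.println(e.getKey() + " -> " + e.getValue());
            }
        }
    }
}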

Example 7 with CacheDataRow

use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.

the class CacheContinuousQueryManager method executeQuery0.

/**
 * @param locLsnr Local listener.
 * @param clsr Closure to create CacheContinuousQueryHandler.
 * @param bufSize Buffer size.
 * @param timeInterval Time interval.
 * @param autoUnsubscribe Auto unsubscribe flag.
 * @param internal Internal flag.
 * @param notifyExisting Notify existing flag.
 * @param loc Local flag.
 * @param keepBinary Keep binary flag.
 * @param onStart {@code true} if the routine is started during node start; in that case waiting for the topology exchange future is skipped.
 * @return Continuous routine ID.
 * @throws IgniteCheckedException In case of error.
 */
private UUID executeQuery0(CacheEntryUpdatedListener locLsnr, IgniteOutClosure<CacheContinuousQueryHandler> clsr, int bufSize, long timeInterval, boolean autoUnsubscribe, boolean internal, boolean notifyExisting, boolean loc, final boolean keepBinary, boolean onStart) throws IgniteCheckedException {
    cctx.checkSecurity(SecurityPermission.CACHE_READ);
    int taskNameHash = !internal && cctx.kernalContext().security().enabled() ? cctx.kernalContext().job().currentTaskNameHash() : 0;
    boolean skipPrimaryCheck = loc && cctx.config().getCacheMode() == CacheMode.REPLICATED && cctx.affinityNode();
    final CacheContinuousQueryHandler hnd = clsr.apply();
    boolean locOnly = cctx.isLocal() || loc;
    hnd.taskNameHash(taskNameHash);
    hnd.skipPrimaryCheck(skipPrimaryCheck);
    hnd.notifyExisting(notifyExisting);
    hnd.internal(internal);
    hnd.keepBinary(keepBinary);
    hnd.localOnly(locOnly);
    IgnitePredicate<ClusterNode> pred = (loc || cctx.config().getCacheMode() == CacheMode.LOCAL) ?
        F.nodeForNodeId(cctx.localNodeId()) :
        new IsAllPredicate<>(cctx.group().nodeFilter(), new AttributeNodeFilter(ATTR_CLIENT_MODE, false));
    assert pred != null : cctx.config();
    UUID id = null;
    try {
        id = cctx.kernalContext().continuous().startRoutine(hnd, locOnly, bufSize, timeInterval, autoUnsubscribe, pred).get();
        if (hnd.isQuery() && cctx.userCache() && !locOnly && !onStart)
            hnd.waitTopologyFuture(cctx.kernalContext());
    } catch (NodeStoppingException e) {
        // Wrap the original exception to preserve the stack trace of the continuous query start.
        throw new NodeStoppingException(e);
    } catch (IgniteCheckedException e) {
        log.warning("Failed to start continuous query.", e);
        if (id != null)
            cctx.kernalContext().continuous().stopRoutine(id);
        throw new IgniteCheckedException("Failed to start continuous query.", e);
    }
    if (notifyExisting) {
        assert locLsnr != null : "Local listener can't be null if notifications for existing entries are enabled";
        final Iterator<CacheDataRow> it = cctx.offheap().cacheIterator(cctx.cacheId(), true, true, AffinityTopologyVersion.NONE, null, null);
        locLsnr.onUpdated(new Iterable<CacheEntryEvent>() {

            @Override
            public Iterator<CacheEntryEvent> iterator() {
                return new Iterator<CacheEntryEvent>() {

                    private CacheContinuousQueryEvent next;

                    {
                        advance();
                    }

                    @Override
                    public boolean hasNext() {
                        return next != null;
                    }

                    @Override
                    public CacheEntryEvent next() {
                        if (!hasNext())
                            throw new NoSuchElementException();
                        CacheEntryEvent next0 = next;
                        advance();
                        return next0;
                    }

                    @Override
                    public void remove() {
                        throw new UnsupportedOperationException();
                    }

                    private void advance() {
                        next = null;
                        while (next == null) {
                            if (!it.hasNext())
                                break;
                            CacheDataRow e = it.next();
                            CacheContinuousQueryEntry entry = new CacheContinuousQueryEntry(cctx.cacheId(), CREATED, e.key(), e.value(), null, keepBinary, 0, -1, null, (byte) 0);
                            next = new CacheContinuousQueryEvent<>(cctx.kernalContext().cache().jcache(cctx.name()), cctx, entry);
                            if (!hnd.filter(next))
                                next = null;
                        }
                    }
                };
            }
        });
    }
    return id;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) CacheDataRow(org.apache.ignite.internal.processors.cache.persistence.CacheDataRow) NodeStoppingException(org.apache.ignite.internal.NodeStoppingException) AttributeNodeFilter(org.apache.ignite.util.AttributeNodeFilter) CacheEntryEvent(javax.cache.event.CacheEntryEvent) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) Iterator(java.util.Iterator) UUID(java.util.UUID) NoSuchElementException(java.util.NoSuchElementException)
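
The public entry point for this path is ContinuousQuery; a hedged sketch follows. The closest public analogue to the internal notifyExisting iteration is setInitialQuery(), which delivers existing entries through the returned cursor. The cache name and types are assumptions for illustration.

import javax.cache.Cache;
import javax.cache.event.CacheEntryEvent;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.ScanQuery;

public class ContinuousQuerySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("myCache");

            ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();

            // Corresponds to the locLsnr parameter of executeQuery0().
            qry.setLocalListener(evts -> {
                for (CacheEntryEvent<? extends Integer, ? extends String> e : evts)
                    System.out.println("Updated: " + e.getKey() + " -> " + e.getValue());
            });

            // Existing entries are delivered through the cursor before updates start flowing.
            qry.setInitialQuery(new ScanQuery<>());

            try (QueryCursor<Cache.Entry<Integer, String>> cur = cache.query(qry)) {
                for (Cache.Entry<Integer, String> e : cur)
                    System.out.println("Existing: " + e.getKey());

                cache.put(1, "one"); // Triggers the local listener asynchronously.
            }
        }
    }
}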

Example 8 with CacheDataRow

use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.

the class CacheDataTree method scanDataPages.

/**
 * @param rowData Required row data.
 * @param c Optional MVCC closure.
 * @return Cache row cursor.
 * @throws IgniteCheckedException If failed.
 */
private GridCursor<CacheDataRow> scanDataPages(CacheDataRowAdapter.RowData rowData, MvccDataPageClosure c) throws IgniteCheckedException {
    lastFindWithDataPageScan = TRUE;
    checkDestroyed();
    assert rowData != null;
    assert grp.persistenceEnabled();
    int partId = rowStore.getPartitionId();
    GridCacheSharedContext shared = grp.shared();
    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) shared.database();
    PageStore pageStore = db.getPageStore(grpId, partId);
    boolean mvccEnabled = grp.mvccEnabled();
    int pageSize = pageSize();
    long startPageId = ((PageMemoryEx) pageMem).partitionMetaPageId(grp.groupId(), partId);
    /** Cursor that scans raw data pages of the partition store directly, bypassing the B+ tree. */
    final class DataPageScanCursor implements GridCursor<CacheDataRow> {

        /** Number of pages currently in the partition page store (re-read when exceeded, since the store may grow). */
        int pagesCnt = pageStore.pages();

        /** Index of the current page within the partition store. */
        int curPage = -1;

        /** Rows read from the current data page. */
        CacheDataRow[] rows = EMPTY_ROWS;

        /** Index of the current row in {@code rows}. */
        int curRow = -1;

        /**
         * {@inheritDoc}
         */
        @Override
        public boolean next() throws IgniteCheckedException {
            if (rows == null)
                return false;
            if (++curRow < rows.length && rows[curRow] != null)
                return true;
            return readNextDataPage();
        }

        /**
         * @return {@code true} if new rows were fetched.
         * @throws IgniteCheckedException If failed.
         */
        private boolean readNextDataPage() throws IgniteCheckedException {
            checkDestroyed();
            for (; ; ) {
                if (++curPage >= pagesCnt) {
                    // Re-read the number of pages when we run past the end (the store may have grown).
                    int newPagesCnt = pageStore.pages();
                    if (newPagesCnt <= pagesCnt) {
                        rows = null;
                        return false;
                    }
                    pagesCnt = newPagesCnt;
                }
                long pageId = startPageId + curPage;
                long page = pageMem.acquirePage(grpId, pageId);
                try {
                    boolean skipVer = CacheDataRowStore.getSkipVersion();
                    long pageAddr = ((PageMemoryEx) pageMem).readLock(page, pageId, true, false);
                    try {
                        // Here we should also exclude fragmented pages that don't contain the head of the entry.
                        if (PageIO.getType(pageAddr) != T_DATA)
                            // Not a data page.
                            continue;
                        DataPageIO io = PageIO.getPageIO(T_DATA, PageIO.getVersion(pageAddr));
                        int rowsCnt = io.getRowsCount(pageAddr);
                        if (rowsCnt == 0)
                            // Empty page.
                            continue;
                        if (rowsCnt > rows.length)
                            rows = new CacheDataRow[rowsCnt];
                        else
                            clearTail(rows, rowsCnt);
                        int r = 0;
                        for (int i = 0; i < rowsCnt; i++) {
                            if (c == null || c.applyMvcc(io, pageAddr, i, pageSize)) {
                                DataRow row = mvccEnabled ? new MvccDataRow() : new DataRow();
                                row.initFromDataPage(io, pageAddr, i, grp, shared, pageMem, rowData, skipVer);
                                rows[r++] = row;
                            }
                        }
                        if (r == 0)
                            // No rows fetched in this page.
                            continue;
                        curRow = 0;
                        return true;
                    } finally {
                        pageMem.readUnlock(grpId, pageId, page);
                    }
                } finally {
                    pageMem.releasePage(grpId, pageId, page);
                }
            }
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public CacheDataRow get() {
            return rows[curRow];
        }
    }
    return new DataPageScanCursor();
}
Also used : DataPageIO(org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO) CacheDataRow(org.apache.ignite.internal.processors.cache.persistence.CacheDataRow) GridCursor(org.apache.ignite.internal.util.lang.GridCursor) GridCacheDatabaseSharedManager(org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager) PageStore(org.apache.ignite.internal.pagemem.store.PageStore) MvccDataRow(org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow) GridCacheSharedContext(org.apache.ignite.internal.processors.cache.GridCacheSharedContext) PageMemoryEx(org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx)
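
Note that GridCursor uses a next()/get() protocol (advance first, then read) rather than the JDK Iterator's hasNext()/next(). A minimal, self-contained sketch of that protocol; ArrayCursor is a hypothetical stand-in for DataPageScanCursor, not Ignite code.

import org.apache.ignite.internal.util.lang.GridCursor;

public class CursorProtocolSketch {
    /** Hypothetical array-backed cursor mirroring the contract of DataPageScanCursor. */
    static final class ArrayCursor<T> implements GridCursor<T> {
        private final T[] items;
        private int idx = -1;

        ArrayCursor(T[] items) {
            this.items = items;
        }

        /** Advances to the next element; returns {@code false} when exhausted. */
        @Override public boolean next() {
            return ++idx < items.length;
        }

        /** Valid only after next() has returned {@code true}, as with DataPageScanCursor.get(). */
        @Override public T get() {
            return items[idx];
        }
    }

    public static void main(String[] args) throws Exception {
        GridCursor<String> cur = new ArrayCursor<>(new String[] {"a", "b", "c"});

        // Callers of scanDataPages() drive its cursor with this same loop shape.
        while (cur.next())
            System.out.println(cur.get());
    }
}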

Example 9 with CacheDataRow

use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.

the class GridCommandHandlerIndexingUtils method breakSqlIndex.

/**
 * Deletes records from the index, bypassing the cache.
 *
 * @param internalCache Cache.
 * @param partId Partition number.
 * @param filter Row filter.
 * @throws Exception If failed.
 */
static <K, V> void breakSqlIndex(IgniteInternalCache<K, V> internalCache, int partId, @Nullable Predicate<CacheDataRow> filter) throws Exception {
    requireNonNull(internalCache);
    GridCacheContext<K, V> cacheCtx = internalCache.context();
    GridDhtLocalPartition locPart = cacheCtx.topology().localPartitions().get(partId);
    GridIterator<CacheDataRow> cacheDataGridIter = cacheCtx.group().offheap().partitionIterator(locPart.id());
    GridQueryProcessor qryProcessor = internalCache.context().kernalContext().query();
    while (cacheDataGridIter.hasNextX()) {
        CacheDataRow cacheDataRow = cacheDataGridIter.nextX();
        if (nonNull(filter) && !filter.test(cacheDataRow))
            continue;
        cacheCtx.shared().database().checkpointReadLock();
        try {
            qryProcessor.remove(cacheCtx, cacheDataRow);
        } finally {
            cacheCtx.shared().database().checkpointReadUnlock();
        }
    }
}
Also used : CacheDataRow(org.apache.ignite.internal.processors.cache.persistence.CacheDataRow) GridQueryProcessor(org.apache.ignite.internal.processors.query.GridQueryProcessor) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition)
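
A hedged sketch of how a test might call this helper. The cache name is hypothetical, and since breakSqlIndex is package-private the call is assumed to happen from the same package.

import org.apache.ignite.Ignition;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.IgniteInternalCache;

public class BreakIndexSketch {
    public static void main(String[] args) throws Exception {
        try (IgniteEx ignite = (IgniteEx) Ignition.start()) {
            // Hypothetical SQL-indexed cache, assumed to exist and be populated.
            IgniteInternalCache<Integer, Object> cache = ignite.cachex("person");

            // Drop index rows for partition 0 while leaving cache data intact, so
            // index-validation tooling can detect the mismatch. A null filter
            // removes every row of the partition from the index.
            GridCommandHandlerIndexingUtils.breakSqlIndex(cache, 0, null);
        }
    }
}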

Example 10 with CacheDataRow

use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.

the class GridDhtPartitionSupplier method handleDemandMessage.

/**
 * For each demand message, the method looks up (or creates) a supply context and starts iterating entries across the requested partitions.
 * Each entry from the iterator is placed into a prepared supply message.
 *
 * If the supply message size in bytes exceeds {@link IgniteConfiguration#getRebalanceBatchSize()},
 * the method sends the message to the demander node and saves the partial iteration state to the supply context,
 * then restores that context when a new demand message with the same context id arrives.
 *
 * @param topicId Id of the topic used for the supply-demand communication.
 * @param nodeId Id of the node which sent the demand message.
 * @param demandMsg Demand message.
 */
public void handleDemandMessage(int topicId, UUID nodeId, GridDhtPartitionDemandMessage demandMsg) {
    assert demandMsg != null;
    assert nodeId != null;
    T3<UUID, Integer, AffinityTopologyVersion> contextId = new T3<>(nodeId, topicId, demandMsg.topologyVersion());
    if (demandMsg.rebalanceId() < 0) {
        // Demand node requested context cleanup.
        synchronized (scMap) {
            SupplyContext sctx = scMap.get(contextId);
            if (sctx != null && sctx.rebalanceId == -demandMsg.rebalanceId()) {
                clearContext(scMap.remove(contextId), log);
                if (log.isDebugEnabled())
                    log.debug("Supply context cleaned [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ", supplyContext=" + sctx + "]");
            } else {
                if (log.isDebugEnabled())
                    log.debug("Stale supply context cleanup message [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ", supplyContext=" + sctx + "]");
            }
            return;
        }
    }
    ClusterNode demanderNode = grp.shared().discovery().node(nodeId);
    if (demanderNode == null) {
        if (log.isDebugEnabled())
            log.debug("Demand message rejected (demander left cluster) [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]");
        return;
    }
    IgniteRebalanceIterator iter = null;
    SupplyContext sctx = null;
    Set<Integer> remainingParts = null;
    GridDhtPartitionSupplyMessage supplyMsg = new GridDhtPartitionSupplyMessage(demandMsg.rebalanceId(), grp.groupId(), demandMsg.topologyVersion(), grp.deploymentEnabled());
    try {
        synchronized (scMap) {
            sctx = scMap.remove(contextId);
            if (sctx != null && demandMsg.rebalanceId() < sctx.rebalanceId) {
                // Stale message; put the context back and return.
                scMap.put(contextId, sctx);
                if (log.isDebugEnabled())
                    log.debug("Stale demand message [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ", actualContext=" + sctx + "]");
                return;
            }
        }
        // Demand request should not contain empty partitions if no supply context is associated with it.
        if (sctx == null && (demandMsg.partitions() == null || demandMsg.partitions().isEmpty())) {
            if (log.isDebugEnabled())
                log.debug("Empty demand message (no context and partitions) [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]");
            return;
        }
        if (log.isDebugEnabled())
            log.debug("Demand message accepted [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]");
        assert !(sctx != null && !demandMsg.partitions().isEmpty());
        // Each demander thread should be able to gain prefetched batches.
        long maxBatchesCnt = grp.preloader().batchesPrefetchCount() * grp.shared().gridConfig().getRebalanceThreadPoolSize();
        if (sctx == null) {
            if (log.isDebugEnabled())
                log.debug("Starting supplying rebalancing [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ", fullPartitions=" + S.compact(demandMsg.partitions().fullSet()) + ", histPartitions=" + S.compact(demandMsg.partitions().historicalSet()) + "]");
        } else
            maxBatchesCnt = 1;
        if (sctx == null || sctx.iterator == null) {
            remainingParts = new HashSet<>(demandMsg.partitions().fullSet());
            CachePartitionPartialCountersMap histMap = demandMsg.partitions().historicalMap();
            for (int i = 0; i < histMap.size(); i++) {
                int p = histMap.partitionAt(i);
                remainingParts.add(p);
            }
            iter = grp.offheap().rebalanceIterator(demandMsg.partitions(), demandMsg.topologyVersion());
            for (Integer part : demandMsg.partitions().fullSet()) {
                if (iter.isPartitionMissing(part))
                    continue;
                GridDhtLocalPartition loc = top.localPartition(part, demandMsg.topologyVersion(), false);
                assert loc != null && loc.state() == GridDhtPartitionState.OWNING : "Partition should be in OWNING state: " + loc;
                supplyMsg.addEstimatedKeysCount(loc.dataStore().fullSize());
            }
            for (int i = 0; i < histMap.size(); i++) {
                int p = histMap.partitionAt(i);
                if (iter.isPartitionMissing(p))
                    continue;
                supplyMsg.addEstimatedKeysCount(histMap.updateCounterAt(i) - histMap.initialUpdateCounterAt(i));
            }
        } else {
            iter = sctx.iterator;
            remainingParts = sctx.remainingParts;
        }
        final int msgMaxSize = grp.preloader().batchSize();
        long batchesCnt = 0;
        CacheDataRow prevRow = null;
        while (iter.hasNext()) {
            CacheDataRow row = iter.peek();
            // Prevent mvcc entry history splitting into separate batches.
            boolean canFlushHistory = !grp.mvccEnabled() || prevRow != null && ((grp.sharedGroup() && row.cacheId() != prevRow.cacheId()) || !row.key().equals(prevRow.key()));
            if (canFlushHistory && supplyMsg.messageSize() >= msgMaxSize) {
                if (++batchesCnt >= maxBatchesCnt) {
                    saveSupplyContext(contextId, iter, remainingParts, demandMsg.rebalanceId());
                    reply(topicId, demanderNode, demandMsg, supplyMsg, contextId);
                    return;
                } else {
                    if (!reply(topicId, demanderNode, demandMsg, supplyMsg, contextId))
                        return;
                    supplyMsg = new GridDhtPartitionSupplyMessage(demandMsg.rebalanceId(), grp.groupId(), demandMsg.topologyVersion(), grp.deploymentEnabled());
                }
            }
            row = iter.next();
            prevRow = row;
            int part = row.partition();
            GridDhtLocalPartition loc = top.localPartition(part, demandMsg.topologyVersion(), false);
            assert (loc != null && loc.state() == OWNING && loc.reservations() > 0) || iter.isPartitionMissing(part) : "Partition should be in OWNING state and have at least 1 reservation: " + loc;
            if (iter.isPartitionMissing(part) && remainingParts.contains(part)) {
                supplyMsg.missed(part);
                remainingParts.remove(part);
                if (grp.eventRecordable(EVT_CACHE_REBALANCE_PART_MISSED))
                    grp.addRebalanceMissEvent(part);
                if (log.isDebugEnabled())
                    log.debug("Requested partition is marked as missing [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ", p=" + part + "]");
                continue;
            }
            if (!remainingParts.contains(part))
                continue;
            GridCacheEntryInfo info = extractEntryInfo(row);
            if (info == null)
                continue;
            supplyMsg.addEntry0(part, iter.historical(part), info, grp.shared(), grp.cacheObjectContext());
            if (iter.isPartitionDone(part)) {
                supplyMsg.last(part, loc.updateCounter());
                remainingParts.remove(part);
                if (grp.eventRecordable(EVT_CACHE_REBALANCE_PART_SUPPLIED))
                    grp.addRebalanceSupplyEvent(part);
            }
        }
        Iterator<Integer> remainingIter = remainingParts.iterator();
        while (remainingIter.hasNext()) {
            int p = remainingIter.next();
            if (iter.isPartitionDone(p)) {
                GridDhtLocalPartition loc = top.localPartition(p, demandMsg.topologyVersion(), false);
                assert loc != null : "Supply partition is gone: grp=" + grp.cacheOrGroupName() + ", p=" + p;
                supplyMsg.last(p, loc.updateCounter());
                remainingIter.remove();
                if (grp.eventRecordable(EVT_CACHE_REBALANCE_PART_SUPPLIED))
                    grp.addRebalanceSupplyEvent(p);
            } else if (iter.isPartitionMissing(p)) {
                supplyMsg.missed(p);
                remainingIter.remove();
                if (grp.eventRecordable(EVT_CACHE_REBALANCE_PART_MISSED))
                    grp.addRebalanceMissEvent(p);
            }
        }
        assert remainingParts.isEmpty() : "Partitions after rebalance should be either done or missing: " + remainingParts;
        if (sctx != null)
            clearContext(sctx, log);
        else
            iter.close();
        reply(topicId, demanderNode, demandMsg, supplyMsg, contextId);
        if (log.isInfoEnabled())
            log.info("Finished supplying rebalancing [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]");
    } catch (Throwable t) {
        if (iter != null && !iter.isClosed()) {
            try {
                iter.close();
            } catch (IgniteCheckedException e) {
                t.addSuppressed(e);
            }
        }
        if (grp.shared().kernalContext().isStopping())
            return;
        // Sending supply messages with an error attached requires the new protocol.
        boolean sendErrMsg = demanderNode.version().compareTo(GridDhtPartitionSupplyMessageV2.AVAILABLE_SINCE) >= 0;
        if (t instanceof IgniteSpiException) {
            if (log.isDebugEnabled())
                log.debug("Failed to send message to node (current node is stopping?) [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ", msg=" + t.getMessage() + ']');
            sendErrMsg = false;
        } else
            U.error(log, "Failed to continue supplying [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ']', t);
        try {
            if (sctx != null)
                clearContext(sctx, log);
        } catch (Throwable t1) {
            U.error(log, "Failed to cleanup supplying context [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ']', t1);
        }
        if (!sendErrMsg)
            return;
        boolean fallbackToFullRebalance = X.hasCause(t, IgniteHistoricalIteratorException.class);
        try {
            GridDhtPartitionSupplyMessage errMsg;
            if (fallbackToFullRebalance) {
                // Mark the last checkpoint as not applicable for WAL rebalance.
                grp.shared().database().lastCheckpointInapplicableForWalRebalance(grp.groupId());
                // Mark all remaining partitions as missed to trigger full rebalance.
                if (iter == null && F.isEmpty(remainingParts)) {
                    remainingParts = new HashSet<>(demandMsg.partitions().fullSet());
                    remainingParts.addAll(demandMsg.partitions().historicalSet());
                }
                for (int p : Optional.ofNullable(remainingParts).orElseGet(Collections::emptySet))
                    supplyMsg.missed(p);
                errMsg = supplyMsg;
            } else {
                errMsg = new GridDhtPartitionSupplyMessageV2(demandMsg.rebalanceId(), grp.groupId(), demandMsg.topologyVersion(), grp.deploymentEnabled(), t);
            }
            reply(topicId, demanderNode, demandMsg, errMsg, contextId);
        } catch (Throwable t1) {
            U.error(log, "Failed to send supply error message [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ']', t1);
        }
        // If a fallback to full rebalance is possible, let the rebalance process recover
        // instead of triggering the failure handler.
        if (!fallbackToFullRebalance) {
            grp.shared().kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, new IgniteCheckedException("Failed to continue supplying [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ']', t)));
        }
    }
}
Also used : IgniteCheckedException(org.apache.ignite.IgniteCheckedException) FailureContext(org.apache.ignite.failure.FailureContext) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition) IgniteSpiException(org.apache.ignite.spi.IgniteSpiException) UUID(java.util.UUID) T3(org.apache.ignite.internal.util.typedef.T3) HashSet(java.util.HashSet) ClusterNode(org.apache.ignite.cluster.ClusterNode) CacheDataRow(org.apache.ignite.internal.processors.cache.persistence.CacheDataRow) GridCacheEntryInfo(org.apache.ignite.internal.processors.cache.GridCacheEntryInfo) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) IgniteRebalanceIterator(org.apache.ignite.internal.processors.cache.IgniteRebalanceIterator)
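
The batching behavior above is driven by public configuration: msgMaxSize comes from the rebalance batch size, and maxBatchesCnt is the prefetch count multiplied by the rebalance thread pool size. A hedged configuration sketch using the grid-level setters available in recent Ignite versions; the values are arbitrary illustrations.

import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;

public class RebalanceTuningSketch {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // Upper bound on supplyMsg.messageSize() before the supplier flushes a batch
        // (msgMaxSize in handleDemandMessage()).
        cfg.setRebalanceBatchSize(512 * 1024);

        // Batches a supplier may prepare ahead per demander thread; multiplied by the
        // rebalance thread pool size to compute maxBatchesCnt.
        cfg.setRebalanceBatchesPrefetchCount(3);
        cfg.setRebalanceThreadPoolSize(4);

        Ignition.start(cfg);
    }
}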

Aggregations

CacheDataRow (org.apache.ignite.internal.processors.cache.persistence.CacheDataRow): 78 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 35 usages
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 20 usages
GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion): 16 usages
ArrayList (java.util.ArrayList): 14 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 14 usages
IgniteException (org.apache.ignite.IgniteException): 14 usages
Nullable (org.jetbrains.annotations.Nullable): 12 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 11 usages
CacheObject (org.apache.ignite.internal.processors.cache.CacheObject): 11 usages
GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition): 11 usages
HashMap (java.util.HashMap): 10 usages
IgniteEx (org.apache.ignite.internal.IgniteEx): 10 usages
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 10 usages
GridCacheEntryEx (org.apache.ignite.internal.processors.cache.GridCacheEntryEx): 10 usages
HashSet (java.util.HashSet): 9 usages
GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException): 9 usages
EntryGetResult (org.apache.ignite.internal.processors.cache.EntryGetResult): 8 usages
GridCursor (org.apache.ignite.internal.util.lang.GridCursor): 8 usages
NodeStoppingException (org.apache.ignite.internal.NodeStoppingException): 7 usages