Example 41 with IgniteCheckedException

Use of org.apache.ignite.IgniteCheckedException in project ignite by apache.

From the class GridDhtPartitionSupplier, method clearContext.

/**
 * Clear context.
 *
 * @param sc Supply context.
 * @param log Logger.
 */
private static void clearContext(final SupplyContext sc, final IgniteLogger log) {
    if (sc != null) {
        final Iterator it = sc.entryIt;
        if (it != null && it instanceof GridCloseableIterator && !((GridCloseableIterator) it).isClosed()) {
            try {
                ((GridCloseableIterator) it).close();
            } catch (IgniteCheckedException e) {
                U.error(log, "Iterator close failed.", e);
            }
        }
        final GridDhtLocalPartition loc = sc.loc;
        if (loc != null) {
            assert loc.reservations() > 0;
            loc.release();
        }
    }
}
Also used: IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridCloseableIterator (org.apache.ignite.internal.util.lang.GridCloseableIterator), Iterator (java.util.Iterator), IgniteRebalanceIterator (org.apache.ignite.internal.processors.cache.IgniteRebalanceIterator), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition)
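
The method above closes a GridCloseableIterator and only logs the checked close failure instead of propagating it. The same close-and-log idiom can be pulled into a small standalone helper; the sketch below is illustrative only (the IteratorUtils class and closeQuietly method are hypothetical, not Ignite code) and assumes U is org.apache.ignite.internal.util.typedef.internal.U, as in the snippet above.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.internal.util.lang.GridCloseableIterator;
import org.apache.ignite.internal.util.typedef.internal.U;

final class IteratorUtils {
    /** Closes the iterator if it is still open, logging (not rethrowing) any IgniteCheckedException. */
    static void closeQuietly(GridCloseableIterator<?> it, IgniteLogger log) {
        if (it != null && !it.isClosed()) {
            try {
                it.close();
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Iterator close failed.", e);
            }
        }
    }
}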

Example 42 with IgniteCheckedException

Use of org.apache.ignite.IgniteCheckedException in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method validateCache.

/** {@inheritDoc} */
@Nullable
@Override
public Throwable validateCache(GridCacheContext cctx, boolean recovery, boolean read, @Nullable Object key, @Nullable Collection<?> keys) {
    assert isDone() : this;
    Throwable err = error();
    if (err != null)
        return err;
    if (!cctx.shared().kernalContext().state().active())
        return new CacheInvalidStateException("Failed to perform cache operation (cluster is not activated): " + cctx.name());
    PartitionLossPolicy partLossPlc = cctx.config().getPartitionLossPolicy();
    if (cctx.needsRecovery() && !recovery) {
        if (!read && (partLossPlc == READ_ONLY_SAFE || partLossPlc == READ_ONLY_ALL))
            return new IgniteCheckedException("Failed to write to cache (cache is moved to a read-only state): " + cctx.name());
    }
    if (cctx.needsRecovery() || cctx.config().getTopologyValidator() != null) {
        CacheValidation validation = cacheValidRes.get(cctx.cacheId());
        if (validation == null)
            return null;
        if (!validation.valid && !read)
            return new IgniteCheckedException("Failed to perform cache operation " + "(cache topology is not valid): " + cctx.name());
        if (recovery || !cctx.needsRecovery())
            return null;
        if (key != null) {
            int p = cctx.affinity().partition(key);
            CacheInvalidStateException ex = validatePartitionOperation(cctx.name(), read, key, p, validation.lostParts, partLossPlc);
            if (ex != null)
                return ex;
        }
        if (keys != null) {
            for (Object k : keys) {
                int p = cctx.affinity().partition(k);
                CacheInvalidStateException ex = validatePartitionOperation(cctx.name(), read, k, p, validation.lostParts, partLossPlc);
                if (ex != null)
                    return ex;
            }
        }
    }
    return null;
}
Also used: IgniteCheckedException (org.apache.ignite.IgniteCheckedException), PartitionLossPolicy (org.apache.ignite.cache.PartitionLossPolicy), CacheInvalidStateException (org.apache.ignite.internal.processors.cache.CacheInvalidStateException), Nullable (org.jetbrains.annotations.Nullable)
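
Note that validateCache reports failures by returning a Throwable (an IgniteCheckedException or CacheInvalidStateException) rather than throwing it, which lets asynchronous callers wrap the error into a future; Example 45 below uses it exactly this way before a read. A minimal caller-side sketch of that check (the ValidateBeforeRead class and validateRead method are hypothetical names):

import java.util.Collection;

import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;

final class ValidateBeforeRead {
    /** Returns a validation error for a read over the given keys, or null if the operation may proceed. */
    static Throwable validateRead(GridCacheContext<?, ?> cctx, Collection<?> keys) {
        // The last finished exchange future knows the current activation and partition-loss state.
        GridDhtTopologyFuture topFut = cctx.shared().exchange().lastFinishedFuture();

        // validateCache returns the error instead of throwing, so the caller decides how to propagate it.
        return topFut != null
            ? topFut.validateCache(cctx, /*recovery*/ false, /*read*/ true, /*key*/ null, keys)
            : null;
    }
}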

Example 43 with IgniteCheckedException

Use of org.apache.ignite.IgniteCheckedException in project ignite by apache.

From the class GridCachePartitionExchangeManager, method sendAllPartitions.

/**
 * @param nodes Nodes.
 * @param msgTopVer Topology version. Will be added to full message.
 */
private void sendAllPartitions(Collection<ClusterNode> nodes, AffinityTopologyVersion msgTopVer) {
    GridDhtPartitionsFullMessage m = createPartitionsFullMessage(true, false, null, null, null, null);
    m.topologyVersion(msgTopVer);
    if (log.isDebugEnabled())
        log.debug("Sending all partitions [nodeIds=" + U.nodeIds(nodes) + ", msg=" + m + ']');
    for (ClusterNode node : nodes) {
        try {
            assert !node.equals(cctx.localNode());
            cctx.io().sendNoRetry(node, m, SYSTEM_POOL);
        } catch (ClusterTopologyCheckedException ignore) {
            if (log.isDebugEnabled())
                log.debug("Failed to send partition update to node because it left grid (will ignore) [node=" + node.id() + ", msg=" + m + ']');
        } catch (IgniteCheckedException e) {
            U.warn(log, "Failed to send partitions full message [node=" + node + ", err=" + e + ']');
        }
    }
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), GridDhtPartitionsFullMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
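
The two catch blocks distinguish a benign ClusterTopologyCheckedException (the target node already left the grid, so the send is simply skipped) from any other IgniteCheckedException, which is logged as a warning so the loop can continue with the remaining nodes. A minimal per-node sketch of the same classification (the SendPartitionsSketch class and sendTo method are hypothetical helpers, not Ignite code):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
import org.apache.ignite.internal.managers.communication.GridIoPolicy;
import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage;
import org.apache.ignite.internal.util.typedef.internal.U;

final class SendPartitionsSketch {
    /** Sends the full partitions message to one node, classifying checked failures as in the method above. */
    static void sendTo(GridCacheSharedContext<?, ?> cctx, ClusterNode node,
        GridDhtPartitionsFullMessage msg, IgniteLogger log) {
        try {
            cctx.io().sendNoRetry(node, msg, GridIoPolicy.SYSTEM_POOL);
        }
        catch (ClusterTopologyCheckedException ignore) {
            // The node left the grid between message creation and send: benign, just skip it.
            if (log.isDebugEnabled())
                log.debug("Node left grid, skipping partition update [node=" + node.id() + ']');
        }
        catch (IgniteCheckedException e) {
            // Any other communication failure: warn and let the caller continue with the remaining nodes.
            U.warn(log, "Failed to send partitions full message [node=" + node.id() + ", err=" + e + ']');
        }
    }
}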

Example 44 with IgniteCheckedException

Use of org.apache.ignite.IgniteCheckedException in project ignite by apache.

From the class GridCacheProcessor, method validate.

/**
 * @param c Ignite configuration.
 * @param cc Configuration to validate.
 * @param cacheType Cache type.
 * @param cfgStore Cache store.
 * @throws IgniteCheckedException If failed.
 */
private void validate(IgniteConfiguration c, CacheConfiguration cc, CacheType cacheType, @Nullable CacheStore cfgStore) throws IgniteCheckedException {
    assertParameter(cc.getName() != null && !cc.getName().isEmpty(), "name is null or empty");
    if (cc.getCacheMode() == REPLICATED) {
        if (cc.getNearConfiguration() != null && ctx.discovery().cacheAffinityNode(ctx.discovery().localNode(), cc.getName())) {
            U.warn(log, "Near cache cannot be used with REPLICATED cache, " + "will be ignored [cacheName=" + U.maskName(cc.getName()) + ']');
            cc.setNearConfiguration(null);
        }
    }
    if (storesLocallyOnClient(c, cc))
        throw new IgniteCheckedException("DataRegion for client caches must be explicitly configured " + "on client node startup. Use DataStorageConfiguration to configure DataRegion.");
    if (cc.getCacheMode() == LOCAL && !cc.getAffinity().getClass().equals(LocalAffinityFunction.class))
        U.warn(log, "AffinityFunction configuration parameter will be ignored for local cache [cacheName=" + U.maskName(cc.getName()) + ']');
    if (cc.getAffinity().partitions() > CacheConfiguration.MAX_PARTITIONS_COUNT)
        throw new IgniteCheckedException("Cannot have more than " + CacheConfiguration.MAX_PARTITIONS_COUNT + " partitions [cacheName=" + cc.getName() + ", partitions=" + cc.getAffinity().partitions() + ']');
    if (cc.getRebalanceMode() != CacheRebalanceMode.NONE)
        assertParameter(cc.getRebalanceBatchSize() > 0, "rebalanceBatchSize > 0");
    if (cc.getCacheMode() == PARTITIONED || cc.getCacheMode() == REPLICATED) {
        if (cc.getAtomicityMode() == ATOMIC && cc.getWriteSynchronizationMode() == FULL_ASYNC)
            U.warn(log, "Cache write synchronization mode is set to FULL_ASYNC. All single-key 'put' and " + "'remove' operations will return 'null', all 'putx' and 'removex' operations will return" + " 'true' [cacheName=" + U.maskName(cc.getName()) + ']');
    }
    DeploymentMode depMode = c.getDeploymentMode();
    if (c.isPeerClassLoadingEnabled() && (depMode == PRIVATE || depMode == ISOLATED) && !CU.isSystemCache(cc.getName()) && !(c.getMarshaller() instanceof BinaryMarshaller))
        throw new IgniteCheckedException("Cache can be started in PRIVATE or ISOLATED deployment mode only when" + " BinaryMarshaller is used [depMode=" + ctx.config().getDeploymentMode() + ", marshaller=" + c.getMarshaller().getClass().getName() + ']');
    if (cc.getAffinity().partitions() > CacheConfiguration.MAX_PARTITIONS_COUNT)
        throw new IgniteCheckedException("Affinity function must return at most " + CacheConfiguration.MAX_PARTITIONS_COUNT + " partitions [actual=" + cc.getAffinity().partitions() + ", affFunction=" + cc.getAffinity() + ", cacheName=" + cc.getName() + ']');
    if (cc.isWriteBehindEnabled()) {
        if (cfgStore == null)
            throw new IgniteCheckedException("Cannot enable write-behind (writer or store is not provided) " + "for cache: " + U.maskName(cc.getName()));
        assertParameter(cc.getWriteBehindBatchSize() > 0, "writeBehindBatchSize > 0");
        assertParameter(cc.getWriteBehindFlushSize() >= 0, "writeBehindFlushSize >= 0");
        assertParameter(cc.getWriteBehindFlushFrequency() >= 0, "writeBehindFlushFrequency >= 0");
        assertParameter(cc.getWriteBehindFlushThreadCount() > 0, "writeBehindFlushThreadCount > 0");
        if (cc.getWriteBehindFlushSize() == 0 && cc.getWriteBehindFlushFrequency() == 0)
            throw new IgniteCheckedException("Cannot set both 'writeBehindFlushFrequency' and " + "'writeBehindFlushSize' parameters to 0 for cache: " + U.maskName(cc.getName()));
    }
    if (cc.isReadThrough() && cfgStore == null)
        throw new IgniteCheckedException("Cannot enable read-through (loader or store is not provided) " + "for cache: " + U.maskName(cc.getName()));
    if (cc.isWriteThrough() && cfgStore == null)
        throw new IgniteCheckedException("Cannot enable write-through (writer or store is not provided) " + "for cache: " + U.maskName(cc.getName()));
    long delay = cc.getRebalanceDelay();
    if (delay != 0) {
        if (cc.getCacheMode() != PARTITIONED)
            U.warn(log, "Rebalance delay is supported only for partitioned caches (will ignore): " + (cc.getName()), "Will ignore rebalance delay for cache: " + U.maskName(cc.getName()));
        else if (cc.getRebalanceMode() == SYNC) {
            if (delay < 0) {
                U.warn(log, "Ignoring SYNC rebalance mode with manual rebalance start (node will not wait for " + "rebalancing to be finished): " + U.maskName(cc.getName()), "Node will not wait for rebalance in SYNC mode: " + U.maskName(cc.getName()));
            } else {
                U.warn(log, "Using SYNC rebalance mode with rebalance delay (node will wait until rebalancing is " + "initiated for " + delay + "ms) for cache: " + U.maskName(cc.getName()), "Node will wait until rebalancing is initiated for " + delay + "ms for cache: " + U.maskName(cc.getName()));
            }
        }
    }
    ctx.igfsHelper().validateCacheConfiguration(cc);
    if (cc.getAtomicityMode() == ATOMIC)
        assertParameter(cc.getTransactionManagerLookupClassName() == null, "transaction manager can not be used with ATOMIC cache");
    if ((cc.getEvictionPolicyFactory() != null || cc.getEvictionPolicy() != null) && !cc.isOnheapCacheEnabled())
        throw new IgniteCheckedException("Onheap cache must be enabled if eviction policy is configured [cacheName=" + U.maskName(cc.getName()) + "]");
    if (cacheType != CacheType.DATA_STRUCTURES && DataStructuresProcessor.isDataStructureCache(cc.getName()))
        throw new IgniteCheckedException("Using cache names reserved for datastructures is not allowed for " + "other cache types [cacheName=" + cc.getName() + ", cacheType=" + cacheType + "]");
    if (cacheType != CacheType.DATA_STRUCTURES && DataStructuresProcessor.isReservedGroup(cc.getGroupName()))
        throw new IgniteCheckedException("Using cache group names reserved for datastructures is not allowed for " + "other cache types [cacheName=" + cc.getName() + ", groupName=" + cc.getGroupName() + ", cacheType=" + cacheType + "]");
}
Also used: IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridBinaryMarshaller (org.apache.ignite.internal.binary.GridBinaryMarshaller), BinaryMarshaller (org.apache.ignite.internal.binary.BinaryMarshaller), DeploymentMode (org.apache.ignite.configuration.DeploymentMode)
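
validate declares throws IgniteCheckedException, so internal callers either handle it or convert it to the unchecked IgniteException before it crosses the public API boundary; U.convertException from IgniteUtils is the usual conversion helper. A minimal sketch of that pattern (the StartCacheSketch class and validateMyConfig method are hypothetical stand-ins for the real call into GridCacheProcessor.validate):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.typedef.internal.U;

final class StartCacheSketch {
    /** Runs a checked validation step and rethrows failures as the public, unchecked IgniteException. */
    static void startOrFail() {
        try {
            validateMyConfig();
        }
        catch (IgniteCheckedException e) {
            // Internal code works with the checked exception; the public API surface
            // converts it to IgniteException (a RuntimeException) before rethrowing.
            throw U.convertException(e);
        }
    }

    /** Hypothetical validation step standing in for GridCacheProcessor.validate(...). */
    private static void validateMyConfig() throws IgniteCheckedException {
        throw new IgniteCheckedException("Invalid cache configuration.");
    }
}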

Example 45 with IgniteCheckedException

Use of org.apache.ignite.IgniteCheckedException in project ignite by apache.

From the class GridCacheAdapter, method getAllAsync0.

/**
 * @param keys Keys.
 * @param readerArgs Near cache reader will be added if not null.
 * @param readThrough Read-through flag.
 * @param checkTx Check local transaction flag.
 * @param subjId Subject ID.
 * @param taskName Task name.
 * @param deserializeBinary Deserialize binary flag.
 * @param expiry Expiry policy.
 * @param skipVals Skip values flag.
 * @param keepCacheObjects Keep cache objects.
 * @param needVer If {@code true} returns values as tuples containing value and version.
 * @return Future.
 */
protected final <K1, V1> IgniteInternalFuture<Map<K1, V1>> getAllAsync0(@Nullable final Collection<KeyCacheObject> keys, @Nullable final ReaderArguments readerArgs, final boolean readThrough, boolean checkTx, @Nullable final UUID subjId, final String taskName, final boolean deserializeBinary, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean skipVals, final boolean keepCacheObjects, final boolean recovery, final boolean needVer) {
    if (F.isEmpty(keys))
        return new GridFinishedFuture<>(Collections.<K1, V1>emptyMap());
    GridNearTxLocal tx = null;
    if (checkTx) {
        try {
            checkJta();
        } catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
        tx = ctx.tm().threadLocalTx(ctx);
    }
    if (tx == null || tx.implicit()) {
        Map<KeyCacheObject, EntryGetResult> misses = null;
        Set<GridCacheEntryEx> newLocalEntries = null;
        final AffinityTopologyVersion topVer = tx == null ? ctx.affinity().affinityTopologyVersion() : tx.topologyVersion();
        try {
            int keysSize = keys.size();
            GridDhtTopologyFuture topFut = ctx.shared().exchange().lastFinishedFuture();
            Throwable ex = topFut != null ? topFut.validateCache(ctx, recovery, /*read*/true, null, keys) : null;
            if (ex != null)
                return new GridFinishedFuture<>(ex);
            final Map<K1, V1> map = keysSize == 1 ? (Map<K1, V1>) new IgniteBiTuple<>() : U.<K1, V1>newHashMap(keysSize);
            final boolean storeEnabled = !skipVals && readThrough && ctx.readThrough();
            boolean readNoEntry = ctx.readNoEntry(expiry, readerArgs != null);
            for (KeyCacheObject key : keys) {
                while (true) {
                    try {
                        EntryGetResult res = null;
                        boolean evt = !skipVals;
                        boolean updateMetrics = !skipVals;
                        GridCacheEntryEx entry = null;
                        boolean skipEntry = readNoEntry;
                        if (readNoEntry) {
                            CacheDataRow row = ctx.offheap().read(ctx, key);
                            if (row != null) {
                                long expireTime = row.expireTime();
                                if (expireTime != 0) {
                                    if (expireTime > U.currentTimeMillis()) {
                                        res = new EntryGetWithTtlResult(row.value(), row.version(), false, expireTime, 0);
                                    } else
                                        skipEntry = false;
                                } else
                                    res = new EntryGetResult(row.value(), row.version(), false);
                            }
                            if (res != null) {
                                if (evt) {
                                    ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
                                }
                                if (updateMetrics && ctx.statisticsEnabled())
                                    ctx.cache().metrics0().onRead(true);
                            } else if (storeEnabled)
                                skipEntry = false;
                        }
                        if (!skipEntry) {
                            boolean isNewLocalEntry = this.map.getEntry(ctx, key) == null;
                            entry = entryEx(key);
                            if (entry == null) {
                                if (!skipVals && ctx.statisticsEnabled())
                                    ctx.cache().metrics0().onRead(false);
                                break;
                            }
                            if (isNewLocalEntry) {
                                if (newLocalEntries == null)
                                    newLocalEntries = new HashSet<>();
                                newLocalEntries.add(entry);
                            }
                            if (storeEnabled) {
                                res = entry.innerGetAndReserveForLoad(updateMetrics, evt, subjId, taskName, expiry, !deserializeBinary, readerArgs);
                                assert res != null;
                                if (res.value() == null) {
                                    if (misses == null)
                                        misses = new HashMap<>();
                                    misses.put(key, res);
                                    res = null;
                                }
                            } else {
                                res = entry.innerGetVersioned(null, null, updateMetrics, evt, subjId, null, taskName, expiry, !deserializeBinary, readerArgs);
                                if (res == null)
                                    ctx.evicts().touch(entry, topVer);
                            }
                        }
                        if (res != null) {
                            ctx.addResult(map, key, res, skipVals, keepCacheObjects, deserializeBinary, true, needVer);
                            if (entry != null && (tx == null || (!tx.implicit() && tx.isolation() == READ_COMMITTED)))
                                ctx.evicts().touch(entry, topVer);
                            if (keysSize == 1)
                                // Safe to return because no locks are required in READ_COMMITTED mode.
                                return new GridFinishedFuture<>(map);
                        }
                        break;
                    } catch (GridCacheEntryRemovedException ignored) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in getAllAsync(..) method (will retry): " + key);
                    }
                }
            }
            if (storeEnabled && misses != null) {
                final Map<KeyCacheObject, EntryGetResult> loadKeys = misses;
                final IgniteTxLocalAdapter tx0 = tx;
                final Collection<KeyCacheObject> loaded = new HashSet<>();
                return new GridEmbeddedFuture(ctx.closures().callLocalSafe(ctx.projectSafe(new GPC<Map<K1, V1>>() {

                    @Override
                    public Map<K1, V1> call() throws Exception {
                        ctx.store().loadAll(/*tx*/null, loadKeys.keySet(), new CI2<KeyCacheObject, Object>() {

                            @Override
                            public void apply(KeyCacheObject key, Object val) {
                                EntryGetResult res = loadKeys.get(key);
                                if (res == null || val == null)
                                    return;
                                loaded.add(key);
                                CacheObject cacheVal = ctx.toCacheObject(val);
                                while (true) {
                                    GridCacheEntryEx entry = null;
                                    try {
                                        ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
                                    } catch (IgniteCheckedException e) {
                                        // Wrap errors (will be unwrapped).
                                        throw new GridClosureException(e);
                                    }
                                    ctx.shared().database().checkpointReadLock();
                                    try {
                                        entry = entryEx(key);
                                        entry.unswap();
                                        EntryGetResult verVal = entry.versionedValue(cacheVal, res.version(), null, expiry, readerArgs);
                                        if (log.isDebugEnabled())
                                            log.debug("Set value loaded from store into entry [" + "oldVer=" + res.version() + ", newVer=" + verVal.version() + ", " + "entry=" + entry + ']');
                                        // Don't put key-value pair into result map if value is null.
                                        if (verVal.value() != null) {
                                            ctx.addResult(map, key, verVal, skipVals, keepCacheObjects, deserializeBinary, true, needVer);
                                        }
                                        if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED))
                                            ctx.evicts().touch(entry, topVer);
                                        break;
                                    } catch (GridCacheEntryRemovedException ignore) {
                                        if (log.isDebugEnabled())
                                            log.debug("Got removed entry during getAllAsync (will retry): " + entry);
                                    } catch (IgniteCheckedException e) {
                                        // Wrap errors (will be unwrapped).
                                        throw new GridClosureException(e);
                                    } finally {
                                        ctx.shared().database().checkpointReadUnlock();
                                    }
                                }
                            }
                        });
                        clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);
                        return map;
                    }
                }), true), new C2<Map<K, V>, Exception, IgniteInternalFuture<Map<K, V>>>() {

                    @Override
                    public IgniteInternalFuture<Map<K, V>> apply(Map<K, V> map, Exception e) {
                        if (e != null) {
                            clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);
                            return new GridFinishedFuture<>(e);
                        }
                        if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED)) {
                            Collection<KeyCacheObject> notFound = new HashSet<>(loadKeys.keySet());
                            notFound.removeAll(loaded);
                            // Touch entries that were not found in store.
                            for (KeyCacheObject key : notFound) {
                                GridCacheEntryEx entry = peekEx(key);
                                if (entry != null)
                                    ctx.evicts().touch(entry, topVer);
                            }
                        }
                        // There were no misses.
                        return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
                    }
                }, new C2<Map<K1, V1>, Exception, Map<K1, V1>>() {

                    @Override
                    public Map<K1, V1> apply(Map<K1, V1> loaded, Exception e) {
                        if (e == null)
                            map.putAll(loaded);
                        return map;
                    }
                });
            } else
                // Misses can be non-zero only if store is enabled.
                assert misses == null;
            return new GridFinishedFuture<>(map);
        } catch (RuntimeException | AssertionError e) {
            if (misses != null) {
                for (KeyCacheObject key0 : misses.keySet()) ctx.evicts().touch(peekEx(key0), topVer);
            }
            if (newLocalEntries != null) {
                for (GridCacheEntryEx entry : newLocalEntries) removeEntry(entry);
            }
            return new GridFinishedFuture<>(e);
        } catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
    } else {
        return asyncOp(tx, new AsyncOp<Map<K1, V1>>(keys) {

            @Override
            public IgniteInternalFuture<Map<K1, V1>> op(GridNearTxLocal tx, AffinityTopologyVersion readyTopVer) {
                return tx.getAllAsync(ctx, readyTopVer, keys, deserializeBinary, skipVals, false, !readThrough, recovery, needVer);
            }
        }, ctx.operationContextPerCall(), /*retry*/false);
    }
}
Also used: IgniteBiTuple (org.apache.ignite.lang.IgniteBiTuple), HashMap (java.util.HashMap), IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture), CI2 (org.apache.ignite.internal.util.typedef.CI2), GridEmbeddedFuture (org.apache.ignite.internal.util.future.GridEmbeddedFuture), GridFinishedFuture (org.apache.ignite.internal.util.future.GridFinishedFuture), GridDhtTopologyFuture (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), IgniteTxLocalAdapter (org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalAdapter), HashSet (java.util.HashSet), CacheDataRow (org.apache.ignite.internal.processors.cache.persistence.CacheDataRow), GridClosureException (org.apache.ignite.internal.util.lang.GridClosureException), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), GridNearTxLocal (org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal), IgniteTxRollbackCheckedException (org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException), InvalidObjectException (java.io.InvalidObjectException), EntryProcessorException (javax.cache.processor.EntryProcessorException), IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException), ClusterTopologyServerNotFoundException (org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException), IOException (java.io.IOException), ObjectStreamException (java.io.ObjectStreamException), IgniteException (org.apache.ignite.IgniteException), NodeStoppingException (org.apache.ignite.internal.NodeStoppingException), ClusterTopologyException (org.apache.ignite.cluster.ClusterTopologyException), NoSuchElementException (java.util.NoSuchElementException), IgniteTxHeuristicCheckedException (org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException), GridDhtInvalidPartitionException (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException), Collection (java.util.Collection), Map (java.util.Map)
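
Note how getAllAsync0 never lets IgniteCheckedException escape to the caller: checked failures (from checkJta, topology validation, or the final catch block) are wrapped into an already-completed GridFinishedFuture and returned from the asynchronous method. A minimal sketch of that failed-future idiom (the AsyncGetSketch class and loadValue method are hypothetical):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;

final class AsyncGetSketch {
    /** Returns a completed future holding either the loaded value or the checked failure. */
    static IgniteInternalFuture<String> getAsync(String key) {
        try {
            // Checked exceptions are not thrown to the caller of an async method;
            // they travel inside the returned future instead.
            return new GridFinishedFuture<>(loadValue(key));
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
    }

    /** Hypothetical synchronous load standing in for the real cache read. */
    private static String loadValue(String key) throws IgniteCheckedException {
        return "value-of-" + key;
    }
}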

Aggregations

IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 1568
IgniteException (org.apache.ignite.IgniteException): 388
ArrayList (java.util.ArrayList): 237
IOException (java.io.IOException): 226
ClusterNode (org.apache.ignite.cluster.ClusterNode): 215
Map (java.util.Map): 181
UUID (java.util.UUID): 163
HashMap (java.util.HashMap): 160
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 150
Test (org.junit.Test): 143
List (java.util.List): 139
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture): 138
Nullable (org.jetbrains.annotations.Nullable): 134
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 118
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 109
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 105
IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException): 104
Collection (java.util.Collection): 97
HashSet (java.util.HashSet): 97
Ignite (org.apache.ignite.Ignite): 96