Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in the Apache Ignite project.
Class GridCacheMultithreadedFailoverAbstractTest, method compareCaches:
/**
 * Compare caches.
 *
 * @param expVals Expected values.
 * @return {@code True} if check passed successfully.
 * @throws Exception If failed.
 */
@SuppressWarnings({ "TooBroadScope", "ConstantIfStatement" })
private boolean compareCaches(Map<Integer, Integer> expVals) throws Exception {
    List<IgniteCache<Integer, Integer>> caches = new ArrayList<>(dataNodes());
    List<GridDhtCacheAdapter<Integer, Integer>> dhtCaches = null;

    for (int i = 0; i < dataNodes(); i++) {
        IgniteCache<Integer, Integer> cache = G.ignite(nodeName(i)).cache(CACHE_NAME);

        assert cache != null;

        caches.add(cache);

        GridCacheAdapter<Integer, Integer> cache0 =
            (GridCacheAdapter<Integer, Integer>) ((IgniteKernal) cache.unwrap(Ignite.class)).<Integer, Integer>getCache(CACHE_NAME);

        if (cache0.isNear()) {
            if (dhtCaches == null)
                dhtCaches = new ArrayList<>(dataNodes());

            dhtCaches.add(((GridNearCacheAdapter<Integer, Integer>) cache0).dht());
        }
    }

    // Compare key sets on each cache.
    Collection<Integer> cacheKeys = new HashSet<>();
    Collection<Integer> dhtCacheKeys = new HashSet<>();

    for (int i = 0; i < dataNodes(); i++) {
        for (Cache.Entry<Integer, Integer> entry : caches.get(i))
            cacheKeys.add(entry.getKey());

        if (dhtCaches != null)
            dhtCacheKeys.addAll(dhtCaches.get(i).keySet());
    }

    boolean failed = false;

    if (!F.eq(expVals.keySet(), cacheKeys)) {
        Collection<Integer> expOnly = new HashSet<>();
        Collection<Integer> cacheOnly = new HashSet<>();

        expOnly.addAll(expVals.keySet());
        expOnly.removeAll(cacheKeys);

        cacheOnly.addAll(cacheKeys);
        cacheOnly.removeAll(expVals.keySet());

        if (!expOnly.isEmpty())
            log.error("Cache does not contain expected keys: " + expOnly);

        if (!cacheOnly.isEmpty())
            log.error("Cache does contain unexpected keys: " + cacheOnly);

        failed = true;
    }

    if (dhtCaches != null && !F.eq(expVals.keySet(), dhtCacheKeys)) {
        Collection<Integer> expOnly = new HashSet<>();
        Collection<Integer> cacheOnly = new HashSet<>();

        expOnly.addAll(expVals.keySet());
        expOnly.removeAll(dhtCacheKeys);

        cacheOnly.addAll(dhtCacheKeys);
        cacheOnly.removeAll(expVals.keySet());

        if (!expOnly.isEmpty())
            log.error("DHT cache does not contain expected keys: " + expOnly);

        if (!cacheOnly.isEmpty())
            log.error("DHT cache does contain unexpected keys: " + cacheOnly);

        failed = true;
    }

    // Compare values.
    Collection<Integer> failedKeys = new HashSet<>();

    for (Map.Entry<Integer, Integer> entry : expVals.entrySet()) {
        for (int i = 0; i < dataNodes(); i++) {
            if (!F.eq(caches.get(i).get(entry.getKey()), entry.getValue()))
                failedKeys.add(entry.getKey());
        }
    }

    if (!failedKeys.isEmpty()) {
        log.error("Cache content is incorrect for " + failedKeys.size() + " keys:");

        for (Integer key : failedKeys) {
            for (int i = 0; i < dataNodes(); i++) {
                IgniteCache<Integer, Integer> cache = caches.get(i);
                UUID nodeId = G.ignite(nodeName(i)).cluster().localNode().id();

                if (!F.eq(cache.get(key), expVals.get(key)))
                    log.error("key=" + key + ", expVal=" + expVals.get(key) + ", nodeId=" + nodeId);
            }
        }

        failed = true;
    }

    return !failed;
}
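The unwrap-and-check-near steps above can be factored into a small helper. The sketch below is illustrative only (the node reference and cache name are assumptions supplied by the caller) and mirrors the casts used in compareCaches.

// Illustrative helper, not part of the test above: resolve the internal cache for a node,
// returning the DHT view when the node runs a near cache.
import org.apache.ignite.Ignite;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter;

final class InternalCacheResolver {
    static GridCacheAdapter<Integer, Integer> resolve(Ignite ignite, String cacheName) {
        GridCacheAdapter<Integer, Integer> cache0 =
            (GridCacheAdapter<Integer, Integer>) ((IgniteKernal) ignite).<Integer, Integer>getCache(cacheName);

        // Near-enabled nodes keep their primary/backup entries in the DHT cache underneath.
        return cache0.isNear() ? ((GridNearCacheAdapter<Integer, Integer>) cache0).dht() : cache0;
    }
}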
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in the Apache Ignite project.
Class IgfsFragmentizerSelfTest, method testDeleteFragmentizing:
/**
 * @throws Exception If failed.
 */
public void testDeleteFragmentizing() throws Exception {
    IgfsImpl igfs = (IgfsImpl) grid(0).fileSystem("igfs");

    for (int i = 0; i < 30; i++) {
        IgfsPath path = new IgfsPath("/someFile" + i);

        try (IgfsOutputStream out = igfs.create(path, true)) {
            for (int j = 0; j < 5 * IGFS_GROUP_SIZE; j++)
                out.write(new byte[IGFS_BLOCK_SIZE]);
        }

        U.sleep(200);
    }

    igfs.clear();

    GridTestUtils.retryAssert(log, 50, 100, new CA() {
        @Override
        public void apply() {
            for (int i = 0; i < NODE_CNT; i++) {
                IgniteEx g = grid(i);

                GridCacheAdapter<Object, Object> cache = ((IgniteKernal) g).internalCache(g.igfsx("igfs").configuration().getDataCacheConfiguration().getName());

                assertTrue("Data cache is not empty [keys=" + cache.keySet() + ", node=" + g.localNode().id() + ']', cache.isEmpty());
            }
        }
    });
}
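The same internalCache lookup works outside of GridTestUtils.retryAssert. A minimal, hedged polling helper might look like the sketch below; the cache name, attempt count and delay are assumptions, not values from the test above.

// Illustrative only: poll until the node's internal cache reports empty, or give up.
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.processors.cache.GridCacheAdapter;

final class CacheEmptinessProbe {
    static boolean waitForEmpty(IgniteEx node, String cacheName, int attempts, long delayMs)
        throws InterruptedException {
        GridCacheAdapter<Object, Object> cache = ((IgniteKernal) node).internalCache(cacheName);

        for (int i = 0; i < attempts; i++) {
            if (cache.isEmpty())
                return true;

            Thread.sleep(delayMs);
        }

        return false;
    }
}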
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in the Apache Ignite project.
Class DataStreamerImpl, method load0:
/**
 * @param entries Entries.
 * @param resFut Result future.
 * @param activeKeys Active keys.
 * @param remaps Remaps count.
 */
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut, @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps) {
    try {
        assert entries != null;

        final boolean remap = remaps > 0;

        if (!remap) {
            // Failed data should be processed prior to new data.
            acquireRemapSemaphore();
        }

        if (!isWarningPrinted) {
            synchronized (this) {
                if (!allowOverwrite() && !isWarningPrinted) {
                    U.warn(log, "Data streamer will not overwrite existing cache entries for better performance " + "(to change, set allowOverwrite to true)");
                }

                isWarningPrinted = true;
            }
        }

        Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();

        boolean initPda = ctx.deploy().enabled() && jobPda == null;

        GridCacheAdapter cache = ctx.cache().internalCache(cacheName);

        if (cache == null)
            throw new IgniteCheckedException("Cache not created or already destroyed.");

        GridCacheContext cctx = cache.context();

        GridCacheGateway gate = null;

        AffinityTopologyVersion topVer;

        if (!cctx.isLocal())
            topVer = ctx.cache().context().exchange().lastTopologyFuture().get();
        else
            topVer = ctx.cache().context().exchange().readyAffinityVersion();

        List<List<ClusterNode>> assignments = cctx.affinity().assignments(topVer);

        if (!allowOverwrite() && !cctx.isLocal()) {
            // Cases where cctx required.
            gate = cctx.gate();

            gate.enter();
        }

        try {
            for (DataStreamerEntry entry : entries) {
                List<ClusterNode> nodes;

                try {
                    KeyCacheObject key = entry.getKey();

                    assert key != null;

                    if (initPda) {
                        if (cacheObjCtx.addDeploymentInfo())
                            jobPda = new DataStreamerPda(key.value(cacheObjCtx, false), entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null, rcvr);
                        else if (rcvr != null)
                            jobPda = new DataStreamerPda(rcvr);

                        initPda = false;
                    }

                    if (key.partition() == -1)
                        key.partition(cctx.affinity().partition(key, false));

                    nodes = nodes(key, topVer, cctx);
                } catch (IgniteCheckedException e) {
                    resFut.onDone(e);

                    return;
                }

                if (F.isEmpty(nodes)) {
                    resFut.onDone(new ClusterTopologyException("Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));

                    return;
                }

                for (ClusterNode node : nodes) {
                    Collection<DataStreamerEntry> col = mappings.get(node);

                    if (col == null)
                        mappings.put(node, col = new ArrayList<>());

                    col.add(entry);
                }
            }

            for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
                final UUID nodeId = e.getKey().id();

                Buffer buf = bufMappings.get(nodeId);

                if (buf == null) {
                    Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));

                    if (old != null)
                        buf = old;
                }

                final Collection<DataStreamerEntry> entriesForNode = e.getValue();

                IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {
                    @Override
                    public void apply(IgniteInternalFuture<?> t) {
                        try {
                            t.get();

                            if (activeKeys != null) {
                                for (DataStreamerEntry e : entriesForNode)
                                    activeKeys.remove(new KeyCacheObjectWrapper(e.getKey()));

                                if (activeKeys.isEmpty())
                                    resFut.onDone();
                            } else {
                                assert entriesForNode.size() == 1;

                                // That has been a single key,
                                // so complete result future right away.
                                resFut.onDone();
                            }
                        } catch (IgniteClientDisconnectedCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');

                            resFut.onDone(e1);
                        } catch (IgniteCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');

                            if (cancelled) {
                                resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " + DataStreamerImpl.this, e1));
                            } else if (remaps + 1 > maxRemapCnt) {
                                resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " + remaps, e1));
                            } else {
                                try {
                                    remapSem.acquire();

                                    final Runnable r = new Runnable() {
                                        @Override
                                        public void run() {
                                            try {
                                                if (cancelled)
                                                    closedException();

                                                load0(entriesForNode, resFut, activeKeys, remaps + 1);
                                            } catch (Throwable ex) {
                                                resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed. ", ex));
                                            } finally {
                                                remapSem.release();
                                            }
                                        }
                                    };

                                    dataToRemap.add(r);

                                    if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
                                        ctx.closure().callLocalSafe(new GPC<Boolean>() {
                                            @Override
                                            public Boolean call() {
                                                boolean locked = true;

                                                while (locked || !dataToRemap.isEmpty()) {
                                                    if (!locked && !remapOwning.compareAndSet(false, true))
                                                        return false;

                                                    try {
                                                        Runnable r = dataToRemap.poll();

                                                        if (r != null)
                                                            r.run();
                                                    } finally {
                                                        if (!dataToRemap.isEmpty())
                                                            locked = true;
                                                        else {
                                                            remapOwning.set(false);

                                                            locked = false;
                                                        }
                                                    }
                                                }

                                                return true;
                                            }
                                        }, true);
                                    }
                                } catch (InterruptedException e2) {
                                    resFut.onDone(e2);
                                }
                            }
                        }
                    }
                };

                GridCompoundFuture opFut = new SilentCompoundFuture();

                opFut.listen(lsnr);

                final List<GridFutureAdapter<?>> futs;

                try {
                    futs = buf.update(entriesForNode, topVer, assignments, opFut, remap);

                    opFut.markInitialized();
                } catch (IgniteInterruptedCheckedException e1) {
                    resFut.onDone(e1);

                    return;
                }

                if (ctx.discovery().node(nodeId) == null) {
                    if (bufMappings.remove(nodeId, buf)) {
                        final Buffer buf0 = buf;

                        waitAffinityAndRun(new Runnable() {
                            @Override
                            public void run() {
                                buf0.onNodeLeft();

                                if (futs != null) {
                                    Throwable ex = new ClusterTopologyCheckedException("Failed to wait for request completion (node has left): " + nodeId);

                                    for (int i = 0; i < futs.size(); i++)
                                        futs.get(i).onDone(ex);
                                }
                            }
                        }, ctx.discovery().topologyVersion(), false);
                    }
                }
            }
        } finally {
            if (gate != null)
                gate.leave();
        }
    } catch (Exception ex) {
        resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
    }
}
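All of the node mapping, per-node buffering and remapping above is internal to DataStreamerImpl; application code reaches it through the public IgniteDataStreamer API. A minimal usage sketch follows; the cache name, key range and allowOverwrite setting are assumptions for illustration, not values taken from load0.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class StreamerUsageSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("streamedCache");

            try (IgniteDataStreamer<Integer, Integer> streamer = ignite.dataStreamer("streamedCache")) {
                // Matches the warning printed by load0: existing entries are skipped unless this is enabled.
                streamer.allowOverwrite(true);

                for (int i = 0; i < 10_000; i++)
                    streamer.addData(i, i * 2);

                // close() flushes the remaining buffered entries; an explicit flush() is optional here.
            }
        }
    }
}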
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in the Apache Ignite project.
Class GridCommonAbstractTest, method waitForRebalancing:
/**
 * @param ignite Node.
 * @param top Topology version.
 * @throws IgniteCheckedException If failed.
 */
protected void waitForRebalancing(IgniteEx ignite, AffinityTopologyVersion top) throws IgniteCheckedException {
    if (ignite.configuration().isClientMode())
        return;

    boolean finished = false;

    long stopTime = System.currentTimeMillis() + 60_000;

    while (!finished && (System.currentTimeMillis() < stopTime)) {
        finished = true;

        if (top == null)
            top = ignite.context().discovery().topologyVersionEx();

        for (GridCacheAdapter c : ignite.context().cache().internalCaches()) {
            GridDhtPartitionDemander.RebalanceFuture fut = (GridDhtPartitionDemander.RebalanceFuture) c.preloader().rebalanceFuture();

            if (fut.topologyVersion() == null || fut.topologyVersion().compareTo(top) < 0) {
                finished = false;

                log.info("Unexpected future version, will retry [futVer=" + fut.topologyVersion() + ", expVer=" + top + ']');

                U.sleep(100);

                break;
            } else if (!fut.get()) {
                finished = false;

                log.warning("Rebalancing finished with missed partitions.");

                U.sleep(100);

                break;
            }
        }
    }

    assertTrue(finished);
}
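A narrower variant of the same check, waiting on one internal cache rather than iterating internalCaches(), could be sketched as below. This is a hedged sketch over internal APIs; the class, method and cache name are assumptions derived from the method above.

// Illustrative only: block until the cache's current rebalance future completes, returning
// whether rebalancing finished without missed partitions.
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.GridCacheAdapter;

final class SingleCacheRebalanceWait {
    static boolean awaitRebalance(IgniteEx ignite, String cacheName) throws IgniteCheckedException {
        GridCacheAdapter<?, ?> cache = ignite.context().cache().internalCache(cacheName);

        // The rebalance future completes with 'true' when all assigned partitions were preloaded.
        return Boolean.TRUE.equals(cache.preloader().rebalanceFuture().get());
    }
}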
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in the Apache Ignite project.
Class GridDhtPartitionsExchangeFuture, method processFullMessage:
/**
 * @param checkCrd If {@code true} checks that local node is exchange coordinator.
 * @param node Sender node.
 * @param msg Message.
 */
private void processFullMessage(boolean checkCrd, ClusterNode node, GridDhtPartitionsFullMessage msg) {
    try {
        assert exchId.equals(msg.exchangeId()) : msg;
        assert msg.lastVersion() != null : msg;

        timeBag.finishGlobalStage("Waiting for Full message");

        if (checkCrd) {
            assert node != null;

            synchronized (mux) {
                if (crd == null) {
                    if (log.isInfoEnabled())
                        log.info("Ignore full message, all server nodes left: " + msg);

                    return;
                }

                switch (state) {
                    case CRD:
                    case BECOME_CRD: {
                        if (log.isInfoEnabled())
                            log.info("Ignore full message, node is coordinator: " + msg);

                        return;
                    }

                    case DONE: {
                        if (log.isInfoEnabled())
                            log.info("Ignore full message, future is done: " + msg);

                        return;
                    }

                    case SRV:
                    case CLIENT: {
                        if (!crd.equals(node)) {
                            if (log.isInfoEnabled()) {
                                log.info("Received full message from non-coordinator [node=" + node.id() + ", nodeOrder=" + node.order() + ", crd=" + crd.id() + ", crdOrder=" + crd.order() + ']');
                            }

                            if (node.order() > crd.order())
                                fullMsgs.put(node, msg);

                            return;
                        } else {
                            if (!F.isEmpty(msg.getErrorsMap())) {
                                Exception e = msg.getErrorsMap().get(cctx.localNodeId());

                                if (e instanceof IgniteNeedReconnectException) {
                                    onDone(e);

                                    return;
                                }
                            }

                            AffinityTopologyVersion resVer = msg.resultTopologyVersion() != null ? msg.resultTopologyVersion() : initialVersion();

                            if (log.isInfoEnabled()) {
                                log.info("Received full message, will finish exchange [node=" + node.id() + ", resVer=" + resVer + ']');
                            }

                            finishState = new FinishState(crd.id(), resVer, msg);

                            state = ExchangeLocalState.DONE;

                            break;
                        }
                    }
                }
            }
        } else
            assert node == null : node;

        AffinityTopologyVersion resTopVer = initialVersion();

        if (exchCtx.mergeExchanges()) {
            if (msg.resultTopologyVersion() != null && !initialVersion().equals(msg.resultTopologyVersion())) {
                if (log.isInfoEnabled()) {
                    log.info("Received full message, need merge [curFut=" + initialVersion() + ", resVer=" + msg.resultTopologyVersion() + ']');
                }

                resTopVer = msg.resultTopologyVersion();

                if (cctx.exchange().mergeExchanges(this, msg)) {
                    assert cctx.kernalContext().isStopping() || cctx.kernalContext().clientDisconnected();

                    // Node is stopping, no need to further process exchange.
                    return;
                }

                assert resTopVer.equals(exchCtx.events().topologyVersion()) : "Unexpected result version [" + "msgVer=" + resTopVer + ", locVer=" + exchCtx.events().topologyVersion() + ']';
            }

            exchCtx.events().processEvents(this);

            if (localJoinExchange()) {
                Set<Integer> noAffinityGroups = cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer);

                // Prevent cache usage by a user.
                if (!noAffinityGroups.isEmpty()) {
                    List<GridCacheAdapter> closedCaches = cctx.cache().blockGateways(noAffinityGroups);

                    closedCaches.forEach(cache -> log.warning("Affinity for cache " + cache.context().name() + " has not received from coordinator during local join. " + " Probably cache is already stopped but not processed on local node yet." + " Cache proxy will be closed for user interactions for safety."));
                }
            } else {
                if (exchCtx.events().hasServerLeft())
                    cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff());
                else
                    cctx.affinity().onServerJoinWithExchangeMergeProtocol(this, false);

                for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                    if (grp.isLocal() || cacheGroupStopping(grp.groupId()))
                        continue;

                    grp.topology().beforeExchange(this, true, false);
                }
            }
        } else if (localJoinExchange() && !exchCtx.fetchAffinityOnJoin())
            cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer);
        else if (forceAffReassignment)
            cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff());

        timeBag.finishGlobalStage("Affinity recalculation");

        if (dynamicCacheStartExchange() && !F.isEmpty(exchangeGlobalExceptions)) {
            assert cctx.localNode().isClient();

            // TODO: https://issues.apache.org/jira/browse/IGNITE-8796
            // The current exchange has been successfully completed on all server nodes,
            // but has failed on that client node for some reason.
            // It looks like that we need to rollback dynamically started caches on the client node,
            // complete DynamicCacheStartFutures (if they are registered) with the cause of that failure
            // and complete current exchange without errors.
            onDone(exchangeLocE);

            return;
        }

        updatePartitionFullMap(resTopVer, msg);

        if (msg.rebalanced())
            markRebalanced();

        if (stateChangeExchange() && !F.isEmpty(msg.getErrorsMap()))
            cctx.kernalContext().state().onStateChangeError(msg.getErrorsMap(), exchActions.stateChangeRequest());

        if (firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
            DiscoveryCustomMessage discoveryCustomMessage = ((DiscoveryCustomEvent) firstDiscoEvt).customMessage();

            if (discoveryCustomMessage instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage) discoveryCustomMessage).needAssignPartitions())
                markAffinityReassign();
        }

        onDone(resTopVer, null);
    } catch (IgniteCheckedException e) {
        onDone(e);
    }
}
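The sender check at the top of this method is essentially a small decision table over the local exchange state. The self-contained sketch below restates that table outside of Ignite; every name in it is illustrative and not part of the Ignite API.

import java.util.UUID;

final class FullMessageDecision {
    enum LocalState { CRD, BECOME_CRD, DONE, SRV, CLIENT }

    enum Action { APPLY, STASH, IGNORE }

    /** Mirrors the switch in processFullMessage: apply, keep for a possible new coordinator, or drop. */
    static Action decide(LocalState state, UUID sender, long senderOrder, UUID crd, long crdOrder) {
        if (crd == null)
            return Action.IGNORE; // All server nodes left.

        switch (state) {
            case CRD:
            case BECOME_CRD:
            case DONE:
                return Action.IGNORE; // Coordinator or already finished future ignores full messages.

            case SRV:
            case CLIENT:
            default:
                if (crd.equals(sender))
                    return Action.APPLY; // Message from the known coordinator: finish the exchange.

                // A node with a higher order may be about to become coordinator: keep its message.
                return senderOrder > crdOrder ? Action.STASH : Action.IGNORE;
        }
    }
}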