Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
From the class GridPartitionedGetFuture, method map.
/**
* @param keys Keys.
* @param mapped Mappings to check for duplicates.
* @param topVer Topology version on which keys should be mapped.
*/
@Override
protected void map(Collection<KeyCacheObject> keys, Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped, AffinityTopologyVersion topVer) {
GridDhtPartitionsExchangeFuture fut = cctx.shared().exchange().lastTopologyFuture();
// Finished DHT future is required for topology validation.
if (!fut.isDone()) {
if (fut.initialVersion().after(topVer) || (fut.exchangeActions() != null && fut.exchangeActions().hasStop()))
fut = cctx.shared().exchange().lastFinishedFuture();
else {
fut.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
@Override
public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
if (fut.error() != null)
onDone(fut.error());
else {
cctx.closures().runLocalSafe(new GridPlainRunnable() {
@Override
public void run() {
map(keys, mapped, topVer);
}
}, true);
}
}
});
return;
}
}
Collection<ClusterNode> cacheNodes = CU.affinityNodes(cctx, topVer);
validate(cacheNodes, fut);
// The future may already be completed with an exception.
if (isDone())
return;
Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings = U.newHashMap(cacheNodes.size());
int keysSize = keys.size();
// Map for local (key,value) pairs.
Map<K, V> locVals = U.newHashMap(keysSize);
// True if we have remote nodes after key mapping completes.
boolean hasRmtNodes = false;
// Assign keys to nodes.
for (KeyCacheObject key : keys) hasRmtNodes |= map(key, topVer, mappings, mapped, locVals);
// The future may already be completed with an exception.
if (isDone())
return;
// Add locally read (key, value) pairs to the result.
if (!locVals.isEmpty())
add(new GridFinishedFuture<>(locVals));
// If we have remote nodes in the mapping, we should register the future in the MVCC manager.
if (hasRmtNodes)
registrateFutureInMvccManager(this);
// Create mini futures after mapping to remote nodes.
for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry : mappings.entrySet()) {
// Node for request.
ClusterNode n = entry.getKey();
// Keys for request.
LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();
assert !mappedKeys.isEmpty();
// If this is the primary or backup node for the keys.
if (n.isLocal()) {
GridDhtFuture<Collection<GridCacheEntryInfo>> fut0 = cache().getDhtAsync(n.id(), -1, mappedKeys, false, readThrough, topVer, taskName == null ? 0 : taskName.hashCode(), expiryPlc, skipVals, recovery, txLbl, mvccSnapshot());
Collection<Integer> invalidParts = fut0.invalidPartitions();
if (!F.isEmpty(invalidParts)) {
Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);
for (KeyCacheObject key : keys) {
int part = cctx.affinity().partition(key);
if (key != null && invalidParts.contains(part)) {
addNodeAsInvalid(n, part, topVer);
remapKeys.add(key);
}
}
AffinityTopologyVersion updTopVer = cctx.shared().exchange().readyAffinityVersion();
// Remap recursively.
map(remapKeys, mappings, updTopVer);
}
// Add new future.
add(fut0.chain(f -> {
try {
return createResultMap(f.get());
} catch (Exception e) {
U.error(log, "Failed to get values from dht cache [fut=" + fut0 + "]", e);
onDone(e);
return Collections.emptyMap();
}
}));
} else {
MiniFuture miniFut = new MiniFuture(n, mappedKeys, topVer);
GridCacheMessage req = miniFut.createGetRequest(futId);
// Append new future.
add(miniFut);
try {
cctx.io().send(n, req, cctx.ioPolicy());
} catch (IgniteCheckedException e) {
// Fail the whole thing.
if (e instanceof ClusterTopologyCheckedException)
miniFut.onNodeLeft((ClusterTopologyCheckedException) e);
else
miniFut.onResult(e);
}
}
}
markInitialized();
}
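The method above checks whether the last exchange future is finished and, if not, attaches an IgniteInClosure listener that re-runs map() once the topology is ready. The following is a minimal sketch of that listen-with-IgniteInClosure pattern on the public IgniteFuture API; it is not part of the Ignite source, and the cache name "demoCache" is an illustrative assumption.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgniteInClosure;

public class InClosureListenSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("demoCache");

            cache.put(1, "value-1");

            IgniteFuture<String> fut = cache.getAsync(1);

            // IgniteInClosure<E> declares a single method, apply(E), and is the
            // callback type accepted by IgniteFuture.listen(). The closure runs
            // when the future completes (immediately if it is already done).
            fut.listen(new IgniteInClosure<IgniteFuture<String>>() {
                @Override public void apply(IgniteFuture<String> f) {
                    System.out.println("Got value asynchronously: " + f.get());
                }
            });
        }
    }
}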
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
From the class DataStreamerImpl, method load0.
/**
* @param entries Entries.
* @param resFut Result future.
* @param activeKeys Active keys.
* @param remaps Remaps count.
* @param remapNode Node for remap. In case an update with {@code allowOverwrite() == false} fails on one node,
* we don't need to send the update request to all affinity nodes again if the topology version has not changed.
* @param remapTopVer Topology version.
*/
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut, @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps, ClusterNode remapNode, AffinityTopologyVersion remapTopVer) {
try {
assert entries != null;
final boolean remap = remaps > 0;
if (!remap) {
// Failed data should be processed prior to new data.
acquireRemapSemaphore();
}
if (!isWarningPrinted) {
synchronized (this) {
if (!allowOverwrite() && !isWarningPrinted) {
U.warn(log, "Data streamer will not overwrite existing cache entries for better performance " + "(to change, set allowOverwrite to true)");
}
isWarningPrinted = true;
}
}
Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();
boolean initPda = ctx.deploy().enabled() && jobPda == null;
GridCacheAdapter cache = ctx.cache().internalCache(cacheName);
if (cache == null)
throw new IgniteCheckedException("Cache not created or already destroyed.");
GridCacheContext cctx = cache.context();
GridCacheGateway gate = null;
AffinityTopologyVersion topVer;
if (!cctx.isLocal()) {
GridDhtPartitionsExchangeFuture exchFut = ctx.cache().context().exchange().lastTopologyFuture();
if (!exchFut.isDone()) {
ExchangeActions acts = exchFut.exchangeActions();
if (acts != null && acts.cacheStopped(CU.cacheId(cacheName)))
throw new CacheStoppedException(cacheName);
}
// It is safe to block here even if the cache gate is acquired.
topVer = exchFut.get();
} else
topVer = ctx.cache().context().exchange().readyAffinityVersion();
List<List<ClusterNode>> assignments = cctx.affinity().assignments(topVer);
if (!allowOverwrite() && !cctx.isLocal()) {
// Cases where cctx is required.
gate = cctx.gate();
gate.enter();
}
try {
for (DataStreamerEntry entry : entries) {
List<ClusterNode> nodes;
try {
KeyCacheObject key = entry.getKey();
assert key != null;
if (initPda) {
if (cacheObjCtx.addDeploymentInfo())
jobPda = new DataStreamerPda(key.value(cacheObjCtx, false), entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null, rcvr);
else if (rcvr != null)
jobPda = new DataStreamerPda(rcvr);
initPda = false;
}
if (key.partition() == -1)
key.partition(cctx.affinity().partition(key, false));
if (!allowOverwrite() && remapNode != null && F.eq(topVer, remapTopVer))
nodes = Collections.singletonList(remapNode);
else
nodes = nodes(key, topVer, cctx);
} catch (IgniteCheckedException e) {
resFut.onDone(e);
return;
}
if (F.isEmpty(nodes)) {
resFut.onDone(new ClusterTopologyException("Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));
return;
}
for (ClusterNode node : nodes) {
Collection<DataStreamerEntry> col = mappings.get(node);
if (col == null)
mappings.put(node, col = new ArrayList<>());
col.add(entry);
}
}
for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
final ClusterNode node = e.getKey();
final UUID nodeId = e.getKey().id();
Buffer buf = bufMappings.get(nodeId);
if (buf == null) {
Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));
if (old != null)
buf = old;
}
final Collection<DataStreamerEntry> entriesForNode = e.getValue();
IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {
@Override
public void apply(IgniteInternalFuture<?> t) {
try {
t.get();
if (activeKeys != null) {
for (DataStreamerEntry e : entriesForNode) activeKeys.remove(new KeyCacheObjectWrapper(e.getKey()));
if (activeKeys.isEmpty())
resFut.onDone();
} else {
assert entriesForNode.size() == 1;
// That was a single key,
// so complete the result future right away.
resFut.onDone();
}
} catch (IgniteClientDisconnectedCheckedException e1) {
if (log.isDebugEnabled())
log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');
resFut.onDone(e1);
} catch (IgniteCheckedException e1) {
if (log.isDebugEnabled())
log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');
if (cancelled) {
resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " + DataStreamerImpl.this, e1));
} else if (remaps + 1 > maxRemapCnt) {
resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " + remaps, e1));
} else if (X.hasCause(e1, IgniteClusterReadOnlyException.class)) {
resFut.onDone(new IgniteClusterReadOnlyException("Failed to finish operation. Cluster in read-only mode!", e1));
} else {
try {
remapSem.acquire();
final Runnable r = new Runnable() {
@Override
public void run() {
try {
if (cancelled)
closedException();
load0(entriesForNode, resFut, activeKeys, remaps + 1, node, topVer);
} catch (Throwable ex) {
resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed. ", ex));
} finally {
remapSem.release();
}
}
};
dataToRemap.add(r);
if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
ctx.closure().callLocalSafe(new GPC<Boolean>() {
@Override
public Boolean call() {
boolean locked = true;
while (locked || !dataToRemap.isEmpty()) {
if (!locked && !remapOwning.compareAndSet(false, true))
return false;
try {
Runnable r = dataToRemap.poll();
if (r != null)
r.run();
} finally {
if (!dataToRemap.isEmpty())
locked = true;
else {
remapOwning.set(false);
locked = false;
}
}
}
return true;
}
}, true);
}
} catch (InterruptedException e2) {
resFut.onDone(e2);
}
}
}
}
};
GridCompoundFuture opFut = new SilentCompoundFuture();
opFut.listen(lsnr);
final List<GridFutureAdapter<?>> futs;
try {
futs = buf.update(entriesForNode, topVer, assignments, opFut, remap);
opFut.markInitialized();
} catch (IgniteInterruptedCheckedException e1) {
resFut.onDone(e1);
return;
}
if (ctx.discovery().node(nodeId) == null) {
if (bufMappings.remove(nodeId, buf)) {
final Buffer buf0 = buf;
waitAffinityAndRun(new GridPlainRunnable() {
@Override
public void run() {
buf0.onNodeLeft();
if (futs != null) {
Throwable ex = new ClusterTopologyCheckedException("Failed to wait for request completion (node has left): " + nodeId);
for (int i = 0; i < futs.size(); i++) futs.get(i).onDone(ex);
}
}
}, ctx.discovery().topologyVersion(), false);
}
}
}
} finally {
if (gate != null)
gate.leave();
}
} catch (Exception ex) {
resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
}
}
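load0() attaches the IgniteInClosure as a per-node batch completion listener: on success it clears the active keys, on failure it schedules a remap. On the public API the same listener type can be used to observe the futures returned by IgniteDataStreamer.addData(). This is a minimal sketch assuming a cache named "demoCache"; the error handling is illustrative and does not reproduce the internal remap logic above.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgniteInClosure;

public class StreamerListenSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("demoCache");

            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("demoCache")) {
                // By default the streamer does not overwrite existing entries
                // (allowOverwrite() == false), which is what the warning in load0() is about.
                for (int i = 0; i < 100; i++) {
                    IgniteFuture<?> fut = streamer.addData(i, "value-" + i);

                    // Completion callback for this entry's batch; errors surface via f.get().
                    fut.listen(new IgniteInClosure<IgniteFuture<?>>() {
                        @Override public void apply(IgniteFuture<?> f) {
                            try {
                                f.get();
                            }
                            catch (Exception e) {
                                System.err.println("Batch failed: " + e);
                            }
                        }
                    });
                }

                streamer.flush();
            }
        }
    }
}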
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
From the class GridCachePartitionExchangeManager, method start0.
/**
* {@inheritDoc}
*/
@Override
protected void start0() throws IgniteCheckedException {
super.start0();
exchWorker = new ExchangeWorker();
latchMgr = new ExchangeLatchManager(cctx.kernalContext());
cctx.gridEvents().addDiscoveryEventListener(discoLsnr, EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_NODE_FAILED, EVT_DISCOVERY_CUSTOM_EVT);
cctx.io().addCacheHandler(0, GridDhtPartitionsSingleMessage.class, new MessageHandler<GridDhtPartitionsSingleMessage>() {
@Override
public void onMessage(final ClusterNode node, final GridDhtPartitionsSingleMessage msg) {
GridDhtPartitionExchangeId exchangeId = msg.exchangeId();
if (exchangeId != null) {
GridDhtPartitionsExchangeFuture fut = exchangeFuture(exchangeId);
boolean fastReplied = fut.fastReplyOnSingleMessage(node, msg);
if (fastReplied) {
if (log.isInfoEnabled())
log.info("Fast replied to single message " + "[exchId=" + exchangeId + ", nodeId=" + node.id() + "]");
return;
}
} else {
GridDhtPartitionsExchangeFuture cur = lastTopologyFuture();
if (!cur.isDone() && cur.changedAffinity() && !msg.restoreState()) {
cur.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
@Override
public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
if (fut.error() == null)
processSinglePartitionUpdate(node, msg);
}
});
return;
}
}
processSinglePartitionUpdate(node, msg);
}
});
cctx.io().addCacheHandler(0, GridDhtPartitionsFullMessage.class, new MessageHandler<GridDhtPartitionsFullMessage>() {
@Override
public void onMessage(ClusterNode node, GridDhtPartitionsFullMessage msg) {
if (msg.exchangeId() == null) {
GridDhtPartitionsExchangeFuture currentExchange = lastTopologyFuture();
if (currentExchange != null && currentExchange.addOrMergeDelayedFullMessage(node, msg)) {
if (log.isInfoEnabled()) {
log.info("Delay process full message without exchange id (there is exchange in progress) " + "[nodeId=" + node.id() + "]");
}
return;
}
}
processFullPartitionUpdate(node, msg);
}
});
cctx.io().addCacheHandler(0, GridDhtPartitionsSingleRequest.class, new MessageHandler<GridDhtPartitionsSingleRequest>() {
@Override
public void onMessage(ClusterNode node, GridDhtPartitionsSingleRequest msg) {
processSinglePartitionRequest(node, msg);
}
});
if (!cctx.kernalContext().clientNode()) {
for (int cnt = 0; cnt < cctx.gridConfig().getRebalanceThreadPoolSize(); cnt++) {
final int idx = cnt;
cctx.io().addOrderedCacheGroupHandler(cctx, rebalanceTopic(cnt), new CI2<UUID, GridCacheGroupIdMessage>() {
@Override
public void apply(final UUID id, final GridCacheGroupIdMessage m) {
if (!enterBusy())
return;
try {
CacheGroupContext grp = cctx.cache().cacheGroup(m.groupId());
if (grp != null) {
if (m instanceof GridDhtPartitionSupplyMessage) {
grp.preloader().handleSupplyMessage(id, (GridDhtPartitionSupplyMessage) m);
return;
} else if (m instanceof GridDhtPartitionDemandMessage) {
grp.preloader().handleDemandMessage(idx, id, (GridDhtPartitionDemandMessage) m);
return;
} else if (m instanceof GridDhtPartitionDemandLegacyMessage) {
grp.preloader().handleDemandMessage(idx, id, new GridDhtPartitionDemandMessage((GridDhtPartitionDemandLegacyMessage) m));
return;
} else
U.error(log, "Unsupported message type: " + m.getClass().getName());
}
U.warn(log, "Cache group with id=" + m.groupId() + " is stopped or absent");
} finally {
leaveBusy();
}
}
});
}
}
MetricRegistry mreg = cctx.kernalContext().metric().registry(PME_METRICS);
mreg.register(PME_DURATION, () -> currentPMEDuration(false), "Current PME duration in milliseconds.");
mreg.register(PME_OPS_BLOCKED_DURATION, () -> currentPMEDuration(true), "Current PME cache operations blocked duration in milliseconds.");
durationHistogram = mreg.findMetric(PME_DURATION_HISTOGRAM);
blockingDurationHistogram = mreg.findMetric(PME_OPS_BLOCKED_DURATION_HISTOGRAM);
MetricRegistry clusterReg = cctx.kernalContext().metric().registry(CLUSTER_METRICS);
rebalanced = clusterReg.booleanMetric(REBALANCED, "True if the cluster has fully achieved rebalanced state. Note that an inactive cluster always has" + " this metric in False regardless of the real partitions state.");
startLatch.countDown();
}
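In the single-message handler above, IgniteInClosure is used to defer processing until the in-progress exchange future completes, while finished futures are handled immediately. A minimal public-API sketch of that "run now if done, otherwise defer via listen()" control flow follows; the putAsync() future and the process() method are illustrative assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgniteInClosure;

public class DeferUntilDoneSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("demoCache");

            IgniteFuture<Void> inProgress = cache.putAsync(1, "value-1");

            if (inProgress.isDone())
                process(); // Fast path: nothing to wait for.
            else {
                // Deferred path: the closure runs once the operation completes,
                // mirroring how the exchange manager postpones the partition update.
                inProgress.listen(new IgniteInClosure<IgniteFuture<Void>>() {
                    @Override public void apply(IgniteFuture<Void> f) {
                        process();
                    }
                });
            }
        }
    }

    private static void process() {
        System.out.println("Processing after the future completed.");
    }
}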
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
From the class GridDhtPartitionDemander, method forceRebalance.
/**
* @return Rebalance future.
*/
IgniteInternalFuture<Boolean> forceRebalance() {
GridTimeoutObject obj = lastTimeoutObj.getAndSet(null);
if (obj != null)
ctx.time().removeTimeoutObject(obj);
final GridDhtPartitionsExchangeFuture exchFut = lastExchangeFut;
if (exchFut != null) {
if (log.isDebugEnabled())
log.debug("Forcing rebalance event for future: " + exchFut);
final GridFutureAdapter<Boolean> fut = new GridFutureAdapter<>();
exchFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
@Override
public void apply(IgniteInternalFuture<AffinityTopologyVersion> t) {
if (t.error() == null) {
IgniteInternalFuture<Boolean> fut0 = ctx.exchange().forceRebalance(exchFut.exchangeId());
fut0.listen(new IgniteInClosure<IgniteInternalFuture<Boolean>>() {
@Override
public void apply(IgniteInternalFuture<Boolean> fut1) {
try {
fut.onDone(fut1.get());
} catch (Exception e) {
fut.onDone(e);
}
}
});
} else
fut.onDone(t.error());
}
});
return fut;
} else if (log.isDebugEnabled())
log.debug("Ignoring force rebalance request (no topology event happened yet).");
return new GridFinishedFuture<>(true);
}
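forceRebalance() bridges futures with nested listeners: an IgniteInClosure on the exchange future triggers the rebalance, and a second IgniteInClosure copies the rebalance outcome into a GridFutureAdapter. Below is a hedged sketch of the same bridging idea using the public API, transferring an IgniteFuture result into a java.util.concurrent.CompletableFuture; the cache read is an illustrative stand-in for the rebalance future.

import java.util.concurrent.CompletableFuture;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgniteInClosure;

public class FutureBridgeSketch {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("demoCache");

            cache.put(1, "value-1");

            CompletableFuture<String> bridge = new CompletableFuture<>();

            // The closure transfers the outcome (result or error) from the Ignite
            // future into the CompletableFuture, the same way forceRebalance()
            // transfers it into a GridFutureAdapter.
            cache.getAsync(1).listen(new IgniteInClosure<IgniteFuture<String>>() {
                @Override public void apply(IgniteFuture<String> f) {
                    try {
                        bridge.complete(f.get());
                    }
                    catch (Exception e) {
                        bridge.completeExceptionally(e);
                    }
                }
            });

            System.out.println("Bridged result: " + bridge.get());
        }
    }
}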
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
From the class CacheMvccSqlUpdateCountersTest, method testUpdateCountersMultithreaded.
/**
* @throws Exception If failed.
*/
@Test
public void testUpdateCountersMultithreaded() throws Exception {
final int writers = 4;
final int readers = 0;
int parts = 8;
int keys = 20;
final Map<Integer, AtomicLong> tracker = new ConcurrentHashMap<>();
for (int i = 0; i < keys; i++) tracker.put(i, new AtomicLong(1));
final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
@Override
public void apply(IgniteCache<Object, Object> cache) {
final IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO MvccTestAccount(_key, val, updateCnt) VALUES " + "(?, 0, 1)");
for (int i = 0; i < keys; i++) {
try (FieldsQueryCursor<List<?>> cur = cache.query(qry.setArgs(i))) {
assertEquals(1L, cur.iterator().next().get(0));
}
tx.commit();
}
}
}
};
GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
@Override
public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
Map<Integer, AtomicLong> acc = new HashMap<>();
int v = 0;
while (!stop.get()) {
int cnt = rnd.nextInt(keys / 3);
if (cnt == 0)
cnt = 2;
// Generate key set to be changed in tx.
while (acc.size() < cnt) acc.put(rnd.nextInt(cnt), new AtomicLong());
TestCache<Integer, Integer> cache = randomCache(caches, rnd);
boolean success = true;
try {
IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
Map<Integer, MvccTestAccount> allVals = readAllByMode(cache.cache, tracker.keySet(), SQL, ACCOUNT_CODEC);
boolean rmv = allVals.size() > keys * 2 / 3;
for (Map.Entry<Integer, AtomicLong> e : acc.entrySet()) {
int key = e.getKey();
AtomicLong accCntr = e.getValue();
boolean exists = allVals.containsKey(key);
int delta = 0;
boolean createdInTx = false;
if (rmv && rnd.nextBoolean()) {
if (exists)
delta = 1;
SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM MvccTestAccount WHERE _key=" + key);
cache.cache.query(qry).getAll();
} else {
delta = 1;
if (!exists)
createdInTx = true;
SqlFieldsQuery qry = new SqlFieldsQuery("MERGE INTO MvccTestAccount " + "(_key, val, updateCnt) VALUES (" + key + ", " + rnd.nextInt(100) + ", 1)");
cache.cache.query(qry).getAll();
}
if (rnd.nextBoolean()) {
if (createdInTx)
// Do not count cases when a key is created and removed in the same tx.
delta = 0;
SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM MvccTestAccount WHERE _key=" + key);
cache.cache.query(qry).getAll();
} else {
delta = 1;
SqlFieldsQuery qry = new SqlFieldsQuery("MERGE INTO MvccTestAccount " + "(_key, val, updateCnt) VALUES (" + key + ", " + rnd.nextInt(100) + ", 1)");
cache.cache.query(qry).getAll();
}
accCntr.addAndGet(delta);
}
tx.commit();
}
} catch (Exception e) {
handleTxException(e);
success = false;
int r = 0;
for (Map.Entry<Integer, AtomicLong> en : acc.entrySet()) {
if (((IgniteCacheProxy) cache.cache).context().affinity().partition(en.getKey()) == 0)
r += en.getValue().intValue();
}
} finally {
cache.readUnlock();
if (success) {
v++;
for (Map.Entry<Integer, AtomicLong> e : acc.entrySet()) {
int k = e.getKey();
long updCntr = e.getValue().get();
tracker.get(k).addAndGet(updCntr);
}
int r = 0;
for (Map.Entry<Integer, AtomicLong> en : acc.entrySet()) {
if (((IgniteCacheProxy) cache.cache).context().affinity().partition(en.getKey()) == 0)
r += en.getValue().intValue();
}
}
acc.clear();
}
}
info("Writer done, updates: " + v);
}
};
GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
@Override
public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
// No-op.
}
};
readWriteTest(null, 4, 1, 2, parts, writers, readers, DFLT_TEST_TIME, new InitIndexing(Integer.class, MvccTestAccount.class), init, writer, reader);
Map<Integer, AtomicLong> updPerParts = new HashMap<>(parts);
Affinity aff = grid(1).cachex(DEFAULT_CACHE_NAME).affinity();
for (Map.Entry<Integer, AtomicLong> e : tracker.entrySet()) {
int k = e.getKey();
long updCntr = e.getValue().get();
int p = aff.partition(k);
AtomicLong cntr = updPerParts.get(p);
if (cntr == null) {
cntr = new AtomicLong();
updPerParts.putIfAbsent(p, cntr);
}
cntr.addAndGet(updCntr);
}
for (Map.Entry<Integer, AtomicLong> e : updPerParts.entrySet()) checkUpdateCounters(DEFAULT_CACHE_NAME, e.getKey(), e.getValue().get());
}
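In this test, IgniteInClosure serves as a plain serializable single-argument callback handed to the test harness (the init closure that seeds the MvccTestAccount table). Because it has a single apply() method, it can also be written as a lambda. A minimal sketch follows, assuming an ordinary key-value cache instead of the SQL-backed table used by the test.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteInClosure;

public class InitClosureSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Long> cache = ignite.getOrCreateCache("demoCache");

            // Reusable initialization callback, analogous to the test's init closure.
            IgniteInClosure<IgniteCache<Integer, Long>> init = c -> {
                for (int i = 0; i < 20; i++)
                    c.put(i, 1L);
            };

            init.apply(cache);

            System.out.println("Seeded entries: " + cache.size());
        }
    }
}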