Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method updateTopologyVersion.
/**
 * {@inheritDoc}
 */
@Override
public void updateTopologyVersion(GridDhtTopologyFuture exchFut, @NotNull DiscoCache discoCache,
    long updSeq, boolean stopping) throws IgniteInterruptedCheckedException {
    U.writeLock(lock);

    try {
        AffinityTopologyVersion exchTopVer = exchFut.initialVersion();

        // The update is correct if the exchange topology version is strictly newer,
        // or if it is the same but the incoming discovery cache is newer.
        boolean isCorrectUpdate = exchTopVer.compareTo(readyTopVer) > 0 ||
            (exchTopVer.compareTo(readyTopVer) == 0 &&
                this.discoCache != null &&
                discoCache.version().compareTo(this.discoCache.version()) > 0);

        assert isCorrectUpdate : "Invalid topology version [grp=" + grp.cacheOrGroupName() +
            ", topVer=" + readyTopVer + ", exchTopVer=" + exchTopVer +
            ", discoCacheVer=" + (this.discoCache != null ? this.discoCache.version() : "None") +
            ", exchDiscoCacheVer=" + discoCache.version() + ", fut=" + exchFut + ']';

        this.stopping = stopping;

        updateSeq.setIfGreater(updSeq);

        topReadyFut = exchFut;

        rebalancedTopVer = AffinityTopologyVersion.NONE;

        lastTopChangeVer = exchTopVer;

        this.discoCache = discoCache;
    }
    finally {
        lock.writeLock().unlock();
    }
}
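The correctness check above relies on AffinityTopologyVersion being Comparable: versions are ordered by the major discovery topology version first and by the minor version second. A minimal standalone sketch of that ordering (the class name and version values below are illustrative, not taken from the method above):

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;

public class TopVerOrderingSketch {
    public static void main(String[] args) {
        // A minor bump (e.g. a dynamic cache start) yields a "newer" version
        // even when the major discovery topology version is unchanged.
        AffinityTopologyVersion ready = new AffinityTopologyVersion(5, 0);
        AffinityTopologyVersion exch = new AffinityTopologyVersion(5, 1);

        assert exch.compareTo(ready) > 0; // Same major version, newer minor version.
        assert new AffinityTopologyVersion(6, 0).compareTo(exch) > 0; // A newer major version wins.
        assert AffinityTopologyVersion.NONE.compareTo(ready) < 0; // NONE sorts before any real version.
    }
}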
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method afterExchange.
/**
 * {@inheritDoc}
 */
@Override
public boolean afterExchange(GridDhtPartitionsExchangeFuture exchFut) {
    boolean changed = false;

    int num = grp.affinity().partitions();

    AffinityTopologyVersion topVer = exchFut.context().events().topologyVersion();

    assert grp.affinity().lastVersion().equals(topVer) : "Affinity is not initialized " +
        "[grp=" + grp.cacheOrGroupName() + ", topVer=" + topVer +
        ", affVer=" + grp.affinity().lastVersion() + ", fut=" + exchFut + ']';

    ctx.database().checkpointReadLock();

    try {
        lock.writeLock().lock();

        try {
            if (stopping)
                return false;

            assert readyTopVer.initialized() : readyTopVer;
            assert lastTopChangeVer.equals(readyTopVer);

            if (log.isDebugEnabled()) {
                log.debug("Partition map before afterExchange [grp=" + grp.cacheOrGroupName() +
                    ", exchId=" + exchFut.exchangeId() + ", fullMap=" + fullMapString() + ']');
            }

            long updateSeq = this.updateSeq.incrementAndGet();

            for (int p = 0; p < num; p++) {
                GridDhtLocalPartition locPart = localPartition0(p, topVer, false, false, false);

                if (partitionLocalNode(p, topVer)) {
                    // This partition will be created during the next topology event,
                    // which obviously has not happened at this point.
                    if (locPart == null) {
                        if (log.isDebugEnabled()) {
                            log.debug("Skipping local partition afterExchange (will not create) [" +
                                "grp=" + grp.cacheOrGroupName() + ", p=" + p + ']');
                        }

                        continue;
                    }

                    GridDhtPartitionState state = locPart.state();

                    if (state == MOVING) {
                        if (grp.rebalanceEnabled()) {
                            Collection<ClusterNode> owners = owners(p);

                            // If there are no other owners, then become an owner.
                            if (F.isEmpty(owners)) {
                                boolean owned = locPart.own();

                                assert owned : "Failed to own partition [grp=" + grp.cacheOrGroupName() +
                                    ", locPart=" + locPart + ']';

                                updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer);

                                changed = true;

                                if (grp.eventRecordable(EVT_CACHE_REBALANCE_PART_DATA_LOST)) {
                                    DiscoveryEvent discoEvt = exchFut.events().lastEvent();

                                    grp.addRebalanceEvent(p, EVT_CACHE_REBALANCE_PART_DATA_LOST,
                                        discoEvt.eventNode(), discoEvt.type(), discoEvt.timestamp());
                                }

                                if (log.isDebugEnabled()) {
                                    log.debug("Owned partition [grp=" + grp.cacheOrGroupName() +
                                        ", part=" + locPart + ']');
                                }
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Will not own partition (there are owners to rebalance from) [grp=" +
                                    grp.cacheOrGroupName() + ", locPart=" + locPart + ", owners=" + owners + ']');
                        }
                        else
                            updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer);
                    }
                }
                else {
                    if (locPart != null) {
                        GridDhtPartitionState state = locPart.state();

                        if (state == MOVING) {
                            locPart.rent(false);

                            updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer);

                            changed = true;

                            if (log.isDebugEnabled()) {
                                log.debug("Evicting " + state + " partition (it does not belong to affinity) [" +
                                    "grp=" + grp.cacheOrGroupName() + ", part=" + locPart + ']');
                            }
                        }
                    }
                }
            }

            AffinityAssignment aff = grp.affinity().readyAffinity(topVer);

            if (node2part != null && node2part.valid())
                changed |= checkEvictions(updateSeq, aff);

            updateRebalanceVersion(aff.assignment());

            consistencyCheck();
        }
        finally {
            lock.writeLock().unlock();
        }
    }
    finally {
        ctx.database().checkpointReadUnlock();
    }

    return changed;
}
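The MOVING branch above encodes a simple decision: a local MOVING partition is promoted to OWNING in place only when rebalancing is enabled and no other node owns the partition, since in that case there is nothing to rebalance from. A plain-Java restatement of that rule (the class and method names are illustrative):

import java.util.Collection;

final class OwnershipRuleSketch {
    /** Returns {@code true} if a local MOVING partition should be owned in place. */
    static boolean shouldOwnInPlace(boolean rebalanceEnabled, Collection<?> owners) {
        // With no remote owners the local copy is the only data available,
        // so the node takes ownership instead of waiting for rebalancing.
        return rebalanceEnabled && (owners == null || owners.isEmpty());
    }
}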
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class PlatformDataStreamer, method processInLongOutLong.
/**
 * {@inheritDoc}
 */
@Override
public long processInLongOutLong(int type, final long val) throws IgniteCheckedException {
    switch (type) {
        case OP_SET_ALLOW_OVERWRITE:
            ldr.allowOverwrite(val == TRUE);

            return TRUE;

        case OP_SET_PER_NODE_BUFFER_SIZE:
            ldr.perNodeBufferSize((int)val);

            return TRUE;

        case OP_SET_SKIP_STORE:
            ldr.skipStore(val == TRUE);

            return TRUE;

        case OP_SET_PER_NODE_PARALLEL_OPS:
            ldr.perNodeParallelOperations((int)val);

            return TRUE;

        case OP_LISTEN_TOPOLOGY: {
            lsnr = new GridLocalEventListener() {
                @Override public void onEvent(Event evt) {
                    DiscoveryEvent discoEvt = (DiscoveryEvent)evt;

                    long topVer = discoEvt.topologyVersion();

                    int topSize = platformCtx.kernalContext().discovery()
                        .cacheNodes(cacheName, new AffinityTopologyVersion(topVer)).size();

                    platformCtx.gateway().dataStreamerTopologyUpdate(val, topVer, topSize);
                }
            };

            platformCtx.kernalContext().event().addLocalEventListener(lsnr,
                EVT_NODE_JOINED, EVT_NODE_FAILED, EVT_NODE_LEFT);

            GridDiscoveryManager discoMgr = platformCtx.kernalContext().discovery();

            AffinityTopologyVersion topVer =
                platformCtx.kernalContext().cache().context().exchange().lastTopologyFuture().get();

            int topSize = discoMgr.cacheNodes(cacheName, topVer).size();

            platformCtx.gateway().dataStreamerTopologyUpdate(val, topVer.topologyVersion(), topSize);

            return TRUE;
        }

        case OP_ALLOW_OVERWRITE:
            return ldr.allowOverwrite() ? TRUE : FALSE;

        case OP_PER_NODE_BUFFER_SIZE:
            return ldr.perNodeBufferSize();

        case OP_SKIP_STORE:
            return ldr.skipStore() ? TRUE : FALSE;

        case OP_PER_NODE_PARALLEL_OPS:
            return ldr.perNodeParallelOperations();

        case OP_GET_TIMEOUT:
            return ldr.timeout();

        case OP_SET_TIMEOUT:
            ldr.timeout(val);

            return TRUE;
    }

    return super.processInLongOutLong(type, val);
}
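The OP_LISTEN_TOPOLOGY branch shows the usual conversion from the raw long carried by a DiscoveryEvent to an AffinityTopologyVersion. A minimal sketch of that conversion (the helper class and method are hypothetical):

import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;

final class TopVerConversionSketch {
    /** Discovery events carry only the major version, so the minor version defaults to 0. */
    static AffinityTopologyVersion fromDiscoveryEvent(DiscoveryEvent evt) {
        return new AffinityTopologyVersion(evt.topologyVersion());
    }
}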
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class GridDhtTxLocalAdapter, method lockAllAsync.
/**
 * @param cacheCtx Cache context.
 * @param entries Entries to lock.
 * @param msgId Message ID.
 * @param read Read flag.
 * @param needRetVal Return value flag.
 * @param createTtl TTL for create operation.
 * @param accessTtl TTL for read operation.
 * @param skipStore Skip store flag.
 * @param keepBinary Keep binary flag.
 * @param nearCache {@code True} if near cache is enabled on the originating node.
 * @return Lock future.
 */
@SuppressWarnings("ForLoopReplaceableByForEach")
IgniteInternalFuture<GridCacheReturn> lockAllAsync(
    GridCacheContext cacheCtx,
    List<GridCacheEntryEx> entries,
    long msgId,
    final boolean read,
    final boolean needRetVal,
    long createTtl,
    long accessTtl,
    boolean skipStore,
    boolean keepBinary,
    boolean nearCache
) {
    try {
        checkValid();
    }
    catch (IgniteCheckedException e) {
        return new GridFinishedFuture<>(e);
    }

    final GridCacheReturn ret = new GridCacheReturn(localResult(), false);

    if (F.isEmpty(entries))
        return new GridFinishedFuture<>(ret);

    init();

    onePhaseCommit(onePhaseCommit);

    try {
        Set<KeyCacheObject> skipped = null;

        AffinityTopologyVersion topVer = topologyVersion();

        GridDhtCacheAdapter dhtCache = cacheCtx.isNear() ? cacheCtx.near().dht() : cacheCtx.dht();

        // Enlist locks into transaction.
        for (int i = 0; i < entries.size(); i++) {
            GridCacheEntryEx entry = entries.get(i);

            KeyCacheObject key = entry.key();

            IgniteTxEntry txEntry = entry(entry.txKey());

            // First-time access.
            if (txEntry == null) {
                GridDhtCacheEntry cached;

                while (true) {
                    try {
                        cached = dhtCache.entryExx(key, topVer);

                        cached.unswap(read);

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry: " + key);
                    }
                }

                addActiveCache(dhtCache.context(), false);

                txEntry = addEntry(NOOP, null, null, null, cached, null, CU.empty0(),
                    false, -1L, -1L, null, skipStore, keepBinary, nearCache);

                if (read)
                    txEntry.ttl(accessTtl);

                txEntry.cached(cached);

                addReader(msgId, cached, txEntry, topVer);
            }
            else {
                if (skipped == null)
                    skipped = new GridLeanSet<>();

                skipped.add(key);
            }
        }

        assert pessimistic();

        Collection<KeyCacheObject> keys = F.viewReadOnly(entries, CU.entry2Key());

        // Acquire locks only after the operation has been added to the write set.
        // Otherwise, during rollback we would not know whether locks need
        // to be rolled back.
        // Lose all skipped and previously locked keys (we cannot reenter locks here).
        final Collection<KeyCacheObject> passedKeys = skipped != null ? F.view(keys, F0.notIn(skipped)) : keys;

        if (log.isDebugEnabled())
            log.debug("Lock keys: " + passedKeys);

        return obtainLockAsync(cacheCtx, ret, passedKeys, read, needRetVal,
            createTtl, accessTtl, skipStore, keepBinary);
    }
    catch (IgniteCheckedException e) {
        setRollbackOnly();

        return new GridFinishedFuture<>(e);
    }
}
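The skipped-key handling above relies on Ignite's internal functional utilities (F.view, F0.notIn). A plain-Java restatement of the same filtering, with illustrative names, may make the intent clearer:

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

final class PassedKeysSketch {
    /** Excludes keys already enlisted in the transaction: locks cannot be reentered for them. */
    static <K> List<K> passedKeys(List<K> keys, Set<K> skipped) {
        if (skipped == null)
            return keys; // Nothing was previously enlisted; lock every key.

        return keys.stream().filter(k -> !skipped.contains(k)).collect(Collectors.toList());
    }
}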
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class DataStreamProcessor, method processRequest.
/**
 * @param nodeId Sender ID.
 * @param req Request.
 */
private void processRequest(final UUID nodeId, final DataStreamerRequest req) {
    if (!busyLock.enterBusy()) {
        if (log.isDebugEnabled())
            log.debug("Ignoring data load request (node is stopping): " + req);

        return;
    }

    try {
        if (log.isDebugEnabled())
            log.debug("Processing data load request: " + req);

        AffinityTopologyVersion locAffVer = ctx.cache().context().exchange().readyAffinityVersion();
        AffinityTopologyVersion rmtAffVer = req.topologyVersion();

        if (locAffVer.compareTo(rmtAffVer) < 0) {
            if (log.isDebugEnabled())
                log.debug("Received request has higher affinity topology version [request=" + req +
                    ", locTopVer=" + locAffVer + ", rmtTopVer=" + rmtAffVer + ']');

            IgniteInternalFuture<?> fut = ctx.cache().context().exchange().affinityReadyFuture(rmtAffVer);

            if (fut != null && !fut.isDone()) {
                final byte plc = threadIoPolicy();

                fut.listen(new CI1<IgniteInternalFuture<?>>() {
                    @Override public void apply(IgniteInternalFuture<?> t) {
                        ctx.closure().runLocalSafe(new Runnable() {
                            @Override public void run() {
                                processRequest(nodeId, req);
                            }
                        }, plc);
                    }
                });

                return;
            }
        }

        Object topic;

        try {
            topic = U.unmarshal(marsh, req.responseTopicBytes(), U.resolveClassLoader(null, ctx.config()));
        }
        catch (IgniteCheckedException e) {
            U.error(log, "Failed to unmarshal topic from request: " + req, e);

            return;
        }

        ClassLoader clsLdr;

        if (req.forceLocalDeployment())
            clsLdr = U.gridClassLoader();
        else {
            GridDeployment dep = ctx.deploy().getGlobalDeployment(
                req.deploymentMode(),
                req.sampleClassName(),
                req.sampleClassName(),
                req.userVersion(),
                nodeId,
                req.classLoaderId(),
                req.participants(),
                null);

            if (dep == null) {
                sendResponse(nodeId,
                    topic,
                    req.requestId(),
                    new IgniteCheckedException("Failed to get deployment for request [sndId=" + nodeId +
                        ", req=" + req + ']'),
                    false);

                return;
            }

            clsLdr = dep.classLoader();
        }

        StreamReceiver<K, V> updater;

        try {
            updater = U.unmarshal(marsh, req.updaterBytes(), U.resolveClassLoader(clsLdr, ctx.config()));

            if (updater != null)
                ctx.resource().injectGeneric(updater);
        }
        catch (IgniteCheckedException e) {
            U.error(log, "Failed to unmarshal message [nodeId=" + nodeId + ", req=" + req + ']', e);

            sendResponse(nodeId, topic, req.requestId(), e, false);

            return;
        }

        localUpdate(nodeId, req, updater, topic);
    }
    finally {
        busyLock.leaveBusy();
    }
}
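The topology check at the start of the try block follows a general defer-until-ready pattern: if the sender observed a newer affinity topology than the local node has applied, processing is postponed until the corresponding affinity-ready future completes. A condensed sketch of the pattern (the class, method, and parameter names are illustrative; readyVer and readyFut stand in for the exchange-manager calls used above):

import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;

final class DeferUntilReadySketch {
    /** Runs {@code action} now if the local version has caught up, otherwise after {@code readyFut}. */
    static void runWhenAffinityReady(AffinityTopologyVersion readyVer, AffinityTopologyVersion required,
        IgniteInternalFuture<?> readyFut, Runnable action) {
        if (readyVer.compareTo(required) >= 0 || readyFut == null || readyFut.isDone()) {
            action.run();

            return;
        }

        readyFut.listen(f -> action.run()); // Re-run once the required topology version is ready.
    }
}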