Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache: the class DataStreamerImpl, method load0.
/**
* @param entries Entries.
* @param resFut Result future.
* @param activeKeys Active keys.
* @param remaps Remaps count.
*/
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut, @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps) {
    try {
        assert entries != null;
        final boolean remap = remaps > 0;
        if (!remap) {
            // Failed data should be processed prior to new data.
            acquireRemapSemaphore();
        }
        if (!isWarningPrinted) {
            synchronized (this) {
                if (!allowOverwrite() && !isWarningPrinted) {
                    U.warn(log, "Data streamer will not overwrite existing cache entries for better performance " + "(to change, set allowOverwrite to true)");
                }
                isWarningPrinted = true;
            }
        }
        Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();
        boolean initPda = ctx.deploy().enabled() && jobPda == null;
        GridCacheAdapter cache = ctx.cache().internalCache(cacheName);
        if (cache == null)
            throw new IgniteCheckedException("Cache not created or already destroyed.");
        GridCacheContext cctx = cache.context();
        GridCacheGateway gate = null;
        AffinityTopologyVersion topVer;
        if (!cctx.isLocal())
            topVer = ctx.cache().context().exchange().lastTopologyFuture().get();
        else
            topVer = ctx.cache().context().exchange().readyAffinityVersion();
        List<List<ClusterNode>> assignments = cctx.affinity().assignments(topVer);
        if (!allowOverwrite() && !cctx.isLocal()) {
            // Cases where cctx required.
            gate = cctx.gate();
            gate.enter();
        }
        try {
            for (DataStreamerEntry entry : entries) {
                List<ClusterNode> nodes;
                try {
                    KeyCacheObject key = entry.getKey();
                    assert key != null;
                    if (initPda) {
                        if (cacheObjCtx.addDeploymentInfo())
                            jobPda = new DataStreamerPda(key.value(cacheObjCtx, false), entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null, rcvr);
                        else if (rcvr != null)
                            jobPda = new DataStreamerPda(rcvr);
                        initPda = false;
                    }
                    if (key.partition() == -1)
                        key.partition(cctx.affinity().partition(key, false));
                    nodes = nodes(key, topVer, cctx);
                } catch (IgniteCheckedException e) {
                    resFut.onDone(e);
                    return;
                }
                if (F.isEmpty(nodes)) {
                    resFut.onDone(new ClusterTopologyException("Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));
                    return;
                }
                for (ClusterNode node : nodes) {
                    Collection<DataStreamerEntry> col = mappings.get(node);
                    if (col == null)
                        mappings.put(node, col = new ArrayList<>());
                    col.add(entry);
                }
            }
            for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
                final UUID nodeId = e.getKey().id();
                Buffer buf = bufMappings.get(nodeId);
                if (buf == null) {
                    Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));
                    if (old != null)
                        buf = old;
                }
                final Collection<DataStreamerEntry> entriesForNode = e.getValue();
                IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {
                    @Override
                    public void apply(IgniteInternalFuture<?> t) {
                        try {
                            t.get();
                            if (activeKeys != null) {
                                for (DataStreamerEntry e : entriesForNode)
                                    activeKeys.remove(new KeyCacheObjectWrapper(e.getKey()));
                                if (activeKeys.isEmpty())
                                    resFut.onDone();
                            } else {
                                assert entriesForNode.size() == 1;
                                // That has been a single key,
                                // so complete result future right away.
                                resFut.onDone();
                            }
                        } catch (IgniteClientDisconnectedCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');
                            resFut.onDone(e1);
                        } catch (IgniteCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');
                            if (cancelled) {
                                resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " + DataStreamerImpl.this, e1));
                            } else if (remaps + 1 > maxRemapCnt) {
                                resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " + remaps, e1));
                            } else {
                                try {
                                    remapSem.acquire();
                                    final Runnable r = new Runnable() {
                                        @Override
                                        public void run() {
                                            try {
                                                if (cancelled)
                                                    closedException();
                                                load0(entriesForNode, resFut, activeKeys, remaps + 1);
                                            } catch (Throwable ex) {
                                                resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed. ", ex));
                                            } finally {
                                                remapSem.release();
                                            }
                                        }
                                    };
                                    dataToRemap.add(r);
                                    if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
                                        ctx.closure().callLocalSafe(new GPC<Boolean>() {
                                            @Override
                                            public Boolean call() {
                                                boolean locked = true;
                                                while (locked || !dataToRemap.isEmpty()) {
                                                    if (!locked && !remapOwning.compareAndSet(false, true))
                                                        return false;
                                                    try {
                                                        Runnable r = dataToRemap.poll();
                                                        if (r != null)
                                                            r.run();
                                                    } finally {
                                                        if (!dataToRemap.isEmpty())
                                                            locked = true;
                                                        else {
                                                            remapOwning.set(false);
                                                            locked = false;
                                                        }
                                                    }
                                                }
                                                return true;
                                            }
                                        }, true);
                                    }
                                } catch (InterruptedException e2) {
                                    resFut.onDone(e2);
                                }
                            }
                        }
                    }
                };
                GridCompoundFuture opFut = new SilentCompoundFuture();
                opFut.listen(lsnr);
                final List<GridFutureAdapter<?>> futs;
                try {
                    futs = buf.update(entriesForNode, topVer, assignments, opFut, remap);
                    opFut.markInitialized();
                } catch (IgniteInterruptedCheckedException e1) {
                    resFut.onDone(e1);
                    return;
                }
                if (ctx.discovery().node(nodeId) == null) {
                    if (bufMappings.remove(nodeId, buf)) {
                        final Buffer buf0 = buf;
                        waitAffinityAndRun(new Runnable() {
                            @Override
                            public void run() {
                                buf0.onNodeLeft();
                                if (futs != null) {
                                    Throwable ex = new ClusterTopologyCheckedException("Failed to wait for request completion (node has left): " + nodeId);
                                    for (int i = 0; i < futs.size(); i++)
                                        futs.get(i).onDone(ex);
                                }
                            }
                        }, ctx.discovery().topologyVersion(), false);
                    }
                }
            }
        } finally {
            if (gate != null)
                gate.leave();
        }
    } catch (Exception ex) {
        resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
    }
}
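For context, load0 is internal plumbing that is normally reached through the public IgniteDataStreamer API. Below is a minimal usage sketch, assuming a cache named "myCache" and a locally started node (both names are illustrative, not taken from the snippet above); it also shows the allowOverwrite setting that the warning in load0 refers to.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class StreamerUsageSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Illustrative cache name; create it up front so the streamer can resolve it.
            ignite.getOrCreateCache("myCache");

            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("myCache")) {
                // Keep the default allowOverwrite == false for maximum throughput;
                // load0 then logs the "will not overwrite existing cache entries" warning once.
                streamer.allowOverwrite(false);

                for (int i = 0; i < 10_000; i++)
                    streamer.addData(i, "value-" + i); // Buffered and mapped to primary nodes by affinity.

                streamer.flush(); // Blocks until buffered entries are acknowledged.
            }
        }
    }
}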
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache: the class DataStreamerImpl, method waitAffinityAndRun.
/**
* @param c Closure to run.
* @param topVer Topology version to wait for.
* @param async Async flag.
*/
private void waitAffinityAndRun(final Runnable c, long topVer, boolean async) {
    AffinityTopologyVersion topVer0 = new AffinityTopologyVersion(topVer, 0);
    IgniteInternalFuture<?> fut = ctx.cache().context().exchange().affinityReadyFuture(topVer0);
    if (fut != null && !fut.isDone()) {
        fut.listen(new CI1<IgniteInternalFuture<?>>() {
            @Override
            public void apply(IgniteInternalFuture<?> fut) {
                ctx.closure().runLocalSafe(c, true);
            }
        });
    } else {
        if (async)
            ctx.closure().runLocalSafe(c, true);
        else
            c.run();
    }
}
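Since every snippet on this page revolves around AffinityTopologyVersion, here is a small sketch of how its values are built and ordered, assuming only the (major, minor) constructor and topologyVersion() accessor used in the snippets above. The interpretation of the minor counter given in the comments is an assumption and is not stated in the code above.

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;

public class TopologyVersionSketch {
    public static void main(String[] args) {
        // Major part mirrors the discovery topology version, as in new AffinityTopologyVersion(topVer, 0) above.
        AffinityTopologyVersion base = new AffinityTopologyVersion(5, 0);

        // Assumed meaning: the minor part is bumped by exchanges that do not change the discovery version.
        AffinityTopologyVersion minorBump = new AffinityTopologyVersion(5, 1);

        assert base.topologyVersion() == 5;
        assert base.compareTo(minorBump) < 0; // Ordered by major version first, then by minor version.
    }
}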
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache: the class GridCacheAtomicSequenceImpl, method internalUpdate.
/**
* Synchronous sequence update operation. Will add given amount to the sequence value.
*
* @param l Increment amount.
* @param updateCall Cache call that will update sequence reservation count in accordance with l.
* @param updated If {@code true}, will return sequence value after update, otherwise will return sequence value
* prior to update.
* @return Sequence value.
* @throws IgniteCheckedException If update failed.
*/
@SuppressWarnings("SignalWithoutCorrespondingAwait")
private long internalUpdate(long l, @Nullable Callable<Long> updateCall, boolean updated) throws IgniteCheckedException {
    checkRemoved();
    assert l > 0;
    localUpdate.lock();
    try {
        // If reserved range isn't exhausted.
        long locVal0 = locVal;
        if (locVal0 + l <= upBound) {
            locVal = locVal0 + l;
            return updated ? locVal0 + l : locVal0;
        }
    } finally {
        localUpdate.unlock();
    }
    AffinityTopologyVersion lockedVer = ctx.shared().lockedTopologyVersion(null);
    // We need two separate locks here because two independent threads may attempt to update the sequence
    // simultaneously: one thread with a locked topology and the other with an unlocked one.
    // We cannot use the same lock for both cases because that leads to a deadlock when the free-topology thread
    // waits for a topology change while the locked-topology thread waits to acquire the lock.
    // If a thread has locked the topology, it must bypass synchronization with non-locked threads, but at the same
    // time we do not want multiple threads to attempt to run identical cache updates.
    ReentrantLock distLock = lockedVer == null ? distUpdateFreeTop : distUpdateLockedTop;
    distLock.lock();
    try {
        if (updateCall == null)
            updateCall = internalUpdate(l, updated);
        try {
            return updateCall.call();
        } catch (IgniteCheckedException | IgniteException | IllegalStateException e) {
            throw e;
        } catch (Exception e) {
            throw new IgniteCheckedException(e);
        }
    } finally {
        distLock.unlock();
    }
}
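internalUpdate backs the public IgniteAtomicSequence operations: it serves the increment from the locally reserved range when possible, and otherwise takes one of the two distributed-update locks. A minimal usage sketch follows, assuming a sequence named "orderIds" on a locally started node (the name is illustrative); the mapping of each call to the updated flag is inferred from the javadoc above.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteAtomicSequence;
import org.apache.ignite.Ignition;

public class SequenceUsageSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // create = true: create the sequence with initial value 0 if it does not exist yet.
            IgniteAtomicSequence seq = ignite.atomicSequence("orderIds", 0, true);

            long next = seq.incrementAndGet();    // Value after the update (updated == true above).
            long batchStart = seq.getAndAdd(100); // Value prior to the update (updated == false above).

            System.out.println("next=" + next + ", batchStart=" + batchStart);
        }
    }
}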
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache: the class GridNearOptimisticTxPrepareFuture, method prepareSingle.
/**
* @param write Write.
* @param topLocked {@code True} if thread already acquired lock preventing topology change.
* @param remap Remap flag.
*/
private void prepareSingle(IgniteTxEntry write, boolean topLocked, boolean remap) {
    write.clearEntryReadVersion();
    AffinityTopologyVersion topVer = tx.topologyVersion();
    assert topVer.topologyVersion() > 0;
    txMapping = new GridDhtTxMapping();
    GridDistributedTxMapping mapping = map(write, topVer, null, topLocked, remap);
    if (isDone()) {
        if (log.isDebugEnabled())
            log.debug("Abandoning (re)map because future is done: " + this);
        return;
    }
    if (mapping.primary().isLocal()) {
        if (write.context().isNear())
            tx.nearLocallyMapped(true);
        else if (write.context().isColocated())
            tx.colocatedLocallyMapped(true);
    }
    if (keyLockFut != null)
        keyLockFut.onAllKeysAdded();
    tx.addSingleEntryMapping(mapping, write);
    cctx.mvcc().recheckPendingLocks();
    mapping.last(true);
    tx.transactionNodes(txMapping.transactionNodes());
    if (!write.context().isNear())
        checkOnePhase(txMapping);
    assert !(mapping.hasColocatedCacheEntries() && mapping.hasNearCacheEntries()) : mapping;
    proceedPrepare(mapping, null);
}
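prepareSingle handles the single-write case of the near optimistic prepare phase. The sketch below shows the kind of user code that reaches it, assuming an explicitly transactional cache and a one-key write in an OPTIMISTIC/SERIALIZABLE transaction; the cache name and key are illustrative, and the exact path taken inside the prepare future is an implementation detail.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class OptimisticTxSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("txCache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL); // Transactions require a TRANSACTIONAL cache.

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.OPTIMISTIC, TransactionIsolation.SERIALIZABLE)) {
                cache.put(1, "one"); // A single write entry is enlisted in the near transaction.

                tx.commit(); // Commit runs the optimistic prepare step before applying the write.
            }
        }
    }
}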
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache: the class GridNearPessimisticTxPrepareFuture, method preparePessimistic.
/**
*/
private void preparePessimistic() {
    Map<UUID, GridDistributedTxMapping> mappings = new HashMap<>();
    AffinityTopologyVersion topVer = tx.topologyVersion();
    GridDhtTxMapping txMapping = new GridDhtTxMapping();
    boolean hasNearCache = false;
    for (IgniteTxEntry txEntry : tx.allEntries()) {
        txEntry.clearEntryReadVersion();
        GridCacheContext cacheCtx = txEntry.context();
        if (cacheCtx.isNear())
            hasNearCache = true;
        List<ClusterNode> nodes;
        if (!cacheCtx.isLocal()) {
            GridDhtPartitionTopology top = cacheCtx.topology();
            nodes = top.nodes(cacheCtx.affinity().partition(txEntry.key()), topVer);
        } else
            nodes = cacheCtx.affinity().nodesByKey(txEntry.key(), topVer);
        if (F.isEmpty(nodes)) {
            onDone(new ClusterTopologyServerNotFoundException("Failed to map keys to nodes (partition " + "is not mapped to any node) [key=" + txEntry.key() + ", partition=" + cacheCtx.affinity().partition(txEntry.key()) + ", topVer=" + topVer + ']'));
            return;
        }
        ClusterNode primary = nodes.get(0);
        GridDistributedTxMapping nodeMapping = mappings.get(primary.id());
        if (nodeMapping == null)
            mappings.put(primary.id(), nodeMapping = new GridDistributedTxMapping(primary));
        txEntry.nodeId(primary.id());
        nodeMapping.add(txEntry);
        txMapping.addMapping(nodes);
    }
    tx.transactionNodes(txMapping.transactionNodes());
    if (!hasNearCache)
        checkOnePhase(txMapping);
    long timeout = tx.remainingTime();
    if (timeout == -1) {
        onDone(new IgniteTxTimeoutCheckedException("Transaction timed out and was rolled back: " + tx));
        return;
    }
    int miniId = 0;
    Map<UUID, Collection<UUID>> txNodes = txMapping.transactionNodes();
    for (final GridDistributedTxMapping m : mappings.values()) {
        final ClusterNode primary = m.primary();
        if (primary.isLocal()) {
            if (m.hasNearCacheEntries() && m.hasColocatedCacheEntries()) {
                GridNearTxPrepareRequest nearReq = createRequest(txMapping.transactionNodes(), m, timeout, m.nearEntriesReads(), m.nearEntriesWrites());
                prepareLocal(nearReq, m, ++miniId, true);
                GridNearTxPrepareRequest colocatedReq = createRequest(txNodes, m, timeout, m.colocatedEntriesReads(), m.colocatedEntriesWrites());
                prepareLocal(colocatedReq, m, ++miniId, false);
            } else {
                GridNearTxPrepareRequest req = createRequest(txNodes, m, timeout, m.reads(), m.writes());
                prepareLocal(req, m, ++miniId, m.hasNearCacheEntries());
            }
        } else {
            GridNearTxPrepareRequest req = createRequest(txNodes, m, timeout, m.reads(), m.writes());
            final MiniFuture fut = new MiniFuture(m, ++miniId);
            req.miniId(fut.futureId());
            add(fut);
            try {
                cctx.io().send(primary, req, tx.ioPolicy());
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("Near pessimistic prepare, sent request [txId=" + tx.nearXidVersion() + ", node=" + primary.id() + ']');
                }
            } catch (ClusterTopologyCheckedException e) {
                e.retryReadyFuture(cctx.nextAffinityReadyFuture(topVer));
                fut.onNodeLeft(e);
            } catch (IgniteCheckedException e) {
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("Near pessimistic prepare, failed send request [txId=" + tx.nearXidVersion() + ", node=" + primary.id() + ", err=" + e + ']');
                }
                fut.onError(e);
                break;
            }
        }
    }
    markInitialized();
}
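preparePessimistic maps every enlisted key to its primary node, groups entries per primary, sends one prepare request per mapping, and fails fast when the transaction timeout has already expired. Below is a minimal pessimistic counterpart to the previous sketch, with an explicit timeout so the remainingTime() check above is relevant; the cache name and keys are again illustrative.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class PessimisticTxSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("accounts");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            // 5 s timeout, expected size 2; if it expires before prepare, the prepare future
            // completes with a transaction timeout exception, as in the remainingTime() check above.
            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ, 5_000, 2)) {
                cache.put(1, "debit");
                cache.put(2, "credit");

                tx.commit(); // Writes are grouped per primary node and prepare requests are sent.
            }
        }
    }
}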