Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project ignite by apache.
Class IgniteTxHandler, method prepareNearTx:
/**
 * @param originTx Transaction to copy.
 * @param nearNode Node that initiated the transaction.
 * @param req Near prepare request.
 * @return Prepare future or {@code null} if the operation needs to be retried.
 */
@Nullable private IgniteInternalFuture<GridNearTxPrepareResponse> prepareNearTx(
    final GridNearTxLocal originTx,
    final ClusterNode nearNode,
    final GridNearTxPrepareRequest req
) {
    IgniteTxEntry firstEntry;

    try {
        IgniteTxEntry firstWrite = unmarshal(req.writes());
        IgniteTxEntry firstRead = unmarshal(req.reads());

        firstEntry = firstWrite != null ? firstWrite : firstRead;
    }
    catch (IgniteCheckedException e) {
        return new GridFinishedFuture<>(e);
    }

    GridDhtTxLocal tx = null;

    GridCacheVersion mappedVer = ctx.tm().mappedVersion(req.version());

    if (mappedVer != null) {
        tx = ctx.tm().tx(mappedVer);

        if (tx == null)
            U.warn(log, "Missing local transaction for mapped near version [nearVer=" + req.version() +
                ", mappedVer=" + mappedVer + ']');
        else {
            if (req.concurrency() == PESSIMISTIC)
                tx.nearFutureId(req.futureId());
        }
    }
    else {
        GridDhtPartitionTopology top = null;

        if (req.firstClientRequest()) {
            assert firstEntry != null : req;
            assert req.concurrency() == OPTIMISTIC : req;
            assert nearNode.isClient() : nearNode;

            top = firstEntry.context().topology();

            top.readLock();

            if (req.allowWaitTopologyFuture()) {
                GridDhtTopologyFuture topFut = top.topologyVersionFuture();

                if (!topFut.isDone()) {
                    top.readUnlock();

                    return null;
                }
            }
        }

        try {
            if (top != null) {
                boolean retry = false;

                GridDhtTopologyFuture topFut = top.topologyVersionFuture();

                if (!req.allowWaitTopologyFuture() && !topFut.isDone()) {
                    retry = true;

                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Topology change is in progress, need remap transaction [" +
                            "txId=" + req.version() +
                            ", node=" + nearNode.id() +
                            ", reqTopVer=" + req.topologyVersion() +
                            ", locTopVer=" + top.readyTopologyVersion() +
                            ", req=" + req + ']');
                    }
                }

                if (!retry && needRemap(req.topologyVersion(), top.readyTopologyVersion(), req)) {
                    retry = true;

                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Topology version mismatch for near prepare, need remap transaction [" +
                            "txId=" + req.version() +
                            ", node=" + nearNode.id() +
                            ", reqTopVer=" + req.topologyVersion() +
                            ", locTopVer=" + top.readyTopologyVersion() +
                            ", req=" + req + ']');
                    }
                }

                if (retry) {
                    GridNearTxPrepareResponse res = new GridNearTxPrepareResponse(
                        req.partition(),
                        req.version(),
                        req.futureId(),
                        req.miniId(),
                        req.version(),
                        req.version(),
                        null,
                        null,
                        top.lastTopologyChangeVersion(),
                        req.onePhaseCommit(),
                        req.deployInfo() != null);

                    try {
                        ctx.io().send(nearNode, res, req.policy());

                        if (txPrepareMsgLog.isDebugEnabled()) {
                            txPrepareMsgLog.debug("Sent remap response for near prepare [txId=" + req.version() +
                                ", node=" + nearNode.id() + ']');
                        }
                    }
                    catch (ClusterTopologyCheckedException ignored) {
                        if (txPrepareMsgLog.isDebugEnabled()) {
                            txPrepareMsgLog.debug("Failed to send remap response for near prepare, node failed [" +
                                "txId=" + req.version() + ", node=" + nearNode.id() + ']');
                        }
                    }
                    catch (IgniteCheckedException e) {
                        U.error(txPrepareMsgLog, "Failed to send remap response for near prepare " +
                            "[txId=" + req.version() + ", node=" + nearNode.id() + ", req=" + req + ']', e);
                    }

                    return new GridFinishedFuture<>(res);
                }

                assert topFut.isDone();
            }

            tx = new GridDhtTxLocal(
                ctx,
                req.topologyVersion(),
                nearNode.id(),
                req.version(),
                req.futureId(),
                req.miniId(),
                req.threadId(),
                req.implicitSingle(),
                req.implicitSingle(),
                req.system(),
                req.explicitLock(),
                req.policy(),
                req.concurrency(),
                req.isolation(),
                req.timeout(),
                req.isInvalidate(),
                true,
                req.onePhaseCommit(),
                req.txSize(),
                req.transactionNodes(),
                securitySubjectId(ctx),
                req.taskNameHash(),
                req.txLabel(),
                originTx);

            tx = ctx.tm().onCreated(null, tx);

            if (tx != null)
                tx.topologyVersion(req.topologyVersion());
            else
                U.warn(log, "Failed to create local transaction (was transaction rolled back?) [xid=" +
                    req.version() + ", req=" + req + ']');
        }
        finally {
            if (tx != null)
                req.txState(tx.txState());

            if (top != null)
                top.readUnlock();
        }
    }

    if (tx != null) {
        req.txState(tx.txState());

        if (req.explicitLock())
            tx.explicitLock(true);

        tx.transactionNodes(req.transactionNodes());

        if (req.near())
            tx.nearOnOriginatingNode(true);

        if (req.onePhaseCommit()) {
            assert req.last() : req;

            tx.onePhaseCommit(true);
        }

        if (req.needReturnValue())
            tx.needReturnValue(true);

        IgniteInternalFuture<GridNearTxPrepareResponse> fut = tx.prepareAsync(req);

        if (tx.isRollbackOnly() && !tx.commitOnPrepare()) {
            if (tx.state() != TransactionState.ROLLED_BACK && tx.state() != TransactionState.ROLLING_BACK)
                tx.rollbackDhtLocalAsync();
        }

        final GridDhtTxLocal tx0 = tx;

        fut.listen(new CI1<IgniteInternalFuture<?>>() {
            @Override public void apply(IgniteInternalFuture<?> txFut) {
                try {
                    txFut.get();
                }
                catch (IgniteCheckedException e) {
                    // Just in case.
                    tx0.setRollbackOnly();

                    if (!X.hasCause(e, IgniteTxOptimisticCheckedException.class) &&
                        !X.hasCause(e, IgniteFutureCancelledException.class) &&
                        !ctx.kernalContext().isStopping())
                        U.error(log, "Failed to prepare DHT transaction: " + tx0, e);
                }
            }
        });

        return fut;
    }
    else
        return new GridFinishedFuture<>((GridNearTxPrepareResponse)null);
}
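
The method above relies on GridFinishedFuture to turn a synchronous outcome (an unmarshalling error, a remap response, or a null result) into an already-completed IgniteInternalFuture that callers handle exactly like an asynchronous one. A minimal, self-contained sketch of that convention follows; the class, method, and parse helper here are illustrative assumptions, not Ignite API.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;

/** Illustrative sketch (hypothetical names): an async API that reports early failures through the future. */
final class PrepareFutureSketch {
    /** Pretend payload type standing in for GridNearTxPrepareResponse. */
    static class Response {
    }

    /** Parses a request eagerly and returns either a failed or a null-result completed future. */
    static IgniteInternalFuture<Response> prepare(byte[] rawRequest) {
        try {
            parse(rawRequest); // May throw, like unmarshal(req.writes()) above.
        }
        catch (IgniteCheckedException e) {
            // Same convention as prepareNearTx: report the failure through the returned future.
            return new GridFinishedFuture<>(e);
        }

        // Nothing to prepare: return an already-completed future with a null result.
        return new GridFinishedFuture<>((Response)null);
    }

    /** Hypothetical parser, present only to make the sketch compile. */
    private static void parse(byte[] raw) throws IgniteCheckedException {
        if (raw == null || raw.length == 0)
            throw new IgniteCheckedException("Empty request.");
    }
}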
Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project ignite by apache.
Class GridNearAtomicUpdateFuture, method waitAndRemap:
/**
 * @param remapTopVer Topology version to wait for before remapping the update.
 */
private void waitAndRemap(AffinityTopologyVersion remapTopVer) {
    assert remapTopVer != null;

    if (topLocked) {
        assert !F.isEmpty(remapKeys) : remapKeys;

        CachePartialUpdateCheckedException e =
            new CachePartialUpdateCheckedException("Failed to update keys (retry update if possible).");

        ClusterTopologyCheckedException cause = new ClusterTopologyCheckedException(
            "Failed to update keys, topology changed while execute atomic update inside transaction.");

        cause.retryReadyFuture(cctx.shared().exchange().affinityReadyFuture(remapTopVer));

        e.add(remapKeys, cause);

        completeFuture(null, e, null);

        return;
    }

    IgniteInternalFuture<AffinityTopologyVersion> fut = cctx.shared().exchange().affinityReadyFuture(remapTopVer);

    if (fut == null)
        fut = new GridFinishedFuture<>(remapTopVer);

    fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
        @Override public void apply(final IgniteInternalFuture<AffinityTopologyVersion> fut) {
            cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() {
                @Override public void run() {
                    mapOnTopology();
                }
            });
        }
    });
}
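
waitAndRemap treats a missing affinity-ready future as "already ready" by substituting a GridFinishedFuture, so the listener path is identical whether or not a wait is needed. A small sketch of that null-to-completed-future fallback is shown below; the helper class and method names are illustrative assumptions.

import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.lang.IgniteInClosure;

/** Illustrative sketch: normalize a possibly-null readiness future before attaching a listener. */
final class ReadyFutureSketch {
    /** Runs {@code onReady} once the topology is ready, immediately if no wait is required. */
    static void whenReady(
        IgniteInternalFuture<AffinityTopologyVersion> readyFut, // May be null, as in waitAndRemap.
        AffinityTopologyVersion topVer,
        Runnable onReady
    ) {
        IgniteInternalFuture<AffinityTopologyVersion> fut = readyFut;

        if (fut == null)
            fut = new GridFinishedFuture<>(topVer); // Already ready: complete immediately.

        fut.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> f) {
                onReady.run(); // Fires right away for a finished future, later otherwise.
            }
        });
    }
}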
Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project ignite by apache.
Class GridDhtPartitionDemander, method forceRebalance:
/**
 * @return Rebalance future.
 */
IgniteInternalFuture<Boolean> forceRebalance() {
    GridTimeoutObject obj = lastTimeoutObj.getAndSet(null);

    if (obj != null)
        ctx.time().removeTimeoutObject(obj);

    final GridDhtPartitionsExchangeFuture exchFut = lastExchangeFut;

    if (exchFut != null) {
        if (log.isDebugEnabled())
            log.debug("Forcing rebalance event for future: " + exchFut);

        final GridFutureAdapter<Boolean> fut = new GridFutureAdapter<>();

        exchFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> t) {
                if (t.error() == null) {
                    IgniteInternalFuture<Boolean> fut0 = ctx.exchange().forceRebalance(exchFut.exchangeId());

                    fut0.listen(new IgniteInClosure<IgniteInternalFuture<Boolean>>() {
                        @Override public void apply(IgniteInternalFuture<Boolean> fut1) {
                            try {
                                fut.onDone(fut1.get());
                            }
                            catch (Exception e) {
                                fut.onDone(e);
                            }
                        }
                    });
                }
                else
                    fut.onDone(t.error());
            }
        });

        return fut;
    }
    else if (log.isDebugEnabled())
        log.debug("Ignoring force rebalance request (no topology event happened yet).");

    return new GridFinishedFuture<>(true);
}
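
forceRebalance returns new GridFinishedFuture<>(true) when there is no exchange future to wait on, so callers can always call get() or attach listeners without null checks; otherwise it bridges the pending work into a GridFutureAdapter. A reduced sketch of that either/or shape follows; the helper names are made up for illustration.

import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

/** Illustrative sketch: either bridge a pending operation into an adapter or return a trivially-done future. */
final class ForceActionSketch {
    /** Returns a future for a forced action, or a completed {@code true} future when nothing is pending. */
    static IgniteInternalFuture<Boolean> force(IgniteInternalFuture<?> pending) {
        if (pending == null)
            // Nothing to wait for: report success immediately.
            return new GridFinishedFuture<>(true);

        GridFutureAdapter<Boolean> res = new GridFutureAdapter<>();

        pending.listen(f -> {
            try {
                f.get(); // Propagate the outcome of the pending operation.

                res.onDone(true);
            }
            catch (Exception e) {
                res.onDone(e);
            }
        });

        return res;
    }
}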
Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project ignite by apache.
Class GridDhtColocatedCache, method loadAsync:
/**
 * @param keys Keys to load.
 * @param readThrough Read through flag.
 * @param forcePrimary Force get from primary node flag.
 * @param topVer Topology version.
 * @param taskName Task name.
 * @param deserializeBinary Deserialize binary flag.
 * @param recovery Recovery mode flag.
 * @param expiryPlc Expiry policy.
 * @param skipVals Skip values flag.
 * @param needVer If {@code true} returns values as tuples containing value and version.
 * @param keepCacheObj Keep cache objects flag.
 * @param txLbl Transaction label.
 * @param mvccSnapshot Mvcc snapshot.
 * @return Load future.
 */
public final IgniteInternalFuture<Map<K, V>> loadAsync(
    @Nullable Collection<KeyCacheObject> keys,
    boolean readThrough,
    boolean forcePrimary,
    AffinityTopologyVersion topVer,
    String taskName,
    boolean deserializeBinary,
    boolean recovery,
    @Nullable IgniteCacheExpiryPolicy expiryPlc,
    boolean skipVals,
    boolean needVer,
    boolean keepCacheObj,
    @Nullable String txLbl,
    @Nullable MvccSnapshot mvccSnapshot
) {
    assert (mvccSnapshot == null) == !ctx.mvccEnabled();

    if (keys == null || keys.isEmpty())
        return new GridFinishedFuture<>(Collections.<K, V>emptyMap());

    if (expiryPlc == null)
        expiryPlc = expiryPolicy(null);

    // Optimization: try to resolve value locally and escape 'get future' creation.
    if (!forcePrimary && ctx.config().isReadFromBackup() && ctx.affinityNode() &&
        ctx.topology().lostPartitions().isEmpty()) {
        ctx.shared().database().checkpointReadLock();

        try {
            Map<K, V> locVals = null;

            boolean success = true;
            boolean readNoEntry = ctx.readNoEntry(expiryPlc, false);
            boolean evt = !skipVals;

            for (KeyCacheObject key : keys) {
                if (readNoEntry) {
                    CacheDataRow row = mvccSnapshot != null ?
                        ctx.offheap().mvccRead(ctx, key, mvccSnapshot) :
                        ctx.offheap().read(ctx, key);

                    if (row != null) {
                        long expireTime = row.expireTime();

                        if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                            if (locVals == null)
                                locVals = U.newHashMap(keys.size());

                            ctx.addResult(
                                locVals,
                                key,
                                row.value(),
                                skipVals,
                                keepCacheObj,
                                deserializeBinary,
                                true,
                                null,
                                row.version(),
                                0,
                                0,
                                needVer,
                                U.deploymentClassLoader(ctx.kernalContext(),
                                    U.contextDeploymentClassLoaderId(ctx.kernalContext())));

                            if (evt) {
                                ctx.events().readEvent(key, null, txLbl, row.value(), taskName, !deserializeBinary);
                            }
                        }
                        else
                            success = false;
                    }
                    else
                        success = false;
                }
                else {
                    GridCacheEntryEx entry = null;

                    while (true) {
                        try {
                            entry = entryEx(key);
                            // If our DHT cache has the value, we peek it.
                            if (entry != null) {
                                boolean isNew = entry.isNewLocked();

                                EntryGetResult getRes = null;
                                CacheObject v = null;
                                GridCacheVersion ver = null;

                                if (needVer) {
                                    getRes = entry.innerGetVersioned(
                                        null,
                                        null,
                                        /*update-metrics*/false,
                                        /*event*/evt,
                                        null,
                                        taskName,
                                        expiryPlc,
                                        !deserializeBinary,
                                        null);

                                    if (getRes != null) {
                                        v = getRes.value();
                                        ver = getRes.version();
                                    }
                                }
                                else {
                                    v = entry.innerGet(
                                        null,
                                        null,
                                        /*read-through*/false,
                                        /*update-metrics*/false,
                                        /*event*/evt,
                                        null,
                                        taskName,
                                        expiryPlc,
                                        !deserializeBinary);
                                }

                                // Entry was not in memory or in swap, so we remove it from cache.
                                if (v == null) {
                                    GridCacheVersion obsoleteVer = nextVersion();

                                    if (isNew && entry.markObsoleteIfEmpty(obsoleteVer))
                                        removeEntry(entry);

                                    success = false;
                                }
                                else {
                                    if (locVals == null)
                                        locVals = U.newHashMap(keys.size());

                                    ctx.addResult(
                                        locVals,
                                        key,
                                        v,
                                        skipVals,
                                        keepCacheObj,
                                        deserializeBinary,
                                        true,
                                        getRes,
                                        ver,
                                        0,
                                        0,
                                        needVer,
                                        U.deploymentClassLoader(ctx.kernalContext(),
                                            U.contextDeploymentClassLoaderId(ctx.kernalContext())));
                                }
                            }
                            else
                                success = false;

                            break; // While.
                        }
                        catch (GridCacheEntryRemovedException ignored) {
                            // No-op, retry.
                        }
                        catch (GridDhtInvalidPartitionException ignored) {
                            success = false;

                            break; // While.
                        }
                        finally {
                            if (entry != null)
                                entry.touch();
                        }
                    }
                }

                if (!success)
                    break;
                else if (!skipVals && ctx.statisticsEnabled())
                    ctx.cache().metrics0().onRead(true);
            }

            if (success) {
                sendTtlUpdateRequest(expiryPlc);

                return new GridFinishedFuture<>(locVals);
            }
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
        finally {
            ctx.shared().database().checkpointReadUnlock();
        }
    }

    if (expiryPlc != null)
        expiryPlc.reset();

    // Either reload or not all values are available locally.
    GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(
        ctx,
        keys,
        readThrough,
        forcePrimary,
        taskName,
        deserializeBinary,
        recovery,
        expiryPlc,
        skipVals,
        needVer,
        keepCacheObj,
        txLbl,
        mvccSnapshot,
        null);

    fut.init(topVer);

    return fut;
}
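
loadAsync uses GridFinishedFuture for all of its fast paths: an empty key set, values fully resolved from local storage, and a checked exception raised while reading. A compact sketch of the same shape follows, with a hypothetical local lookup standing in for the off-heap/on-heap read; the class and abstract methods are assumptions for illustration.

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;

/** Illustrative sketch: resolve values locally when possible, otherwise fall back to a real async load. */
abstract class LocalFirstLoader<K, V> {
    /** Loads values for {@code keys}, completing synchronously whenever a local fast path applies. */
    IgniteInternalFuture<Map<K, V>> loadAsync(Collection<K> keys) {
        if (keys == null || keys.isEmpty())
            return new GridFinishedFuture<>(Collections.<K, V>emptyMap());

        Map<K, V> locVals = new HashMap<>();

        try {
            for (K key : keys) {
                V val = readLocally(key); // Hypothetical local read, like ctx.offheap().read(...) above.

                if (val == null)
                    return loadRemotely(keys); // Not everything is local: go through the distributed path.

                locVals.put(key, val);
            }
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }

        // All values were found locally: no remote future is needed.
        return new GridFinishedFuture<>(locVals);
    }

    /** Hypothetical local lookup. */
    protected abstract V readLocally(K key) throws IgniteCheckedException;

    /** Hypothetical distributed load used when the local fast path does not apply. */
    protected abstract IgniteInternalFuture<Map<K, V>> loadRemotely(Collection<K> keys);
}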
Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project ignite by apache.
Class CachePartitionDefragmentationManager, method executeDefragmentation:
/**
 * Executes defragmentation for the scheduled cache groups.
 *
 * @throws IgniteCheckedException If failed.
 */
public void executeDefragmentation() throws IgniteCheckedException {
    Map<Integer, List<CacheDataStore>> oldStores = new HashMap<>();

    for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
        int grpId = oldGrpCtx.groupId();

        final IgniteCacheOffheapManager offheap = oldGrpCtx.offheap();

        List<CacheDataStore> oldCacheDataStores = stream(offheap.cacheDataStores().spliterator(), false)
            .filter(store -> {
                try {
                    return filePageStoreMgr.exists(grpId, store.partId());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            })
            .collect(Collectors.toList());

        oldStores.put(grpId, oldCacheDataStores);
    }

    int partitionCount = oldStores.values().stream().mapToInt(List::size).sum();

    status.onStart(cacheGrpCtxsForDefragmentation, partitionCount);

    try {
        // Now the actual process starts.
        IgniteInternalFuture<?> idxDfrgFut = null;
        DataPageEvictionMode prevPageEvictionMode = null;

        for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
            int grpId = oldGrpCtx.groupId();

            File workDir = filePageStoreMgr.cacheWorkDir(oldGrpCtx.sharedGroup(), oldGrpCtx.cacheOrGroupName());

            List<CacheDataStore> oldCacheDataStores = oldStores.get(grpId);

            if (skipAlreadyDefragmentedCacheGroup(workDir, grpId, log)) {
                status.onCacheGroupSkipped(oldGrpCtx, oldCacheDataStores.size());

                continue;
            }

            try {
                GridCacheOffheapManager offheap = (GridCacheOffheapManager)oldGrpCtx.offheap();

                status.onCacheGroupStart(oldGrpCtx, oldCacheDataStores.size());

                if (workDir == null || oldCacheDataStores.isEmpty()) {
                    status.onCacheGroupFinish(oldGrpCtx);

                    continue;
                }

                // We can't start defragmentation of new group on the region that has wrong eviction mode.
                // So waiting of the previous cache group defragmentation is inevitable.
                DataPageEvictionMode curPageEvictionMode = oldGrpCtx.dataRegion().config().getPageEvictionMode();

                if (prevPageEvictionMode == null || prevPageEvictionMode != curPageEvictionMode) {
                    prevPageEvictionMode = curPageEvictionMode;

                    partDataRegion.config().setPageEvictionMode(curPageEvictionMode);

                    if (idxDfrgFut != null)
                        idxDfrgFut.get();
                }

                IntMap<CacheDataStore> cacheDataStores = new IntHashMap<>();

                for (CacheDataStore store : offheap.cacheDataStores()) {
                    // This would mean that these partitions are empty.
                    assert store.tree() == null || store.tree().groupId() == grpId;

                    if (store.tree() != null)
                        cacheDataStores.put(store.partId(), store);
                }

                dbMgr.checkpointedDataRegions().remove(oldGrpCtx.dataRegion());
                // Another cheat: the TTL cleanup manager knows too much about these caches, so unregister them explicitly.
                oldGrpCtx.caches().stream()
                    .filter(cacheCtx -> cacheCtx.groupId() == grpId)
                    .forEach(cacheCtx -> cacheCtx.ttl().unregister());

                // Technically wal is already disabled, but "PageHandler.isWalDeltaRecordNeeded" doesn't care
                // and WAL records will be allocated anyway just to be ignored later if we don't disable WAL for
                // cache group explicitly.
                oldGrpCtx.localWalEnabled(false, false);

                boolean encrypted = oldGrpCtx.config().isEncryptionEnabled();

                FileVersionCheckingFactory pageStoreFactory = filePageStoreMgr.getPageStoreFactory(grpId, encrypted);

                AtomicLong idxAllocationTracker = new GridAtomicLong();

                createIndexPageStore(grpId, workDir, pageStoreFactory, partDataRegion, idxAllocationTracker::addAndGet);

                checkCancellation();

                GridCompoundFuture<Object, Object> cmpFut = new GridCompoundFuture<>();

                PageMemoryEx oldPageMem = (PageMemoryEx)oldGrpCtx.dataRegion().pageMemory();

                CacheGroupContext newGrpCtx = new CacheGroupContext(
                    sharedCtx,
                    grpId,
                    oldGrpCtx.receivedFrom(),
                    CacheType.USER,
                    oldGrpCtx.config(),
                    oldGrpCtx.affinityNode(),
                    partDataRegion,
                    oldGrpCtx.cacheObjectContext(),
                    null,
                    null,
                    oldGrpCtx.localStartVersion(),
                    true,
                    false,
                    true);

                defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock();

                try {
                    // This will initialize partition meta in index partition - meta tree and reuse list.
                    newGrpCtx.start();
                }
                finally {
                    defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock();
                }

                IgniteUtils.doInParallel(
                    defragmentationThreadPool,
                    oldCacheDataStores,
                    oldCacheDataStore -> defragmentOnePartition(
                        oldGrpCtx,
                        grpId,
                        workDir,
                        offheap,
                        pageStoreFactory,
                        cmpFut,
                        oldPageMem,
                        newGrpCtx,
                        oldCacheDataStore));
                // A bit too general for now, but I like it more than saving only the last checkpoint future.
                cmpFut.markInitialized().get();

                idxDfrgFut = new GridFinishedFuture<>();

                if (filePageStoreMgr.hasIndexStore(grpId)) {
                    defragmentIndexPartition(oldGrpCtx, newGrpCtx);

                    idxDfrgFut = defragmentationCheckpoint.forceCheckpoint("index defragmented", null).futureFor(FINISHED);
                }

                PageStore oldIdxPageStore = filePageStoreMgr.getStore(grpId, INDEX_PARTITION);

                idxDfrgFut = idxDfrgFut.chain(fut -> {
                    if (log.isDebugEnabled()) {
                        log.debug(S.toString(
                            "Index partition defragmented",
                            "grpId", grpId, false,
                            "oldPages", oldIdxPageStore.pages(), false,
                            "newPages", idxAllocationTracker.get() + 1, false,
                            "pageSize", pageSize, false,
                            "partFile", defragmentedIndexFile(workDir).getName(), false,
                            "workDir", workDir, false));
                    }

                    oldPageMem.invalidate(grpId, INDEX_PARTITION);

                    PageMemoryEx partPageMem = (PageMemoryEx)partDataRegion.pageMemory();

                    partPageMem.invalidate(grpId, INDEX_PARTITION);

                    DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partPageMem.pageManager();

                    pageMgr.pageStoreMap().removePageStore(grpId, INDEX_PARTITION);

                    PageMemoryEx mappingPageMem = (PageMemoryEx)mappingDataRegion.pageMemory();

                    pageMgr = (DefragmentationPageReadWriteManager)mappingPageMem.pageManager();

                    pageMgr.pageStoreMap().clear(grpId);

                    renameTempIndexFile(workDir);

                    writeDefragmentationCompletionMarker(filePageStoreMgr.getPageStoreFileIoFactory(), workDir, log);

                    batchRenameDefragmentedCacheGroupPartitions(workDir, log);

                    return null;
                });

                status.onIndexDefragmented(
                    oldGrpCtx,
                    oldIdxPageStore.size(),
                    // + file header.
                    pageSize + idxAllocationTracker.get() * pageSize);
            }
            catch (DefragmentationCancelledException e) {
                DefragmentationFileUtils.deleteLeftovers(workDir);

                throw e;
            }

            status.onCacheGroupFinish(oldGrpCtx);
        }

        if (idxDfrgFut != null)
            idxDfrgFut.get();

        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);

        status.onFinish();

        completionFut.onDone();
    }
    catch (DefragmentationCancelledException e) {
        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);

        log.info("Defragmentation process has been cancelled.");

        status.onFinish();

        completionFut.onDone();
    }
    catch (Throwable t) {
        log.error("Defragmentation process failed.", t);

        status.onFinish();

        completionFut.onDone(t);

        throw t;
    }
    finally {
        defragmentationCheckpoint.stop(true);
    }
}
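
Here the no-argument GridFinishedFuture acts as a neutral, already-completed placeholder: idxDfrgFut can be chained and awaited the same way whether or not an index checkpoint was actually scheduled. A minimal sketch of that placeholder-then-chain pattern follows; the step and cleanup methods are invented for illustration.

import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;

/** Illustrative sketch: start from a completed placeholder so chaining works with or without real async work. */
final class PlaceholderChainSketch {
    /** Runs an optional async step and always chains a finalization action after it. */
    static IgniteInternalFuture<?> finishAfterOptionalStep(boolean stepNeeded) {
        // Neutral "already done" placeholder, as with idxDfrgFut above.
        IgniteInternalFuture<?> stepFut = new GridFinishedFuture<>();

        if (stepNeeded)
            stepFut = runStepAsync(); // Hypothetical async step, like the forced index checkpoint.

        // The chained closure runs immediately for the placeholder and after completion otherwise.
        return stepFut.chain(fut -> {
            finalizeWork(); // Hypothetical cleanup, like renaming files and writing the completion marker.

            return null;
        });
    }

    /** Hypothetical asynchronous step. */
    private static IgniteInternalFuture<?> runStepAsync() {
        return new GridFinishedFuture<>(Boolean.TRUE);
    }

    /** Hypothetical finalization action. */
    private static void finalizeWork() {
        // No-op in this sketch.
    }
}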