Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class SnapshotRestoreProcess, method rollback.
/**
 * @param reqId Request ID.
 * @return Result future.
 */
private IgniteInternalFuture<Boolean> rollback(UUID reqId) {
    if (ctx.clientNode())
        return new GridFinishedFuture<>();

    SnapshotRestoreContext opCtx0 = opCtx;

    if (opCtx0 == null || F.isEmpty(opCtx0.dirs))
        return new GridFinishedFuture<>();

    GridFutureAdapter<Boolean> retFut = new GridFutureAdapter<>();

    synchronized (this) {
        opCtx0.stopFut = new IgniteFutureImpl<>(retFut.chain(f -> null));
    }

    try {
        ctx.cache().context().snapshotMgr().snapshotExecutorService().execute(() -> {
            if (log.isInfoEnabled()) {
                log.info("Removing restored cache directories [reqId=" + reqId +
                    ", snapshot=" + opCtx0.snpName + ", dirs=" + opCtx0.dirs + ']');
            }

            IgniteCheckedException ex = null;

            for (File cacheDir : opCtx0.dirs) {
                File tmpCacheDir = formatTmpDirName(cacheDir);

                if (tmpCacheDir.exists() && !U.delete(tmpCacheDir)) {
                    log.error("Unable to perform rollback routine completely, cannot remove temp directory " +
                        "[reqId=" + reqId + ", snapshot=" + opCtx0.snpName + ", dir=" + tmpCacheDir + ']');

                    ex = new IgniteCheckedException("Unable to remove temporary cache directory " + tmpCacheDir);
                }

                if (cacheDir.exists() && !U.delete(cacheDir)) {
                    log.error("Unable to perform rollback routine completely, cannot remove cache directory " +
                        "[reqId=" + reqId + ", snapshot=" + opCtx0.snpName + ", dir=" + cacheDir + ']');

                    ex = new IgniteCheckedException("Unable to remove cache directory " + cacheDir);
                }
            }

            if (ex != null)
                retFut.onDone(ex);
            else
                retFut.onDone(true);
        });
    }
    catch (RejectedExecutionException e) {
        log.error("Unable to perform rollback routine, task has been rejected " +
            "[reqId=" + reqId + ", snapshot=" + opCtx0.snpName + ']');

        retFut.onDone(e);
    }

    return retFut;
}
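Taken together, rollback() shows the canonical GridFutureAdapter hand-off: the adapter is created up front, a task submitted to an executor completes it with onDone(result) or onDone(error), and a RejectedExecutionException on submission completes it immediately. Below is a minimal, self-contained sketch of that same pattern; the class name, executor, and directory list are hypothetical, and only GridFutureAdapter, IgniteInternalFuture, and IgniteCheckedException come from ignite-core.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class RollbackPatternSketch {
    static IgniteInternalFuture<Boolean> runCleanup(ExecutorService exec) {
        GridFutureAdapter<Boolean> retFut = new GridFutureAdapter<>();

        try {
            exec.execute(() -> {
                // Mirror the cleanup loop: remember the last failure instead of
                // aborting, so every directory gets a removal attempt.
                IgniteCheckedException ex = null;

                for (String dir : new String[] {"grp-a", "grp-b"}) {
                    boolean deleted = true; // stand-in for U.delete(dir)

                    if (!deleted)
                        ex = new IgniteCheckedException("Unable to remove " + dir);
                }

                if (ex != null)
                    retFut.onDone(ex); // completes the future exceptionally
                else
                    retFut.onDone(true);
            });
        }
        catch (RejectedExecutionException e) {
            retFut.onDone(e); // executor refused the task: fail fast
        }

        return retFut;
    }

    public static void main(String[] args) throws IgniteCheckedException {
        ExecutorService exec = Executors.newSingleThreadExecutor();

        System.out.println("cleanup finished: " + runCleanup(exec).get());

        exec.shutdown();
    }
}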
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class SnapshotRestoreProcess, method preload.
/**
 * @param reqId Request id.
 * @return Future which will be completed when the preload ends.
 */
private IgniteInternalFuture<Boolean> preload(UUID reqId) {
    if (ctx.clientNode())
        return new GridFinishedFuture<>();

    SnapshotRestoreContext opCtx0 = opCtx;

    GridFutureAdapter<Boolean> retFut = new GridFutureAdapter<>();

    if (opCtx0 == null)
        return new GridFinishedFuture<>(new IgniteCheckedException("Snapshot restore process has incorrect restore state: " + reqId));

    if (opCtx0.dirs.isEmpty())
        return new GridFinishedFuture<>();

    try {
        if (ctx.isStopping())
            throw new NodeStoppingException("Node is stopping: " + ctx.localNodeId());

        Set<SnapshotMetadata> allMetas =
            opCtx0.metasPerNode.values().stream().flatMap(List::stream).collect(Collectors.toSet());

        AbstractSnapshotVerificationTask.checkMissedMetadata(allMetas);

        IgniteSnapshotManager snpMgr = ctx.cache().context().snapshotMgr();

        synchronized (this) {
            opCtx0.stopFut = new IgniteFutureImpl<>(retFut.chain(f -> null));
        }

        if (log.isInfoEnabled()) {
            log.info("Starting snapshot preload operation to restore cache groups [reqId=" + reqId +
                ", snapshot=" + opCtx0.snpName +
                ", caches=" + F.transform(opCtx0.dirs, FilePageStoreManager::cacheGroupName) + ']');
        }

        CompletableFuture<Void> metaFut = ctx.localNodeId().equals(opCtx0.opNodeId) ?
            CompletableFuture.runAsync(() -> {
                try {
                    SnapshotMetadata meta = F.first(opCtx0.metasPerNode.get(opCtx0.opNodeId));

                    File binDir = binaryWorkDir(snpMgr.snapshotLocalDir(opCtx0.snpName).getAbsolutePath(), meta.folderName());

                    ctx.cacheObjects().updateMetadata(binDir, opCtx0.stopChecker);
                }
                catch (Throwable t) {
                    log.error("Unable to perform metadata update operation for the cache groups restore process", t);

                    opCtx0.errHnd.accept(t);
                }
            }, snpMgr.snapshotExecutorService()) : CompletableFuture.completedFuture(null);

        Map<String, GridAffinityAssignmentCache> affCache = new HashMap<>();

        for (StoredCacheData data : opCtx0.cfgs.values())
            affCache.computeIfAbsent(CU.cacheOrGroupName(data.config()), grp -> calculateAffinity(ctx, data.config(), opCtx0.discoCache));

        Map<Integer, Set<PartitionRestoreFuture>> allParts = new HashMap<>();
        Map<Integer, Set<PartitionRestoreFuture>> rmtLoadParts = new HashMap<>();

        ClusterNode locNode = ctx.cache().context().localNode();
        List<SnapshotMetadata> locMetas = opCtx0.metasPerNode.get(locNode.id());

        // First preload everything from the local node.
        for (File dir : opCtx0.dirs) {
            String cacheOrGrpName = cacheGroupName(dir);
            int grpId = CU.cacheId(cacheOrGrpName);

            File tmpCacheDir = formatTmpDirName(dir);
            tmpCacheDir.mkdir();

            Set<PartitionRestoreFuture> leftParts;

            // Partitions contained in the snapshot.
            Set<Integer> availParts = new HashSet<>();

            for (SnapshotMetadata meta : allMetas) {
                Set<Integer> parts = meta.partitions().get(grpId);

                if (parts != null)
                    availParts.addAll(parts);
            }

            List<List<ClusterNode>> assignment = affCache.get(cacheOrGrpName).idealAssignment().assignment();

            Set<PartitionRestoreFuture> partFuts = availParts.stream()
                .filter(p -> p != INDEX_PARTITION && assignment.get(p).contains(locNode))
                .map(p -> new PartitionRestoreFuture(p, opCtx0.processedParts))
                .collect(Collectors.toSet());

            allParts.put(grpId, partFuts);
            rmtLoadParts.put(grpId, leftParts = new HashSet<>(partFuts));

            if (leftParts.isEmpty())
                continue;

            SnapshotMetadata full = findMetadataWithSamePartitions(locMetas, grpId,
                leftParts.stream().map(p -> p.partId).collect(Collectors.toSet()));

            for (SnapshotMetadata meta : full == null ? locMetas : Collections.singleton(full)) {
                if (leftParts.isEmpty())
                    break;

                File snpCacheDir = new File(ctx.cache().context().snapshotMgr().snapshotLocalDir(opCtx0.snpName),
                    Paths.get(databaseRelativePath(meta.folderName()), dir.getName()).toString());

                leftParts.removeIf(partFut -> {
                    boolean doCopy = ofNullable(meta.partitions().get(grpId))
                        .orElse(Collections.emptySet())
                        .contains(partFut.partId);

                    if (doCopy)
                        copyLocalAsync(ctx.cache().context().snapshotMgr(), opCtx0, snpCacheDir, tmpCacheDir, partFut);

                    return doCopy;
                });

                if (meta == full) {
                    assert leftParts.isEmpty() : leftParts;

                    if (log.isInfoEnabled()) {
                        log.info("The snapshot was taken on the same cluster topology. The index will be copied to " +
                            "restoring cache group if necessary [reqId=" + reqId + ", snapshot=" + opCtx0.snpName +
                            ", dir=" + dir.getName() + ']');
                    }

                    File idxFile = new File(snpCacheDir, FilePageStoreManager.getPartitionFileName(INDEX_PARTITION));

                    if (idxFile.exists()) {
                        PartitionRestoreFuture idxFut;

                        allParts.computeIfAbsent(grpId, g -> new HashSet<>())
                            .add(idxFut = new PartitionRestoreFuture(INDEX_PARTITION, opCtx0.processedParts));

                        copyLocalAsync(ctx.cache().context().snapshotMgr(), opCtx0, snpCacheDir, tmpCacheDir, idxFut);
                    }
                }
            }
        }

        // Load other partitions from remote nodes.
        List<PartitionRestoreFuture> rmtAwaitParts =
            rmtLoadParts.values().stream().flatMap(Collection::stream).collect(Collectors.toList());

        // This is necessary for sending only one partitions request per cluster node.
        Map<UUID, Map<Integer, Set<Integer>>> snpAff = snapshotAffinity(
            opCtx0.metasPerNode.entrySet().stream()
                .filter(e -> !e.getKey().equals(ctx.localNodeId()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)),
            (grpId, partId) -> rmtLoadParts.get(grpId) != null &&
                rmtLoadParts.get(grpId).remove(new PartitionRestoreFuture(partId, opCtx0.processedParts)));

        Map<Integer, File> grpToDir = opCtx0.dirs.stream()
            .collect(Collectors.toMap(d -> CU.cacheId(FilePageStoreManager.cacheGroupName(d)), d -> d));

        try {
            if (log.isInfoEnabled() && !snpAff.isEmpty()) {
                log.info("Trying to request partitions from remote nodes [reqId=" + reqId +
                    ", snapshot=" + opCtx0.snpName +
                    ", map=" + snpAff.entrySet().stream()
                        .collect(Collectors.toMap(Map.Entry::getKey, e -> partitionsMapToCompactString(e.getValue()))) + ']');
            }

            for (Map.Entry<UUID, Map<Integer, Set<Integer>>> m : snpAff.entrySet()) {
                ctx.cache().context().snapshotMgr().requestRemoteSnapshotFiles(m.getKey(), opCtx0.snpName, m.getValue(),
                    opCtx0.stopChecker, (snpFile, t) -> {
                        if (opCtx0.stopChecker.getAsBoolean())
                            throw new IgniteInterruptedException("Snapshot remote operation request cancelled.");

                        if (t == null) {
                            int grpId = CU.cacheId(cacheGroupName(snpFile.getParentFile()));
                            int partId = partId(snpFile.getName());

                            PartitionRestoreFuture partFut = F.find(allParts.get(grpId), null,
                                new IgnitePredicate<PartitionRestoreFuture>() {
                                    @Override public boolean apply(PartitionRestoreFuture f) {
                                        return f.partId == partId;
                                    }
                                });

                            assert partFut != null : snpFile.getAbsolutePath();

                            File tmpCacheDir = formatTmpDirName(grpToDir.get(grpId));

                            Path partFile = Paths.get(tmpCacheDir.getAbsolutePath(), snpFile.getName());

                            try {
                                Files.move(snpFile.toPath(), partFile);

                                partFut.complete(partFile);
                            }
                            catch (Exception e) {
                                opCtx0.errHnd.accept(e);

                                completeListExceptionally(rmtAwaitParts, e);
                            }
                        }
                        else {
                            opCtx0.errHnd.accept(t);

                            completeListExceptionally(rmtAwaitParts, t);
                        }
                    });
            }
        }
        catch (IgniteCheckedException e) {
            opCtx0.errHnd.accept(e);

            completeListExceptionally(rmtAwaitParts, e);
        }

        List<PartitionRestoreFuture> allPartFuts =
            allParts.values().stream().flatMap(Collection::stream).collect(Collectors.toList());

        int size = allPartFuts.size();

        opCtx0.totalParts = size;

        CompletableFuture.allOf(allPartFuts.toArray(new CompletableFuture[size]))
            .runAfterBothAsync(metaFut, () -> {
                try {
                    if (opCtx0.stopChecker.getAsBoolean())
                        throw new IgniteInterruptedException("The operation has been stopped on temporary directory switch.");

                    for (File src : opCtx0.dirs)
                        Files.move(formatTmpDirName(src).toPath(), src.toPath(), StandardCopyOption.ATOMIC_MOVE);
                }
                catch (IOException e) {
                    throw new IgniteException(e);
                }
            }, snpMgr.snapshotExecutorService())
            .whenComplete((r, t) -> opCtx0.errHnd.accept(t))
            .whenComplete((res, t) -> {
                Throwable t0 = ofNullable(opCtx0.err.get()).orElse(t);

                if (t0 == null)
                    retFut.onDone(true);
                else {
                    log.error("Unable to restore cache group(s) from a snapshot [reqId=" + reqId +
                        ", snapshot=" + opCtx0.snpName + ']', t0);

                    retFut.onDone(t0);
                }
            });
    }
    catch (Exception ex) {
        opCtx0.errHnd.accept(ex);

        return new GridFinishedFuture<>(ex);
    }

    return retFut;
}
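The tail of preload() funnels a batch of JDK CompletableFutures into the single GridFutureAdapter handed back to the caller. Below is a minimal sketch of that bridge with hypothetical names (awaitAll and the part futures stand in for the partition-restore machinery), assuming only ignite-core on the classpath.

import java.util.List;
import java.util.concurrent.CompletableFuture;

import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class PreloadBridgeSketch {
    static IgniteInternalFuture<Boolean> awaitAll(List<CompletableFuture<Void>> parts) {
        GridFutureAdapter<Boolean> retFut = new GridFutureAdapter<>();

        // Join all JDK futures, then relay the combined outcome into the
        // adapter so Ignite-side callers can wait on one IgniteInternalFuture.
        CompletableFuture.allOf(parts.toArray(new CompletableFuture[0]))
            .whenComplete((res, t) -> {
                if (t == null)
                    retFut.onDone(true);
                else
                    retFut.onDone(t);
            });

        return retFut;
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<Void> p0 = CompletableFuture.completedFuture(null);
        CompletableFuture<Void> p1 = CompletableFuture.completedFuture(null);

        System.out.println("all parts restored: " + awaitAll(List.of(p0, p1)).get());
    }
}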
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class OdbcRequestHandlerWorker, method body.
/** {@inheritDoc} */
@Override protected void body() throws InterruptedException, IgniteInterruptedCheckedException {
    try {
        while (!isCancelled()) {
            T2<OdbcRequest, GridFutureAdapter<ClientListenerResponse>> req = queue.take();

            GridFutureAdapter<ClientListenerResponse> fut = req.get2();

            try {
                ClientListenerResponse res = hnd.doHandle(req.get1());

                fut.onDone(res);
            }
            catch (Exception e) {
                fut.onDone(e);
            }
        }
    }
    finally {
        // Notify indexing that this worker is being stopped.
        try {
            ctx.query().getIndexing().onClientDisconnect();
        }
        catch (Exception e) {
            // No-op.
        }

        // Drain the queue on stop.
        T2<OdbcRequest, GridFutureAdapter<ClientListenerResponse>> req = queue.poll();

        while (req != null) {
            req.get2().onDone(ERR_RESPONSE);

            req = queue.poll();
        }
    }
}
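Here each queued request carries its own GridFutureAdapter, so the worker thread completes responses one by one and the shutdown path fails whatever is still queued instead of leaving callers blocked. A minimal sketch of that worker-queue shape, with hypothetical request/response types (plain String stands in for OdbcRequest and ClientListenerResponse, and Map.Entry for T2):

import java.util.AbstractMap.SimpleEntry;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class WorkerQueueSketch {
    private final BlockingQueue<Map.Entry<String, GridFutureAdapter<String>>> queue =
        new LinkedBlockingQueue<>();

    private volatile boolean cancelled;

    /** Producer side: enqueue a request and get a future for its response. */
    GridFutureAdapter<String> submit(String req) {
        GridFutureAdapter<String> fut = new GridFutureAdapter<>();

        queue.add(new SimpleEntry<>(req, fut));

        return fut;
    }

    /** Worker loop, mirroring body(): handle until cancelled, then drain. */
    void body() throws InterruptedException {
        try {
            while (!cancelled) {
                Map.Entry<String, GridFutureAdapter<String>> req = queue.take();

                try {
                    req.getValue().onDone("handled: " + req.getKey());
                }
                catch (Exception e) {
                    req.getValue().onDone(e);
                }
            }
        }
        finally {
            // Drain the queue on stop so no caller waits forever.
            Map.Entry<String, GridFutureAdapter<String>> req = queue.poll();

            while (req != null) {
                req.getValue().onDone(new IllegalStateException("Worker stopped."));

                req = queue.poll();
            }
        }
    }
}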
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class GridNearTxLocal, method requestSnapshot.
/**
 * Requests an MVCC snapshot version from the coordinator.
 *
 * @return Future to wait for result.
 */
public IgniteInternalFuture<MvccSnapshot> requestSnapshot() {
    if (isRollbackOnly())
        return new GridFinishedFuture<>(rollbackException());

    MvccSnapshot mvccSnapshot0 = mvccSnapshot;

    if (mvccSnapshot0 != null)
        return new GridFinishedFuture<>(mvccSnapshot0);

    MvccProcessor prc = cctx.coordinators();
    MvccCoordinator crd = prc.currentCoordinator();

    synchronized (this) {
        this.crdVer = crd.version();
    }

    if (crd.local())
        mvccSnapshot0 = prc.requestWriteSnapshotLocal();

    if (mvccSnapshot0 == null) {
        MvccSnapshotFuture fut = new MvccTxSnapshotFuture();

        prc.requestWriteSnapshotAsync(crd, fut);

        return fut;
    }

    GridFutureAdapter<MvccSnapshot> fut = new GridFutureAdapter<>();

    onResponse0(mvccSnapshot0, fut);

    return fut;
}
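requestSnapshot() illustrates the fast-path/slow-path split common around GridFutureAdapter: a value already at hand is returned through an already-completed future, otherwise the caller gets a future completed later by asynchronous work. A minimal sketch of that shape under hypothetical names (requestValue, a cached String instead of an MvccSnapshot, supplyAsync instead of the coordinator request):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class SnapshotRequestSketch {
    private final AtomicReference<String> cached = new AtomicReference<>();

    IgniteInternalFuture<String> requestValue() {
        String val = cached.get();

        // Fast path: the value is already known, complete synchronously.
        if (val != null)
            return new GridFinishedFuture<>(val);

        // Slow path: complete the adapter when the async request responds.
        GridFutureAdapter<String> fut = new GridFutureAdapter<>();

        CompletableFuture.supplyAsync(() -> "remote-snapshot")
            .whenComplete((res, err) -> {
                if (err != null)
                    fut.onDone(err);
                else {
                    cached.set(res);

                    fut.onDone(res);
                }
            });

        return fut;
    }

    public static void main(String[] args) throws Exception {
        SnapshotRequestSketch s = new SnapshotRequestSketch();

        System.out.println(s.requestValue().get()); // slow path, completed async
        System.out.println(s.requestValue().get()); // fast path, finished future
    }
}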
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class GridNearTxLocal, method enlistWrite.
/**
 * Internal routine for <tt>putAll(..)</tt>
 *
 * @param cacheCtx Cache context.
 * @param entryTopVer Topology version for the enlisted entries (optional).
 * @param keys Keys to enlist.
 * @param expiryPlc Explicitly specified expiry policy for entry.
 * @param lookup Value lookup map ({@code null} for remove).
 * @param invokeMap Map with entry processors for invoke operation.
 * @param invokeArgs Optional arguments for EntryProcessor.
 * @param retval Flag indicating whether a value should be returned.
 * @param lockOnly If {@code true}, then entry will be enlisted as noop.
 * @param filter User filters.
 * @param ret Return value.
 * @param enlisted Collection of keys enlisted into this transaction.
 * @param drPutMap DR put map (optional).
 * @param drRmvMap DR remove map (optional).
 * @param skipStore Skip store flag.
 * @param singleRmv {@code True} for single key remove operation ({@link Cache#remove(Object)}).
 * @param keepBinary Keep binary flag.
 * @param recovery Recovery flag.
 * @param dataCenterId Optional data center ID.
 * @return Future for enlisting writes.
 */
private <K, V> IgniteInternalFuture<Void> enlistWrite(
    final GridCacheContext cacheCtx,
    @Nullable AffinityTopologyVersion entryTopVer,
    Collection<?> keys,
    @Nullable ExpiryPolicy expiryPlc,
    @Nullable Map<?, ?> lookup,
    @Nullable Map<?, EntryProcessor<K, V, Object>> invokeMap,
    @Nullable Object[] invokeArgs,
    final boolean retval,
    boolean lockOnly,
    final CacheEntryPredicate[] filter,
    final GridCacheReturn ret,
    Collection<KeyCacheObject> enlisted,
    @Nullable Map<KeyCacheObject, GridCacheDrInfo> drPutMap,
    @Nullable Map<KeyCacheObject, GridCacheVersion> drRmvMap,
    boolean skipStore,
    final boolean singleRmv,
    final boolean keepBinary,
    final boolean recovery,
    Byte dataCenterId
) {
    assert retval || invokeMap == null;

    try (TraceSurroundings ignored2 = MTC.support(context().kernalContext().tracing().create(TX_NEAR_ENLIST_WRITE, MTC.span()))) {
        GridFutureAdapter<Void> enlistFut = new GridFutureAdapter<>();

        if (!updateLockFuture(null, enlistFut))
            return finishFuture(enlistFut, timedOut() ? timeoutException() : rollbackException(), false);

        try {
            addActiveCache(cacheCtx, recovery);
        }
        catch (IgniteCheckedException e) {
            return finishFuture(enlistFut, e, false);
        }

        boolean rmv = lookup == null && invokeMap == null;

        final boolean hasFilters = !F.isEmptyOrNulls(filter) && !F.isAlwaysTrue(filter);
        final boolean needVal = singleRmv || retval || hasFilters;
        final boolean needReadVer = needVal && (serializable() && optimistic());

        try {
            // Set transform flag for transaction.
            if (invokeMap != null)
                transform = true;

            Set<KeyCacheObject> missedForLoad = null;

            for (Object key : keys) {
                if (isRollbackOnly())
                    return finishFuture(enlistFut, timedOut() ? timeoutException() : rollbackException(), false);

                if (key == null) {
                    rollback();

                    throw new NullPointerException("Null key.");
                }

                Object val = rmv || lookup == null ? null : lookup.get(key);
                EntryProcessor entryProcessor = invokeMap == null ? null : invokeMap.get(key);

                GridCacheVersion drVer;
                long drTtl;
                long drExpireTime;

                if (drPutMap != null) {
                    GridCacheDrInfo info = drPutMap.get(key);

                    assert info != null;

                    drVer = info.version();
                    drTtl = info.ttl();
                    drExpireTime = info.expireTime();
                }
                else if (drRmvMap != null) {
                    assert drRmvMap.get(key) != null;

                    drVer = drRmvMap.get(key);
                    drTtl = -1L;
                    drExpireTime = -1L;
                }
                else if (dataCenterId != null) {
                    drVer = cacheCtx.cache().nextVersion(dataCenterId);
                    drTtl = -1L;
                    drExpireTime = -1L;
                }
                else {
                    drVer = null;
                    drTtl = -1L;
                    drExpireTime = -1L;
                }

                if (!rmv && val == null && entryProcessor == null) {
                    setRollbackOnly();

                    throw new NullPointerException("Null value.");
                }

                KeyCacheObject cacheKey = cacheCtx.toCacheKeyObject(key);

                boolean loadMissed = enlistWriteEntry(cacheCtx, entryTopVer, cacheKey, val, entryProcessor, invokeArgs,
                    expiryPlc, retval, lockOnly, filter, drVer, drTtl, drExpireTime, ret, enlisted, skipStore, singleRmv,
                    hasFilters, needVal, needReadVer, keepBinary, recovery);

                if (loadMissed) {
                    if (missedForLoad == null)
                        missedForLoad = new HashSet<>();

                    missedForLoad.add(cacheKey);
                }
            }

            if (missedForLoad != null) {
                AffinityTopologyVersion topVer = topologyVersionSnapshot();

                if (topVer == null)
                    topVer = entryTopVer;

                IgniteInternalFuture<Void> loadFut = loadMissing(cacheCtx,
                    topVer != null ? topVer : topologyVersion(),
                    missedForLoad,
                    filter,
                    ret,
                    needReadVer,
                    singleRmv,
                    hasFilters,
                    /*read through*/(invokeMap != null || cacheCtx.config().isLoadPreviousValue()) && !skipStore,
                    retval,
                    keepBinary,
                    recovery,
                    expiryPlc);

                loadFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
                    @Override public void apply(IgniteInternalFuture<Void> fut) {
                        try {
                            fut.get();

                            finishFuture(enlistFut, null, true);
                        }
                        catch (IgniteCheckedException e) {
                            finishFuture(enlistFut, e, true);
                        }
                    }
                });

                return enlistFut;
            }

            return finishFuture(enlistFut, null, true);
        }
        catch (IgniteCheckedException e) {
            return finishFuture(enlistFut, e, true);
        }
    }
}
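The loadFut.listen(...) block at the end is the standard way to relay an inner future's outcome into an outer GridFutureAdapter: the adapter is returned to the caller immediately, and the listener completes it once the load finishes. A minimal sketch of that relay, with a hypothetical enlist() helper standing in for finishFuture() and the enlist logic:

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class EnlistRelaySketch {
    static IgniteInternalFuture<Void> enlist(IgniteInternalFuture<Void> loadFut) {
        GridFutureAdapter<Void> enlistFut = new GridFutureAdapter<>();

        // Relay the inner future's outcome into the outer adapter.
        loadFut.listen(fut -> {
            try {
                fut.get(); // rethrows the load failure, if any

                enlistFut.onDone((Void)null);
            }
            catch (IgniteCheckedException e) {
                enlistFut.onDone(e);
            }
        });

        return enlistFut;
    }

    public static void main(String[] args) throws IgniteCheckedException {
        // An already-finished inner future triggers the listener immediately.
        enlist(new GridFinishedFuture<>()).get();

        System.out.println("enlisted");
    }
}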