Use of org.infinispan.remoting.responses.Response in project infinispan by infinispan.
Class StateConsumerImpl, method onTopologyUpdate:
@Override
public CompletionStage<CompletionStage<Void>> onTopologyUpdate(CacheTopology cacheTopology, boolean isRebalance) {
final ConsistentHash newWriteCh = cacheTopology.getWriteConsistentHash();
final CacheTopology previousCacheTopology = this.cacheTopology;
final ConsistentHash previousWriteCh = previousCacheTopology != null ? previousCacheTopology.getWriteConsistentHash() : null;
IntSet newWriteSegments = getOwnedSegments(newWriteCh);
Address address = rpcManager.getAddress();
final boolean isMember = cacheTopology.getMembers().contains(address);
final boolean wasMember = previousWriteCh != null && previousWriteCh.getMembers().contains(address);
if (log.isTraceEnabled())
log.tracef("Received new topology for cache %s, isRebalance = %b, isMember = %b, topology = %s", cacheName, isRebalance, isMember, cacheTopology);
if (!ownsData && isMember) {
ownsData = true;
} else if (ownsData && !isMember) {
// This can happen after a merge, if the local node was in a minority partition.
ownsData = false;
}
// If a member leaves/crashes immediately after a rebalance was started, the new CH_UPDATE
// command may be executed before the REBALANCE_START command, so it has to start the rebalance.
boolean addedPendingCH = cacheTopology.getPendingCH() != null && wasMember && previousCacheTopology.getPendingCH() == null;
boolean startConflictResolution = !isRebalance && cacheTopology.getPhase() == CacheTopology.Phase.CONFLICT_RESOLUTION;
boolean startStateTransfer = isRebalance || (addedPendingCH && !startConflictResolution);
if (startStateTransfer && !isRebalance) {
if (log.isTraceEnabled())
log.tracef("Forcing startRebalance = true");
}
CompletionStage<Void> stage = CompletableFutures.completedNull();
if (startStateTransfer) {
// Only update the rebalance topology id when starting the rebalance, as we're going to ignore any state
// response with a smaller topology id
stateTransferTopologyId.compareAndSet(NO_STATE_TRANSFER_IN_PROGRESS, cacheTopology.getTopologyId());
conflictManager.cancelVersionRequests();
if (cacheNotifier.hasListener(DataRehashed.class)) {
stage = cacheNotifier.notifyDataRehashed(cacheTopology.getCurrentCH(), cacheTopology.getPendingCH(), cacheTopology.getUnionCH(), cacheTopology.getTopologyId(), true);
}
}
stage = stage.thenCompose(ignored -> {
if (startConflictResolution) {
// This stops state being applied from a prior rebalance and also prevents tracking from being stopped
stateTransferTopologyId.set(NO_STATE_TRANSFER_IN_PROGRESS);
}
// Make sure we don't send a REBALANCE_CONFIRM command before we've added all the transfer tasks
// even if some of the tasks are removed and re-added
waitingForState.set(false);
stateTransferFuture = new CompletableFuture<>();
beforeTopologyInstalled(cacheTopology.getTopologyId(), previousWriteCh, newWriteCh);
if (!configuration.clustering().cacheMode().isInvalidation()) {
// Owned segments
dataContainer.addSegments(newWriteSegments);
// TODO Should we throw an exception if addSegments() returns false?
return ignoreValue(persistenceManager.addSegments(newWriteSegments));
}
return CompletableFutures.completedNull();
});
stage = stage.thenCompose(ignored -> {
// Tracking is stopped once the state transfer completes (i.e. all the entries have been inserted)
if (startStateTransfer || startConflictResolution) {
if (commitManager.isTracking(PUT_FOR_STATE_TRANSFER)) {
log.debug("Starting state transfer but key tracking is already enabled");
} else {
if (log.isTraceEnabled())
log.tracef("Start keeping track of keys for state transfer");
commitManager.startTrack(PUT_FOR_STATE_TRANSFER);
}
}
// Ensures writes to the data container use the right consistent hash
// Writers block on the state transfer shared lock, so we keep the exclusive lock as short as possible
stateTransferLock.acquireExclusiveTopologyLock();
try {
this.cacheTopology = cacheTopology;
distributionManager.setCacheTopology(cacheTopology);
} finally {
stateTransferLock.releaseExclusiveTopologyLock();
}
stateTransferLock.notifyTopologyInstalled(cacheTopology.getTopologyId());
inboundInvocationHandler.checkForReadyTasks();
xSiteStateTransferManager.onTopologyUpdated(cacheTopology, isStateTransferInProgress());
if (!wasMember && isMember) {
return fetchClusterListeners(cacheTopology);
}
return CompletableFutures.completedNull();
});
stage = stage.thenCompose(ignored -> {
// fetch transactions and data segments from other owners if this is enabled
if (startConflictResolution || (!isTransactional && !isFetchEnabled)) {
return CompletableFutures.completedNull();
}
IntSet addedSegments, removedSegments;
if (previousWriteCh == null) {
// If we have any segments assigned in the initial CH, it means we are the first member.
// If we are not the first member, we can only add segments via rebalance.
removedSegments = IntSets.immutableEmptySet();
addedSegments = IntSets.immutableEmptySet();
if (log.isTraceEnabled()) {
log.tracef("On cache %s we have: added segments: %s", cacheName, addedSegments);
}
} else {
IntSet previousSegments = getOwnedSegments(previousWriteCh);
if (newWriteSegments.size() == numSegments) {
// Optimization for replicated caches
removedSegments = IntSets.immutableEmptySet();
} else {
removedSegments = IntSets.mutableCopyFrom(previousSegments);
removedSegments.removeAll(newWriteSegments);
}
// This is a rebalance, we need to request the segments we own in the new CH.
addedSegments = IntSets.mutableCopyFrom(newWriteSegments);
addedSegments.removeAll(previousSegments);
if (log.isTraceEnabled()) {
log.tracef("On cache %s we have: new segments: %s; old segments: %s", cacheName, newWriteSegments, previousSegments);
log.tracef("On cache %s we have: added segments: %s; removed segments: %s", cacheName, addedSegments, removedSegments);
}
// remove inbound transfers for segments we no longer own
cancelTransfers(removedSegments);
// Scattered cache gets added segments on the first CH_UPDATE, and we want to keep these
if (!startStateTransfer && !addedSegments.isEmpty() && !configuration.clustering().cacheMode().isScattered()) {
// If the last owner of a segment leaves the cluster, a new set of owners is assigned,
// but the new owners should not try to retrieve the segment from each other.
// If this happens during a rebalance, we might have already sent our rebalance
// confirmation, so the coordinator won't wait for us to retrieve those segments anyway.
log.debugf("Not requesting segments %s because the last owner left the cluster", addedSegments);
addedSegments.clear();
}
// check if any of the existing transfers should be restarted from a different source because
// the initial source is no longer a member
restartBrokenTransfers(cacheTopology, addedSegments);
}
IntSet transactionOnlySegments = computeTransactionOnlySegments(cacheTopology, address);
return handleSegments(startStateTransfer, addedSegments, removedSegments, transactionOnlySegments);
});
stage = stage.thenCompose(ignored -> {
int stateTransferTopologyId = this.stateTransferTopologyId.get();
if (log.isTraceEnabled())
log.tracef("Topology update processed, stateTransferTopologyId = %d, startRebalance = %s, pending CH = %s", (Object) stateTransferTopologyId, startStateTransfer, cacheTopology.getPendingCH());
if (stateTransferTopologyId != NO_STATE_TRANSFER_IN_PROGRESS && !startStateTransfer && !cacheTopology.getPhase().isRebalance()) {
// we have received a topology update without a pending CH, signalling the end of the rebalance
boolean changed = this.stateTransferTopologyId.compareAndSet(stateTransferTopologyId, NO_STATE_TRANSFER_IN_PROGRESS);
// but we only want to notify the @DataRehashed listeners once
if (changed) {
stopApplyingState(stateTransferTopologyId);
if (cacheNotifier.hasListener(DataRehashed.class)) {
return cacheNotifier.notifyDataRehashed(previousCacheTopology.getCurrentCH(), previousCacheTopology.getPendingCH(), previousCacheTopology.getUnionCH(), cacheTopology.getTopologyId(), false);
}
}
}
return CompletableFutures.completedNull();
});
return handleAndCompose(stage, (ignored, throwable) -> {
if (log.isTraceEnabled()) {
log.tracef("Unlock State Transfer in Progress for topology ID %s", cacheTopology.getTopologyId());
}
stateTransferLock.notifyTransactionDataReceived(cacheTopology.getTopologyId());
inboundInvocationHandler.checkForReadyTasks();
// Only set the flag here, after all the transfers have been added to the transfersBySource map
if (stateTransferTopologyId.get() != NO_STATE_TRANSFER_IN_PROGRESS && isMember) {
waitingForState.set(true);
}
notifyEndOfStateTransferIfNeeded();
// Remove transactions whose originators have left the cluster; this must run after the topology is installed
// and after notifyTransactionDataReceived - otherwise the RollbackCommands would block.
try {
if (transactionTable != null) {
transactionTable.cleanupLeaverTransactions(rpcManager.getTransport().getMembers());
}
} catch (Exception e) {
// Do not fail state transfer when the cleanup fails. See ISPN-7437 for details.
log.transactionCleanupError(e);
}
commandAckCollector.onMembersChange(newWriteCh.getMembers());
// The rebalance phase (READ_OLD_WRITE_ALL/TRANSITORY) is only confirmed once all the state has been received,
// READ_ALL_WRITE_ALL and READ_NEW_WRITE_ALL can be confirmed immediately,
// and STABLE does not have to be confirmed at all
switch(cacheTopology.getPhase()) {
case READ_ALL_WRITE_ALL:
case READ_NEW_WRITE_ALL:
stateTransferFuture.complete(null);
}
// Any data for segments we no longer own should be removed from the data container and the stores,
// e.g. after a merge (when the local partition was in degraded mode
// and the other partition was available) or when L1 is enabled.
if ((isMember || wasMember) && cacheTopology.getPhase() == CacheTopology.Phase.NO_REBALANCE) {
int numSegments = newWriteCh.getNumSegments();
IntSet removedSegments = IntSets.mutableEmptySet(numSegments);
IntSet newSegments = getOwnedSegments(newWriteCh);
for (int i = 0; i < numSegments; ++i) {
if (!newSegments.contains(i)) {
removedSegments.set(i);
}
}
return removeStaleData(removedSegments).thenApply(ignored1 -> {
conflictManager.restartVersionRequests();
// rethrow the original exception, if any
CompletableFutures.rethrowExceptionIfPresent(throwable);
return stateTransferFuture;
});
}
CompletableFutures.rethrowExceptionIfPresent(throwable);
return CompletableFuture.completedFuture(stateTransferFuture);
});
}
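A minimal caller sketch (variable names hypothetical, reusing the class's log field) of how the nested stages returned by onTopologyUpdate are typically consumed: the outer stage completes once the new topology is installed, the inner one once state transfer has finished.
CompletionStage<CompletionStage<Void>> outer = stateConsumer.onTopologyUpdate(newCacheTopology, isRebalance);
// Flatten the nesting: first wait for topology installation, then for state transfer
outer.thenCompose(inner -> inner)
      .thenRun(() -> log.tracef("State transfer finished for topology %d", newCacheTopology.getTopologyId()));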
Use of org.infinispan.remoting.responses.Response in project infinispan by infinispan.
Class ScatteredStateConsumerImpl, method backupEntries:
private void backupEntries(List<InternalCacheEntry<?, ?>> entries) {
long incrementedCounter = chunkCounter.incrementAndGet();
if (log.isTraceEnabled())
log.tracef("Backing up entries, chunk counter is %d", incrementedCounter);
Map<Object, InternalCacheValue<?>> map = new HashMap<>();
for (InternalCacheEntry<?, ?> entry : entries) {
map.put(entry.getKey(), entry.toInternalCacheValue());
}
PutMapCommand putMapCommand = commandsFactory.buildPutMapCommand(map, null, STATE_TRANSFER_FLAGS);
putMapCommand.setTopologyId(rpcManager.getTopologyId());
rpcManager.invokeCommand(backupAddress, putMapCommand, SingleResponseCollector.validOnly(), rpcManager.getSyncRpcOptions()).whenComplete((response, throwable) -> {
try {
if (throwable != null) {
log.failedOutBoundTransferExecution(throwable);
}
} finally {
long decrementedCounter = chunkCounter.decrementAndGet();
if (log.isTraceEnabled())
log.tracef("Backed up entries, chunk counter is %d", decrementedCounter);
if (decrementedCounter == 0) {
notifyEndOfStateTransferIfNeeded();
}
}
});
}
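For comparison, a hedged sketch of how the ValidResponse produced by SingleResponseCollector.validOnly() could be inspected if the remote return value mattered (backupEntries above deliberately ignores it and only checks the throwable); all names come from the surrounding class.
rpcManager.invokeCommand(backupAddress, putMapCommand, SingleResponseCollector.validOnly(), rpcManager.getSyncRpcOptions())
      .whenComplete((response, throwable) -> {
         if (throwable == null && response instanceof SuccessfulResponse) {
            // A state-transfer PutMapCommand normally carries no meaningful return value
            Object value = ((SuccessfulResponse) response).getResponseValue();
            log.tracef("Backup write acknowledged with value %s", value);
         }
      });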
Use of org.infinispan.remoting.responses.Response in project infinispan by infinispan.
Class SingleClusterExecutor, method submit:
@Override
public CompletableFuture<Void> submit(Runnable runnable) {
Address target = findTarget();
if (target == null) {
return CompletableFutures.completedExceptionFuture(new SuspectException("No available nodes!"));
}
if (log.isTraceEnabled()) {
log.tracef("Submitting runnable to single remote node - JGroups Address %s", target);
}
CompletableFuture<Void> future = new CompletableFuture<>();
if (target == me) {
return super.submit(runnable);
} else {
ReplicableCommand command = new ReplicableRunnableCommand(runnable);
CompletionStage<Response> request = transport.invokeCommand(target, command, PassthroughSingleResponseCollector.INSTANCE, DeliverOrder.NONE, time, unit);
request.whenComplete((r, t) -> {
if (t != null) {
future.completeExceptionally(t);
} else {
consumeResponse(r, target, future::completeExceptionally);
future.complete(null);
}
});
}
return future;
}
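A hypothetical usage sketch of the public API that routes into this submit method; cacheManager is assumed to be a started EmbeddedCacheManager.
ClusterExecutor executor = cacheManager.executor().singleNodeSubmission();
executor.submit(() -> System.out.println("Executed on one cluster member"))
      .whenComplete((ignored, t) -> {
         if (t != null) {
            // e.g. a SuspectException when no nodes are available
            t.printStackTrace();
         }
      });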
Use of org.infinispan.remoting.responses.Response in project infinispan by infinispan.
Class RecoveryManagerImpl, method forceTransactionCompletionFromCluster:
@Override
public String forceTransactionCompletionFromCluster(XidImpl xid, Address where, boolean commit) {
CompleteTransactionCommand ctc = commandFactory.buildCompleteTransactionCommand(xid, commit);
CompletionStage<Map<Address, Response>> completionStage = rpcManager.invokeCommand(where, ctc, MapResponseCollector.validOnly(), rpcManager.getSyncRpcOptions());
Map<Address, Response> responseMap = rpcManager.blocking(completionStage);
if (responseMap.size() != 1 || responseMap.get(where) == null) {
log.expectedJustOneResponse(responseMap);
throw new CacheException("Expected response size is 1, received " + responseMap);
}
// noinspection rawtypes
return (String) ((SuccessfulResponse) responseMap.get(where)).getResponseValue();
}
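An illustrative caller sketch (xid and node are assumed to come from a prior recovery scan), forcing a prepared transaction to complete on a specific member:
// commit == true forces a commit, false forces a rollback
String outcome = recoveryManager.forceTransactionCompletionFromCluster(xid, node, true);
log.tracef("Forced completion outcome: %s", outcome);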
Use of org.infinispan.remoting.responses.Response in project infinispan by infinispan.
Class RecoveryManagerImpl, method getAllPreparedTxFromCluster:
private Map<Address, Response> getAllPreparedTxFromCluster() {
GetInDoubtTransactionsCommand command = commandFactory.buildGetInDoubtTransactionsCommand();
CompletionStage<Map<Address, Response>> completionStage = rpcManager.invokeCommandOnAll(command, MapResponseCollector.ignoreLeavers(), rpcManager.getSyncRpcOptions());
Map<Address, Response> addressResponseMap = rpcManager.blocking(completionStage);
if (log.isTraceEnabled())
log.tracef("getAllPreparedTxFromCluster received from cluster: %s", addressResponseMap);
return addressResponseMap;
}
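An illustrative sketch of unpacking the per-node responses returned above; the SuccessfulResponse cast mirrors the one in forceTransactionCompletionFromCluster.
Map<Address, Response> responses = getAllPreparedTxFromCluster();
for (Map.Entry<Address, Response> entry : responses.entrySet()) {
   Response response = entry.getValue();
   if (response instanceof SuccessfulResponse) {
      Object inDoubtTxs = ((SuccessfulResponse) response).getResponseValue();
      log.tracef("Node %s reported in-doubt transactions: %s", entry.getKey(), inDoubtTxs);
   }
}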