use of org.infinispan.remoting.rpc.RpcOptions in project infinispan by infinispan.
the class StateConsumerImpl method start.
// Must run after the PersistenceManager
@Start(priority = 20)
public void start() {
   cacheName = cache.wired().getName();
   isInvalidationMode = configuration.clustering().cacheMode().isInvalidation();
   isTransactional = configuration.transaction().transactionMode().isTransactional();
   timeout = configuration.clustering().stateTransfer().timeout();
   numSegments = configuration.clustering().hash().numSegments();
   isFetchEnabled = isFetchEnabled(configuration.persistence().fetchPersistentState());
   rpcOptions = new RpcOptions(DeliverOrder.NONE, timeout, TimeUnit.MILLISECONDS);
   stateRequestExecutor = new LimitedExecutor("StateRequest-" + cacheName, nonBlockingExecutor, 1);
   persistenceManager.addStoreListener(storeChangeListener);
   running = true;
}
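The RpcOptions built here (out-of-order delivery plus the state-transfer timeout) is stored in a field and reused for the state requests this component later sends. As a minimal sketch of how such a pre-built instance is typically passed on, assuming a target Address, a ReplicableCommand and a ResponseCollector are already in scope (all three are placeholders, not StateConsumerImpl code):

// Hypothetical helper, not part of StateConsumerImpl: sends one command to one member
// with options equivalent to the field initialised in start() above.
private CompletionStage<Response> sendWithStateTransferTimeout(Address target, ReplicableCommand cmd,
                                                               ResponseCollector<Response> collector) {
   RpcOptions options = new RpcOptions(DeliverOrder.NONE, timeout, TimeUnit.MILLISECONDS);
   return rpcManager.invokeCommand(target, cmd, collector, options);
}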
use of org.infinispan.remoting.rpc.RpcOptions in project infinispan by infinispan.
the class TransactionTable method cleanupTimedOutTransactions.
private void cleanupTimedOutTransactions() {
   if (log.isTraceEnabled())
      log.tracef("About to cleanup remote transactions older than %d ms", configuration.transaction().completedTxTimeout());
   long beginning = timeService.time();
   long cutoffCreationTime = beginning - TimeUnit.MILLISECONDS.toNanos(configuration.transaction().completedTxTimeout());
   List<GlobalTransaction> toKill = new ArrayList<>();
   Map<Address, Collection<GlobalTransaction>> toCheck = new HashMap<>();
   // Check remote transactions.
   for (Map.Entry<GlobalTransaction, RemoteTransaction> e : remoteTransactions.entrySet()) {
      GlobalTransaction gtx = e.getKey();
      RemoteTransaction remoteTx = e.getValue();
      // concurrent map doesn't accept null values
      assert remoteTx != null;
      if (log.isTraceEnabled()) {
         log.tracef("Checking transaction %s", gtx);
      }
      // Check the time.
      long creationTime = remoteTx.getCreationTime();
      if (creationTime - cutoffCreationTime >= 0) {
         // transaction still valid
         continue;
      }
      if (transactionOriginatorChecker.isOriginatorMissing(gtx)) {
         // originator no longer available. Transaction can be rolled back.
         long duration = timeService.timeDuration(creationTime, beginning, TimeUnit.MILLISECONDS);
         log.remoteTransactionTimeout(gtx, duration);
         toKill.add(gtx);
      } else {
         // originator alive or Hot Rod transaction
         Address orig = gtx.getAddress();
         if (rpcManager.getMembers().contains(orig)) {
            // originator still in view. Check if the transaction is valid.
            Collection<GlobalTransaction> addressCheckList = toCheck.computeIfAbsent(orig, k -> new ArrayList<>());
            addressCheckList.add(gtx);
         }
         // else, it is a Hot Rod transaction. Don't kill it since the server reaper will take appropriate action.
      }
   }
   // Check if the transaction is still running on the originator.
   for (Map.Entry<Address, Collection<GlobalTransaction>> entry : toCheck.entrySet()) {
      CheckTransactionRpcCommand cmd = commandsFactory.buildCheckTransactionRpcCommand(entry.getValue());
      RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
      rpcManager.invokeCommand(entry.getKey(), cmd, CheckTransactionRpcCommand.responseCollector(), rpcOptions)
                .thenAccept(this::killAllTransactionsAsync);
   }
   // Rollback the orphaned transactions and release any held locks.
   killAllTransactionsAsync(toKill);
}
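The expiry test above (creationTime - cutoffCreationTime >= 0) deliberately subtracts before comparing: timeService.time() is a nanoTime-style clock, so absolute values carry no meaning and may wrap, and only differences are safe to compare. A standalone illustration using plain JDK calls, with a hypothetical 60-second value standing in for completedTxTimeout():

// Overflow-safe expiry check, mirroring the idiom used in cleanupTimedOutTransactions().
long beginning = System.nanoTime();
long cutoffCreationTime = beginning - TimeUnit.MILLISECONDS.toNanos(60_000); // assumed 60 s timeout
long creationTime = beginning - TimeUnit.MILLISECONDS.toNanos(30_000);       // transaction created 30 s ago

// 'creationTime - cutoffCreationTime >= 0' stays correct even if nanoTime wraps past Long.MAX_VALUE,
// whereas a direct 'creationTime >= cutoffCreationTime' comparison would not.
boolean stillValid = creationTime - cutoffCreationTime >= 0; // true here: newer than the cutoff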
use of org.infinispan.remoting.rpc.RpcOptions in project infinispan by infinispan.
the class DistAsyncFuncTest method createCacheManagers.
@Override
protected void createCacheManagers() throws Throwable {
   super.createCacheManagers();
   r1 = new ReplListener(c1, true, true);
   r2 = new ReplListener(c2, true, true);
   r3 = new ReplListener(c3, true, true);
   r4 = new ReplListener(c4, true, true);
   r = new ReplListener[] { r1, r2, r3, r4 };
   listenerLookup = new HashMap<>();
   for (ReplListener rl : r) listenerLookup.put(rl.getCache().getCacheManager().getAddress(), rl);
   for (Cache c : caches) {
      TestingUtil.wrapComponent(c, RpcManager.class, original -> new AbstractDelegatingRpcManager(original) {
         @Override
         protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
                                                         ResponseCollector<T> collector,
                                                         Function<ResponseCollector<T>, CompletionStage<T>> invoker,
                                                         RpcOptions rpcOptions) {
            if (command instanceof SingleRpcCommand) {
               command = ((SingleRpcCommand) command).getCommand();
            }
            if (command instanceof InvalidateL1Command) {
               InvalidateL1Command invalidateL1Command = (InvalidateL1Command) command;
               log.tracef("Sending invalidation %s to %s", command, targets);
               Collection<Address> realTargets = targets != null ? targets : cacheAddresses;
               for (Address target : realTargets) {
                  expectedL1Invalidations.computeIfAbsent(target, ignored -> Collections.synchronizedList(new ArrayList<>()))
                                         .add(invalidateL1Command);
               }
            }
            return super.performRequest(targets, command, collector, invoker, rpcOptions);
         }
      });
   }
}
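The test relies on performRequest being the point through which AbstractDelegatingRpcManager routes the commands it intercepts, so overriding it is enough to observe each command, its targets and its RpcOptions before delegating. A hypothetical variant of the same wrapping trick that merely counts remote invocations per command type (the cache variable, the map and the counter are placeholders, not part of DistAsyncFuncTest):

Map<Class<?>, LongAdder> invocationCounts = new ConcurrentHashMap<>();
TestingUtil.wrapComponent(cache, RpcManager.class, original -> new AbstractDelegatingRpcManager(original) {
   @Override
   protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
                                                   ResponseCollector<T> collector,
                                                   Function<ResponseCollector<T>, CompletionStage<T>> invoker,
                                                   RpcOptions rpcOptions) {
      // Record the command class, then delegate so the real RPC still happens.
      invocationCounts.computeIfAbsent(command.getClass(), k -> new LongAdder()).increment();
      return super.performRequest(targets, command, collector, invoker, rpcOptions);
   }
});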
use of org.infinispan.remoting.rpc.RpcOptions in project infinispan by infinispan.
the class BaseDistributionInterceptor method primaryReturnHandler.
protected Object primaryReturnHandler(InvocationContext ctx, AbstractDataWriteCommand command, Object localResult) {
   if (!command.isSuccessful()) {
      if (log.isTraceEnabled())
         log.tracef("Skipping the replication of the conditional command as it did not succeed on primary owner (%s).", command);
      return localResult;
   }
   LocalizedCacheTopology cacheTopology = checkTopologyId(command);
   int segment = SegmentSpecificCommand.extractSegment(command, command.getKey(), keyPartitioner);
   DistributionInfo distributionInfo = cacheTopology.getSegmentDistribution(segment);
   Collection<Address> owners = distributionInfo.writeOwners();
   if (owners.size() == 1) {
      // There are no backups, skip the replication part.
      return localResult;
   }
   // Cache the matcher and reset it if we get OOTE (or any other exception) from backup
   ValueMatcher originalMatcher = command.getValueMatcher();
   // Ignore the previous value on the backup owners
   command.setValueMatcher(ValueMatcher.MATCH_ALWAYS);
   if (!isSynchronous(command)) {
      if (isReplicated) {
         rpcManager.sendToAll(command, DeliverOrder.PER_SENDER);
      } else {
         rpcManager.sendToMany(owners, command, DeliverOrder.PER_SENDER);
      }
      // Switch to the retry policy, in case the primary owner changes before we commit locally
      command.setValueMatcher(originalMatcher.matcherForRetry());
      return localResult;
   }
   VoidResponseCollector collector = VoidResponseCollector.ignoreLeavers();
   RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
   // Mark the command as a backup write so it can skip some checks
   command.addFlags(FlagBitSets.BACKUP_WRITE);
   CompletionStage<Void> remoteInvocation = isReplicated
         ? rpcManager.invokeCommandOnAll(command, collector, rpcOptions)
         : rpcManager.invokeCommand(owners, command, collector, rpcOptions);
   return asyncValue(remoteInvocation.handle((ignored, t) -> {
      // Unset the backup write bit as the command will be retried
      command.setFlagsBitSet(command.getFlagsBitSet() & ~FlagBitSets.BACKUP_WRITE);
      // Switch to the retry policy, in case the primary owner changed and the write already succeeded on the new primary
      command.setValueMatcher(originalMatcher.matcherForRetry());
      CompletableFutures.rethrowExceptionIfPresent(t);
      return localResult;
   }));
}
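This handler shows the two replication flavours distinguished by the RpcOptions-based API: the synchronous path takes an RpcOptions (timeout plus delivery order) and returns a CompletionStage to compose on, while the asynchronous path is a fire-and-forget send that only specifies a DeliverOrder. A condensed, hypothetical helper illustrating just that split, using only the RpcManager calls already seen above (the method name and sync flag are placeholders):

// Hypothetical sketch, not Infinispan code: replicate a command to its backup owners.
private CompletionStage<Void> replicateToBackups(ReplicableCommand command, Collection<Address> owners, boolean sync) {
   if (!sync) {
      // Fire-and-forget: per-sender ordering, no responses collected, no timeout applied.
      rpcManager.sendToMany(owners, command, DeliverOrder.PER_SENDER);
      return CompletableFuture.completedFuture(null);
   }
   // Synchronous: the options carry the configured remote timeout and delivery order.
   RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
   return rpcManager.invokeCommand(owners, command, VoidResponseCollector.ignoreLeavers(), rpcOptions);
}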
use of org.infinispan.remoting.rpc.RpcOptions in project infinispan by infinispan.
the class ScatteredDistributionInterceptor method visitClearCommand.
@Override
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) throws Throwable {
   // local mode clear will have unpredictable results
   svm.clearInvalidations();
   if (ctx.isOriginLocal() && !isLocalModeForced(command)) {
      if (isSynchronous(command)) {
         RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
         MapResponseCollector collector = MapResponseCollector.ignoreLeavers();
         return makeStage(asyncInvokeNext(ctx, command, rpcManager.invokeCommandOnAll(command, collector, rpcOptions)))
               .thenApply(ctx, command, clearHandler);
      } else {
         rpcManager.sendToAll(command, DeliverOrder.PER_SENDER);
         return invokeNextThenApply(ctx, command, clearHandler);
      }
   } else {
      return invokeNextThenApply(ctx, command, clearHandler);
   }
}
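Taken together with the first example, these call sites show the two ways an RpcOptions instance is obtained: constructed directly with a component-specific timeout and delivery order, or taken from rpcManager.getSyncRpcOptions(), the synchronous defaults the RpcManager derives from the cache configuration. A short hypothetical comparison (stateTransferTimeout is a placeholder variable):

// 1) Component-specific options: explicit delivery order and timeout (cf. StateConsumerImpl.start()).
RpcOptions stateTransferOptions = new RpcOptions(DeliverOrder.NONE, stateTransferTimeout, TimeUnit.MILLISECONDS);

// 2) Cache-wide synchronous defaults (cf. TransactionTable, BaseDistributionInterceptor,
//    ScatteredDistributionInterceptor): obtained from the RpcManager rather than built by hand.
RpcOptions syncOptions = rpcManager.getSyncRpcOptions();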