Use of org.infinispan.commands.statetransfer.StateTransferCancelCommand in project infinispan by infinispan.
The class StateConsumerTest, method test1.
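// Verifies that StateConsumerImpl requests only its newly-owned segments on rebalance,
// cancels in-flight transfers when the topology reverts (the stubbed
// StateTransferCancelCommand path), and can then restart and complete the transfer.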
public void test1() throws Exception {
// create cache configuration
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.clustering().invocationBatching().enable()
      .clustering().cacheMode(CacheMode.DIST_SYNC)
      .clustering().stateTransfer().timeout(30000)
      .locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis())
      .locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
Configuration configuration = cb.build();
PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
// create a list of 4 members
Address[] addresses = new Address[4];
for (int i = 0; i < 4; i++) {
   addresses[i] = new TestAddress(i);
   persistentUUIDManager.addPersistentAddressMapping(addresses[i], PersistentUUID.randomUUID());
}
List<Address> members1 = Arrays.asList(addresses[0], addresses[1], addresses[2], addresses[3]);
List<Address> members2 = Arrays.asList(addresses[0], addresses[1], addresses[2]);
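// members2 drops addresses[3], simulating the "node 4 leaves" topology update below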
// create CHes
DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
KeyPartitioner keyPartitioner = new HashFunctionPartitioner(40);
DefaultConsistentHash ch1 = chf.create(2, 40, members1, null);
final DefaultConsistentHash ch2 = chf.updateMembers(ch1, members2, null);
DefaultConsistentHash ch3 = chf.rebalance(ch2);
DefaultConsistentHash ch23 = chf.union(ch2, ch3);
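// ch23 is the transitional hash used while rebalancing: the union of the current (ch2) and pending (ch3) owners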
log.debug(ch1);
log.debug(ch2);
// create dependencies
Cache cache = mock(Cache.class);
when(cache.getName()).thenReturn("testCache");
when(cache.getStatus()).thenReturn(ComponentStatus.RUNNING);
pooledExecutorService = new ThreadPoolExecutor(0, 20, 0L, TimeUnit.MILLISECONDS, new SynchronousQueue<>(), getTestThreadFactory("Worker"), new ThreadPoolExecutor.CallerRunsPolicy());
LocalTopologyManager localTopologyManager = mock(LocalTopologyManager.class);
CacheNotifier cacheNotifier = mock(CacheNotifier.class);
RpcManager rpcManager = mock(RpcManager.class);
Transport transport = mock(Transport.class);
CommandsFactory commandsFactory = mock(CommandsFactory.class);
PersistenceManager persistenceManager = mock(PersistenceManager.class);
InternalDataContainer dataContainer = mock(InternalDataContainer.class);
TransactionTable transactionTable = mock(TransactionTable.class);
StateTransferLock stateTransferLock = mock(StateTransferLock.class);
AsyncInterceptorChain interceptorChain = mock(AsyncInterceptorChain.class);
InvocationContextFactory icf = mock(InvocationContextFactory.class);
InternalConflictManager conflictManager = mock(InternalConflictManager.class);
DistributionManager distributionManager = mock(DistributionManager.class);
LocalPublisherManager localPublisherManager = mock(LocalPublisherManager.class);
PerCacheInboundInvocationHandler invocationHandler = mock(PerCacheInboundInvocationHandler.class);
XSiteStateTransferManager xSiteStateTransferManager = mock(XSiteStateTransferManager.class);
when(persistenceManager.removeSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
when(persistenceManager.addSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
when(persistenceManager.publishKeys(any(), any())).thenReturn(Flowable.empty());
when(commandsFactory.buildStateTransferStartCommand(anyInt(), any(IntSet.class))).thenAnswer(
      invocation -> new StateTransferStartCommand(ByteString.fromString("cache1"), (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
when(commandsFactory.buildStateTransferGetTransactionsCommand(anyInt(), any(IntSet.class))).thenAnswer(
      invocation -> new StateTransferGetTransactionsCommand(ByteString.fromString("cache1"), (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
when(commandsFactory.buildStateTransferCancelCommand(anyInt(), any(IntSet.class))).thenAnswer(
      invocation -> new StateTransferCancelCommand(ByteString.fromString("cache1"), (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
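// Each stub builds the real command (for cache name "cache1") from the topology id and
// segments passed to the factory, so the consumer's outgoing requests can be inspected below.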
when(transport.getViewId()).thenReturn(1);
when(rpcManager.getAddress()).thenReturn(addresses[0]);
when(rpcManager.getTransport()).thenReturn(transport);
final Map<Address, Set<Integer>> requestedSegments = new ConcurrentHashMap<>();
final Set<Integer> flatRequestedSegments = new ConcurrentSkipListSet<>();
when(rpcManager.invokeCommand(any(Address.class), any(StateTransferGetTransactionsCommand.class), any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(invocation -> {
   Address recipient = invocation.getArgument(0);
   StateTransferGetTransactionsCommand cmd = invocation.getArgument(1);
   Set<Integer> segments = cmd.getSegments();
   requestedSegments.put(recipient, segments);
   flatRequestedSegments.addAll(segments);
   return CompletableFuture.completedFuture(SuccessfulResponse.create(new ArrayList<TransactionInfo>()));
});
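// The stub above records which segments this node requests from each provider;
// the assertions after each rebalance check these sets.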
Answer<?> successfulResponse = invocation -> CompletableFuture.completedFuture(SuccessfulResponse.SUCCESSFUL_EMPTY_RESPONSE);
when(rpcManager.invokeCommand(any(Address.class), any(StateTransferStartCommand.class), any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(successfulResponse);
when(rpcManager.invokeCommand(any(Address.class), any(StateTransferCancelCommand.class), any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(successfulResponse);
when(rpcManager.getSyncRpcOptions()).thenReturn(new RpcOptions(DeliverOrder.NONE, 10000, TimeUnit.MILLISECONDS));
when(rpcManager.blocking(any(CompletionStage.class))).thenAnswer(invocation -> ((CompletionStage) invocation.getArgument(0)).toCompletableFuture().join());
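// blocking() simply joins the already-completed stage, keeping the mocked RPCs synchronous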
doNothing().when(xSiteStateTransferManager).onTopologyUpdated(any(CacheTopology.class), anyBoolean());
// create state provider
final StateConsumerImpl stateConsumer = new StateConsumerImpl();
TestingUtil.inject(stateConsumer, cache, TestingUtil.named(NON_BLOCKING_EXECUTOR, pooledExecutorService),
      localTopologyManager, interceptorChain, icf, configuration, rpcManager, commandsFactory,
      persistenceManager, dataContainer, transactionTable, stateTransferLock, cacheNotifier,
      new CommitManager(), new CommandAckCollector(), new TriangleOrderManager(0),
      new HashFunctionPartitioner(), conflictManager, distributionManager, localPublisherManager,
      invocationHandler, xSiteStateTransferManager);
stateConsumer.start();
final List<InternalCacheEntry> cacheEntries = new ArrayList<>();
Object key1 = new TestKey("key1", 0, keyPartitioner);
Object key2 = new TestKey("key2", 0, keyPartitioner);
cacheEntries.add(new ImmortalCacheEntry(key1, "value1"));
cacheEntries.add(new ImmortalCacheEntry(key2, "value2"));
when(dataContainer.iterator()).thenAnswer(invocation -> cacheEntries.iterator());
when(transactionTable.getLocalTransactions()).thenReturn(Collections.emptyList());
when(transactionTable.getRemoteTransactions()).thenReturn(Collections.emptyList());
assertFalse(stateConsumer.hasActiveTransfers());
// node 4 leaves
stateConsumer.onTopologyUpdate(new CacheTopology(1, 1, ch2, null, CacheTopology.Phase.NO_REBALANCE, ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
assertFalse(stateConsumer.hasActiveTransfers());
// start a rebalance
stateConsumer.onTopologyUpdate(new CacheTopology(2, 2, ch2, ch3, ch23, CacheTopology.Phase.READ_OLD_WRITE_ALL, ch23.getMembers(), persistentUUIDManager.mapAddresses(ch23.getMembers())), true);
assertTrue(stateConsumer.hasActiveTransfers());
// check that all segments have been requested
Set<Integer> oldSegments = ch2.getSegmentsForOwner(addresses[0]);
final Set<Integer> newSegments = ch3.getSegmentsForOwner(addresses[0]);
newSegments.removeAll(oldSegments);
log.debugf("Rebalancing. Added segments=%s, old segments=%s", newSegments, oldSegments);
assertEquals(flatRequestedSegments, newSegments);
// simulate a cluster state recovery and return to ch2
Future<Object> future = fork(() -> {
   stateConsumer.onTopologyUpdate(new CacheTopology(3, 2, ch2, null, CacheTopology.Phase.NO_REBALANCE, ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
   return null;
});
stateConsumer.onTopologyUpdate(new CacheTopology(3, 2, ch2, null, CacheTopology.Phase.NO_REBALANCE, ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
future.get();
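// Both updates carry topology id 3, so the concurrent call is applied only once; reverting to ch2
// with no pending CH cancels the in-flight transfers via the stubbed StateTransferCancelCommand.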
assertFalse(stateConsumer.hasActiveTransfers());
// restart the rebalance
requestedSegments.clear();
stateConsumer.onTopologyUpdate(new CacheTopology(4, 4, ch2, ch3, ch23, CacheTopology.Phase.READ_OLD_WRITE_ALL, ch23.getMembers(), persistentUUIDManager.mapAddresses(ch23.getMembers())), true);
assertTrue(stateConsumer.hasActiveTransfers());
assertEquals(flatRequestedSegments, newSegments);
// apply state
ArrayList<StateChunk> stateChunks = new ArrayList<>();
for (Integer segment : newSegments) {
   stateChunks.add(new StateChunk(segment, Collections.emptyList(), true));
}
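// An empty chunk with isLastChunk=true per segment marks each inbound transfer as completed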
stateConsumer.applyState(addresses[1], 2, false, stateChunks);
stateConsumer.stop();
assertFalse(stateConsumer.hasActiveTransfers());
}
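For reference, a minimal sketch of building and sending the cancel command directly, using the same (ByteString cacheName, int topologyId, IntSet segments) constructor that the stubs above pass to it. The recipient, topology id and segment values are illustrative, and IntSets / SingleResponseCollector are assumed to be the usual org.infinispan.commons.util and org.infinispan.remoting.transport.impl helpers:
// Illustrative only: cancel the inbound transfer of segment 0 at topology id 4.
IntSet cancelledSegments = IntSets.mutableEmptySet();
cancelledSegments.set(0);
StateTransferCancelCommand cancelCommand =
      new StateTransferCancelCommand(ByteString.fromString("cache1"), 4, cancelledSegments);
// Send it to a provider node, reusing the mocked RpcManager wiring from the test above.
rpcManager.invokeCommand(addresses[1], cancelCommand, SingleResponseCollector.validOnly(),
      rpcManager.getSyncRpcOptions());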
Use of org.infinispan.commands.statetransfer.StateTransferCancelCommand in project infinispan by infinispan.
The class CoordinatorStopTest, method testCoordinatorLeaves.
// Reproducer for ISPN-9128
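// Scenario: the coordinator (node 0) stops while a rebalance is pending; without the fix,
// node 2 would base its current CH on a stale transitory topology instead of cancelling the
// now-obsolete inbound transfer with a StateTransferCancelCommand.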
public void testCoordinatorLeaves() throws InterruptedException, ExecutionException, TimeoutException, BrokenBarrierException {
String cacheName = cache(1).getName();
MagicKey key = new MagicKey(cache(0));
cache(1).put(key, "value");
int stableTopologyId = cache(1).getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId();
BlockingLocalTopologyManager ltm2 = BlockingLocalTopologyManager.replaceTopologyManager(manager(2), cacheName);
ControlledTransport transport0 = ControlledTransport.replace(cache(0));
ControlledTransport transport1 = ControlledTransport.replace(cache(1));
// Block sending REBALANCE_START until the CH_UPDATE is delivered to make the test deterministic
transport0.blockBefore(RebalanceStartCommand.class, command -> command.getCacheName().equals(cacheName) && command.getTopologyId() == stableTopologyId + 2);
// Also block rebalance initiated by the new coord until we test with topology + 3
transport1.blockBefore(RebalanceStartCommand.class, command -> command.getCacheName().equals(cacheName) && command.getTopologyId() == stableTopologyId + 4);
ControlledRpcManager rpcManager2 = ControlledRpcManager.replaceRpcManager(cache(2));
// Ignore push transfer of segment 2
// Ignore the remote get which does not happen without the fix
rpcManager2.excludeCommands(StateResponseCommand.class, ClusteredGetCommand.class);
// segment 0 will be moved to cache(2); with the coordinator gone, cache(1) is now owner index 0 and cache(2) is index 1
chf.setOwnerIndexes(new int[][] { { 1 }, { 0 }, { 1 } });
log.infof("Stopping coordinator %s, last stable topology is %d", manager(0), stableTopologyId);
Future<Void> stopFuture = fork(() -> manager(0).stop());
// topology + 1 is the one that just omits the leaving node
BlockedTopology t1 = ltm2.expectTopologyUpdate(CacheTopology.Phase.NO_REBALANCE, stableTopologyId + 1);
if (t1.getCacheTopology().getTopologyId() == stableTopologyId + 1)
   assertEquals(CacheTopology.Phase.NO_REBALANCE, t1.getPhase());
assertEquals(2, t1.getCacheTopology().getActualMembers().size());
assertNull(t1.getCacheTopology().getPendingCH());
assertOwners(t1, true, 0);
assertOwners(t1, true, 1, address(1));
assertOwners(t1, true, 2, address(2));
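// Segment 0 is left without owners: its only owner was the stopping coordinator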
t1.unblock();
transport0.stopBlocking();
stopFuture.get(10, TimeUnit.SECONDS);
// It is not guaranteed that all members have received the new view when stop() finishes:
// when the coordinator is leaving, the members ack the view before installing it.
// We delay view 3 until topology + 2 is installed on cache(1), so at this point manager(1)
// is not the coordinator yet and the view still has 3 members.
// topology + 2 has TRANSITORY phase and all segments have an owner in pendingCH
BlockedTopology t2 = ltm2.expectTopologyUpdate(CacheTopology.Phase.TRANSITORY, stableTopologyId + 2);
assertEquals(CacheTopology.Phase.TRANSITORY, t2.getPhase());
assertEquals(2, t2.getCacheTopology().getActualMembers().size());
assertNotNull(t2.getCacheTopology().getPendingCH());
assertOwners(t2, false, 0, address(2));
assertOwners(t2, false, 1, address(1));
assertOwners(t2, false, 2, address(2));
t2.unblock();
// Let the rebalance begin
rpcManager2.expectCommand(ScatteredStateConfirmRevokedCommand.class).send().receiveAll();
// Allow both nodes to receive the view. If we did not block transport1 as well, topology + 2
// could be ignored on cache(1) and the CONFIRM_REVOKED_SEGMENTS would get blocked until
// topology + 3 arrives - and that does not happen before the test times out.
viewLatch.countDown();
ControlledRpcManager.BlockedRequest<ScatteredStateGetKeysCommand> keyTransferRequest = rpcManager2.expectCommand(ScatteredStateGetKeysCommand.class);
// topology + 3 should have null pendingCH
// Before the fix the topology would recover transitory topologies from above and base current CH on them
BlockedTopology t3 = ltm2.expectTopologyUpdate(CacheTopology.Phase.NO_REBALANCE, stableTopologyId + 3);
assertEquals(2, t3.getCacheTopology().getActualMembers().size());
assertNull(t3.getCacheTopology().getPendingCH());
TopologyChangeListener topologyChangeListener = TopologyChangeListener.install(cache(2));
ltm2.stopBlocking();
t3.unblock();
// The cancel command is sent only with the fix applied
if (t3.getCacheTopology().getCurrentCH().locatePrimaryOwnerForSegment(0) == null) {
   ControlledRpcManager.BlockedRequest<StateTransferCancelCommand> cancelStateTransfer = rpcManager2.expectCommand(StateTransferCancelCommand.class);
   cancelStateTransfer.send();
}
// Wait until topology + 3 is installed
topologyChangeListener.await(10, TimeUnit.SECONDS);
// unblock outdated keys transfer
keyTransferRequest.send().receiveAll();
CyclicBarrier oteBarrier = new CyclicBarrier(2);
BlockingInterceptor oteInterceptor = new BlockingInterceptor(oteBarrier, GetKeyValueCommand.class, true, true);
cache(2).getAdvancedCache().getAsyncInterceptorChain().addInterceptorAfter(oteInterceptor, StateTransferInterceptor.class);
// The get is supposed to retry as the primary owner is null in topology + 3
Future<Object> future = fork(() -> cache(2).get(key));
// This barrier waits until the command returns, one way or the other. Without the fix it would
// simply return null; with the fix it throws an OutdatedTopologyException and waits for the next
// topology - that's why we have to unblock it.
oteBarrier.await(10, TimeUnit.SECONDS);
oteInterceptor.suspend(true);
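// suspend(true) lets the retried command pass through the interceptor without blocking again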
rpcManager2.stopBlocking();
transport1.stopBlocking();
oteBarrier.await(10, TimeUnit.SECONDS);
assertEquals("value", future.get());
((DelayedViewJGroupsTransport) transport1.getDelegate()).assertUnblocked();
((DelayedViewJGroupsTransport) manager(2).getTransport()).assertUnblocked();
}