Use of org.infinispan.commands.statetransfer.StateTransferGetTransactionsCommand in project infinispan by infinispan.
In class StateResponseOrderingTest, method testStateResponseWhileRestartingBrokenTransfers:
public void testStateResponseWhileRestartingBrokenTransfers() throws Throwable {
   // The initial topology is different from the other method's
   consistentHashFactory.setOwnerIndexes(new int[][] { { 1, 2, 3 }, { 2, 1, 3 } });
   consistentHashFactory.triggerRebalance(cache(0));
   // waitForStableTopology doesn't work here, since the cache already looks "balanced",
   // so we wait for the primary owner of segment 1 to change
   eventuallyEquals(address(2),
         () -> advancedCache(0).getDistributionManager().getReadConsistentHash().locatePrimaryOwnerForSegment(1));

   // See https://issues.jboss.org/browse/ISPN-3120?focusedCommentId=12777231
   // Start with segment 0 owned by [cache1, cache2, cache3], and segment 1 owned by [cache2, cache1, cache3]
   // Trigger a rebalance with cache0 becoming an owner for both segments
   // Wait for either cache1 or cache2 to send a StateResponseCommand
   // Block the state response on cache0
   // Kill the node that didn't receive the request
   // Block new state requests from cache0 so that the killed node's segment doesn't have a transfer task
   // Unblock the first state response
   // Check that the StateResponseCommand hasn't marked state transfer as completed
   // Unblock the new state request
   // Wait for the state transfer to end and check that state hasn't been lost
   StateSequencer sequencer = new StateSequencer();
   sequencer.logicalThread("st", "st:block_first_state_response", "st:kill_node",
         "st:block_second_state_request", "st:resume_first_state_response", "st:after_first_state_response",
         "st:check_incomplete", "st:resume_second_state_request");

   final AtomicReference<Address> firstResponseSender = new AtomicReference<>();
   CommandMatcher firstStateResponseMatcher = new CommandMatcher() {
      CommandMatcher realMatcher = matchCommand(StateResponseCommand.class).matchCount(0).build();

      @Override
      public boolean accept(ReplicableCommand command) {
         if (!realMatcher.accept(command))
            return false;
         firstResponseSender.set(((StateResponseCommand) command).getOrigin());
         return true;
      }
   };
   advanceOnInboundRpc(sequencer, cache(0), firstStateResponseMatcher)
         .before("st:block_first_state_response", "st:resume_first_state_response")
         .after("st:after_first_state_response");

   CommandMatcher secondStateRequestMatcher = new CommandMatcher() {
      private final AtomicInteger counter = new AtomicInteger();

      @Override
      public boolean accept(ReplicableCommand command) {
         if (command instanceof StateTransferGetTransactionsCommand) {
            // Command 2 (the third overall) is the first one sent after the node is killed
            if (counter.getAndIncrement() == 2)
               return true;
            log.debugf("Not blocking command %s", command);
         }
         return false;
      }
   };
   advanceOnOutboundRpc(sequencer, cache(0), secondStateRequestMatcher)
         .before("st:block_second_state_request", "st:resume_second_state_request");

   DistributionManager dm0 = advancedCache(0).getDistributionManager();
   StateTransferManager stm0 = TestingUtil.extractComponentRegistry(cache(0)).getStateTransferManager();

   MagicKey k1 = new MagicKey("k1", cache(1));
   assertEquals(Arrays.asList(address(1), address(2), address(3)),
         dm0.getCacheTopology().getDistribution(k1).readOwners());
   cache(0).put(k1, "v1");
   MagicKey k2 = new MagicKey("k2", cache(2));
   assertEquals(Arrays.asList(address(2), address(1), address(3)),
         dm0.getCacheTopology().getDistribution(k2).readOwners());
   cache(0).put(k2, "v2");

   // Start the rebalance
   consistentHashFactory.setOwnerIndexes(new int[][] { { 0, 1, 2 }, { 0, 2, 1 } });
   consistentHashFactory.triggerRebalance(cache(0));

   // Wait for cache0 to receive the state response
   sequencer.enter("st:kill_node");
   assertNotNull(dm0.getCacheTopology().getPendingCH());
   // No need to update the owner indexes, the CH factory only knows about the cache members
   int nodeToKeep = managerIndex(firstResponseSender.get());
   int nodeToKill = nodeToKeep == 1 ? 2 : 1;
   log.debugf("Blocked state response from %s, killing %s", firstResponseSender.get(), manager(nodeToKill));
   cache(nodeToKill).stop();
   eventuallyEquals(3, () -> dm0.getCacheTopology().getMembers().size());
   sequencer.exit("st:kill_node");

   sequencer.enter("st:check_incomplete");
   assertTrue(stm0.isStateTransferInProgress());
   sequencer.exit("st:check_incomplete");

   // Only the 3 live caches are in the collection, wait for the rehash to end
   waitForNoRebalance(cache(0), cache(nodeToKeep), cache(3));
   assertTrue(dm0.getCacheTopology().isReadOwner(k1));
   assertTrue(dm0.getCacheTopology().isReadOwner(k2));
   assertEquals("v1", cache(0).get(k1));
   assertEquals("v2", cache(0).get(k2));
}
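Note: the counting matcher above could likely be written with the same matchCommand builder used for the first matcher, assuming matchCount(n) selects the (n+1)th command of the given type, as the first matcher's matchCount(0) suggests. The test itself uses the hand-written counter; this is only a sketch under that assumption:

   // Hypothetical equivalent of secondStateRequestMatcher: block only the third
   // StateTransferGetTransactionsCommand sent by cache(0), i.e. the one issued after the node is killed
   CommandMatcher secondStateRequestMatcher =
         matchCommand(StateTransferGetTransactionsCommand.class).matchCount(2).build();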
Use of org.infinispan.commands.statetransfer.StateTransferGetTransactionsCommand in project infinispan by infinispan.
In class StateConsumerTest, method test1:
public void test1() throws Exception {
   // create cache configuration
   ConfigurationBuilder cb = new ConfigurationBuilder();
   cb.clustering().invocationBatching().enable()
     .clustering().cacheMode(CacheMode.DIST_SYNC)
     .clustering().stateTransfer().timeout(30000)
     .locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis())
     .locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
   Configuration configuration = cb.build();
   PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();

   // create a list of 4 members
   Address[] addresses = new Address[4];
   for (int i = 0; i < 4; i++) {
      addresses[i] = new TestAddress(i);
      persistentUUIDManager.addPersistentAddressMapping(addresses[i], PersistentUUID.randomUUID());
   }
   List<Address> members1 = Arrays.asList(addresses[0], addresses[1], addresses[2], addresses[3]);
   List<Address> members2 = Arrays.asList(addresses[0], addresses[1], addresses[2]);

   // create consistent hashes
   DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
   KeyPartitioner keyPartitioner = new HashFunctionPartitioner(40);
   DefaultConsistentHash ch1 = chf.create(2, 40, members1, null);
   final DefaultConsistentHash ch2 = chf.updateMembers(ch1, members2, null);
   DefaultConsistentHash ch3 = chf.rebalance(ch2);
   DefaultConsistentHash ch23 = chf.union(ch2, ch3);
   log.debug(ch1);
   log.debug(ch2);

   // create dependencies
   Cache cache = mock(Cache.class);
   when(cache.getName()).thenReturn("testCache");
   when(cache.getStatus()).thenReturn(ComponentStatus.RUNNING);
   pooledExecutorService = new ThreadPoolExecutor(0, 20, 0L, TimeUnit.MILLISECONDS, new SynchronousQueue<>(),
         getTestThreadFactory("Worker"), new ThreadPoolExecutor.CallerRunsPolicy());
   LocalTopologyManager localTopologyManager = mock(LocalTopologyManager.class);
   CacheNotifier cacheNotifier = mock(CacheNotifier.class);
   RpcManager rpcManager = mock(RpcManager.class);
   Transport transport = mock(Transport.class);
   CommandsFactory commandsFactory = mock(CommandsFactory.class);
   PersistenceManager persistenceManager = mock(PersistenceManager.class);
   InternalDataContainer dataContainer = mock(InternalDataContainer.class);
   TransactionTable transactionTable = mock(TransactionTable.class);
   StateTransferLock stateTransferLock = mock(StateTransferLock.class);
   AsyncInterceptorChain interceptorChain = mock(AsyncInterceptorChain.class);
   InvocationContextFactory icf = mock(InvocationContextFactory.class);
   InternalConflictManager conflictManager = mock(InternalConflictManager.class);
   DistributionManager distributionManager = mock(DistributionManager.class);
   LocalPublisherManager localPublisherManager = mock(LocalPublisherManager.class);
   PerCacheInboundInvocationHandler invocationHandler = mock(PerCacheInboundInvocationHandler.class);
   XSiteStateTransferManager xSiteStateTransferManager = mock(XSiteStateTransferManager.class);

   when(persistenceManager.removeSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
   when(persistenceManager.addSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
   when(persistenceManager.publishKeys(any(), any())).thenReturn(Flowable.empty());
   when(commandsFactory.buildStateTransferStartCommand(anyInt(), any(IntSet.class)))
         .thenAnswer(invocation -> new StateTransferStartCommand(ByteString.fromString("cache1"),
               (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
   when(commandsFactory.buildStateTransferGetTransactionsCommand(anyInt(), any(IntSet.class)))
         .thenAnswer(invocation -> new StateTransferGetTransactionsCommand(ByteString.fromString("cache1"),
               (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
   when(commandsFactory.buildStateTransferCancelCommand(anyInt(), any(IntSet.class)))
         .thenAnswer(invocation -> new StateTransferCancelCommand(ByteString.fromString("cache1"),
               (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
   when(transport.getViewId()).thenReturn(1);
   when(rpcManager.getAddress()).thenReturn(addresses[0]);
   when(rpcManager.getTransport()).thenReturn(transport);

   final Map<Address, Set<Integer>> requestedSegments = new ConcurrentHashMap<>();
   final Set<Integer> flatRequestedSegments = new ConcurrentSkipListSet<>();
   when(rpcManager.invokeCommand(any(Address.class), any(StateTransferGetTransactionsCommand.class),
         any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(invocation -> {
      Address recipient = invocation.getArgument(0);
      StateTransferGetTransactionsCommand cmd = invocation.getArgument(1);
      Set<Integer> segments = cmd.getSegments();
      requestedSegments.put(recipient, segments);
      flatRequestedSegments.addAll(segments);
      return CompletableFuture.completedFuture(SuccessfulResponse.create(new ArrayList<TransactionInfo>()));
   });
   Answer<?> successfulResponse =
         invocation -> CompletableFuture.completedFuture(SuccessfulResponse.SUCCESSFUL_EMPTY_RESPONSE);
   when(rpcManager.invokeCommand(any(Address.class), any(StateTransferStartCommand.class),
         any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(successfulResponse);
   when(rpcManager.invokeCommand(any(Address.class), any(StateTransferCancelCommand.class),
         any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(successfulResponse);
   when(rpcManager.getSyncRpcOptions()).thenReturn(new RpcOptions(DeliverOrder.NONE, 10000, TimeUnit.MILLISECONDS));
   when(rpcManager.blocking(any(CompletionStage.class)))
         .thenAnswer(invocation -> ((CompletionStage) invocation.getArgument(0)).toCompletableFuture().join());
   doNothing().when(xSiteStateTransferManager).onTopologyUpdated(any(CacheTopology.class), anyBoolean());

   // create the state consumer
   final StateConsumerImpl stateConsumer = new StateConsumerImpl();
   TestingUtil.inject(stateConsumer, cache, TestingUtil.named(NON_BLOCKING_EXECUTOR, pooledExecutorService),
         localTopologyManager, interceptorChain, icf, configuration, rpcManager, commandsFactory,
         persistenceManager, dataContainer, transactionTable, stateTransferLock, cacheNotifier,
         new CommitManager(), new CommandAckCollector(), new TriangleOrderManager(0),
         new HashFunctionPartitioner(), conflictManager, distributionManager, localPublisherManager,
         invocationHandler, xSiteStateTransferManager);
   stateConsumer.start();

   final List<InternalCacheEntry> cacheEntries = new ArrayList<>();
   Object key1 = new TestKey("key1", 0, keyPartitioner);
   Object key2 = new TestKey("key2", 0, keyPartitioner);
   cacheEntries.add(new ImmortalCacheEntry(key1, "value1"));
   cacheEntries.add(new ImmortalCacheEntry(key2, "value2"));
   when(dataContainer.iterator()).thenAnswer(invocation -> cacheEntries.iterator());
   when(transactionTable.getLocalTransactions()).thenReturn(Collections.emptyList());
   when(transactionTable.getRemoteTransactions()).thenReturn(Collections.emptyList());

   assertFalse(stateConsumer.hasActiveTransfers());

   // node 4 (addresses[3]) leaves
   stateConsumer.onTopologyUpdate(new CacheTopology(1, 1, ch2, null, CacheTopology.Phase.NO_REBALANCE,
         ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
   assertFalse(stateConsumer.hasActiveTransfers());

   // start a rebalance
   stateConsumer.onTopologyUpdate(new CacheTopology(2, 2, ch2, ch3, ch23, CacheTopology.Phase.READ_OLD_WRITE_ALL,
         ch23.getMembers(), persistentUUIDManager.mapAddresses(ch23.getMembers())), true);
   assertTrue(stateConsumer.hasActiveTransfers());

   // check that all segments have been requested
   Set<Integer> oldSegments = ch2.getSegmentsForOwner(addresses[0]);
   final Set<Integer> newSegments = ch3.getSegmentsForOwner(addresses[0]);
   newSegments.removeAll(oldSegments);
   log.debugf("Rebalancing. Added segments=%s, old segments=%s", newSegments, oldSegments);
   assertEquals(flatRequestedSegments, newSegments);

   // simulate a cluster state recovery and return to ch2
   Future<Object> future = fork(() -> {
      stateConsumer.onTopologyUpdate(new CacheTopology(3, 2, ch2, null, CacheTopology.Phase.NO_REBALANCE,
            ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
      return null;
   });
   stateConsumer.onTopologyUpdate(new CacheTopology(3, 2, ch2, null, CacheTopology.Phase.NO_REBALANCE,
         ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
   future.get();
   assertFalse(stateConsumer.hasActiveTransfers());

   // restart the rebalance
   requestedSegments.clear();
   stateConsumer.onTopologyUpdate(new CacheTopology(4, 4, ch2, ch3, ch23, CacheTopology.Phase.READ_OLD_WRITE_ALL,
         ch23.getMembers(), persistentUUIDManager.mapAddresses(ch23.getMembers())), true);
   assertTrue(stateConsumer.hasActiveTransfers());
   assertEquals(flatRequestedSegments, newSegments);

   // apply state
   ArrayList<StateChunk> stateChunks = new ArrayList<>();
   for (Integer segment : newSegments) {
      stateChunks.add(new StateChunk(segment, Collections.emptyList(), true));
   }
   stateConsumer.applyState(addresses[1], 2, false, stateChunks);

   stateConsumer.stop();
   assertFalse(stateConsumer.hasActiveTransfers());
}
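Note: the mocked CommandsFactory above builds StateTransferGetTransactionsCommand from a cache name, a topology id, and the requested segments, and the rpcManager stub reads those segments back with getSegments(). A minimal sketch of that shape follows; the IntSets helper and the concrete values are illustrative assumptions, not part of the test:

   // Illustrative only: construct the command the way the thenAnswer stub does above
   IntSet segments = IntSets.mutableEmptySet();   // assumed helper from org.infinispan.commons.util
   segments.add(0);                               // hypothetical segment ids
   segments.add(3);
   StateTransferGetTransactionsCommand cmd =
         new StateTransferGetTransactionsCommand(ByteString.fromString("cache1"), 2, segments);
   Set<Integer> requested = cmd.getSegments();    // the same segments the rpcManager stub records per recipient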