Use of org.infinispan.topology.PersistentUUIDManager in project infinispan by infinispan.
From class BaseCHPersistenceTest, method testCHPersistenceMissingMembers:
public void testCHPersistenceMissingMembers() {
   PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
   ConsistentHash ch = createConsistentHash();
   Map<Address, PersistentUUID> addressMap = generateRandomPersistentUUIDs(ch.getMembers(), persistentUUIDManager);
   // Persist the consistent hash keyed by persistent UUIDs
   ScopedPersistentState state = new ScopedPersistentStateImpl("scope");
   ch.remapAddresses(persistentUUIDManager.addressToPersistentUUID()).toScopedState(state);
   // Drop the mapping for one member so the restored hash cannot be remapped back to addresses
   persistentUUIDManager.removePersistentAddressMapping(addressMap.keySet().iterator().next());
   ConsistentHashFactory<?> hashFactory = createConsistentHashFactory();
   ConsistentHash restoredCH = hashFactory.fromPersistentState(state).remapAddresses(persistentUUIDManager.persistentUUIDToAddress());
   assertNull(restoredCH);
}
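The generateRandomPersistentUUIDs helper called above belongs to the test class but is not shown here. A minimal sketch of what it might look like, assuming it simply registers a random PersistentUUID for each member with the manager and returns the resulting map (the actual helper in BaseCHPersistenceTest may differ):

private Map<Address, PersistentUUID> generateRandomPersistentUUIDs(List<Address> members, PersistentUUIDManager persistentUUIDManager) {
   Map<Address, PersistentUUID> addressMap = new HashMap<>();
   for (Address member : members) {
      // Hypothetical: one random persistent UUID per runtime address
      PersistentUUID uuid = PersistentUUID.randomUUID();
      persistentUUIDManager.addPersistentAddressMapping(member, uuid);
      addressMap.put(member, uuid);
   }
   return addressMap;
}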
Use of org.infinispan.topology.PersistentUUIDManager in project infinispan by infinispan.
From class BaseCHPersistenceTest, method testCHPersistence:
public void testCHPersistence() {
   PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
   ConsistentHash ch = createConsistentHash();
   generateRandomPersistentUUIDs(ch.getMembers(), persistentUUIDManager);
   ScopedPersistentState state = new ScopedPersistentStateImpl("scope");
   ch.remapAddresses(persistentUUIDManager.addressToPersistentUUID()).toScopedState(state);
   ConsistentHashFactory<?> hashFactory = createConsistentHashFactory();
   ConsistentHash restoredCH = hashFactory.fromPersistentState(state).remapAddresses(persistentUUIDManager.persistentUUIDToAddress());
   assertEquals(ch, restoredCH);
}
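Both tests lean on the manager's remapping functions to translate between runtime addresses and persistent UUIDs. A short round-trip sketch, assuming (as the remapAddresses calls above suggest) that addressToPersistentUUID() and persistentUUIDToAddress() return plain Address-mapping functions and that a PersistentUUID is itself an Address; TestAddress is the test stub used elsewhere on this page:

PersistentUUIDManager manager = new PersistentUUIDManagerImpl();
Address member = new TestAddress(0);
PersistentUUID uuid = PersistentUUID.randomUUID();
manager.addPersistentAddressMapping(member, uuid);
// Address -> PersistentUUID and back again
Address persistent = manager.addressToPersistentUUID().apply(member);
Address runtime = manager.persistentUUIDToAddress().apply(persistent);
assertEquals(member, runtime);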
Use of org.infinispan.topology.PersistentUUIDManager in project infinispan by infinispan.
From class PreferConsistencyStrategyTest, method beforeMethod:
@BeforeMethod
public void beforeMethod() {
   EventLogManager eventLogManager = new EventLogManagerImpl();
   PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
   ClusterTopologyManagerImpl topologyManager = new ClusterTopologyManagerImpl();
   EmbeddedCacheManager cacheManager = mock(EmbeddedCacheManager.class);
   preferConsistencyStrategy = new PreferConsistencyStrategy(eventLogManager, persistentUUIDManager, null);
   status = new ClusterCacheStatus(cacheManager, null, "does-not-matter", preferConsistencyStrategy, RebalanceType.FOUR_PHASE,
         topologyManager, null, persistentUUIDManager, eventLogManager, Optional.empty(), false);
}
Use of org.infinispan.topology.PersistentUUIDManager in project infinispan by infinispan.
From class AbstractGlobalStateRestartTest, method shutdownAndRestart:
protected void shutdownAndRestart(int extraneousNodePosition, boolean reverse) throws Throwable {
   Map<JGroupsAddress, PersistentUUID> addressMappings = createInitialCluster();
   ConsistentHash oldConsistentHash = advancedCache(0, CACHE_NAME).getDistributionManager().getWriteConsistentHash();

   // Shut down the cache cluster-wide
   cache(0, CACHE_NAME).shutdown();
   TestingUtil.killCacheManagers(this.cacheManagers);

   // Verify that the cache state file exists on every node
   for (int i = 0; i < getClusterSize(); i++) {
      String persistentLocation = manager(i).getCacheManagerConfiguration().globalState().persistentLocation();
      File[] listFiles = new File(persistentLocation).listFiles((dir, name) -> name.equals(CACHE_NAME + ".state"));
      assertEquals(Arrays.toString(listFiles), 1, listFiles.length);
   }
   this.cacheManagers.clear();

   // Recreate the cluster
   createStatefulCacheManagers(false, extraneousNodePosition, reverse);
   if (reverse) {
      Map<JGroupsAddress, PersistentUUID> reversed = new LinkedHashMap<>();
      reverseLinkedMap(addressMappings.entrySet().iterator(), reversed);
      addressMappings = reversed;
   }

   // Verify the restarted cluster, depending on where the extraneous (stateless) node, if any, was placed
   switch (extraneousNodePosition) {
      case -1: {
         // Healthy cluster
         waitForClusterToForm(CACHE_NAME);
         checkClusterRestartedCorrectly(addressMappings);
         checkData();
         ConsistentHash newConsistentHash = advancedCache(0, CACHE_NAME).getDistributionManager().getWriteConsistentHash();
         PersistentUUIDManager persistentUUIDManager = TestingUtil.extractGlobalComponent(manager(0), PersistentUUIDManager.class);
         assertEquivalent(addressMappings, oldConsistentHash, newConsistentHash, persistentUUIDManager);
         break;
      }
      case 0: {
         // Coordinator without state; all other nodes should fail to join it
         for (int i = 1; i < cacheManagers.size(); i++) {
            try {
               cache(i, CACHE_NAME);
               fail("Cache with state should not have joined coordinator without state");
            } catch (CacheException e) {
               // Expected
               log.debugf("Got expected exception: %s", e);
            }
         }
         break;
      }
      default: {
         // Another node without state
         try {
            cache(extraneousNodePosition, CACHE_NAME);
            fail("Cache without state should not have joined coordinator with state");
         } catch (CacheException e) {
            // Expected
         }
      }
   }
}
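The reverseLinkedMap helper used before the switch is not shown above. A hypothetical sketch, assuming it walks the iterator recursively and inserts entries on the way back so the resulting LinkedHashMap holds the mappings in reverse order (the real helper in AbstractGlobalStateRestartTest may be implemented differently):

private void reverseLinkedMap(Iterator<Map.Entry<JGroupsAddress, PersistentUUID>> iterator, Map<JGroupsAddress, PersistentUUID> reversed) {
   if (iterator.hasNext()) {
      Map.Entry<JGroupsAddress, PersistentUUID> entry = iterator.next();
      // Recurse first, then put, so the last entry is inserted first
      reverseLinkedMap(iterator, reversed);
      reversed.put(entry.getKey(), entry.getValue());
   }
}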
Use of org.infinispan.topology.PersistentUUIDManager in project infinispan by infinispan.
From class StateConsumerTest, method test1:
public void test1() throws Exception {
   // create cache configuration
   ConfigurationBuilder cb = new ConfigurationBuilder();
   cb.clustering().invocationBatching().enable()
     .clustering().cacheMode(CacheMode.DIST_SYNC)
     .clustering().stateTransfer().timeout(30000)
     .locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis())
     .locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
   Configuration configuration = cb.build();
   PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();

   // create a list of 4 members
   Address[] addresses = new Address[4];
   for (int i = 0; i < 4; i++) {
      addresses[i] = new TestAddress(i);
      persistentUUIDManager.addPersistentAddressMapping(addresses[i], PersistentUUID.randomUUID());
   }
   List<Address> members1 = Arrays.asList(addresses[0], addresses[1], addresses[2], addresses[3]);
   List<Address> members2 = Arrays.asList(addresses[0], addresses[1], addresses[2]);

   // create consistent hashes
   DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
   KeyPartitioner keyPartitioner = new HashFunctionPartitioner(40);
   DefaultConsistentHash ch1 = chf.create(2, 40, members1, null);
   final DefaultConsistentHash ch2 = chf.updateMembers(ch1, members2, null);
   DefaultConsistentHash ch3 = chf.rebalance(ch2);
   DefaultConsistentHash ch23 = chf.union(ch2, ch3);
   log.debug(ch1);
   log.debug(ch2);

   // create dependencies
   Cache cache = mock(Cache.class);
   when(cache.getName()).thenReturn("testCache");
   when(cache.getStatus()).thenReturn(ComponentStatus.RUNNING);
   pooledExecutorService = new ThreadPoolExecutor(0, 20, 0L, TimeUnit.MILLISECONDS, new SynchronousQueue<>(),
         getTestThreadFactory("Worker"), new ThreadPoolExecutor.CallerRunsPolicy());
   LocalTopologyManager localTopologyManager = mock(LocalTopologyManager.class);
   CacheNotifier cacheNotifier = mock(CacheNotifier.class);
   RpcManager rpcManager = mock(RpcManager.class);
   Transport transport = mock(Transport.class);
   CommandsFactory commandsFactory = mock(CommandsFactory.class);
   PersistenceManager persistenceManager = mock(PersistenceManager.class);
   InternalDataContainer dataContainer = mock(InternalDataContainer.class);
   TransactionTable transactionTable = mock(TransactionTable.class);
   StateTransferLock stateTransferLock = mock(StateTransferLock.class);
   AsyncInterceptorChain interceptorChain = mock(AsyncInterceptorChain.class);
   InvocationContextFactory icf = mock(InvocationContextFactory.class);
   InternalConflictManager conflictManager = mock(InternalConflictManager.class);
   DistributionManager distributionManager = mock(DistributionManager.class);
   LocalPublisherManager localPublisherManager = mock(LocalPublisherManager.class);
   PerCacheInboundInvocationHandler invocationHandler = mock(PerCacheInboundInvocationHandler.class);
   XSiteStateTransferManager xSiteStateTransferManager = mock(XSiteStateTransferManager.class);
   when(persistenceManager.removeSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
   when(persistenceManager.addSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
   when(persistenceManager.publishKeys(any(), any())).thenReturn(Flowable.empty());
   when(commandsFactory.buildStateTransferStartCommand(anyInt(), any(IntSet.class)))
         .thenAnswer(invocation -> new StateTransferStartCommand(ByteString.fromString("cache1"),
               (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
   when(commandsFactory.buildStateTransferGetTransactionsCommand(anyInt(), any(IntSet.class)))
         .thenAnswer(invocation -> new StateTransferGetTransactionsCommand(ByteString.fromString("cache1"),
               (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
   when(commandsFactory.buildStateTransferCancelCommand(anyInt(), any(IntSet.class)))
         .thenAnswer(invocation -> new StateTransferCancelCommand(ByteString.fromString("cache1"),
               (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
   when(transport.getViewId()).thenReturn(1);
   when(rpcManager.getAddress()).thenReturn(addresses[0]);
   when(rpcManager.getTransport()).thenReturn(transport);
   final Map<Address, Set<Integer>> requestedSegments = new ConcurrentHashMap<>();
   final Set<Integer> flatRequestedSegments = new ConcurrentSkipListSet<>();
   when(rpcManager.invokeCommand(any(Address.class), any(StateTransferGetTransactionsCommand.class),
         any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(invocation -> {
      Address recipient = invocation.getArgument(0);
      StateTransferGetTransactionsCommand cmd = invocation.getArgument(1);
      Set<Integer> segments = cmd.getSegments();
      requestedSegments.put(recipient, segments);
      flatRequestedSegments.addAll(segments);
      return CompletableFuture.completedFuture(SuccessfulResponse.create(new ArrayList<TransactionInfo>()));
   });
   Answer<?> successfulResponse = invocation -> CompletableFuture.completedFuture(SuccessfulResponse.SUCCESSFUL_EMPTY_RESPONSE);
   when(rpcManager.invokeCommand(any(Address.class), any(StateTransferStartCommand.class),
         any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(successfulResponse);
   when(rpcManager.invokeCommand(any(Address.class), any(StateTransferCancelCommand.class),
         any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(successfulResponse);
   when(rpcManager.getSyncRpcOptions()).thenReturn(new RpcOptions(DeliverOrder.NONE, 10000, TimeUnit.MILLISECONDS));
   when(rpcManager.blocking(any(CompletionStage.class)))
         .thenAnswer(invocation -> ((CompletionStage) invocation.getArgument(0)).toCompletableFuture().join());
   doNothing().when(xSiteStateTransferManager).onTopologyUpdated(any(CacheTopology.class), anyBoolean());

   // create the state consumer
   final StateConsumerImpl stateConsumer = new StateConsumerImpl();
   TestingUtil.inject(stateConsumer, cache, TestingUtil.named(NON_BLOCKING_EXECUTOR, pooledExecutorService),
         localTopologyManager, interceptorChain, icf, configuration, rpcManager, commandsFactory, persistenceManager,
         dataContainer, transactionTable, stateTransferLock, cacheNotifier, new CommitManager(), new CommandAckCollector(),
         new TriangleOrderManager(0), new HashFunctionPartitioner(), conflictManager, distributionManager,
         localPublisherManager, invocationHandler, xSiteStateTransferManager);
   stateConsumer.start();
   final List<InternalCacheEntry> cacheEntries = new ArrayList<>();
   Object key1 = new TestKey("key1", 0, keyPartitioner);
   Object key2 = new TestKey("key2", 0, keyPartitioner);
   cacheEntries.add(new ImmortalCacheEntry(key1, "value1"));
   cacheEntries.add(new ImmortalCacheEntry(key2, "value2"));
   when(dataContainer.iterator()).thenAnswer(invocation -> cacheEntries.iterator());
   when(transactionTable.getLocalTransactions()).thenReturn(Collections.emptyList());
   when(transactionTable.getRemoteTransactions()).thenReturn(Collections.emptyList());
   assertFalse(stateConsumer.hasActiveTransfers());

   // the fourth node (addresses[3]) leaves
   stateConsumer.onTopologyUpdate(new CacheTopology(1, 1, ch2, null, CacheTopology.Phase.NO_REBALANCE,
         ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
   assertFalse(stateConsumer.hasActiveTransfers());

   // start a rebalance
   stateConsumer.onTopologyUpdate(new CacheTopology(2, 2, ch2, ch3, ch23, CacheTopology.Phase.READ_OLD_WRITE_ALL,
         ch23.getMembers(), persistentUUIDManager.mapAddresses(ch23.getMembers())), true);
   assertTrue(stateConsumer.hasActiveTransfers());

   // check that all segments have been requested
   Set<Integer> oldSegments = ch2.getSegmentsForOwner(addresses[0]);
   final Set<Integer> newSegments = ch3.getSegmentsForOwner(addresses[0]);
   newSegments.removeAll(oldSegments);
   log.debugf("Rebalancing. Added segments=%s, old segments=%s", newSegments, oldSegments);
   assertEquals(flatRequestedSegments, newSegments);

   // simulate a cluster state recovery and return to ch2
   Future<Object> future = fork(() -> {
      stateConsumer.onTopologyUpdate(new CacheTopology(3, 2, ch2, null, CacheTopology.Phase.NO_REBALANCE,
            ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
      return null;
   });
   stateConsumer.onTopologyUpdate(new CacheTopology(3, 2, ch2, null, CacheTopology.Phase.NO_REBALANCE,
         ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
   future.get();
   assertFalse(stateConsumer.hasActiveTransfers());

   // restart the rebalance
   requestedSegments.clear();
   stateConsumer.onTopologyUpdate(new CacheTopology(4, 4, ch2, ch3, ch23, CacheTopology.Phase.READ_OLD_WRITE_ALL,
         ch23.getMembers(), persistentUUIDManager.mapAddresses(ch23.getMembers())), true);
   assertTrue(stateConsumer.hasActiveTransfers());
   assertEquals(flatRequestedSegments, newSegments);

   // apply state
   ArrayList<StateChunk> stateChunks = new ArrayList<>();
   for (Integer segment : newSegments) {
      stateChunks.add(new StateChunk(segment, Collections.emptyList(), true));
   }
   stateConsumer.applyState(addresses[1], 2, false, stateChunks);
   stateConsumer.stop();
   assertFalse(stateConsumer.hasActiveTransfers());
}
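Throughout test1, every CacheTopology is built with persistentUUIDManager.mapAddresses(...) so the topology carries the persistent UUIDs of its members. A minimal standalone sketch of that registration-and-mapping flow, assuming mapAddresses returns the persistent UUIDs in member order (the names here are illustrative, not taken from the test):

PersistentUUIDManager uuidManager = new PersistentUUIDManagerImpl();
List<Address> members = Arrays.asList(new TestAddress(0), new TestAddress(1));
for (Address member : members) {
   // Register a persistent identity for each runtime address
   uuidManager.addPersistentAddressMapping(member, PersistentUUID.randomUUID());
}
// The persistent UUIDs travel with the cache topology so restarted nodes can be matched up again
List<PersistentUUID> persistentUUIDs = uuidManager.mapAddresses(members);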