Use of org.infinispan.distribution.TestAddress in project infinispan by infinispan.
The class DefaultConsistentHashFactoryTest, method testConsistentHashModifications.
private void testConsistentHashModifications(ConsistentHashFactory<DefaultConsistentHash> chf, List<Address> nodes, int ns, int no, Map<Address, Float> capacityFactors) {
   log.tracef("Creating consistent hash with ns=%d, no=%d, members=(%d)%s", ns, no, nodes.size(), membersString(nodes, capacityFactors));
   DefaultConsistentHash baseCH = chf.create(no, ns, nodes, capacityFactors);
   assertEquals(baseCH.getCapacityFactors(), capacityFactors);
   checkDistribution(baseCH, capacityFactors);
   // check that the base CH is already balanced
   List<Address> baseMembers = baseCH.getMembers();
   assertSame(baseCH, chf.updateMembers(baseCH, baseMembers, capacityFactors));
   assertSame(baseCH, chf.rebalance(baseCH));
   // starting point, so that we don't confuse nodes
   int nodeIndex = baseMembers.size();
   for (int[] nodeChange : NODE_CHANGES) {
      int nodesToAdd = nodeChange[0];
      int nodesToRemove = nodeChange[1];
      if (nodesToRemove > baseMembers.size())
         break;
      if (nodesToRemove == baseMembers.size() && nodesToAdd == 0)
         break;
      List<Address> newMembers = new ArrayList<>(baseMembers);
      HashMap<Address, Float> newCapacityFactors = capacityFactors != null ? new HashMap<>(capacityFactors) : null;
      for (int k = 0; k < nodesToRemove; k++) {
         int indexToRemove = Math.abs(MurmurHash3.getInstance().hash(k) % newMembers.size());
         if (newCapacityFactors != null) {
            newCapacityFactors.remove(newMembers.get(indexToRemove));
         }
         newMembers.remove(indexToRemove);
      }
      for (int k = 0; k < nodesToAdd; k++) {
         TestAddress address = new TestAddress(nodeIndex++, "TA");
         newMembers.add(address);
         if (newCapacityFactors != null) {
            newCapacityFactors.put(address, capacityFactors.get(baseMembers.get(k % baseMembers.size())));
         }
      }
      log.tracef("Rebalance iteration %d, members=(%d)%s", iterationCount, newMembers.size(), membersString(newMembers, newCapacityFactors));
      baseCH = rebalanceIteration(chf, baseCH, nodesToAdd, nodesToRemove, newMembers, newCapacityFactors);
      baseMembers = baseCH.getMembers();
      capacityFactors = newCapacityFactors;
      iterationCount++;
   }
}
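For orientation, here is a minimal, self-contained sketch of the factory API that the test above exercises, using TestAddress nodes and explicit capacity factors. The node count, segment count, and capacity values are illustrative assumptions, and the import paths follow the usual Infinispan source layout; adjust them to the version in use.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.infinispan.distribution.TestAddress;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.remoting.transport.Address;

public class ConsistentHashFactorySketch {
   public static void main(String[] args) {
      // Three test nodes; the second one is given double capacity (values are arbitrary).
      List<Address> members = new ArrayList<>();
      Map<Address, Float> capacityFactors = new HashMap<>();
      for (int i = 0; i < 3; i++) {
         TestAddress node = new TestAddress(i, "TA");
         members.add(node);
         capacityFactors.put(node, i == 1 ? 2.0f : 1.0f);
      }

      DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
      // 2 owners per segment, 40 segments, as in the tests on this page.
      DefaultConsistentHash ch = chf.create(2, 40, members, capacityFactors);

      // A freshly created hash should already be balanced, so rebalance() yields an equivalent CH.
      DefaultConsistentHash rebalanced = chf.rebalance(ch);
      for (Address member : ch.getMembers()) {
         System.out.printf("%s owns %d segments%n", member, rebalanced.getSegmentsForOwner(member).size());
      }
   }
}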
Use of org.infinispan.distribution.TestAddress in project infinispan by infinispan.
The class StateConsumerTest, method test1.
public void test1() throws Exception {
   // create cache configuration
   ConfigurationBuilder cb = new ConfigurationBuilder();
   cb.clustering().invocationBatching().enable()
     .clustering().cacheMode(CacheMode.DIST_SYNC)
     .clustering().stateTransfer().timeout(30000)
     .locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis())
     .locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
   Configuration configuration = cb.build();
   PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
   // create list of 4 members
   Address[] addresses = new Address[4];
   for (int i = 0; i < 4; i++) {
      addresses[i] = new TestAddress(i);
      persistentUUIDManager.addPersistentAddressMapping(addresses[i], PersistentUUID.randomUUID());
   }
   List<Address> members1 = Arrays.asList(addresses[0], addresses[1], addresses[2], addresses[3]);
   List<Address> members2 = Arrays.asList(addresses[0], addresses[1], addresses[2]);
   // create CHes
   DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
   KeyPartitioner keyPartitioner = new HashFunctionPartitioner(40);
   DefaultConsistentHash ch1 = chf.create(2, 40, members1, null);
   final DefaultConsistentHash ch2 = chf.updateMembers(ch1, members2, null);
   DefaultConsistentHash ch3 = chf.rebalance(ch2);
   DefaultConsistentHash ch23 = chf.union(ch2, ch3);
   log.debug(ch1);
   log.debug(ch2);
   // create dependencies
   Cache cache = mock(Cache.class);
   when(cache.getName()).thenReturn("testCache");
   when(cache.getStatus()).thenReturn(ComponentStatus.RUNNING);
   pooledExecutorService = new ThreadPoolExecutor(0, 20, 0L, TimeUnit.MILLISECONDS,
         new SynchronousQueue<>(), getTestThreadFactory("Worker"),
         new ThreadPoolExecutor.CallerRunsPolicy());
   LocalTopologyManager localTopologyManager = mock(LocalTopologyManager.class);
   CacheNotifier cacheNotifier = mock(CacheNotifier.class);
   RpcManager rpcManager = mock(RpcManager.class);
   Transport transport = mock(Transport.class);
   CommandsFactory commandsFactory = mock(CommandsFactory.class);
   PersistenceManager persistenceManager = mock(PersistenceManager.class);
   InternalDataContainer dataContainer = mock(InternalDataContainer.class);
   TransactionTable transactionTable = mock(TransactionTable.class);
   StateTransferLock stateTransferLock = mock(StateTransferLock.class);
   AsyncInterceptorChain interceptorChain = mock(AsyncInterceptorChain.class);
   InvocationContextFactory icf = mock(InvocationContextFactory.class);
   InternalConflictManager conflictManager = mock(InternalConflictManager.class);
   DistributionManager distributionManager = mock(DistributionManager.class);
   LocalPublisherManager localPublisherManager = mock(LocalPublisherManager.class);
   PerCacheInboundInvocationHandler invocationHandler = mock(PerCacheInboundInvocationHandler.class);
   XSiteStateTransferManager xSiteStateTransferManager = mock(XSiteStateTransferManager.class);
   when(persistenceManager.removeSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
   when(persistenceManager.addSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
   when(persistenceManager.publishKeys(any(), any())).thenReturn(Flowable.empty());
   when(commandsFactory.buildStateTransferStartCommand(anyInt(), any(IntSet.class)))
         .thenAnswer(invocation -> new StateTransferStartCommand(ByteString.fromString("cache1"),
               (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
   when(commandsFactory.buildStateTransferGetTransactionsCommand(anyInt(), any(IntSet.class)))
         .thenAnswer(invocation -> new StateTransferGetTransactionsCommand(ByteString.fromString("cache1"),
               (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
   when(commandsFactory.buildStateTransferCancelCommand(anyInt(), any(IntSet.class)))
         .thenAnswer(invocation -> new StateTransferCancelCommand(ByteString.fromString("cache1"),
               (Integer) invocation.getArguments()[0], (IntSet) invocation.getArguments()[1]));
   when(transport.getViewId()).thenReturn(1);
   when(rpcManager.getAddress()).thenReturn(addresses[0]);
   when(rpcManager.getTransport()).thenReturn(transport);
   final Map<Address, Set<Integer>> requestedSegments = new ConcurrentHashMap<>();
   final Set<Integer> flatRequestedSegments = new ConcurrentSkipListSet<>();
   when(rpcManager.invokeCommand(any(Address.class), any(StateTransferGetTransactionsCommand.class),
         any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(invocation -> {
      Address recipient = invocation.getArgument(0);
      StateTransferGetTransactionsCommand cmd = invocation.getArgument(1);
      Set<Integer> segments = cmd.getSegments();
      requestedSegments.put(recipient, segments);
      flatRequestedSegments.addAll(segments);
      return CompletableFuture.completedFuture(SuccessfulResponse.create(new ArrayList<TransactionInfo>()));
   });
   Answer<?> successfulResponse = invocation -> CompletableFuture.completedFuture(SuccessfulResponse.SUCCESSFUL_EMPTY_RESPONSE);
   when(rpcManager.invokeCommand(any(Address.class), any(StateTransferStartCommand.class),
         any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(successfulResponse);
   when(rpcManager.invokeCommand(any(Address.class), any(StateTransferCancelCommand.class),
         any(ResponseCollector.class), any(RpcOptions.class))).thenAnswer(successfulResponse);
   when(rpcManager.getSyncRpcOptions()).thenReturn(new RpcOptions(DeliverOrder.NONE, 10000, TimeUnit.MILLISECONDS));
   when(rpcManager.blocking(any(CompletionStage.class)))
         .thenAnswer(invocation -> ((CompletionStage) invocation.getArgument(0)).toCompletableFuture().join());
   doNothing().when(xSiteStateTransferManager).onTopologyUpdated(any(CacheTopology.class), anyBoolean());
   // create the state consumer
   final StateConsumerImpl stateConsumer = new StateConsumerImpl();
   TestingUtil.inject(stateConsumer, cache, TestingUtil.named(NON_BLOCKING_EXECUTOR, pooledExecutorService),
         localTopologyManager, interceptorChain, icf, configuration, rpcManager, commandsFactory,
         persistenceManager, dataContainer, transactionTable, stateTransferLock, cacheNotifier,
         new CommitManager(), new CommandAckCollector(), new TriangleOrderManager(0),
         new HashFunctionPartitioner(), conflictManager, distributionManager, localPublisherManager,
         invocationHandler, xSiteStateTransferManager);
   stateConsumer.start();
   final List<InternalCacheEntry> cacheEntries = new ArrayList<>();
   Object key1 = new TestKey("key1", 0, keyPartitioner);
   Object key2 = new TestKey("key2", 0, keyPartitioner);
   cacheEntries.add(new ImmortalCacheEntry(key1, "value1"));
   cacheEntries.add(new ImmortalCacheEntry(key2, "value2"));
   when(dataContainer.iterator()).thenAnswer(invocation -> cacheEntries.iterator());
   when(transactionTable.getLocalTransactions()).thenReturn(Collections.emptyList());
   when(transactionTable.getRemoteTransactions()).thenReturn(Collections.emptyList());
   assertFalse(stateConsumer.hasActiveTransfers());
   // node 4 leaves
   stateConsumer.onTopologyUpdate(new CacheTopology(1, 1, ch2, null, CacheTopology.Phase.NO_REBALANCE,
         ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
   assertFalse(stateConsumer.hasActiveTransfers());
   // start a rebalance
   stateConsumer.onTopologyUpdate(new CacheTopology(2, 2, ch2, ch3, ch23, CacheTopology.Phase.READ_OLD_WRITE_ALL,
         ch23.getMembers(), persistentUUIDManager.mapAddresses(ch23.getMembers())), true);
   assertTrue(stateConsumer.hasActiveTransfers());
   // check that all the newly owned segments have been requested
   Set<Integer> oldSegments = ch2.getSegmentsForOwner(addresses[0]);
   final Set<Integer> newSegments = ch3.getSegmentsForOwner(addresses[0]);
   newSegments.removeAll(oldSegments);
   log.debugf("Rebalancing. Added segments=%s, old segments=%s", newSegments, oldSegments);
   assertEquals(flatRequestedSegments, newSegments);
   // simulate a cluster state recovery and return to ch2
   Future<Object> future = fork(() -> {
      stateConsumer.onTopologyUpdate(new CacheTopology(3, 2, ch2, null, CacheTopology.Phase.NO_REBALANCE,
            ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
      return null;
   });
   stateConsumer.onTopologyUpdate(new CacheTopology(3, 2, ch2, null, CacheTopology.Phase.NO_REBALANCE,
         ch2.getMembers(), persistentUUIDManager.mapAddresses(ch2.getMembers())), false);
   future.get();
   assertFalse(stateConsumer.hasActiveTransfers());
   // restart the rebalance
   requestedSegments.clear();
   stateConsumer.onTopologyUpdate(new CacheTopology(4, 4, ch2, ch3, ch23, CacheTopology.Phase.READ_OLD_WRITE_ALL,
         ch23.getMembers(), persistentUUIDManager.mapAddresses(ch23.getMembers())), true);
   assertTrue(stateConsumer.hasActiveTransfers());
   assertEquals(flatRequestedSegments, newSegments);
   // apply state
   ArrayList<StateChunk> stateChunks = new ArrayList<>();
   for (Integer segment : newSegments) {
      stateChunks.add(new StateChunk(segment, Collections.emptyList(), true));
   }
   stateConsumer.applyState(addresses[1], 2, false, stateChunks);
   stateConsumer.stop();
   assertFalse(stateConsumer.hasActiveTransfers());
}
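The central assertion above compares the segments requested from peers with the segments the local node gains between the old and the rebalanced consistent hash. A standalone illustration of that computation follows; the member and segment counts are arbitrary assumptions, and import paths follow the usual Infinispan layout.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.infinispan.distribution.TestAddress;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.remoting.transport.Address;

public class AddedSegmentsSketch {
   public static void main(String[] args) {
      DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
      List<Address> members = new ArrayList<>();
      for (int i = 0; i < 4; i++) {
         members.add(new TestAddress(i));
      }
      // Balanced hash over 4 nodes, then the last node leaves and the hash is rebalanced.
      DefaultConsistentHash ch1 = chf.create(2, 40, members, null);
      DefaultConsistentHash ch2 = chf.updateMembers(ch1, members.subList(0, 3), null);
      DefaultConsistentHash ch3 = chf.rebalance(ch2);

      // The segments the first node gains in the rebalance are the ones a state consumer would request.
      Address self = members.get(0);
      Set<Integer> added = new HashSet<>(ch3.getSegmentsForOwner(self));
      added.removeAll(ch2.getSegmentsForOwner(self));
      System.out.println("Segments to request: " + added);
   }
}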
Use of org.infinispan.distribution.TestAddress in project infinispan by infinispan.
The class StateReceiverTest, method createConsistentHash.
private ConsistentHash createConsistentHash(int numberOfNodes) {
   PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
   List<Address> addresses = new ArrayList<>(numberOfNodes);
   for (int i = 0; i < numberOfNodes; i++) {
      Address address = new TestAddress(i);
      addresses.add(address);
      persistentUUIDManager.addPersistentAddressMapping(address, PersistentUUID.randomUUID());
   }
   DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
   return chf.create(2, 40, addresses, null);
}
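The helper registers a PersistentUUID for each TestAddress but returns only the consistent hash. As a short sketch of how such a mapping is typically consumed (the three-node loop and the printout are illustrative assumptions; the calls mirror the StateConsumerTest example earlier on this page):

import java.util.ArrayList;
import java.util.List;

import org.infinispan.distribution.TestAddress;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.PersistentUUID;
import org.infinispan.topology.PersistentUUIDManager;
import org.infinispan.topology.PersistentUUIDManagerImpl;

public class PersistentUuidMappingSketch {
   public static void main(String[] args) {
      PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
      List<Address> members = new ArrayList<>();
      for (int i = 0; i < 3; i++) {
         Address address = new TestAddress(i);
         members.add(address);
         // Each transient TestAddress gets a stable PersistentUUID, as in the helper above.
         persistentUUIDManager.addPersistentAddressMapping(address, PersistentUUID.randomUUID());
      }
      // mapAddresses resolves the member list to the registered PersistentUUIDs,
      // the form CacheTopology expects alongside the actual member list.
      List<PersistentUUID> uuids = persistentUUIDManager.mapAddresses(members);
      System.out.println(uuids);
   }
}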
Use of org.infinispan.distribution.TestAddress in project infinispan by infinispan.
The class StateReceiverTest, method testOldAndInvalidStateIgnored.
public void testOldAndInvalidStateIgnored() {
   initTransferTaskMock(new CompletableFuture<>());
   int segmentId = 0;
   stateReceiver.getAllReplicasForSegment(segmentId, localizedCacheTopology, 10000);
   List<Address> sourceAddresses = new ArrayList<>(stateReceiver.getTransferTaskMap(segmentId).keySet());
   Map<Object, Map<Address, CacheEntry<Object, Object>>> receiverKeyMap = stateReceiver.getKeyReplicaMap(segmentId);
   assertEquals(0, receiverKeyMap.size());
   // state from a known transfer source with the current topology id is accepted
   stateReceiver.receiveState(sourceAddresses.get(0), 2, createStateChunks("Key1", "Value1"));
   assertEquals(1, receiverKeyMap.size());
   // state from an address that is not a transfer source is ignored
   stateReceiver.receiveState(new TestAddress(5), 2, createStateChunks("Key2", "Value2"));
   assertEquals(1, receiverKeyMap.size());
   // state with an older topology id is ignored
   stateReceiver.receiveState(sourceAddresses.get(1), 1, new ArrayList<>());
   assertEquals(1, receiverKeyMap.size());
}
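The createStateChunks helper is not shown on this page. A plausible reconstruction, assuming it wraps a single ImmortalCacheEntry into one last-chunk StateChunk for the segment under test, is the following; the segment id and the single-entry layout are assumptions.

import java.util.Collection;
import java.util.Collections;
import java.util.List;

import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.statetransfer.StateChunk;

// Hypothetical reconstruction of the test's createStateChunks helper; the real
// implementation may differ.
private Collection<StateChunk> createStateChunks(Object key, Object value) {
   List<InternalCacheEntry> entries = Collections.singletonList(new ImmortalCacheEntry(key, value));
   // segment 0 matches the segment requested in the test above; isLastChunk = true
   return Collections.singletonList(new StateChunk(0, entries, true));
}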