Use of org.infinispan.client.hotrod.CacheTopologyInfo in project infinispan by infinispan.
From the class BaseMultiServerRemoteIteratorTest, method testFilterBySegment:
@Test
public void testFilterBySegment() {
   RemoteCache<Integer, AccountHS> cache = clients.get(0).getCache();
   populateCache(CACHE_SIZE, this::newAccount, cache);
   CacheTopologyInfo cacheTopologyInfo = cache.getCacheTopologyInfo();
   // Request all segments from one node
   Set<Integer> filterBySegments = cacheTopologyInfo.getSegmentsPerServer().values().iterator().next();
   Set<Entry<Object, Object>> entries = new HashSet<>();
   try (CloseableIterator<Entry<Object, Object>> iterator = cache.retrieveEntries(null, filterBySegments, 10)) {
      while (iterator.hasNext()) {
         entries.add(iterator.next());
      }
   }
   Marshaller marshaller = clients.get(0).getMarshaller();
   KeyPartitioner keyPartitioner = TestingUtil.extractComponent(cache(0), KeyPartitioner.class);
   assertKeysInSegment(entries, filterBySegments, marshaller, keyPartitioner::getSegment);
}
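The test pulls the server-to-segment mapping from getSegmentsPerServer() and feeds one server's segment set to retrieveEntries(filterConverterFactory, segments, batchSize). As a minimal standalone sketch of the same CacheTopologyInfo API outside the test harness (the rcm and cacheName parameters are placeholders for this sketch, not names from the test):

import java.net.SocketAddress;
import java.util.Map;
import java.util.Set;

import org.infinispan.client.hotrod.CacheTopologyInfo;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;

public class TopologySketch {
   // Prints which Hot Rod server owns which segments for the given cache.
   static void printSegmentOwnership(RemoteCacheManager rcm, String cacheName) {
      RemoteCache<Object, Object> cache = rcm.getCache(cacheName);
      CacheTopologyInfo info = cache.getCacheTopologyInfo();
      System.out.println("Topology id: " + info.getTopologyId());
      System.out.println("Segments:    " + info.getNumSegments());
      for (Map.Entry<SocketAddress, Set<Integer>> entry : info.getSegmentsPerServer().entrySet()) {
         System.out.println(entry.getKey() + " -> " + entry.getValue());
      }
   }
}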
Use of org.infinispan.client.hotrod.CacheTopologyInfo in project infinispan by infinispan.
From the class HotRodTargetMigrator, method synchronizeData:
@Override
@SuppressWarnings("rawtypes")
public long synchronizeData(Cache<Object, Object> cache, int readBatch, int threads) throws CacheException {
   ComponentRegistry cr = SecurityActions.getComponentRegistry(cache.getAdvancedCache());
   PersistenceManager loaderManager = cr.getComponent(PersistenceManager.class);
   Set<RemoteStore> stores = loaderManager.getStores(RemoteStore.class);
   String cacheName = cache.getName();
   // Migration requires exactly one RemoteStore pointing at the source cluster
   if (stores.size() != 1) {
      throw log.couldNotMigrateData(cacheName);
   }
   RemoteStore store = stores.iterator().next();
   final RemoteCache remoteSourceCache = store.getRemoteCache();
   ClusterExecutor clusterExecutor = SecurityActions.getClusterExecutor(cache.getCacheManager());
   clusterExecutor = clusterExecutor.timeout(Long.MAX_VALUE, TimeUnit.NANOSECONDS).singleNodeSubmission();
   CacheTopologyInfo sourceCacheTopologyInfo = remoteSourceCache.getCacheTopologyInfo();
   // A single-server source needs no segment partitioning
   if (sourceCacheTopologyInfo.getSegmentsPerServer().size() == 1) {
      return migrateFromSingleServer(cache.getCacheManager(), cacheName, readBatch, threads);
   }
   int sourceSegments = sourceCacheTopologyInfo.getNumSegments();
   List<Address> targetServers = cache.getAdvancedCache().getDistributionManager().getCacheTopology().getMembers();
   // Split the source segments across the target servers so each node migrates a disjoint slice
   List<List<Integer>> partitions = split(range(sourceSegments), targetServers.size());
   Iterator<Address> iterator = targetServers.iterator();
   AtomicInteger count = new AtomicInteger();
   TriConsumer<Address, Integer, Throwable> consumer = (a, value, t) -> {
      if (t != null) {
         throw new CacheException(t);
      }
      count.addAndGet(value);
   };
   CompletableFuture[] futures = new CompletableFuture[partitions.size()];
   int offset = 0;
   for (List<Integer> partition : partitions) {
      Set<Integer> segmentSet = new HashSet<>(partition);
      futures[offset++] = clusterExecutor.filterTargets(Collections.singleton(iterator.next()))
            .submitConsumer(new MigrationTask(cacheName, segmentSet, readBatch, threads), consumer);
   }
   CompletableFuture.allOf(futures).join();
   return count.get();
}
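The method calls two helpers, range(sourceSegments) and split(list, parts), whose bodies are not shown in this snippet. A hedged sketch of plausible equivalents, illustrative only since the real implementations live elsewhere in the migrator: range yields the full list of segment ids, and split deals them into one bucket per target server so each node receives a disjoint slice.

import java.util.ArrayList;
import java.util.List;

class SegmentPartitioning {
   // Produces [0, 1, ..., end - 1]: one id per source segment.
   static List<Integer> range(int end) {
      List<Integer> result = new ArrayList<>(end);
      for (int i = 0; i < end; i++) {
         result.add(i);
      }
      return result;
   }

   // Deals the ids into 'parts' disjoint buckets, round-robin, one bucket per target server.
   static <T> List<List<T>> split(List<T> list, int parts) {
      List<List<T>> result = new ArrayList<>(parts);
      for (int i = 0; i < parts; i++) {
         result.add(new ArrayList<>());
      }
      for (int i = 0; i < list.size(); i++) {
         result.get(i % parts).add(list.get(i));
      }
      return result;
   }
}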