Use of com.hazelcast.instance.Node in project hazelcast by hazelcast.
In the class ServiceManagerImpl, the method registerServices:
private void registerServices(Map<String, Properties> serviceProps, Map<String, Object> serviceConfigObjects) {
    registerCoreServices();
    registerExtensionServices();
    Node node = nodeEngine.getNode();
    ServicesConfig servicesConfig = node.getConfig().getServicesConfig();
    if (servicesConfig != null) {
        registerDefaultServices(servicesConfig);
        registerUserServices(servicesConfig, serviceProps, serviceConfigObjects);
    }
}
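A minimal sketch of where the user-service configuration consumed by registerUserServices could come from. This is not from the project sources; it assumes Hazelcast 3.x, where ServicesConfig and ServiceConfig are public configuration API, and the service name "my-custom-service" and class com.example.MyCustomService are hypothetical.

// Hedged sketch: declaring a user service on the Config that registerUserServices later reads.
Config config = new Config();
config.getServicesConfig().addServiceConfig(new ServiceConfig()
        .setEnabled(true)
        .setName("my-custom-service")
        .setClassName("com.example.MyCustomService"));
HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);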
Use of com.hazelcast.instance.Node in project hazelcast by hazelcast.
In the class CacheLoadAllTest, the method createAndFillEntries:
private Map<String, String> createAndFillEntries() {
    final int ENTRY_COUNT_PER_PARTITION = 3;
    Node node = getNode(hazelcastInstance);
    int partitionCount = node.getPartitionService().getPartitionCount();
    Map<String, String> entries = new HashMap<String, String>(partitionCount * ENTRY_COUNT_PER_PARTITION);
    for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
        for (int i = 0; i < ENTRY_COUNT_PER_PARTITION; i++) {
            String key = generateKeyForPartition(hazelcastInstance, partitionId);
            String value = getValueOfKey(key);
            entries.put(key, value);
        }
    }
    return entries;
}
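The helper above relies on the test-support method generateKeyForPartition to produce a key that maps to a specific partition. A hedged sketch of the same idea using only the public PartitionService (not the project's helper itself, which goes through internal APIs):

// Hedged sketch: finding a key that lands in a given partition via the public API.
private String keyForPartition(HazelcastInstance instance, int partitionId) {
    PartitionService partitionService = instance.getPartitionService();
    for (int i = 0; ; i++) {
        String candidate = "key-" + i;
        if (partitionService.getPartition(candidate).getPartitionId() == partitionId) {
            return candidate;
        }
    }
}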
Use of com.hazelcast.instance.Node in project hazelcast by hazelcast.
In the class CacheLoadAllTest, the method testLoadAll:
@Test
public void testLoadAll() throws InterruptedException {
    ICache<String, String> cache = createCache();
    String cacheName = cache.getName();
    Map<String, String> entries = createAndFillEntries();
    final CountDownLatch latch = new CountDownLatch(1);
    cache.loadAll(entries.keySet(), true, new CompletionListener() {

        @Override
        public void onCompletion() {
            latch.countDown();
        }

        @Override
        public void onException(Exception e) {
            latch.countDown();
        }
    });
    latch.await(60, TimeUnit.SECONDS);
    // Verify that load-all works
    for (Map.Entry<String, String> entry : entries.entrySet()) {
        String key = entry.getKey();
        String expectedValue = entries.get(key);
        String actualValue = cache.get(key);
        assertEquals(expectedValue, actualValue);
    }
    Node node = getNode(hazelcastInstance);
    InternalPartitionService partitionService = node.getPartitionService();
    SerializationService serializationService = node.getSerializationService();
    // Verify that backup of load-all works
    for (Map.Entry<String, String> entry : entries.entrySet()) {
        String key = entry.getKey();
        String expectedValue = entries.get(key);
        Data keyData = serializationService.toData(key);
        int keyPartitionId = partitionService.getPartitionId(keyData);
        for (int i = 0; i < INSTANCE_COUNT; i++) {
            Node n = getNode(hazelcastInstances[i]);
            ICacheService cacheService = n.getNodeEngine().getService(ICacheService.SERVICE_NAME);
            ICacheRecordStore recordStore = cacheService.getRecordStore("/hz/" + cacheName, keyPartitionId);
            assertNotNull(recordStore);
            String actualValue = serializationService.toObject(recordStore.get(keyData, null));
            assertEquals(expectedValue, actualValue);
        }
    }
}
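The test above ignores the boolean returned by latch.await, so a timed-out load would only surface later as assertion failures. A hedged variant (not from the project) that fails fast if loadAll does not finish in time or reports an exception:

// Hedged sketch: same loadAll call, but with explicit completion and error checks.
final AtomicReference<Exception> failure = new AtomicReference<Exception>();
final CountDownLatch latch = new CountDownLatch(1);
cache.loadAll(entries.keySet(), true, new CompletionListener() {

    @Override
    public void onCompletion() {
        latch.countDown();
    }

    @Override
    public void onException(Exception e) {
        failure.set(e);
        latch.countDown();
    }
});
assertTrue("loadAll did not complete within 60 seconds", latch.await(60, TimeUnit.SECONDS));
assertNull("loadAll reported an exception", failure.get());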
Use of com.hazelcast.instance.Node in project hazelcast by hazelcast.
In the class CachePartitionLostListenerTest, the method test_partitionLostListenerInvoked_whenNodeCrashed:
@Test
public void test_partitionLostListenerInvoked_whenNodeCrashed() {
    List<HazelcastInstance> instances = getCreatedInstancesShuffledAfterWarmedUp(2);
    HazelcastInstance survivingInstance = instances.get(0);
    HazelcastInstance terminatingInstance = instances.get(1);
    HazelcastServerCachingProvider cachingProvider = createCachingProvider(survivingInstance);
    CacheManager cacheManager = cachingProvider.getCacheManager();
    CacheConfig<Integer, String> config = new CacheConfig<Integer, String>();
    config.setBackupCount(0);
    Cache<Integer, String> cache = cacheManager.createCache(getIthCacheName(0), config);
    ICache iCache = cache.unwrap(ICache.class);
    final EventCollectingCachePartitionLostListener listener = new EventCollectingCachePartitionLostListener(0);
    iCache.addPartitionLostListener(listener);
    final Set<Integer> survivingPartitionIds = new HashSet<Integer>();
    Node survivingNode = getNode(survivingInstance);
    Address survivingAddress = survivingNode.getThisAddress();
    for (IPartition partition : survivingNode.getPartitionService().getPartitions()) {
        if (survivingAddress.equals(partition.getReplicaAddress(0))) {
            survivingPartitionIds.add(partition.getPartitionId());
        }
    }
    terminatingInstance.getLifecycleService().terminate();
    waitAllForSafeState(survivingInstance);
    assertTrueEventually(new AssertTask() {

        @Override
        public void run() throws Exception {
            final List<CachePartitionLostEvent> events = listener.getEvents();
            assertFalse(events.isEmpty());
            for (CachePartitionLostEvent event : events) {
                assertFalse(survivingPartitionIds.contains(event.getPartitionId()));
            }
        }
    });
    cacheManager.destroyCache(getIthCacheName(0));
    cacheManager.close();
    cachingProvider.close();
}
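The test determines which partitions the surviving member owns by comparing replica addresses on the internal IPartition API. A hedged sketch of an equivalent ownership check through the public PartitionService (not from the project sources):

// Hedged sketch: partitions owned by the surviving member, via the public API.
Member localMember = survivingInstance.getCluster().getLocalMember();
Set<Integer> ownedPartitionIds = new HashSet<Integer>();
for (Partition partition : survivingInstance.getPartitionService().getPartitions()) {
    // getOwner() may be null while ownership is being decided; such partitions are skipped.
    if (localMember.equals(partition.getOwner())) {
        ownedPartitionIds.add(partition.getPartitionId());
    }
}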
Use of com.hazelcast.instance.Node in project hazelcast by hazelcast.
In the class CachePutAllTest, the method createAndFillEntries:
private Map<String, String> createAndFillEntries() {
    final int ENTRY_COUNT_PER_PARTITION = 3;
    Node node = getNode(hazelcastInstance);
    int partitionCount = node.getPartitionService().getPartitionCount();
    Map<String, String> entries = new HashMap<String, String>(partitionCount * ENTRY_COUNT_PER_PARTITION);
    for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
        for (int i = 0; i < ENTRY_COUNT_PER_PARTITION; i++) {
            String key = generateKeyForPartition(hazelcastInstance, partitionId);
            String value = generateRandomString(16);
            entries.put(key, value);
        }
    }
    return entries;
}
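A hedged sketch of how a map built by this helper would typically be fed to the cache under test and verified; this is not the project's test body, and createCache is assumed to be the test class's own cache factory method.

// Hedged sketch: bulk insert via ICache.putAll and a per-entry read-back check.
ICache<String, String> cache = createCache();
Map<String, String> entries = createAndFillEntries();
cache.putAll(entries);
for (Map.Entry<String, String> entry : entries.entrySet()) {
    assertEquals(entry.getValue(), cache.get(entry.getKey()));
}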