Use of com.hazelcast.instance.impl.HazelcastInstanceImpl in project hazelcast by hazelcast.
Example from the class CacheBackupAccessor, method size().
@Override
public int size() {
    InternalPartitionService partitionService = getNode(cluster[0]).getPartitionService();
    IPartition[] partitions = partitionService.getPartitions();
    int count = 0;
    for (IPartition partition : partitions) {
        Address replicaAddress = partition.getReplicaAddress(replicaIndex);
        if (replicaAddress == null) {
            continue;
        }
        HazelcastInstance hz = getInstanceWithAddress(replicaAddress);
        HazelcastInstanceImpl hazelcastInstanceImpl = getHazelcastInstanceImpl(hz);
        CachingProvider provider = createServerCachingProvider(hazelcastInstanceImpl);
        HazelcastCacheManager cacheManager = (HazelcastServerCacheManager) provider.getCacheManager();
        NodeEngineImpl nodeEngine = getNodeEngineImpl(hz);
        CacheService cacheService = nodeEngine.getService(CacheService.SERVICE_NAME);
        String cacheNameWithPrefix = cacheManager.getCacheNameWithPrefix(cacheName);
        int partitionId = partition.getPartitionId();
        count += runOnPartitionThread(hz, new SizeCallable(cacheService, cacheNameWithPrefix, partitionId), partitionId);
    }
    return count;
}
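The SizeCallable above executes on the partition thread and reads the backup count for a single partition directly from the cache's per-partition record store. Its implementation is not shown on this page; a minimal sketch of such a callable, assuming ICacheService.getRecordStore(String, int) and ICacheRecordStore.size() are available, could look like this:

// Sketch only: a per-partition size callable; the real SizeCallable in the
// Hazelcast test sources may differ in details.
private static class SizeCallable implements Callable<Integer> {
    private final CacheService cacheService;
    private final String cacheNameWithPrefix;
    private final int partitionId;

    SizeCallable(CacheService cacheService, String cacheNameWithPrefix, int partitionId) {
        this.cacheService = cacheService;
        this.cacheNameWithPrefix = cacheNameWithPrefix;
        this.partitionId = partitionId;
    }

    @Override
    public Integer call() {
        // The record store may not exist if no entry was ever stored for this
        // cache on this partition.
        ICacheRecordStore recordStore = cacheService.getRecordStore(cacheNameWithPrefix, partitionId);
        return recordStore == null ? 0 : recordStore.size();
    }
}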
Use of com.hazelcast.instance.impl.HazelcastInstanceImpl in project hazelcast by hazelcast.
Example from the class ClusterServiceImpl, method shutdownNodesSerially().
private void shutdownNodesSerially(final long timeoutNanos) {
    Operation op = new ShutdownNodeOp();
    long startTimeNanos = Timer.nanos();
    Collection<Member> members = getMembers(NON_LOCAL_MEMBER_SELECTOR);
    logger.info("Sending shut down operations to other members one by one...");
    while (Timer.nanosElapsed(startTimeNanos) < timeoutNanos && !members.isEmpty()) {
        Member member = members.iterator().next();
        nodeEngine.getOperationService().send(op, member.getAddress());
        members = getMembers(NON_LOCAL_MEMBER_SELECTOR);
        try {
            Thread.sleep(CLUSTER_SHUTDOWN_SLEEP_DURATION_IN_MILLIS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.warning("Shutdown sleep interrupted. ", e);
            break;
        }
    }
    logger.info("Number of other members remaining: " + getSize(NON_LOCAL_MEMBER_SELECTOR) + ". Shutting down itself.");
    HazelcastInstanceImpl hazelcastInstance = node.hazelcastInstance;
    hazelcastInstance.getLifecycleService().shutdown();
}
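Application code does not call this method directly; it is reached through the public cluster shutdown API. A minimal usage sketch, assuming a running member:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import java.util.concurrent.TimeUnit;

public class ClusterShutdownExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        // Gracefully shuts down the whole cluster: the initiating member sends
        // shutdown operations to the other members one by one (the path shown
        // above) and then shuts down itself, bounded by the given timeout.
        hz.getCluster().shutdown(30, TimeUnit.SECONDS);
    }
}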
Use of com.hazelcast.instance.impl.HazelcastInstanceImpl in project hazelcast by hazelcast.
Example from the class CacheBackupAccessor, method getCacheNameWithPrefix().
private static String getCacheNameWithPrefix(HazelcastInstance hz, String cacheName) {
    HazelcastInstanceImpl hazelcastInstanceImpl = getHazelcastInstanceImpl(hz);
    CachingProvider provider = createServerCachingProvider(hazelcastInstanceImpl);
    HazelcastCacheManager cacheManager = (HazelcastServerCacheManager) provider.getCacheManager();
    return cacheManager.getCacheNameWithPrefix(cacheName);
}
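With the default cache manager URI and classloader, the prefix typically resolves to HazelcastCacheManager.CACHE_MANAGER_PREFIX ("/hz/"). A hedged usage sketch, assuming a running member hz and a cache named "myCache":

// Assumption: default cache manager URI and classloader, so the prefixed name
// is CACHE_MANAGER_PREFIX + cacheName, e.g. "/hz/myCache".
String fullName = getCacheNameWithPrefix(hz, "myCache");
assertEquals(HazelcastCacheManager.CACHE_MANAGER_PREFIX + "myCache", fullName);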
Use of com.hazelcast.instance.impl.HazelcastInstanceImpl in project hazelcast by hazelcast.
Example from the class HazelcastStarter, method getHazelcastInstanceImpl().
public static HazelcastInstanceImpl getHazelcastInstanceImpl(HazelcastInstance hz) {
    try {
        HazelcastAPIDelegatingClassloader classloader = getHazelcastAPIDelegatingClassloader(hz);
        Object instance = getHazelcastInstanceImpl(hz, classloader);
        Object node = getFieldValueReflectively(instance, "node");
        HazelcastInstanceImpl proxy = mock(HazelcastInstanceImpl.class, new HazelcastInstanceImplAnswer(instance));
        setFieldValueReflectively(proxy, "node", mock(Node.class, new NodeAnswer(node)));
        return proxy;
    } catch (HazelcastInstanceNotActiveException e) {
        throw new IllegalArgumentException("The given HazelcastInstance is not an active HazelcastInstanceImpl: " + hz.getClass());
    } catch (Exception e) {
        throw rethrowGuardianException(e);
    }
}
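The returned proxy is mainly useful in compatibility tests that start members of a different Hazelcast version inside an isolated, delegating classloader. A usage sketch, assuming HazelcastStarter.newHazelcastInstance(String version) is available and using an arbitrary example version string:

// Sketch only: start a member of another version and unwrap it to a proxied
// HazelcastInstanceImpl so internals such as the Node can be inspected.
HazelcastInstance oldMember = HazelcastStarter.newHazelcastInstance("5.1.7");
HazelcastInstanceImpl impl = HazelcastStarter.getHazelcastInstanceImpl(oldMember);
Node node = impl.node; // proxied Node backed by the other classloader's instance
oldMember.shutdown();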
Use of com.hazelcast.instance.impl.HazelcastInstanceImpl in project hazelcast by hazelcast.
Example from the class SingleValueBitmapIndexTest, method testClearedIndexes().
@Test
public void testClearedIndexes() {
    for (int i = BATCH_COUNT - 1; i >= 0; --i) {
        for (long j = 0; j < BATCH_SIZE; ++j) {
            long id = i * BATCH_SIZE + j;
            put(id, (int) id);
        }
        verifyQueries();
    }
    for (HazelcastInstance instance : factory.getAllHazelcastInstances()) {
        HazelcastInstanceImpl instanceImpl = (HazelcastInstanceImpl) instance;
        MapService mapService = instanceImpl.node.getNodeEngine().getService(MapService.SERVICE_NAME);
        Indexes indexes = mapService.getMapServiceContext().getMapContainer(persons.getName()).getIndexes();
        indexes.clearAll();
        for (Partition partition : instanceImpl.getPartitionService().getPartitions()) {
            if (partition.getOwner().localMember()) {
                Indexes.beginPartitionUpdate(indexes.getIndexes());
                Indexes.markPartitionAsIndexed(partition.getPartitionId(), indexes.getIndexes());
            }
        }
    }
    for (ExpectedQuery expectedQuery : expectedQueries) {
        expectedQuery.clear();
    }
    // Repopulate the index and run queries. Technically, we are doing index
    // updates here instead of inserts since the map is still populated, but
    // the index interprets them as inserts.
    persons.getLocalMapStats().getIndexStats();
    for (int i = BATCH_COUNT - 1; i >= 0; --i) {
        for (long j = 0; j < BATCH_SIZE; ++j) {
            long id = i * BATCH_SIZE + j;
            put(id, (int) id);
        }
        verifyQueries();
    }
    LocalIndexStats statsA = personsA.getLocalMapStats().getIndexStats().values().iterator().next();
    LocalIndexStats statsB = personsB.getLocalMapStats().getIndexStats().values().iterator().next();
    assertEquals(BATCH_COUNT * BATCH_SIZE, statsA.getInsertCount() + statsB.getInsertCount());
    assertEquals(BATCH_COUNT * BATCH_SIZE, statsA.getUpdateCount() + statsB.getUpdateCount());
}
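The test relies on the persons map being created with a single-attribute bitmap index in its setup, which is not shown on this page. A minimal configuration sketch, assuming the indexed attribute is called "age" (a hypothetical name):

// Hypothetical setup: declare a bitmap index on the "age" attribute of the
// "persons" map; the actual test setup may configure the index differently.
Config config = new Config();
MapConfig mapConfig = new MapConfig("persons");
mapConfig.addIndexConfig(new IndexConfig(IndexType.BITMAP, "age"));
config.addMapConfig(mapConfig);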