Use of com.hazelcast.logging.ILogger in project hazelcast by hazelcast.

Class ReplicaSyncRequest, method preCheckReplicaSync:
/**
 * Checks whether we can continue with the replication. Sends a retry or an empty
 * response to the replica in some cases.
 */
private boolean preCheckReplicaSync(NodeEngineImpl nodeEngine, int partitionId, int replicaIndex) throws IOException {
    InternalPartitionServiceImpl partitionService = (InternalPartitionServiceImpl) nodeEngine.getPartitionService();
    PartitionStateManager partitionStateManager = partitionService.getPartitionStateManager();
    InternalPartitionImpl partition = partitionStateManager.getPartitionImpl(partitionId);
    Address owner = partition.getOwnerOrNull();
    long[] replicaVersions = partitionService.getPartitionReplicaVersions(partitionId);
    long currentVersion = replicaVersions[replicaIndex - 1];
    ILogger logger = getLogger();
    // Only the current partition owner may serve this replica sync request.
    if (!nodeEngine.getThisAddress().equals(owner)) {
        if (logger.isFinestEnabled()) {
            logger.finest("Wrong target! " + toString() + " cannot be processed! Target should be: " + owner);
        }
        sendRetryResponse();
        return false;
    }
    // Nothing has been written to this replica yet, so there is nothing to sync.
    if (currentVersion == 0) {
        if (logger.isFinestEnabled()) {
            logger.finest("Current replicaVersion=0, sending empty response for partitionId=" + getPartitionId()
                    + ", replicaIndex=" + getReplicaIndex() + ", replicaVersions=" + Arrays.toString(replicaVersions));
        }
        sendEmptyResponse();
        return false;
    }
    // Respect the limit on concurrent replica sync operations.
    if (!partitionService.getReplicaManager().tryToAcquireReplicaSyncPermit()) {
        if (logger.isFinestEnabled()) {
            logger.finest("Max parallel replication process limit exceeded! Could not run replica sync -> " + toString());
        }
        sendRetryResponse();
        return false;
    }
    return true;
}
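
Each finest-level message above is guarded with isFinestEnabled() so the string concatenation is skipped entirely when the level is off. A minimal, self-contained sketch of the same pattern, assuming a hypothetical expensive describeState() helper (not part of Hazelcast):

import com.hazelcast.logging.ILogger;
import com.hazelcast.logging.Logger;

public final class GuardedLogging {
    private static final ILogger LOGGER = Logger.getLogger(GuardedLogging.class);

    private GuardedLogging() {
    }

    static void logPartitionState(int partitionId) {
        // Guard first: describeState() and the concatenation only run
        // when FINEST is actually enabled for this logger.
        if (LOGGER.isFinestEnabled()) {
            LOGGER.finest("partitionId=" + partitionId + ", state=" + describeState(partitionId));
        }
    }

    // Hypothetical expensive diagnostic, stands in for the real version/owner lookups.
    private static String describeState(int partitionId) {
        return "replicaVersions=[...] for partition " + partitionId;
    }
}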
Class ReplicaSyncRequest, method run:
@Override
public void run() throws Exception {
    NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
    InternalPartitionServiceImpl partitionService = (InternalPartitionServiceImpl) nodeEngine.getPartitionService();
    int partitionId = getPartitionId();
    int replicaIndex = getReplicaIndex();
    // While migrations are paused, ask the caller to retry later.
    if (!partitionService.isReplicaSyncAllowed()) {
        ILogger logger = getLogger();
        if (logger.isFinestEnabled()) {
            logger.finest("Migration is paused! Cannot run replica sync -> " + toString());
        }
        sendRetryResponse();
        return;
    }
    if (!preCheckReplicaSync(nodeEngine, partitionId, replicaIndex)) {
        return;
    }
    try {
        List<Operation> tasks = createReplicationOperations();
        if (tasks.isEmpty()) {
            logNoReplicaDataFound(partitionId, replicaIndex);
            sendEmptyResponse();
        } else {
            sendResponse(tasks);
        }
    } finally {
        // The permit was acquired in preCheckReplicaSync; always release it.
        partitionService.getReplicaManager().releaseReplicaSyncPermit();
    }
}
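
The acquire-in-precheck, release-in-finally discipline above can be mimicked outside Hazelcast internals. A minimal sketch, assuming a plain java.util.concurrent.Semaphore stands in for the replica manager's permit accounting (class name and limit are hypothetical):

import java.util.concurrent.Semaphore;

// Hypothetical stand-in for ReplicaManager's permit accounting: at most
// MAX_PARALLEL_REPLICATIONS syncs run at once; callers that miss a permit retry later.
public class ReplicaSyncThrottle {
    private static final int MAX_PARALLEL_REPLICATIONS = 5; // assumed limit
    private final Semaphore permits = new Semaphore(MAX_PARALLEL_REPLICATIONS);

    /** Mirrors tryToAcquireReplicaSyncPermit(): non-blocking, false means "retry later". */
    public boolean tryAcquire() {
        return permits.tryAcquire();
    }

    /** Mirrors releaseReplicaSyncPermit(); call from a finally block. */
    public void release() {
        permits.release();
    }
}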
Class ReplicaSyncResponse, method logException:
private void logException(Operation op, Throwable e) {
    ILogger logger = getLogger();
    NodeEngine nodeEngine = getNodeEngine();
    // Demote to FINEST while the node is shutting down to avoid noisy warnings.
    Level level = nodeEngine.isRunning() ? Level.WARNING : Level.FINEST;
    if (logger.isLoggable(level)) {
        logger.log(level, "While executing " + op, e);
    }
}
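
The same runtime level selection works with any ILogger, since it accepts java.util.logging levels directly. A minimal sketch, assuming a hypothetical running flag in place of nodeEngine.isRunning():

import com.hazelcast.logging.ILogger;
import com.hazelcast.logging.Logger;
import java.util.logging.Level;

public final class LevelAwareLogging {
    private LevelAwareLogging() {
    }

    // Logs a failure at WARNING while running normally, at FINEST during shutdown,
    // mirroring the logException() pattern above. 'running' is a hypothetical flag.
    static void logFailure(boolean running, String action, Throwable t) {
        ILogger logger = Logger.getLogger(LevelAwareLogging.class);
        Level level = running ? Level.WARNING : Level.FINEST;
        if (logger.isLoggable(level)) {
            logger.log(level, "While executing " + action, t);
        }
    }
}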
Class MapNearCacheManager, method createRepairingInvalidationTask:
private RepairingTask createRepairingInvalidationTask() {
    ExecutionService executionService = nodeEngine.getExecutionService();
    ClusterService clusterService = nodeEngine.getClusterService();
    OperationService operationService = nodeEngine.getOperationService();
    HazelcastProperties properties = nodeEngine.getProperties();
    // The logger is keyed by the class whose activity it reports on, not by the caller.
    ILogger logger = nodeEngine.getLogger(RepairingTask.class);
    MetaDataFetcher metaDataFetcher = new MemberMapMetaDataFetcher(clusterService, operationService, logger);
    String localUuid = nodeEngine.getLocalMember().getUuid();
    return new RepairingTask(metaDataFetcher, executionService.getGlobalTaskScheduler(), partitionService,
            properties, localUuid, logger);
}
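
Note how the factory obtains the logger keyed by RepairingTask and hands it in, rather than letting the task create its own. A minimal sketch of the same injection style, with a hypothetical task class (not a Hazelcast API):

import com.hazelcast.logging.ILogger;
import com.hazelcast.logging.Logger;

// Hypothetical task that, like RepairingTask, receives its ILogger from the caller;
// the logger is keyed by the task's own class so log output is attributed correctly.
public class InvalidationSweepTask implements Runnable {
    private final ILogger logger;

    public InvalidationSweepTask(ILogger logger) {
        this.logger = logger;
    }

    @Override
    public void run() {
        logger.finest("Invalidation sweep started");
        // ... reconcile Near Cache metadata here ...
    }
}

// Wiring, analogous to createRepairingInvalidationTask():
// ILogger logger = Logger.getLogger(InvalidationSweepTask.class);
// new InvalidationSweepTask(logger).run();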
Class PartitionDistributionTest, method testPartitionDistribution:
private void testPartitionDistribution(int partitionCount, int dataNodeCount, int liteNodeCount,
                                       Config config, Config liteConfig) throws InterruptedException {
    config.setProperty(GroupProperty.PARTITION_COUNT.getName(), String.valueOf(partitionCount));
    int nodeCount = dataNodeCount + liteNodeCount;
    TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(nodeCount);
    final BlockingQueue<Integer> counts = new ArrayBlockingQueue<Integer>(nodeCount);
    final HazelcastInstance[] instances = new HazelcastInstance[nodeCount];
    for (int i = 0; i < dataNodeCount; i++) {
        instances[i] = factory.newHazelcastInstance(config);
    }
    liteConfig.setProperty(GroupProperty.PARTITION_COUNT.getName(), String.valueOf(partitionCount));
    liteConfig.setLiteMember(true);
    for (int i = dataNodeCount; i < nodeCount; i++) {
        instances[i] = factory.newHazelcastInstance(liteConfig);
    }
    ExecutorService ex = Executors.newCachedThreadPool();
    try {
        // Collect each data node's local partition count concurrently on the pool.
        for (int i = 0; i < dataNodeCount; i++) {
            final int instanceIndex = i;
            ex.execute(new Runnable() {
                public void run() {
                    HazelcastInstance instance = instances[instanceIndex];
                    counts.offer(getLocalPartitionsCount(instance));
                }
            });
        }
        ILogger logger = instances[0].getLoggingService().getLogger(getClass());
        String firstFailureMessage = null;
        int average = partitionCount / dataNodeCount;
        logger.info(format("Partition count: %d, nodes: %d, average: %d", partitionCount, dataNodeCount, average));
        int totalPartitions = 0;
        for (int i = 0; i < dataNodeCount; i++) {
            Integer localPartitionCount = counts.poll(1, TimeUnit.MINUTES);
            assertNotNull(localPartitionCount);
            String msg = format("Node: %d, local partition count: %d", i + 1, localPartitionCount);
            if (firstFailureMessage == null && localPartitionCount < average) {
                firstFailureMessage = msg;
            }
            logger.info(msg);
            totalPartitions += localPartitionCount;
        }
        assertEqualsStringFormat("Expected sum of local partitions to be %d, but was %d", partitionCount, totalPartitions);
        if (firstFailureMessage != null) {
            fail(format("%s, partition count: %d, nodes: %d, average: %d",
                    firstFailureMessage, partitionCount, dataNodeCount, average));
        }
        // Lite members must not own any partitions.
        for (int i = dataNodeCount; i < nodeCount; i++) {
            assertEquals(0, getLocalPartitionsCount(instances[i]));
        }
    } finally {
        ex.shutdownNow();
    }
}
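
The test obtains its ILogger through the member's LoggingService, so output is routed through whatever logging backend that instance is configured with. A minimal standalone sketch of the same lookup (class name is illustrative):

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.logging.ILogger;

public class LoggingServiceExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        // Per-instance logger, keyed by the calling class.
        ILogger logger = hz.getLoggingService().getLogger(LoggingServiceExample.class);
        logger.info("Member started; local partitions will be assigned shortly");
        hz.shutdown();
    }
}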