Use of com.hazelcast.spi.partition.IPartitionService in project hazelcast by hazelcast.
Class RingbufferService, method prepareReplicationOperation.
@Override
public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
    Map<String, RingbufferContainer> migrationData = new HashMap<String, RingbufferContainer>();
    IPartitionService partitionService = nodeEngine.getPartitionService();
    for (Map.Entry<String, RingbufferContainer> entry : containers.entrySet()) {
        String name = entry.getKey();
        // the owning partition of a ringbuffer is derived from its name's partition key
        int partitionId = partitionService.getPartitionId(getPartitionKey(name));
        RingbufferContainer container = entry.getValue();
        int backupCount = container.getConfig().getTotalBackupCount();
        // replicate only containers on the migrating partition whose backup count covers this replica index
        if (partitionId == event.getPartitionId() && backupCount >= event.getReplicaIndex()) {
            migrationData.put(name, container);
        }
    }
    if (migrationData.isEmpty()) {
        return null;
    }
    return new ReplicationOperation(migrationData, event.getPartitionId(), event.getReplicaIndex());
}
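The selection rule above can be read as a small predicate: a container belongs in the migration data only when its name hashes to the migrating partition and its configured backup count is at least the requested replica index. Below is a minimal sketch of that predicate with hypothetical names (ReplicationFilter, shouldReplicate); only the IPartitionService call is taken from the snippet, and the Hazelcast 3.x package is assumed.

import com.hazelcast.spi.partition.IPartitionService;

final class ReplicationFilter {

    private ReplicationFilter() {
    }

    // hypothetical helper, not Hazelcast code: mirrors the filter used in prepareReplicationOperation
    static boolean shouldReplicate(IPartitionService partitionService, String partitionKey,
                                   int totalBackupCount, int eventPartitionId, int replicaIndex) {
        // the owning partition is derived from the structure's partition key, not its full name
        int owningPartition = partitionService.getPartitionId(partitionKey);
        return owningPartition == eventPartitionId && totalBackupCount >= replicaIndex;
    }
}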
Use of com.hazelcast.spi.partition.IPartitionService in project hazelcast by hazelcast.
Class PutAllOperation, method run.
@Override
public void run() throws Exception {
    ReplicatedMapService service = getService();
    ReplicatedRecordStore store = service.getReplicatedRecordStore(name, true, getPartitionId());
    int partitionId = getPartitionId();
    IPartitionService partitionService = getNodeEngine().getPartitionService();
    ReplicatedMapEventPublishingService eventPublishingService = service.getEventPublishingService();
    for (int i = 0; i < entries.size(); i++) {
        Data key = entries.getKey(i);
        Data value = entries.getValue(i);
        // skip entries whose key does not hash to the partition this operation runs on
        if (partitionId != partitionService.getPartitionId(key)) {
            continue;
        }
        Object putResult = store.put(key, value);
        Data oldValue = getNodeEngine().toData(putResult);
        eventPublishingService.fireEntryListenerEvent(key, oldValue, value, name, getCallerAddress());
        VersionResponsePair response = new VersionResponsePair(putResult, store.getVersion());
        publishReplicationMessage(key, value, response);
    }
}
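The loop above keeps only the entries whose key hashes to the partition this operation is running on. The complementary caller-side step is to group keys by owning partition before dispatching per-partition operations. The following is a minimal sketch of that grouping with a hypothetical PartitionGrouping helper, assuming the same Hazelcast 3.x Data and IPartitionService types used in the snippet.

import com.hazelcast.nio.serialization.Data;
import com.hazelcast.spi.partition.IPartitionService;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class PartitionGrouping {

    private PartitionGrouping() {
    }

    // hypothetical helper: buckets serialized keys by the partition that owns them
    static Map<Integer, List<Data>> groupKeysByPartition(IPartitionService partitionService, List<Data> keys) {
        Map<Integer, List<Data>> keysPerPartition = new HashMap<Integer, List<Data>>();
        for (Data key : keys) {
            int partitionId = partitionService.getPartitionId(key);
            List<Data> bucket = keysPerPartition.get(partitionId);
            if (bucket == null) {
                bucket = new ArrayList<Data>();
                keysPerPartition.put(partitionId, bucket);
            }
            bucket.add(key);
        }
        return keysPerPartition;
    }
}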
Use of com.hazelcast.spi.partition.IPartitionService in project hazelcast by hazelcast.
Class XAResourceImpl, method finalizeTransactionRemotely.
private void finalizeTransactionRemotely(Xid xid, boolean isCommit) throws XAException {
    NodeEngine nodeEngine = getNodeEngine();
    IPartitionService partitionService = nodeEngine.getPartitionService();
    OperationService operationService = nodeEngine.getOperationService();
    SerializableXID serializableXID =
            new SerializableXID(xid.getFormatId(), xid.getGlobalTransactionId(), xid.getBranchQualifier());
    Data xidData = nodeEngine.toData(serializableXID);
    // the serialized XID determines the partition on which the transaction is finalized
    int partitionId = partitionService.getPartitionId(xidData);
    FinalizeRemoteTransactionOperation operation = new FinalizeRemoteTransactionOperation(xidData, isCommit);
    InternalCompletableFuture<Integer> future = operationService.invokeOnPartition(SERVICE_NAME, operation, partitionId);
    Integer errorCode;
    try {
        errorCode = future.get();
    } catch (Exception e) {
        throw ExceptionUtil.rethrow(e);
    }
    if (errorCode != null) {
        throw new XAException(errorCode);
    }
}
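The routing pattern here (serialize the key, ask the partition service which partition owns it, invoke an operation on that partition, and block for the result) is generic. A minimal sketch of it as a standalone helper follows; PartitionRouting and invokeOnOwningPartition are hypothetical names, and the SPI types are the Hazelcast 3.x ones used in the snippet.

import com.hazelcast.nio.serialization.Data;
import com.hazelcast.spi.InternalCompletableFuture;
import com.hazelcast.spi.NodeEngine;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.OperationService;
import com.hazelcast.spi.partition.IPartitionService;
import com.hazelcast.util.ExceptionUtil;

final class PartitionRouting {

    private PartitionRouting() {
    }

    // hypothetical helper: sends an operation to the partition that owns the given key and waits for the result
    static <T> T invokeOnOwningPartition(NodeEngine nodeEngine, String serviceName, Operation operation, Object key) {
        IPartitionService partitionService = nodeEngine.getPartitionService();
        OperationService operationService = nodeEngine.getOperationService();
        Data keyData = nodeEngine.toData(key);
        int partitionId = partitionService.getPartitionId(keyData);
        InternalCompletableFuture<T> future = operationService.invokeOnPartition(serviceName, operation, partitionId);
        try {
            return future.get();
        } catch (Exception e) {
            throw ExceptionUtil.rethrow(e);
        }
    }
}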
Use of com.hazelcast.spi.partition.IPartitionService in project hazelcast by hazelcast.
Class CacheSplitBrainHandler, method prepareMergeRunnable.
Runnable prepareMergeRunnable() {
    final Map<String, Map<Data, CacheRecord>> recordMap = new HashMap<String, Map<Data, CacheRecord>>(configs.size());
    final IPartitionService partitionService = nodeEngine.getPartitionService();
    final int partitionCount = partitionService.getPartitionCount();
    final Address thisAddress = nodeEngine.getClusterService().getThisAddress();
    for (int i = 0; i < partitionCount; i++) {
        // add the entries of partitions owned by this member so they will be merged
        if (thisAddress.equals(partitionService.getPartitionOwner(i))) {
            CachePartitionSegment segment = segments[i];
            Iterator<ICacheRecordStore> iter = segment.recordStoreIterator();
            while (iter.hasNext()) {
                ICacheRecordStore cacheRecordStore = iter.next();
                if (!(cacheRecordStore instanceof SplitBrainAwareCacheRecordStore)) {
                    continue;
                }
                String cacheName = cacheRecordStore.getName();
                Map<Data, CacheRecord> records = recordMap.get(cacheName);
                if (records == null) {
                    records = new HashMap<Data, CacheRecord>(cacheRecordStore.size());
                    recordMap.put(cacheName, records);
                }
                for (Map.Entry<Data, CacheRecord> cacheRecordEntry : cacheRecordStore.getReadOnlyRecords().entrySet()) {
                    Data key = cacheRecordEntry.getKey();
                    CacheRecord cacheRecord = cacheRecordEntry.getValue();
                    records.put(key, cacheRecord);
                }
                // clear all records, whether owned or backup
                cacheRecordStore.clear();
                // send the cache invalidation event regardless of whether anything was actually cleared
                // (listeners do not need to know how many entries were removed)
                final CacheService cacheService = nodeEngine.getService(CacheService.SERVICE_NAME);
                cacheService.sendInvalidationEvent(cacheName, null, AbstractCacheRecordStore.SOURCE_NOT_AVAILABLE);
            }
        }
    }
    return new CacheMerger(nodeEngine, configs, recordMap, mergePolicyProvider);
}
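The outer loop visits every partition id and acts only on the partitions currently owned by this member. A minimal sketch of that ownership check as a reusable helper, with the hypothetical names OwnedPartitions and ownedPartitionIds, assuming the same Hazelcast 3.x SPI as the snippet:

import com.hazelcast.nio.Address;
import com.hazelcast.spi.NodeEngine;
import com.hazelcast.spi.partition.IPartitionService;

import java.util.ArrayList;
import java.util.List;

final class OwnedPartitions {

    private OwnedPartitions() {
    }

    // hypothetical helper: returns the ids of all partitions owned by the local member
    static List<Integer> ownedPartitionIds(NodeEngine nodeEngine) {
        IPartitionService partitionService = nodeEngine.getPartitionService();
        Address thisAddress = nodeEngine.getClusterService().getThisAddress();
        List<Integer> owned = new ArrayList<Integer>();
        for (int partitionId = 0; partitionId < partitionService.getPartitionCount(); partitionId++) {
            // getPartitionOwner may still be null while the partition table is being formed; equals handles that
            if (thisAddress.equals(partitionService.getPartitionOwner(partitionId))) {
                owned.add(partitionId);
            }
        }
        return owned;
    }
}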
Use of com.hazelcast.spi.partition.IPartitionService in project hazelcast by hazelcast.
Class CardinalityEstimatorService, method getPartitionId.
private int getPartitionId(String name) {
    IPartitionService partitionService = nodeEngine.getPartitionService();
    String partitionKey = getPartitionKey(name);
    return partitionService.getPartitionId(partitionKey);
}
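A typical caller would use such a helper to route operations to the partition that owns a named estimator. The sketch below is hypothetical: it assumes that getPartitionKey(name) above resolves to StringPartitioningStrategy.getPartitionKey, which honours Hazelcast's "name@partitionKey" naming convention; the rest mirrors the method shown.

import com.hazelcast.partition.strategy.StringPartitioningStrategy;
import com.hazelcast.spi.partition.IPartitionService;

final class NameRouting {

    private NameRouting() {
    }

    // hypothetical helper: derives the owning partition id from a distributed object name
    static int partitionIdForName(IPartitionService partitionService, String name) {
        // for "orders@europe" the partition key is "europe"; otherwise the full name is used
        String partitionKey = StringPartitioningStrategy.getPartitionKey(name);
        return partitionService.getPartitionId(partitionKey);
    }
}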