Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
The class MapServiceContextImpl, method removeRecordStoresFromPartitionMatchingWith.
@Override
public void removeRecordStoresFromPartitionMatchingWith(Predicate<RecordStore> predicate,
                                                         int partitionId,
                                                         boolean onShutdown,
                                                         boolean onRecordStoreDestroy) {
    PartitionContainer container = partitionContainers[partitionId];
    if (container == null) {
        return;
    }

    Iterator<RecordStore> partitionIterator = container.getMaps().values().iterator();
    while (partitionIterator.hasNext()) {
        RecordStore partition = partitionIterator.next();
        if (predicate.test(partition)) {
            partition.beforeOperation();
            try {
                partition.clearPartition(onShutdown, onRecordStoreDestroy);
            } finally {
                partition.afterOperation();
            }
            partitionIterator.remove();
        }
    }
}
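A minimal caller sketch (not taken from the Hazelcast sources): removing every record store that belongs to a single map from one partition. The mapServiceContext variable, the map name, the partition id, and the use of RecordStore.getName() as the matching key are assumptions made for illustration; the try/finally bracket inside the method guarantees afterOperation() runs even if clearing the partition throws, and the iterator-based removal avoids a ConcurrentModificationException on the container's map of record stores.

    // Hypothetical caller: drop the record stores of map "orders" from partition 42.
    // `mapServiceContext` is assumed to be an already-obtained MapServiceContextImpl.
    String mapName = "orders";
    mapServiceContext.removeRecordStoresFromPartitionMatchingWith(
            recordStore -> mapName.equals(recordStore.getName()),   // assumed matcher
            42,                                                      // partitionId
            false,                                                   // onShutdown
            true);                                                   // onRecordStoreDestroy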
Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
The class MapClearExpiredRecordsTask, method equalizeBackupSizeWithPrimary.
protected void equalizeBackupSizeWithPrimary(PartitionContainer container) {
    if (!canPrimaryDriveExpiration()) {
        return;
    }

    ConcurrentMap<String, RecordStore> maps = container.getMaps();
    for (RecordStore recordStore : maps.values()) {
        int totalBackupCount = recordStore.getMapContainer().getTotalBackupCount();
        toBackupSender.invokeBackupExpiryOperation(Collections.emptyList(), totalBackupCount,
                recordStore.getPartitionId(), recordStore);
    }
}
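The key list is deliberately empty: the point of the operation sent to the backup replicas is not to expire specific keys but, as the method name suggests, to let the backups align their entry count with the primary's. The replica count used above comes from the map configuration; a short sketch with the public com.hazelcast.config.MapConfig API (the map name is made up) shows where that number originates:

    // Sketch: getTotalBackupCount() is the sum of sync and async backups configured for the map.
    MapConfig mapConfig = new MapConfig("ordersMap")   // hypothetical map name
            .setBackupCount(1)
            .setAsyncBackupCount(1);
    int totalBackupCount = mapConfig.getTotalBackupCount();   // 2 == 1 sync + 1 async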
Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
The class MapClearExpiredRecordsTask, method notHaveAnyExpirableRecord.
/**
 * Checks whether the given partition contains any expirable record.
 * If no expirable record exists in that partition, there is no need
 * to fire an expiration operation.
 *
 * @param partitionContainer the corresponding partition container.
 * @return <code>true</code> if there is no expirable record in that
 *         partition, <code>false</code> otherwise.
 */
@Override
protected boolean notHaveAnyExpirableRecord(PartitionContainer partitionContainer) {
    boolean notExist = true;
    final ConcurrentMap<String, RecordStore> maps = partitionContainer.getMaps();
    for (RecordStore store : maps.values()) {
        if (store.isExpirable()) {
            notExist = false;
            break;
        }
    }
    return notExist;
}
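The same check can be expressed more compactly with the Stream API; the following is a behaviour-equivalent sketch, not the project's code:

    // Sketch: the partition has no expirable record iff no store reports isExpirable().
    protected boolean notHaveAnyExpirableRecord(PartitionContainer partitionContainer) {
        return partitionContainer.getMaps().values().stream()
                .noneMatch(RecordStore::isExpirable);
    }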
Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
The class PartitionContainer, method createRecordStore.
private RecordStore createRecordStore(String name) {
    MapServiceContext serviceContext = mapService.getMapServiceContext();
    MapContainer mapContainer = serviceContext.getMapContainer(name);
    MapConfig mapConfig = mapContainer.getMapConfig();
    NodeEngine nodeEngine = serviceContext.getNodeEngine();
    IPartitionService ps = nodeEngine.getPartitionService();
    OperationService opService = nodeEngine.getOperationService();
    ExecutionService execService = nodeEngine.getExecutionService();
    HazelcastProperties hazelcastProperties = nodeEngine.getProperties();

    MapKeyLoader keyLoader = new MapKeyLoader(name, opService, ps, nodeEngine.getClusterService(),
            execService, mapContainer.toData(), serviceContext.getNodeWideLoadedKeyLimiter());
    keyLoader.setMaxBatch(hazelcastProperties.getInteger(ClusterProperty.MAP_LOAD_CHUNK_SIZE));
    keyLoader.setMaxSize(getMaxSizePerNode(mapConfig.getEvictionConfig()));
    keyLoader.setHasBackup(mapConfig.getTotalBackupCount() > 0);
    keyLoader.setMapOperationProvider(serviceContext.getMapOperationProvider(name));

    if (!mapContainer.isGlobalIndexEnabled()) {
        Indexes indexesForMap = mapContainer.createIndexes(false);
        indexes.putIfAbsent(name, indexesForMap);
    }

    RecordStore recordStore = serviceContext.createRecordStore(mapContainer, partitionId, keyLoader);
    recordStore.init();
    return recordStore;
}
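This factory is private; callers obtain record stores through the container and creation happens lazily on first access. A minimal sketch of such a lazy accessor, assuming the backing field is the ConcurrentMap<String, RecordStore> returned by getMaps() (the wrapper below is illustrative, not the exact PartitionContainer implementation):

    // Illustrative lazy accessor: create the record store for `name` on first use.
    // `maps` is assumed to be the ConcurrentMap<String, RecordStore> backing getMaps().
    public RecordStore getRecordStore(String name) {
        return maps.computeIfAbsent(name, this::createRecordStore);
    }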
Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
The class MapReplicationStateHolder, method applyState.
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:methodlength",
        "checkstyle:cyclomaticcomplexity", "checkstyle:nestedifdepth"})
void applyState() {
    ThreadUtil.assertRunningOnPartitionThread();

    applyIndexesState();

    if (!isNullOrEmpty(data)) {
        for (Map.Entry<String, List> dataEntry : data.entrySet()) {
            String mapName = dataEntry.getKey();
            List keyRecordExpiry = dataEntry.getValue();
            RecordStore recordStore = operation.getRecordStore(mapName);
            recordStore.beforeOperation();
            try {
                initializeRecordStore(mapName, recordStore);
                recordStore.setPreMigrationLoadedStatus(loaded.get(mapName));

                MapContainer mapContainer = recordStore.getMapContainer();
                PartitionContainer partitionContainer = recordStore.getMapContainer()
                        .getMapServiceContext().getPartitionContainer(operation.getPartitionId());
                for (Map.Entry<String, IndexConfig> indexDefinition
                        : mapContainer.getIndexDefinitions().entrySet()) {
                    Indexes indexes = mapContainer.getIndexes(partitionContainer.getPartitionId());
                    indexes.addOrGetIndex(indexDefinition.getValue());
                }

                final Indexes indexes = mapContainer.getIndexes(partitionContainer.getPartitionId());
                final boolean populateIndexes = indexesMustBePopulated(indexes, operation);

                InternalIndex[] indexesSnapshot = null;
                if (populateIndexes) {
                    // defensively clear possible stale leftovers in non-global indexes from
                    // the previous failed promotion attempt
                    indexesSnapshot = indexes.getIndexes();
                    Indexes.beginPartitionUpdate(indexesSnapshot);
                    indexes.clearAll();
                }

                long nowInMillis = Clock.currentTimeMillis();
                forEachReplicatedRecord(keyRecordExpiry, mapContainer, recordStore,
                        populateIndexes, nowInMillis);

                if (populateIndexes) {
                    Indexes.markPartitionAsIndexed(partitionContainer.getPartitionId(), indexesSnapshot);
                }
            } finally {
                recordStore.afterOperation();
            }
        }
    }

    for (Map.Entry<String, LocalRecordStoreStats> statsEntry : recordStoreStatsPerMapName.entrySet()) {
        String mapName = statsEntry.getKey();
        LocalRecordStoreStats stats = statsEntry.getValue();
        RecordStore recordStore = operation.getRecordStore(mapName);
        recordStore.setStats(stats);
    }
}
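When indexes must be repopulated, the method wraps the record replay in a guard pattern: snapshot the partition's indexes, mark the partition update as begun, clear stale entries, replay the replicated records, and only then mark the partition as indexed. A condensed sketch of just that pattern, using only the calls visible above; replayRecords() stands in for the forEachReplicatedRecord(...) call and partitionId for partitionContainer.getPartitionId():

    // Condensed sketch of the index re-population guard used in applyState().
    InternalIndex[] snapshot = indexes.getIndexes();
    Indexes.beginPartitionUpdate(snapshot);                    // mark partition as being updated
    indexes.clearAll();                                        // drop stale leftovers from a failed promotion
    replayRecords();                                           // hypothetical stand-in for forEachReplicatedRecord(...)
    Indexes.markPartitionAsIndexed(partitionId, snapshot);     // publish the rebuilt partition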