Use of com.hazelcast.map.impl.PartitionContainer in project hazelcast by hazelcast.
From the class ClearExpiredOperation, method afterRun:
@Override
public void afterRun() throws Exception {
    final MapService mapService = getService();
    MapServiceContext mapServiceContext = mapService.getMapServiceContext();
    final PartitionContainer partitionContainer = mapServiceContext.getPartitionContainer(getPartitionId());
    // mark the cleanup for this partition as finished and record when it completed
    partitionContainer.setHasRunningCleanup(false);
    partitionContainer.setLastCleanupTime(Clock.currentTimeMillis());
}
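For context, the task that schedules these operations typically consults the same bookkeeping before starting another cleanup for a partition. The snippet below is a minimal sketch, assuming PartitionContainer exposes the getter counterparts hasRunningCleanup() and getLastCleanupTime() of the setters used above; MIN_MILLIS_BETWEEN_CLEANUPS is a hypothetical threshold, not a Hazelcast constant.

// Minimal sketch: decide whether a new cleanup may be scheduled for a partition.
// Assumes hasRunningCleanup()/getLastCleanupTime() exist on PartitionContainer;
// MIN_MILLIS_BETWEEN_CLEANUPS is a hypothetical threshold.
private static final long MIN_MILLIS_BETWEEN_CLEANUPS = 1000;

boolean canStartCleanup(PartitionContainer partitionContainer, long now) {
    if (partitionContainer.hasRunningCleanup()) {
        // a previously sent cleanup operation has not reached afterRun() yet
        return false;
    }
    return now - partitionContainer.getLastCleanupTime() >= MIN_MILLIS_BETWEEN_CLEANUPS;
}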
Use of com.hazelcast.map.impl.PartitionContainer in project hazelcast by hazelcast.
From the class MapClearExpiredOperation, method run:
@Override
public void run() throws Exception {
    if (getNodeEngine().getLocalMember().isLiteMember()) {
        // A lite member holds no partition data, so there is nothing to expire; this
        // can happen when a data member is converted to a lite member during merge operations.
        return;
    }
    MapService mapService = getService();
    MapServiceContext mapServiceContext = mapService.getMapServiceContext();
    PartitionContainer partitionContainer = mapServiceContext.getPartitionContainer(getPartitionId());
    ConcurrentMap<String, RecordStore> recordStores = partitionContainer.getMaps();
    boolean backup = !isOwner();
    long now = Clock.currentTimeMillis();
    for (RecordStore recordStore : recordStores.values()) {
        if (recordStore.isExpirable()) {
            recordStore.evictExpiredEntries(expirationPercentage, now, backup);
            recordStore.disposeDeferredBlocks();
        }
    }
}
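The backup flag above comes from isOwner(), which is not part of this snippet. The sketch below illustrates the intent of such a check by comparing the partition owner's address with the local member's address; it is only an illustration, not necessarily the exact Hazelcast implementation.

// Illustrative sketch of an owner check like the isOwner() used above: the
// operation evicts in owner mode only when this member owns the targeted partition.
private boolean isOwner() {
    NodeEngine nodeEngine = getNodeEngine();
    Address ownerAddress = nodeEngine.getPartitionService().getPartitionOwner(getPartitionId());
    return nodeEngine.getThisAddress().equals(ownerAddress);
}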
Use of com.hazelcast.map.impl.PartitionContainer in project hazelcast by hazelcast.
From the class MapClearExpiredOperation, method prepareForNextCleanup:
protected void prepareForNextCleanup() {
    MapService mapService = getService();
    MapServiceContext mapServiceContext = mapService.getMapServiceContext();
    PartitionContainer partitionContainer = mapServiceContext.getPartitionContainer(getPartitionId());
    partitionContainer.setHasRunningCleanup(false);
    partitionContainer.setLastCleanupTime(Clock.currentTimeMillis());
}
Use of com.hazelcast.map.impl.PartitionContainer in project hazelcast by hazelcast.
From the class PartitionScanRunner, method run (chunked iteration with iteration pointers):
/**
 * Executes the predicate on a partition chunk. The offset in the partition
 * is defined by the {@code pointers} and the soft limit is defined by the
 * {@code fetchSize}. The method returns the matched entries and updated
 * pointers from which new entries can be fetched, which allows for efficient
 * iteration of query results.
 * <p>
 * <b>NOTE</b>
 * The iteration may be performed while the map is being mutated or while there
 * are membership changes. The iterator does not reflect the state at the moment
 * it was constructed - it may return entries that were added after the
 * iteration started and may omit entries that were removed after the
 * iteration started.
 * The iterator will not, however, skip an entry if it has not been changed
 * and will not return an entry twice.
 *
 * @param mapName     the map name
 * @param predicate   the predicate which the entries must match
 * @param partitionId the partition which is queried
 * @param pointers    the pointers defining the state of iteration
 * @param fetchSize   the soft limit for the number of entries to fetch
 * @return entries matching the predicate and the updated iteration pointers
 *         from which new entries can be fetched
 */
public QueryableEntriesSegment run(String mapName, Predicate predicate, int partitionId,
                                   IterationPointer[] pointers, int fetchSize) {
    List<QueryableEntry> resultList = new LinkedList<>();
    PartitionContainer partitionContainer = mapServiceContext.getPartitionContainer(partitionId);
    RecordStore recordStore = partitionContainer.getRecordStore(mapName);
    Extractors extractors = mapServiceContext.getExtractors(mapName);
    while (resultList.size() < fetchSize && pointers[pointers.length - 1].getIndex() >= 0) {
        MapEntriesWithCursor cursor = recordStore.fetchEntries(pointers, fetchSize - resultList.size());
        pointers = cursor.getIterationPointers();
        Collection<? extends Entry<Data, Data>> entries = cursor.getBatch();
        if (entries.isEmpty()) {
            break;
        }
        for (Entry<Data, Data> entry : entries) {
            QueryableEntry queryEntry = new LazyMapEntry(entry.getKey(), entry.getValue(), ss, extractors);
            if (predicate.apply(queryEntry)) {
                resultList.add(queryEntry);
            }
        }
    }
    return new QueryableEntriesSegment(resultList, pointers);
}
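A caller drives a complete scan of the partition by feeding the returned pointers back into the next call until the last pointer reports a negative index, mirroring the loop condition inside the method. The loop below is a minimal sketch: it assumes QueryableEntriesSegment exposes getEntries() and getPointers() accessors and that the initial pointer is created with index Integer.MAX_VALUE and size -1; runner, mapName, predicate, partitionId and fetchSize are placeholders supplied by the caller.

// Minimal sketch of paginated iteration over one partition's query results.
// Assumes QueryableEntriesSegment has getEntries()/getPointers(); the initial
// pointer value and the other identifiers are caller-supplied assumptions.
IterationPointer[] pointers = {new IterationPointer(Integer.MAX_VALUE, -1)};
List<QueryableEntry> allMatches = new ArrayList<>();
do {
    QueryableEntriesSegment segment = runner.run(mapName, predicate, partitionId, pointers, fetchSize);
    allMatches.addAll(segment.getEntries());
    pointers = segment.getPointers();
} while (pointers[pointers.length - 1].getIndex() >= 0);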
Use of com.hazelcast.map.impl.PartitionContainer in project hazelcast by hazelcast.
From the class PartitionScanRunner, method run (full partition scan into a Result):
@SuppressWarnings("unchecked")
public void run(String mapName, Predicate predicate, int partitionId, Result result) {
    PagingPredicateImpl pagingPredicate = predicate instanceof PagingPredicateImpl ? (PagingPredicateImpl) predicate : null;
    PartitionContainer partitionContainer = mapServiceContext.getPartitionContainer(partitionId);
    MapContainer mapContainer = mapServiceContext.getMapContainer(mapName);
    RecordStore<Record> recordStore = partitionContainer.getRecordStore(mapName);
    boolean nativeMemory = recordStore.getInMemoryFormat() == InMemoryFormat.NATIVE;
    boolean useCachedValues = isUseCachedDeserializedValuesEnabled(mapContainer, partitionId);
    Extractors extractors = mapServiceContext.getExtractors(mapName);
    Map.Entry<Integer, Map.Entry> nearestAnchorEntry = pagingPredicate == null ? null : pagingPredicate.getNearestAnchorEntry();
    recordStore.forEachAfterLoad(new BiConsumer<Data, Record>() {
        LazyMapEntry queryEntry = new LazyMapEntry();

        @Override
        public void accept(Data key, Record record) {
            Object value = useCachedValues ? getValueOrCachedValue(record, ss) : record.getValue();
            // TODO how can a value be null?
            if (value == null) {
                return;
            }
            queryEntry.init(ss, key, value, extractors);
            queryEntry.setRecord(record);
            queryEntry.setMetadata(recordStore.getOrCreateMetadataStore().get(key));
            if (predicate.apply(queryEntry) && compareAnchor(pagingPredicate, queryEntry, nearestAnchorEntry)) {
                // always copy key & value to heap if the map is backed by native memory
                value = nativeMemory ? toHeapData((Data) value) : value;
                result.add(queryEntry.init(ss, toHeapData(key), value, extractors));
                // We can't reuse the existing entry after it was added to the
                // result. Allocate a new one.
                queryEntry = new LazyMapEntry();
            }
        }
    }, false);
    result.orderAndLimit(pagingPredicate, nearestAnchorEntry);
}
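The final orderAndLimit call is what turns the collected matches into a single page when a paging predicate is used. The sketch below is a conceptual illustration only, assuming PagingPredicateImpl exposes getComparator() and getPageSize(); Hazelcast's actual Result implementations handle this internally and may differ.

// Conceptual sketch (not Hazelcast's implementation) of ordering and limiting
// matches for a paging query: sort with the predicate's comparator, if any,
// then keep at most one page of entries.
static List<QueryableEntry> orderAndLimitSketch(List<QueryableEntry> matches, PagingPredicateImpl pagingPredicate) {
    if (pagingPredicate == null) {
        // non-paging queries keep every match unchanged
        return matches;
    }
    Comparator<Map.Entry> comparator = pagingPredicate.getComparator();
    if (comparator != null) {
        matches.sort(comparator);
    }
    int pageSize = pagingPredicate.getPageSize();
    return matches.size() <= pageSize ? matches : matches.subList(0, pageSize);
}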