Use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
Class EvictBatchBackupOperation, method runInternal.
@Override
protected void runInternal() {
    if (recordStore == null) {
        return;
    }
    for (ExpiredKey expiredKey : expiredKeys) {
        Data key = expiredKey.getKey();
        Record existingRecord = recordStore.getRecord(key);
        // Evict the backup entry only if it still holds the same value
        // that expired on the primary replica.
        if (hasSameValueHashCode(existingRecord, expiredKey)) {
            recordStore.evict(key, true);
        }
    }
    equalizeEntryCountWithPrimary();
}
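For context, the expiration that this backup operation propagates is normally triggered from the public IMap API. Below is a minimal, hedged sketch assuming the standard Hazelcast member API; the map name, key, and TTL are illustrative and not taken from the snippet above.

import java.util.concurrent.TimeUnit;

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.map.IMap;

public class TtlExample {
    public static void main(String[] args) throws InterruptedException {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<String, String> map = hz.getMap("sessions");

        // Put an entry with a 1-second time-to-live. Once it expires on the
        // primary replica, the expired key is also removed from the backup
        // replicas, which is consistent with the batch of expiredKeys
        // processed by EvictBatchBackupOperation above.
        map.put("token", "value", 1, TimeUnit.SECONDS);

        Thread.sleep(2000);
        System.out.println(map.get("token")); // expected: null after expiration

        hz.shutdown();
    }
}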
Use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
Class GetEntryViewOperation, method runInternal.
@Override
protected void runInternal() {
    Record record = recordStore.getRecordOrNull(dataKey);
    if (record != null) {
        // Serialize the value and combine it with the record's metadata
        // and expiry information into a single entry view.
        Data value = mapServiceContext.toData(record.getValue());
        ExpiryMetadata expiryMetadata = recordStore.getExpirySystem().getExpiryMetadata(dataKey);
        result = EntryViews.createSimpleEntryView(dataKey, value, record, expiryMetadata);
    }
}
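The record and expiry metadata gathered here are what callers see as an EntryView. A small sketch of the public-API counterpart, assuming the standard Hazelcast API; the map name and key are illustrative.

import com.hazelcast.core.EntryView;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.map.IMap;

public class EntryViewExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<String, Integer> map = hz.getMap("counters");
        map.put("hits", 42);

        // getEntryView exposes record metadata such as creation time,
        // last update time, and hit count for a single key.
        EntryView<String, Integer> view = map.getEntryView("hits");
        System.out.println("value:         " + view.getValue());
        System.out.println("creation time: " + view.getCreationTime());
        System.out.println("last update:   " + view.getLastUpdateTime());
        System.out.println("hits:          " + view.getHits());

        hz.shutdown();
    }
}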
Use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
Class PartitionScanRunner, method run.
@SuppressWarnings("unchecked")
public void run(String mapName, Predicate predicate, int partitionId, Result result) {
    PagingPredicateImpl pagingPredicate = predicate instanceof PagingPredicateImpl
            ? (PagingPredicateImpl) predicate : null;
    PartitionContainer partitionContainer = mapServiceContext.getPartitionContainer(partitionId);
    MapContainer mapContainer = mapServiceContext.getMapContainer(mapName);
    RecordStore<Record> recordStore = partitionContainer.getRecordStore(mapName);
    boolean nativeMemory = recordStore.getInMemoryFormat() == InMemoryFormat.NATIVE;
    boolean useCachedValues = isUseCachedDeserializedValuesEnabled(mapContainer, partitionId);
    Extractors extractors = mapServiceContext.getExtractors(mapName);
    Map.Entry<Integer, Map.Entry> nearestAnchorEntry =
            pagingPredicate == null ? null : pagingPredicate.getNearestAnchorEntry();
    recordStore.forEachAfterLoad(new BiConsumer<Data, Record>() {

        LazyMapEntry queryEntry = new LazyMapEntry();

        @Override
        public void accept(Data key, Record record) {
            Object value = useCachedValues ? getValueOrCachedValue(record, ss) : record.getValue();
            // TODO how can a value be null?
            if (value == null) {
                return;
            }
            queryEntry.init(ss, key, value, extractors);
            queryEntry.setRecord(record);
            queryEntry.setMetadata(recordStore.getOrCreateMetadataStore().get(key));
            if (predicate.apply(queryEntry) && compareAnchor(pagingPredicate, queryEntry, nearestAnchorEntry)) {
                // always copy key & value to heap if the map is backed by native memory
                value = nativeMemory ? toHeapData((Data) value) : value;
                result.add(queryEntry.init(ss, toHeapData(key), value, extractors));
                // We can't reuse the existing entry after it was added to the
                // result. Allocate a new one.
                queryEntry = new LazyMapEntry();
            }
        }
    }, false);
    result.orderAndLimit(pagingPredicate, nearestAnchorEntry);
}
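This partition scan is the full-scan path behind predicate queries. Below is a hedged sketch of driving it from the public query API, including a paging predicate whose anchor entries correspond to the compareAnchor(...) check above; the map contents and page size are illustrative.

import java.util.Collection;

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.map.IMap;
import com.hazelcast.query.PagingPredicate;
import com.hazelcast.query.Predicate;
import com.hazelcast.query.Predicates;

public class PartitionScanQueryExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<Integer, Integer> map = hz.getMap("numbers");
        for (int i = 0; i < 100; i++) {
            map.put(i, i);
        }

        // A plain predicate is evaluated against every owned partition
        // (the PartitionScanRunner path) unless an index can serve it.
        Predicate<Integer, Integer> greaterThan = Predicates.greaterThan("this", 90);

        // Wrapping it in a paging predicate is what produces the anchor
        // entries consulted during the scan.
        PagingPredicate<Integer, Integer> paging = Predicates.pagingPredicate(greaterThan, 5);
        Collection<Integer> firstPage = map.values(paging);
        System.out.println(firstPage);

        paging.nextPage();
        System.out.println(map.values(paging));

        hz.shutdown();
    }
}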
Use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
Class AbstractRecordStore, method createRecord.
@Override
public Record createRecord(Data key, Object value, long now) {
    Record record = recordFactory.newRecord(key, value);
    record.setCreationTime(now);
    record.setLastUpdateTime(now);
    if (record.getMatchingRecordReaderWriter() == RecordReaderWriter.SIMPLE_DATA_RECORD_WITH_LRU_EVICTION_READER_WRITER) {
        // To distinguish last-access-time from creation-time, we set
        // last-access-time only for LRU records. An LRU record has no
        // creation-time field, only a last-access-time field.
        record.setLastAccessTime(now);
    }
    updateStatsOnPut(false, now);
    return record;
}
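The LRU-specific branch above only applies when the map is configured with LRU eviction. Below is a minimal configuration sketch using the public Config API, assuming a recent Hazelcast version; the map name and size limit are illustrative.

import com.hazelcast.config.Config;
import com.hazelcast.config.EvictionPolicy;
import com.hazelcast.config.MapConfig;
import com.hazelcast.config.MaxSizePolicy;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

public class LruMapConfigExample {
    public static void main(String[] args) {
        Config config = new Config();
        MapConfig mapConfig = config.getMapConfig("lru-cache");
        // With LRU eviction the record keeps a last-access-time field,
        // which is why createRecord(...) initializes it for LRU records.
        mapConfig.getEvictionConfig()
                .setEvictionPolicy(EvictionPolicy.LRU)
                .setMaxSizePolicy(MaxSizePolicy.PER_NODE)
                .setSize(10_000);

        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        hz.getMap("lru-cache").put("k", "v");
        hz.shutdown();
    }
}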
Use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
Class DefaultRecordStore, method putOrUpdateReplicatedRecord.
@Override
public Record putOrUpdateReplicatedRecord(Data dataKey, Record replicatedRecord, ExpiryMetadata expiryMetadata,
                                          boolean indexesMustBePopulated, long now) {
    // Create the record if it does not exist yet, otherwise update its value in place.
    Record newRecord = storage.get(dataKey);
    if (newRecord == null) {
        newRecord = createRecord(dataKey, replicatedRecord != null ? replicatedRecord.getValue() : null, now);
        storage.put(dataKey, newRecord);
    } else {
        storage.updateRecordValue(dataKey, newRecord, replicatedRecord.getValue());
    }
    // Carry over record metadata and expiry from the replicated record.
    Records.copyMetadataFrom(replicatedRecord, newRecord);
    expirySystem.add(dataKey, expiryMetadata, now);
    mutationObserver.onReplicationPutRecord(dataKey, newRecord, indexesMustBePopulated);
    updateStatsOnPut(replicatedRecord.getHits(), now);
    return newRecord;
}
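Records applied through this replication path end up on replica members; how many backup replicas a map keeps is controlled from the public configuration API. A brief, hedged sketch follows; the map name and backup counts are illustrative.

import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.map.IMap;

public class BackupConfigExample {
    public static void main(String[] args) {
        Config config = new Config();
        // One synchronous and one asynchronous backup per partition; replica
        // members receive replicated records like the ones applied by
        // putOrUpdateReplicatedRecord(...) above.
        config.getMapConfig("orders")
                .setBackupCount(1)
                .setAsyncBackupCount(1);

        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        IMap<Long, String> orders = hz.getMap("orders");
        orders.put(1L, "created");
        hz.shutdown();
    }
}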