use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
In class MapReplicationStateHolder, method forEachReplicatedRecord:
private void forEachReplicatedRecord(List keyRecordExpiry, MapContainer mapContainer,
                                     RecordStore recordStore, boolean populateIndexes, long nowInMillis) {
    long ownedEntryCountOnThisNode = entryCountOnThisNode(mapContainer);
    EvictionConfig evictionConfig = mapContainer.getMapConfig().getEvictionConfig();
    boolean perNodeEvictionConfigured = mapContainer.getEvictor() != Evictor.NULL_EVICTOR
            && evictionConfig.getMaxSizePolicy() == PER_NODE;
    for (int i = 0; i < keyRecordExpiry.size(); i += 3) {
        Data dataKey = (Data) keyRecordExpiry.get(i);
        Record record = (Record) keyRecordExpiry.get(i + 1);
        ExpiryMetadata expiryMetadata = (ExpiryMetadata) keyRecordExpiry.get(i + 2);
        if (perNodeEvictionConfigured) {
            if (ownedEntryCountOnThisNode >= evictionConfig.getSize()) {
                if (operation.getReplicaIndex() == 0) {
                    recordStore.doPostEvictionOperations(dataKey, record.getValue(), ExpiryReason.NOT_EXPIRED);
                }
            } else {
                recordStore.putOrUpdateReplicatedRecord(dataKey, record, expiryMetadata, populateIndexes, nowInMillis);
                ownedEntryCountOnThisNode++;
            }
        } else {
            recordStore.putOrUpdateReplicatedRecord(dataKey, record, expiryMetadata, populateIndexes, nowInMillis);
            if (recordStore.shouldEvict()) {
                // No need to continue replicating records anymore.
                // We are already over eviction threshold, each put record will cause another eviction.
                recordStore.evictEntries(dataKey);
                break;
            }
        }
        recordStore.disposeDeferredBlocks();
    }
}
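The method walks a single flat list in which each entry occupies three consecutive slots: key, record, expiry metadata. The following standalone sketch only illustrates that triplet layout; the class name and the String/Long/Integer elements are hypothetical stand-ins for Hazelcast's Data, Record and ExpiryMetadata types, not the real API.

import java.util.ArrayList;
import java.util.List;

public class TripletListSketch {
    public static void main(String[] args) {
        List<Object> keyRecordExpiry = new ArrayList<>();
        // producer side: always append the three elements of an entry together
        keyRecordExpiry.add("key-1");           // stand-in for Data
        keyRecordExpiry.add(42L);               // stand-in for Record
        keyRecordExpiry.add(Integer.MAX_VALUE); // stand-in for ExpiryMetadata

        // consumer side: mirrors the i += 3 loop above
        for (int i = 0; i < keyRecordExpiry.size(); i += 3) {
            Object dataKey = keyRecordExpiry.get(i);
            Object record = keyRecordExpiry.get(i + 1);
            Object expiry = keyRecordExpiry.get(i + 2);
            System.out.println(dataKey + " -> " + record + " (expiry: " + expiry + ")");
        }
    }
}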
use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
In class MergeOperation, method toBackupListByRemovingEvictedRecords:
/**
 * Since records may get evicted on NOOME after they have
 * been merged, we re-check the backup pair list to
 * eliminate evicted entries.
 *
 * @return list of existing records which can
 * safely be transferred to the backup replica.
 */
@Nonnull
private List toBackupListByRemovingEvictedRecords() {
    List toBackupList = new ArrayList(backupPairs.size());
    for (int i = 0; i < backupPairs.size(); i += 2) {
        Data dataKey = ((Data) backupPairs.get(i));
        Record record = recordStore.getRecord(dataKey);
        if (record != null) {
            toBackupList.add(dataKey);
            toBackupList.add(backupPairs.get(i + 1));
            toBackupList.add(record);
            toBackupList.add(recordStore.getExpirySystem().getExpiryMetadata(dataKey));
        }
    }
    return toBackupList;
}
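Here the input is a flat (key, value) pair list and the output is a flat (key, value, record, expiry metadata) list that keeps only keys still present in the record store. A minimal standalone sketch of that filtering pattern follows; the HashMap stands in for the RecordStore and all names are illustrative, not Hazelcast API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class BackupListSketch {
    public static void main(String[] args) {
        List<Object> backupPairs = new ArrayList<>();
        backupPairs.add("k1"); backupPairs.add("v1");
        backupPairs.add("k2"); backupPairs.add("v2");

        Map<String, String> store = new HashMap<>();
        store.put("k1", "record-1"); // assume "k2" was evicted after the merge

        List<Object> toBackupList = new ArrayList<>(backupPairs.size());
        for (int i = 0; i < backupPairs.size(); i += 2) {
            String key = (String) backupPairs.get(i);
            String record = store.get(key);        // analogous to getRecord(dataKey)
            if (record != null) {                  // skip entries that no longer exist
                toBackupList.add(key);
                toBackupList.add(backupPairs.get(i + 1));
                toBackupList.add(record);
                toBackupList.add("no-expiry");     // stand-in for ExpiryMetadata
            }
        }
        System.out.println(toBackupList); // [k1, v1, record-1, no-expiry]
    }
}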
use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
In class PutAllBackupOperation, method putBackup:
private void putBackup(Data key, Record record, ExpiryMetadata expiryMetadata) {
    Record currentRecord = recordStore.putBackup(key, record,
            expiryMetadata.getTtl(), expiryMetadata.getMaxIdle(),
            expiryMetadata.getExpirationTime(), getCallerProvenance());
    Records.copyMetadataFrom(record, currentRecord);
    publishWanUpdate(key, record.getValue());
    evict(key);
}
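The key step is storing the backup record and then copying metadata from the incoming record onto the record the store actually created. The standalone sketch below shows that "store, then copy metadata" idea with a simplified record class; SimpleRecord, copyMetadataFrom, and the exact set of copied fields are assumptions for illustration, not the behavior of Records.copyMetadataFrom.

public class CopyMetadataSketch {
    static final class SimpleRecord {
        Object value;
        long version;
        long lastUpdateTime;
        SimpleRecord(Object value) { this.value = value; }
    }

    // Loosely analogous to copying metadata from a source record to the stored one:
    // the value is already in place, only bookkeeping fields are carried over.
    static void copyMetadataFrom(SimpleRecord source, SimpleRecord target) {
        target.version = source.version;
        target.lastUpdateTime = source.lastUpdateTime;
    }

    public static void main(String[] args) {
        SimpleRecord incoming = new SimpleRecord("v");
        incoming.version = 7;
        incoming.lastUpdateTime = System.currentTimeMillis();

        SimpleRecord stored = new SimpleRecord("v"); // what the store returned after the put
        copyMetadataFrom(incoming, stored);
        System.out.println("stored version = " + stored.version);
    }
}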
use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
In class MapChunk, method putOrUpdateReplicatedDataWithPerNodeEviction:
// owned or backup
private void putOrUpdateReplicatedDataWithPerNodeEviction(RecordStore recordStore) {
    MapContainer mapContainer = recordStore.getMapContainer();
    EvictionConfig evictionConfig = mapContainer.getMapConfig().getEvictionConfig();
    long ownedEntryCountOnThisNode = entryCountOnThisNode(mapContainer);
    int count = 0;
    long nowInMillis = Clock.currentTimeMillis();
    do {
        Data dataKey = (Data) keyRecordExpiry.poll();
        Record record = (Record) keyRecordExpiry.poll();
        ExpiryMetadata expiryMetadata = (ExpiryMetadata) keyRecordExpiry.poll();
        if (ownedEntryCountOnThisNode >= evictionConfig.getSize()) {
            if (getReplicaIndex() == 0) {
                recordStore.doPostEvictionOperations(dataKey, record.getValue(), ExpiryReason.NOT_EXPIRED);
            }
        } else {
            Indexes indexes = mapContainer.getIndexes(recordStore.getPartitionId());
            recordStore.putOrUpdateReplicatedRecord(dataKey, record, expiryMetadata,
                    indexesMustBePopulated(indexes), nowInMillis);
            ownedEntryCountOnThisNode++;
        }
        if (++count % DISPOSE_AT_COUNT == 0) {
            recordStore.disposeDeferredBlocks();
        }
    } while (!keyRecordExpiry.isEmpty());
    recordStore.disposeDeferredBlocks();
}
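This variant drains a queue of key/record/expiry triplets and amortizes cleanup by disposing deferred blocks only every DISPOSE_AT_COUNT entries, plus once more at the end. The standalone sketch below shows that drain-and-amortize pattern with plain JDK types; the class name, the constant's value, and the cleanup body are illustrative assumptions, not the MapChunk implementation.

import java.util.ArrayDeque;
import java.util.Deque;

public class BatchedCleanupSketch {
    private static final int DISPOSE_AT_COUNT = 1024; // illustrative value

    public static void main(String[] args) {
        Deque<Object> keyRecordExpiry = new ArrayDeque<>();
        for (int i = 0; i < 10; i++) {               // enqueue 10 triplets
            keyRecordExpiry.add("key-" + i);
            keyRecordExpiry.add("record-" + i);
            keyRecordExpiry.add("expiry-" + i);
        }

        int count = 0;
        while (!keyRecordExpiry.isEmpty()) {
            Object key = keyRecordExpiry.poll();
            Object record = keyRecordExpiry.poll();
            Object expiry = keyRecordExpiry.poll();
            // ... apply the triplet to the store ...
            if (++count % DISPOSE_AT_COUNT == 0) {
                cleanup();                           // periodic, amortized cleanup
            }
        }
        cleanup();                                   // final cleanup after the drain
    }

    private static void cleanup() {
        // stands in for the periodic disposeDeferredBlocks() call
    }
}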
use of com.hazelcast.map.impl.record.Record in project hazelcast by hazelcast.
In class TxnSetBackupOperation, method runInternal:
@Override
protected void runInternal() {
    Record currentRecord = recordStore.putBackupTxn(dataKey, record, expiryMetadata,
            isPutTransient(), getCallerProvenance(), transactionId);
    Records.copyMetadataFrom(record, currentRecord);
    recordStore.forceUnlock(dataKey);
}
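The ordering here is: apply the transactional backup write first, then release the per-key lock taken earlier in the transaction. The standalone sketch below shows only that apply-then-unlock ordering with a ReentrantLock per key as a simplified stand-in for Hazelcast's per-key lock handling; class, method, and field names are hypothetical.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

public class TxnBackupSketch {
    private final Map<String, ReentrantLock> locks = new ConcurrentHashMap<>();
    private final Map<String, Object> store = new ConcurrentHashMap<>();

    void prepare(String key) {
        // lock the key when the transaction is prepared
        locks.computeIfAbsent(key, k -> new ReentrantLock()).lock();
    }

    void runInternal(String key, Object record) {
        store.put(key, record);      // apply the backup write
        // ... copy metadata onto the stored record ...
        locks.get(key).unlock();     // release the key only after the write is applied
    }

    public static void main(String[] args) {
        TxnBackupSketch sketch = new TxnBackupSketch();
        sketch.prepare("k1");
        sketch.runInternal("k1", "v1");
        System.out.println(sketch.store);
    }
}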