Usage of com.hazelcast.map.impl.record.Record in the Hazelcast project: class DefaultRecordStore, method flush.
/**
 * Writes the given evicted records through to the map store.
 *
 * @param recordsToBeFlushed the records that should be persisted via the map-store
 * @param backup             {@code true} when this store acts as a backup replica,
 *                           {@code false} otherwise
 */
protected void flush(Collection<Record> recordsToBeFlushed, boolean backup) {
    for (Record record : recordsToBeFlushed) {
        mapDataStore.flush(record.getKey(), record.getValue(), backup);
    }
}
Usage of com.hazelcast.map.impl.record.Record in the Hazelcast project: class MapReplicationStateHolder, method prepare.
/**
 * Builds the replication state for every map in the given partition container:
 * records each store's loaded flag and converts its records into
 * {@link RecordReplicationInfo} instances, keyed by map name.
 *
 * @param container    partition container whose maps are prepared for migration
 * @param replicaIndex replica index this state is prepared for; maps whose total
 *                     backup count is smaller than this index are skipped
 */
void prepare(PartitionContainer container, int replicaIndex) {
    int mapCount = container.getMaps().size();
    data = new HashMap<String, Set<RecordReplicationInfo>>(mapCount);
    loaded = new HashMap<String, Boolean>(mapCount);
    for (Map.Entry<String, RecordStore> mapEntry : container.getMaps().entrySet()) {
        RecordStore store = mapEntry.getValue();
        MapContainer mapContainer = store.getMapContainer();
        if (mapContainer.getMapConfig().getTotalBackupCount() < replicaIndex) {
            // this replica index exceeds the configured backup count; nothing to migrate
            continue;
        }
        String mapName = mapEntry.getKey();
        MapServiceContext serviceContext = mapContainer.getMapServiceContext();
        loaded.put(mapName, store.isLoaded());
        // now prepare data to migrate records
        Set<RecordReplicationInfo> replicationInfos = new HashSet<RecordReplicationInfo>(store.size());
        Iterator<Record> it = store.iterator();
        while (it.hasNext()) {
            Record record = it.next();
            replicationInfos.add(mapReplicationOperation.createRecordReplicationInfo(record.getKey(), record, serviceContext));
        }
        data.put(mapName, replicationInfos);
    }
}
Usage of com.hazelcast.map.impl.record.Record in the Hazelcast project: class MergeOperation, method getBackupOperation.
/**
 * Creates the backup operation mirroring this merge on backup replicas:
 * a put carrying the merged record's replication info when the merge
 * produced a value, otherwise a remove.
 */
@Override
public Operation getBackupOperation() {
    if (dataValue != null) {
        Record mergedRecord = recordStore.getRecord(dataKey);
        RecordInfo recordInfo = Records.buildRecordInfo(mergedRecord);
        return new PutBackupOperation(name, dataKey, dataValue, recordInfo, false, false, disableWanReplicationEvent);
    }
    return new RemoveBackupOperation(name, dataKey, false, disableWanReplicationEvent);
}
Usage of com.hazelcast.map.impl.record.Record in the Hazelcast project: class MergeOperation, method run.
/**
 * Performs the merge: snapshots the pre-merge value, delegates to the record
 * store's merge policy, and — when the merge changed the entry — captures the
 * resulting value and the merging value in serialized form.
 */
@Override
public void run() {
    Record existing = recordStore.getRecord(dataKey);
    if (existing != null) {
        dataOldValue = mapServiceContext.toData(existing.getValue());
    }
    merged = recordStore.merge(dataKey, mergingEntry, mergePolicy);
    if (!merged) {
        return;
    }
    Record mergedRecord = recordStore.getRecord(dataKey);
    if (mergedRecord != null) {
        dataValue = mapServiceContext.toData(mergedRecord.getValue());
        mergingValue = mapServiceContext.toData(mergingEntry.getValue());
    }
}
Usage of com.hazelcast.map.impl.record.Record in the Hazelcast project: class PutFromLoadAllOperation, method run.
/**
 * Applies key/value pairs produced by a map-store loadAll into the record store.
 * keyValueSequence is a flat list laid out as [key0, value0, key1, value1, ...],
 * hence the step of 2 in the loop. For each pair: stores it, runs put
 * interceptors, optionally re-reads the post-processed value, then publishes
 * the entry event, WAN replication event, and a Near Cache invalidation.
 */
@Override
public void run() throws Exception {
boolean hasInterceptor = mapServiceContext.hasInterceptor(name);
List<Data> keyValueSequence = this.keyValueSequence;
for (int i = 0; i < keyValueSequence.size(); i += 2) {
Data key = keyValueSequence.get(i);
Data dataValue = keyValueSequence.get(i + 1);
// here object conversion is for interceptors.
Object value = hasInterceptor ? mapServiceContext.toObject(dataValue) : dataValue;
Object previousValue = recordStore.putFromLoad(key, value);
callAfterPutInterceptors(value);
Record record = recordStore.getRecord(key);
// NOTE(review): getRecord may return null (e.g. if the entry is evicted or
// expired right after putFromLoad); record.getValue() below would then NPE —
// confirm putFromLoad guarantees a live record at this point.
if (isPostProcessing(recordStore)) {
value = record.getValue();
}
publishEntryEvent(key, previousValue, value);
publishWanReplicationEvent(key, value, record);
addInvalidation(key);
}
}
Aggregations