Use of com.hazelcast.cache.impl.record.CacheRecord in project hazelcast by hazelcast.
Class CacheRecordStore, method merge:
public CacheRecord merge(CacheEntryView<Data, Data> cacheEntryView, CacheMergePolicy mergePolicy) {
    final long now = Clock.currentTimeMillis();
    final long start = isStatisticsEnabled() ? System.nanoTime() : 0;
    boolean merged = false;
    Data key = cacheEntryView.getKey();
    Data value = cacheEntryView.getValue();
    long expiryTime = cacheEntryView.getExpirationTime();
    CacheRecord record = records.get(key);
    boolean isExpired = processExpiredEntry(key, record, now);
    if (record == null || isExpired) {
        // No live local entry: let the merge policy decide against a null existing view
        Object newValue = mergePolicy.merge(name,
                createCacheEntryView(key, value, cacheEntryView.getCreationTime(),
                        cacheEntryView.getExpirationTime(), cacheEntryView.getLastAccessTime(),
                        cacheEntryView.getAccessHit(), mergePolicy),
                null);
        if (newValue != null) {
            record = createRecordWithExpiry(key, newValue, expiryTime, now, true, IGNORE_COMPLETION);
            merged = record != null;
        }
    } else {
        // Live local entry: offer both the merging view and the existing record's view to the policy
        Object existingValue = record.getValue();
        Object newValue = mergePolicy.merge(name,
                createCacheEntryView(key, value, cacheEntryView.getCreationTime(),
                        cacheEntryView.getExpirationTime(), cacheEntryView.getLastAccessTime(),
                        cacheEntryView.getAccessHit(), mergePolicy),
                createCacheEntryView(key, existingValue, cacheEntryView.getCreationTime(),
                        record.getExpirationTime(), record.getLastAccessTime(),
                        record.getAccessHit(), mergePolicy));
        if (existingValue != newValue) {
            merged = updateRecordWithExpiry(key, newValue, record, expiryTime, now, true, IGNORE_COMPLETION);
        }
    }
    if (merged && isStatisticsEnabled()) {
        statistics.increaseCachePuts(1);
        statistics.addPutTimeNanos(System.nanoTime() - start);
    }
    return merged ? record : null;
}
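The record store only applies whatever the supplied CacheMergePolicy decides; the policy itself receives the CacheEntryViews built above. As a minimal, hypothetical sketch of the policy side, the class below prefers the entry with more access hits. The class name PreferHigherHitsPolicy and the tie-breaking rule are illustrative assumptions, not one of Hazelcast's built-in policies, and the serialization callbacks are included only because some versions of the interface require them:

import java.io.IOException;
import java.io.Serializable;

import com.hazelcast.cache.CacheEntryView;
import com.hazelcast.cache.CacheMergePolicy;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;

// Illustrative custom merge policy: keep whichever entry has been hit more often.
// In a real cluster the policy class must also be on the classpath of every member.
public class PreferHigherHitsPolicy implements CacheMergePolicy, Serializable {

    public Object merge(String cacheName, CacheEntryView mergingEntry, CacheEntryView existingEntry) {
        // No existing entry on this side: accept the merging (remote) value
        if (existingEntry == null) {
            return mergingEntry.getValue();
        }
        // Otherwise prefer the entry with the higher access-hit count; the existing entry wins ties
        if (mergingEntry.getAccessHit() > existingEntry.getAccessHit()) {
            return mergingEntry.getValue();
        }
        return existingEntry.getValue();
    }

    // No state to serialize; these callbacks exist for versions where the
    // policy interface extends DataSerializable (an assumption, hence no @Override)
    public void writeData(ObjectDataOutput out) throws IOException {
    }

    public void readData(ObjectDataInput in) throws IOException {
    }
}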
Use of com.hazelcast.cache.impl.record.CacheRecord in project hazelcast by hazelcast.
Class CacheSplitBrainHandler, method prepareMergeRunnable:
Runnable prepareMergeRunnable() {
    final Map<String, Map<Data, CacheRecord>> recordMap =
            new HashMap<String, Map<Data, CacheRecord>>(configs.size());
    final IPartitionService partitionService = nodeEngine.getPartitionService();
    final int partitionCount = partitionService.getPartitionCount();
    final Address thisAddress = nodeEngine.getClusterService().getThisAddress();
    for (int i = 0; i < partitionCount; i++) {
        // Add the entries of owned partitions so they will be merged
        if (thisAddress.equals(partitionService.getPartitionOwner(i))) {
            CachePartitionSegment segment = segments[i];
            Iterator<ICacheRecordStore> iter = segment.recordStoreIterator();
            while (iter.hasNext()) {
                ICacheRecordStore cacheRecordStore = iter.next();
                if (!(cacheRecordStore instanceof SplitBrainAwareCacheRecordStore)) {
                    continue;
                }
                String cacheName = cacheRecordStore.getName();
                Map<Data, CacheRecord> records = recordMap.get(cacheName);
                if (records == null) {
                    records = new HashMap<Data, CacheRecord>(cacheRecordStore.size());
                    recordMap.put(cacheName, records);
                }
                for (Map.Entry<Data, CacheRecord> cacheRecordEntry
                        : cacheRecordStore.getReadOnlyRecords().entrySet()) {
                    Data key = cacheRecordEntry.getKey();
                    CacheRecord cacheRecord = cacheRecordEntry.getValue();
                    records.put(key, cacheRecord);
                }
                // Clear all records, whether owned or backup
                cacheRecordStore.clear();
                // Send the cache invalidation event regardless of whether anything was actually
                // cleared (there is no need to know how many entries were removed)
                final CacheService cacheService = nodeEngine.getService(CacheService.SERVICE_NAME);
                cacheService.sendInvalidationEvent(cacheName, null, AbstractCacheRecordStore.SOURCE_NOT_AVAILABLE);
            }
        }
    }
    return new CacheMerger(nodeEngine, configs, recordMap, mergePolicyProvider);
}
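Which policy the returned CacheMerger ultimately applies per cache comes from the cache configuration, resolved through the mergePolicyProvider passed above. A hedged configuration sketch, assuming the Hazelcast 3.x CacheSimpleConfig API; the cache name "orders" is arbitrary and the fully qualified policy class name is the built-in higher-hits policy as I understand it, so treat the exact name as an assumption:

import com.hazelcast.config.CacheSimpleConfig;
import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

public class CacheMergePolicyConfigExample {
    public static void main(String[] args) {
        Config config = new Config();

        // Configure the split-brain merge policy for the cache named "orders"
        CacheSimpleConfig cacheConfig = new CacheSimpleConfig();
        cacheConfig.setName("orders");
        cacheConfig.setMergePolicy("com.hazelcast.cache.merge.HigherHitsCacheMergePolicy");
        config.addCacheConfig(cacheConfig);

        HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
    }
}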
Use of com.hazelcast.cache.impl.record.CacheRecord in project hazelcast by hazelcast.
Class CacheReplicationOperation, method writeInternal:
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
    int confSize = configs.size();
    out.writeInt(confSize);
    for (CacheConfig config : configs) {
        out.writeObject(config);
    }
    int count = data.size();
    out.writeInt(count);
    long now = Clock.currentTimeMillis();
    for (Map.Entry<String, Map<Data, CacheRecord>> entry : data.entrySet()) {
        Map<Data, CacheRecord> cacheMap = entry.getValue();
        int subCount = cacheMap.size();
        out.writeInt(subCount);
        out.writeUTF(entry.getKey());
        for (Map.Entry<Data, CacheRecord> e : cacheMap.entrySet()) {
            final Data key = e.getKey();
            final CacheRecord record = e.getValue();
            if (record.isExpiredAt(now)) {
                continue;
            }
            out.writeData(key);
            out.writeObject(record);
        }
        // A null Data terminates the iteration on the read side in case expired entries were
        // skipped while serializing, since the real number of records written may then be
        // smaller than the subCount written above
        out.writeData(null);
    }
    nearCacheStateHolder.writeData(out);
}
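For symmetry, here is a sketch of how a matching readInternal could consume this format. It is a reconstruction from what writeInternal emits, not copied from Hazelcast source: it assumes configs, data and nearCacheStateHolder are fields of the same operation (configs being a collection), and that readData() yields null for the null sentinel written above. The sentinel is what lets the reader stop early when expired entries were skipped:

@Override
protected void readInternal(ObjectDataInput in) throws IOException {
    int confSize = in.readInt();
    for (int i = 0; i < confSize; i++) {
        CacheConfig config = in.readObject();
        configs.add(config);
    }
    int count = in.readInt();
    for (int i = 0; i < count; i++) {
        // subCount is only an upper bound: expired records were skipped on the write side
        int subCount = in.readInt();
        String cacheName = in.readUTF();
        Map<Data, CacheRecord> cacheMap = new HashMap<Data, CacheRecord>(subCount);
        data.put(cacheName, cacheMap);
        // Read at most subCount records plus the terminating null Data
        for (int j = 0; j < subCount + 1; j++) {
            Data key = in.readData();
            if (key == null) {
                // Sentinel reached: no more records for this cache
                break;
            }
            CacheRecord record = in.readObject();
            cacheMap.put(key, record);
        }
    }
    nearCacheStateHolder.readData(in);
}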
Use of com.hazelcast.cache.impl.record.CacheRecord in project hazelcast by hazelcast.
Class CachePutAllBackupOperation, method run:
@Override
public void run() throws Exception {
    if (cache == null) {
        return;
    }
    if (cacheRecords != null) {
        // Replay the primary's records directly into the backup record store
        for (Map.Entry<Data, CacheRecord> entry : cacheRecords.entrySet()) {
            CacheRecord record = entry.getValue();
            cache.putRecord(entry.getKey(), record);
        }
    }
}
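The cacheRecords map replayed above is produced on the primary replica by CachePutAllOperation (next example), which collects the records it stored. A hypothetical sketch of that hand-off on the primary operation; the CachePutAllBackupOperation constructor signature shown here is an assumption for illustration, not copied from Hazelcast source:

// Hypothetical wiring on the primary operation (see CachePutAllOperation below)
@Override
public Operation getBackupOperation() {
    return new CachePutAllBackupOperation(name, backupRecords);
}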
Use of com.hazelcast.cache.impl.record.CacheRecord in project hazelcast by hazelcast.
Class CachePutAllOperation, method run:
@Override
public void run() throws Exception {
    int partitionId = getPartitionId();
    String callerUuid = getCallerUuid();
    ICacheService service = getService();
    cache = service.getOrCreateRecordStore(name, partitionId);
    backupRecords = new HashMap<Data, CacheRecord>(entries.size());
    for (Map.Entry<Data, Data> entry : entries) {
        Data key = entry.getKey();
        Data value = entry.getValue();
        // Each put returns the stored CacheRecord; collect them so the backup
        // operation can replay the exact same records on the backup replicas
        CacheRecord backupRecord = cache.put(key, value, expiryPolicy, callerUuid, completionId);
        backupRecords.put(key, backupRecord);
    }
}
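From the caller's side, this operation is what a plain JCache putAll ends up as: the member or client proxy groups the batch by partition owner, which is an inference from the single partitionId used above. A small usage sketch with the standard JCache API; the cache name "orders" and its entries are arbitrary:

import java.util.HashMap;
import java.util.Map;

import javax.cache.Cache;
import javax.cache.CacheManager;
import javax.cache.Caching;
import javax.cache.configuration.MutableConfiguration;

public class PutAllExample {
    public static void main(String[] args) {
        // Hazelcast registers itself as a JCache provider when it is on the classpath
        CacheManager cacheManager = Caching.getCachingProvider().getCacheManager();
        Cache<String, String> cache =
                cacheManager.createCache("orders", new MutableConfiguration<String, String>());

        Map<String, String> batch = new HashMap<String, String>();
        batch.put("order-1", "pending");
        batch.put("order-2", "shipped");

        // Each partition's share of this batch is applied by a put-all operation on its owner
        cache.putAll(batch);
    }
}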