Usage of com.hazelcast.cache.impl.record.CacheRecord in the hazelcast/hazelcast project: class CacheEntryProcessorOperation, method afterRun.
/**
 * Publishes a WAN replication event reflecting the effect of the entry processor.
 * If the entry still exists after processing, an update event carrying the current
 * record is published; otherwise a remove event is published for the key.
 *
 * @throws Exception if event publication fails
 */
@Override
public void afterRun() throws Exception {
    if (cache.isWanReplicationEnabled()) {
        // Re-read the record so the WAN event reflects the processor's outcome.
        CacheRecord record = cache.getRecord(key);
        if (record != null) {
            // FIX: build the entry view from the record that was null-checked above.
            // The original dereferenced 'backupRecord' here, which is never
            // null-checked in this method and can be stale or null even when
            // 'record' is non-null (the backup sibling operation uses 'record').
            CacheEntryView<Data, Data> entryView = CacheEntryViews.createDefaultEntryView(key,
                    getNodeEngine().getSerializationService().toData(record.getValue()), record);
            wanEventPublisher.publishWanReplicationUpdate(name, entryView);
        } else {
            // The processor removed the entry; propagate the removal over WAN.
            wanEventPublisher.publishWanReplicationRemove(name, key);
        }
    }
}
Usage of com.hazelcast.cache.impl.record.CacheRecord in the hazelcast/hazelcast project: class CacheLoadAllOperation, method run.
/**
 * Loads the requested keys that belong to this operation's partition into the
 * record store and collects the resulting records for backup replication.
 * On a cache-level load failure the error is captured in the response rather
 * than propagated.
 *
 * @throws Exception if the operation infrastructure fails
 */
@Override
public void run() throws Exception {
    // Keep only the keys owned by the partition this operation runs on.
    IPartitionService partitions = getNodeEngine().getPartitionService();
    int thisPartition = getPartitionId();
    Set<Data> ownedKeys = null;
    if (keys != null) {
        ownedKeys = new HashSet<Data>();
        for (Data candidate : keys) {
            if (partitions.getPartitionId(candidate) == thisPartition) {
                ownedKeys.add(candidate);
            }
        }
    }
    if (ownedKeys == null || ownedKeys.isEmpty()) {
        // Nothing to load on this partition.
        return;
    }
    try {
        ICacheService cacheService = getService();
        cache = cacheService.getOrCreateRecordStore(name, thisPartition);
        Set<Data> loadedKeys = cache.loadAll(ownedKeys, replaceExistingValues);
        if (!loadedKeys.isEmpty()) {
            backupRecords = new HashMap<Data, CacheRecord>(loadedKeys.size());
            for (Data loadedKey : loadedKeys) {
                CacheRecord loadedRecord = cache.getRecord(loadedKey);
                // A loaded key may already have been evicted again;
                // such keys are not sent to the backup.
                if (loadedRecord != null) {
                    backupRecords.put(loadedKey, loadedRecord);
                }
            }
            shouldBackup = !backupRecords.isEmpty();
        }
    } catch (CacheException e) {
        response = new CacheClearResponse(e);
    }
}
Usage of com.hazelcast.cache.impl.record.CacheRecord in the hazelcast/hazelcast project: class CacheBackupEntryProcessorOperation, method afterRunInternal.
/**
 * After the backup entry processor has run, mirrors its effect to WAN:
 * an update event if the record still exists, a remove event otherwise.
 *
 * @throws Exception if event publication fails
 */
@Override
public void afterRunInternal() throws Exception {
    // Only publish WAN events when replication is enabled for this cache.
    if (!cache.isWanReplicationEnabled()) {
        return;
    }
    CacheRecord current = cache.getRecord(key);
    if (current == null) {
        // The entry no longer exists after processing -> WAN remove.
        wanEventPublisher.publishWanReplicationRemove(name, key);
        return;
    }
    Data valueData = getNodeEngine().getSerializationService().toData(current.getValue());
    CacheEntryView<Data, Data> view = CacheEntryViews.createDefaultEntryView(key, valueData, current);
    wanEventPublisher.publishWanReplicationUpdate(name, view);
}
Usage of com.hazelcast.cache.impl.record.CacheRecord in the hazelcast/hazelcast project: class CacheReplicationOperation, method readInternal.
/**
 * Deserializes the replication payload: the cache configurations, then one
 * (cache name -> record map) section per cache, then the Near Cache state.
 * The read order must mirror the corresponding write side exactly.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
    super.readInternal(in);
    // First section: the list of cache configurations.
    int confSize = in.readInt();
    for (int i = 0; i < confSize; i++) {
        final CacheConfig config = in.readObject();
        configs.add(config);
    }
    // Second section: per-cache record maps.
    int count = in.readInt();
    for (int i = 0; i < count; i++) {
        int subCount = in.readInt();
        String name = in.readUTF();
        Map<Data, CacheRecord> m = new HashMap<Data, CacheRecord>(subCount);
        data.put(name, m);
        // Iterate subCount + 1 times because the writer terminates each record map
        // which adds another Data entry at the end of the stream!
        for (int j = 0; j < subCount + 1; j++) {
            Data key = in.readData();
            // A null/empty key marks the end of this cache's records; the actual
            // record count may be lower than
            // the number on the stream due to found expired entries
            if (key == null || key.dataSize() == 0) {
                break;
            }
            CacheRecord record = in.readObject();
            m.put(key, record);
        }
    }
    // Final section: Near Cache invalidation/state data.
    nearCacheStateHolder.readData(in);
}
Usage of com.hazelcast.cache.impl.record.CacheRecord in the hazelcast/hazelcast project: class CacheReplicationOperation, method run.
/**
 * Applies the replicated cache data on this member: each target record store
 * is cleared and repopulated from the transferred records, then the Near
 * Cache state is applied on the primary replica only.
 *
 * @throws Exception if applying the replicated state fails
 */
@Override
public void run() throws Exception {
    ICacheService cacheService = getService();
    for (Map.Entry<String, Map<Data, CacheRecord>> cacheEntry : data.entrySet()) {
        ICacheRecordStore store = cacheService.getOrCreateRecordStore(cacheEntry.getKey(), getPartitionId());
        store.clear();
        Map<Data, CacheRecord> records = cacheEntry.getValue();
        Iterator<Map.Entry<Data, CacheRecord>> it = records.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<Data, CacheRecord> recordEntry = it.next();
            Data recordKey = recordEntry.getKey();
            CacheRecord recordValue = recordEntry.getValue();
            // Remove each entry from the source map before inserting it,
            // presumably to release transferred data as it is consumed — TODO confirm.
            it.remove();
            store.putRecord(recordKey, recordValue);
        }
    }
    data.clear();
    // Near Cache state is only applied on the primary replica.
    if (getReplicaIndex() == 0) {
        nearCacheStateHolder.applyState();
    }
}
Aggregations