Use of com.hazelcast.internal.serialization.SerializationService in the Hazelcast project:
class PartitionWideEntryOperation, method innerBeforeRun.
/**
 * Pre-execution hook: after the superclass setup, runs managed-context
 * initialization on the entry processor (so deserialized instances get their
 * dependencies injected) and resets per-run state for this operation.
 *
 * @throws Exception if the superclass hook fails
 */
@Override
public void innerBeforeRun() throws Exception {
    super.innerBeforeRun();
    ManagedContext managedContext = getNodeEngine().getSerializationService().getManagedContext();
    entryProcessor = (EntryProcessor) managedContext.initialize(entryProcessor);
    // fresh run: no index-derived keys yet
    keysFromIndex = null;
    queryOptimizer = mapServiceContext.getQueryOptimizer();
}
Use of com.hazelcast.internal.serialization.SerializationService in the Hazelcast project:
class MultiMapContainer, method mergeNewValue.
/**
 * Applies the given split-brain merge policy to the merging entry and, when the
 * policy yields a non-empty value collection, materializes those values as
 * records in this container.
 *
 * @param mergePolicy  the split-brain merge policy to apply
 * @param mergingEntry the entry coming from the other side of the split
 * @return the merged {@link MultiMapValue}, or {@code null} if the policy
 *         discarded the entry (returned {@code null} or an empty collection)
 */
private MultiMapValue mergeNewValue(SplitBrainMergePolicy<Collection<Object>, MultiMapMergeTypes<Object, Object>, Collection<Object>> mergePolicy, MultiMapMergeTypes<Object, Object> mergingEntry) {
    Collection<Object> mergedValues = mergePolicy.merge(mergingEntry, null);
    if (mergedValues == null || mergedValues.isEmpty()) {
        return null;
    }
    SerializationService ss = nodeEngine.getSerializationService();
    MultiMapValue target = getOrCreateMultiMapValue(ss.toData(mergingEntry.getRawKey()));
    createNewMultiMapRecords(target.getCollection(false), mergedValues);
    // statistics are carried over only when the policy kept the incoming values unchanged
    if (mergedValues.equals(mergingEntry.getRawValue())) {
        setMergedStatistics(mergingEntry, target);
    }
    return target;
}
Use of com.hazelcast.internal.serialization.SerializationService in the Hazelcast project:
class MultiMapContainer, method createNewMultiMapRecords.
/**
 * Wraps each value in a fresh {@link MultiMapRecord} — serializing the value
 * to binary form when the container is configured as binary — and appends the
 * records to the supplied collection.
 *
 * @param records target collection that receives the newly created records
 * @param values  raw values to convert into records
 */
private void createNewMultiMapRecords(Collection<MultiMapRecord> records, Collection<Object> values) {
    SerializationService ss = nodeEngine.getSerializationService();
    boolean binary = config.isBinary();
    for (Object value : values) {
        Object payload = binary ? ss.toData(value) : value;
        records.add(new MultiMapRecord(nextId(), payload));
    }
}
Use of com.hazelcast.internal.serialization.SerializationService in the Hazelcast project:
class ScheduledExecutorContainer, method merge.
/**
 * Merges the given {@link ScheduledExecutorMergeTypes} via the given {@link SplitBrainMergePolicy}.
 *
 * @param mergingEntry the {@link ScheduledExecutorMergeTypes} instance to merge
 * @param mergePolicy the {@link SplitBrainMergePolicy} instance to apply
 * @return the used {@link ScheduledTaskDescriptor} if merge is applied, otherwise {@code null}
 */
public ScheduledTaskDescriptor merge(ScheduledExecutorMergeTypes mergingEntry, SplitBrainMergePolicy<ScheduledTaskDescriptor, ScheduledExecutorMergeTypes, ScheduledTaskDescriptor> mergePolicy) {
SerializationService serializationService = nodeEngine.getSerializationService();
// run managed-context initialization on both the entry and the policy so that
// deserialized instances get their dependencies injected before merging
mergingEntry = (ScheduledExecutorMergeTypes) serializationService.getManagedContext().initialize(mergingEntry);
mergePolicy = (SplitBrainMergePolicy<ScheduledTaskDescriptor, ScheduledExecutorMergeTypes, ScheduledTaskDescriptor>) serializationService.getManagedContext().initialize(mergePolicy);
// try to find an existing task with the same definition
ScheduledTaskDescriptor mergingTask = ((ScheduledExecutorMergingEntryImpl) mergingEntry).getRawValue();
ScheduledTaskDescriptor existingTask = null;
for (ScheduledTaskDescriptor task : tasks.values()) {
if (mergingTask.equals(task)) {
existingTask = task;
break;
}
}
if (existingTask == null) {
// no local counterpart: ask the policy whether to adopt the incoming task
ScheduledTaskDescriptor newTask = mergePolicy.merge(mergingEntry, null);
if (newTask != null) {
enqueueSuspended(newTask, false);
return newTask;
}
} else {
ScheduledExecutorMergeTypes existingEntry = createMergingEntry(serializationService, existingTask);
ScheduledTaskDescriptor newTask = mergePolicy.merge(mergingEntry, existingEntry);
// the descriptors compare equal (that is how existingTask was found above),
// but we still want to be able to choose which one is merged (e.g. PassThroughMergePolicy);
// the reference check below only replaces when the policy picked a different instance
if (newTask != null && newTask != existingTask) {
// cancel the existing task, before replacing it
existingTask.cancel(true);
enqueueSuspended(newTask, true);
return newTask;
}
}
// the merging task was already suspended on the original node, so we don't have to cancel it here
return null;
}
Use of com.hazelcast.internal.serialization.SerializationService in the Hazelcast project:
class CachePutAllTest, method testPutAll.
/**
 * Puts a batch of entries via {@code putAll} and verifies both the primary
 * values (through the public cache API) and the per-partition record stores
 * on every cluster member, covering backups as well.
 */
@Test
public void testPutAll() {
    ICache<String, String> cache = createCache();
    String cacheName = cache.getName();
    Map<String, String> entries = createAndFillEntries();
    cache.putAll(entries);
    // Verify that put-all works: every entry must be readable through the cache API
    for (Map.Entry<String, String> entry : entries.entrySet()) {
        assertEquals(entry.getValue(), cache.get(entry.getKey()));
    }
    Node node = getNode(hazelcastInstance);
    InternalPartitionService partitionService = node.getPartitionService();
    SerializationService serializationService = node.getSerializationService();
    // Verify that backup of put-all works: each member's record store for the
    // key's partition must hold the expected value
    for (Map.Entry<String, String> entry : entries.entrySet()) {
        Data keyData = serializationService.toData(entry.getKey());
        int keyPartitionId = partitionService.getPartitionId(keyData);
        for (int i = 0; i < INSTANCE_COUNT; i++) {
            ICacheService cacheService = getNode(hazelcastInstances[i]).getNodeEngine().getService(ICacheService.SERVICE_NAME);
            ICacheRecordStore recordStore = cacheService.getRecordStore("/hz/" + cacheName, keyPartitionId);
            assertNotNull(recordStore);
            String actualValue = serializationService.toObject(recordStore.get(keyData, null));
            assertEquals(entry.getValue(), actualValue);
        }
    }
}
Aggregations