Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
Class MapMergePolicyQuickTest, method testLatestUpdateMapMergePolicy:
@Test
public void testLatestUpdateMapMergePolicy() {
    HazelcastInstance instance = createHazelcastInstance(getConfig());
    String name = randomString();
    IMap<String, String> map = instance.getMap(name);

    MapServiceContext mapServiceContext = getMapServiceContext(instance);
    Data dataKey = mapServiceContext.toData("key");
    Data dataValue = mapServiceContext.toData("value1");
    Data dataValue2 = mapServiceContext.toData("value2");

    RecordStore recordStore = mapServiceContext.getRecordStore(getPartitionId(instance, "key"), name);
    recordStore.beforeOperation();
    NodeEngine nodeEngine = mapServiceContext.getNodeEngine();
    SplitBrainMergePolicyProvider mergePolicyProvider = nodeEngine.getSplitBrainMergePolicyProvider();
    SplitBrainMergePolicy mergePolicy = mergePolicyProvider.getMergePolicy(LatestUpdateMergePolicy.class.getName());

    long now = Clock.currentTimeMillis();
    SimpleEntryView<Data, Data> initialEntry = new SimpleEntryView<>(dataKey, dataValue);
    initialEntry.setCreationTime(now);
    initialEntry.setLastUpdateTime(now);
    // need some latency to be sure that the target member's time is greater than "now"
    sleepMillis(100);
    recordStore.merge(createMergingEntry(nodeEngine.getSerializationService(), initialEntry), mergePolicy, CallerProvenance.NOT_WAN);

    SimpleEntryView<Data, Data> mergingEntry = new SimpleEntryView<>(dataKey, dataValue2);
    now = Clock.currentTimeMillis();
    mergingEntry.setCreationTime(now);
    mergingEntry.setLastUpdateTime(now);
    recordStore.merge(createMergingEntry(nodeEngine.getSerializationService(), mergingEntry), mergePolicy, CallerProvenance.NOT_WAN);

    assertEquals("value2", map.get("key"));
    recordStore.afterOperation();
}
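The lookup pattern in this test (resolve the key's owning partition, then ask the MapServiceContext for that partition's RecordStore) can be pulled into a small helper. The sketch below is an assumption, not part of the original test; it only reuses the getMapServiceContext and getPartitionId test-support calls already used above.

    // Hypothetical helper, assuming the same test-support methods as the test above.
    private static RecordStore recordStoreForKey(HazelcastInstance instance, String mapName, String key) {
        MapServiceContext mapServiceContext = getMapServiceContext(instance);
        int partitionId = getPartitionId(instance, key);
        return mapServiceContext.getRecordStore(partitionId, mapName);
    }

With such a helper the test body could read: RecordStore recordStore = recordStoreForKey(instance, name, "key");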
Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
Class QueryResultSizeLimiterTest, method initMocksWithConfiguration:
private void initMocksWithConfiguration(int maxResultSizeLimit, int maxLocalPartitionLimitForPreCheck) {
    Config config = new Config();
    config.setProperty(QUERY_RESULT_SIZE_LIMIT.getName(), valueOf(maxResultSizeLimit));
    config.setProperty(QUERY_MAX_LOCAL_PARTITION_LIMIT_FOR_PRE_CHECK.getName(), valueOf(maxLocalPartitionLimitForPreCheck));
    config.setProperty(ClusterProperty.PARTITION_COUNT.getName(), valueOf(PARTITION_COUNT));
    HazelcastProperties hazelcastProperties = new HazelcastProperties(config);

    InternalPartitionService partitionService = mock(InternalPartitionService.class);
    when(partitionService.getPartitionCount()).thenReturn(PARTITION_COUNT);

    NodeEngine nodeEngine = mock(NodeEngine.class);
    when(nodeEngine.getProperties()).thenReturn(hazelcastProperties);
    when(nodeEngine.getPartitionService()).thenReturn(partitionService);

    RecordStore recordStore = mock(RecordStore.class);
    when(recordStore.size()).then(new RecordStoreAnswer(localPartitions.values()));

    MapServiceContext mapServiceContext = mock(MapServiceContext.class);
    when(mapServiceContext.getNodeEngine()).thenReturn(nodeEngine);
    when(mapServiceContext.getRecordStore(anyInt(), anyString())).thenReturn(recordStore);
    when(mapServiceContext.getOrInitCachedMemberPartitions()).thenReturn(new PartitionIdSet(PARTITION_COUNT, localPartitions.keySet()));

    limiter = new QueryResultSizeLimiter(mapServiceContext, Logger.getLogger(QueryResultSizeLimiterTest.class));
}
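RecordStoreAnswer is a test-local Mockito Answer whose source is not shown here. The sketch below is a guess at what it could look like, assuming localPartitions maps partition ids to entry counts; only the class name and the constructor argument are taken from the snippet above.

    // Hypothetical sketch. Requires org.mockito.stubbing.Answer and
    // org.mockito.invocation.InvocationOnMock on the classpath.
    private static final class RecordStoreAnswer implements Answer<Integer> {

        private final Iterator<Integer> sizes;

        RecordStoreAnswer(Collection<Integer> partitionSizes) {
            this.sizes = partitionSizes.iterator();
        }

        @Override
        public Integer answer(InvocationOnMock invocation) {
            // answer each recordStore.size() call with the next local partition's
            // entry count, falling back to 0 once the counts run out
            return sizes.hasNext() ? sizes.next() : 0;
        }
    }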
Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
Class JsonMetadataCreationMigrationTest, method getMetadata:
protected JsonMetadata getMetadata(String mapName, Object key, int replicaIndex) {
    HazelcastInstance[] instances = factory.getAllHazelcastInstances().toArray(new HazelcastInstance[] { null });
    HazelcastInstance instance = factory.getAllHazelcastInstances().iterator().next();
    InternalSerializationService serializationService = getSerializationService(instance);
    Data keyData = serializationService.toData(key);
    int partitionId = getPartitionService(instance).getPartitionId(key);

    NodeEngineImpl nodeEngine = getNodeEngineImpl(getBackupInstance(instances, partitionId, replicaIndex));
    MapService mapService = nodeEngine.getService(MapService.SERVICE_NAME);
    RecordStore recordStore = mapService.getMapServiceContext().getPartitionContainer(partitionId).getRecordStore(mapName);
    return recordStore.getOrCreateMetadataStore().get(keyData);
}
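A hedged usage sketch for the helper above: the map name, the key, and the expectation that metadata exists on both the owner and the first backup are illustrative placeholders, not taken from the original test.

    JsonMetadata primary = getMetadata("jsonMap", "key-1", 0);   // owner copy
    JsonMetadata backup = getMetadata("jsonMap", "key-1", 1);    // first backup replica
    assertNotNull(primary);
    assertNotNull(backup);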
Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
Class MapStoreWriteBehindTest, method writeBehindQueueSize:
private int writeBehindQueueSize(HazelcastInstance node, String mapName) {
    int size = 0;
    final NodeEngineImpl nodeEngine = getNode(node).getNodeEngine();
    MapService mapService = nodeEngine.getService(MapService.SERVICE_NAME);
    final MapServiceContext mapServiceContext = mapService.getMapServiceContext();
    final int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
    for (int i = 0; i < partitionCount; i++) {
        final RecordStore recordStore = mapServiceContext.getExistingRecordStore(i, mapName);
        if (recordStore == null) {
            continue;
        }
        final MapDataStore mapDataStore = recordStore.getMapDataStore();
        size += ((WriteBehindStore) mapDataStore).getWriteBehindQueue().size();
    }
    return size;
}
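Because write-behind flushing is asynchronous, a counter like this is normally polled until the queue drains. A minimal usage sketch with the HazelcastTestSupport assertTrueEventually helper, assuming node and mapName are in scope:

    // poll until the write-behind queue for this member is empty
    assertTrueEventually(() -> assertEquals(0, writeBehindQueueSize(node, mapName)));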
Use of com.hazelcast.map.impl.recordstore.RecordStore in project hazelcast by hazelcast.
Class WriteBehindOnBackupsTest, method writeBehindQueueSize:
public static int writeBehindQueueSize(HazelcastInstance node, String mapName) {
    int size = 0;
    final NodeEngineImpl nodeEngine = getNode(node).getNodeEngine();
    MapService mapService = nodeEngine.getService(MapService.SERVICE_NAME);
    final MapServiceContext mapServiceContext = mapService.getMapServiceContext();
    final int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
    for (int i = 0; i < partitionCount; i++) {
        final RecordStore recordStore = mapServiceContext.getExistingRecordStore(i, mapName);
        if (recordStore == null) {
            continue;
        }
        final MapDataStore mapDataStore = recordStore.getMapDataStore();
        if (mapDataStore instanceof WriteBehindStore) {
            size += ((WriteBehindStore) mapDataStore).getWriteBehindQueue().size();
        }
    }
    return size;
}
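Since this variant is static, it can be aggregated across members. The sketch below sums the per-member queue sizes over an assumed array of test instances; it is not part of the original test.

    // Hypothetical helper built on the static method above.
    static int totalWriteBehindQueueSize(HazelcastInstance[] instances, String mapName) {
        int total = 0;
        for (HazelcastInstance instance : instances) {
            total += writeBehindQueueSize(instance, mapName);
        }
        return total;
    }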