use of com.hazelcast.map.impl.recordstore.DefaultRecordStore in project hazelcast by hazelcast.
the class EvictionMaxSizePolicyTest method setTestSizeEstimator.
public static void setTestSizeEstimator(IMap map, final long oneEntryHeapCostInBytes) {
    final MapProxyImpl mapProxy = (MapProxyImpl) map;
    final MapService mapService = (MapService) mapProxy.getService();
    final MapServiceContext mapServiceContext = mapService.getMapServiceContext();
    final NodeEngine nodeEngine = mapServiceContext.getNodeEngine();
    final IPartitionService partitionService = nodeEngine.getPartitionService();
    for (int i = 0; i < partitionService.getPartitionCount(); i++) {
        final Address owner = partitionService.getPartitionOwner(i);
        if (nodeEngine.getThisAddress().equals(owner)) {
            final PartitionContainer container = mapServiceContext.getPartitionContainer(i);
            if (container == null) {
                continue;
            }
            final RecordStore recordStore = container.getRecordStore(map.getName());
            final DefaultRecordStore defaultRecordStore = (DefaultRecordStore) recordStore;
            defaultRecordStore.setSizeEstimator(new EntryCostEstimator() {

                long size;

                @Override
                public long getEstimate() {
                    return size;
                }

                @Override
                public void adjustEstimateBy(long size) {
                    this.size += size;
                }

                @Override
                public long calculateValueCost(Object record) {
                    if (record == null) {
                        return 0L;
                    }
                    return oneEntryHeapCostInBytes;
                }

                @Override
                public long calculateEntryCost(Object key, Object record) {
                    if (record == null) {
                        return 0L;
                    }
                    return 2 * oneEntryHeapCostInBytes;
                }

                @Override
                public void reset() {
                    size = 0;
                }
            });
        }
    }
}
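A minimal usage sketch, not taken from the original test class: assuming a map configured with a heap-based max-size eviction policy (for example USED_HEAP_SIZE), the helper above lets a test pretend that every entry costs a fixed number of bytes, so eviction kicks in deterministically. The method name, entry count and 1 MB figure below are illustrative assumptions.

static void fillMapWithFakedHeapCost(IMap<Integer, Integer> map) {
    // pretend every entry occupies roughly 1 MB on the heap
    setTestSizeEstimator(map, 1024 * 1024);
    for (int i = 0; i < 1000; i++) {
        map.put(i, i);
    }
    // eviction should now trigger long before 1000 real megabytes are allocated
}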
use of com.hazelcast.map.impl.recordstore.DefaultRecordStore in project hazelcast by hazelcast.
the class RecordStoreTest method testRecordStoreReset.
private IMap<Object, Object> testRecordStoreReset() {
    String mapName = randomName();
    Config config = new Config();
    MapConfig mapConfig = config.getMapConfig(mapName);
    IndexConfig indexConfig = new IndexConfig(IndexType.HASH, "name");
    mapConfig.addIndexConfig(indexConfig);
    HazelcastInstance hazelcastInstance = createHazelcastInstance(config);
    IMap<Object, Object> map = hazelcastInstance.getMap(mapName);
    int key = 1;
    map.put(key, new SampleTestObjects.Employee("tom", 24, true, 10));
    DefaultRecordStore defaultRecordStore = getRecordStore(map, key);
    defaultRecordStore.reset();
    assertNull(map.get(key));
    return map;
}
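The getRecordStore(map, key) helper is not shown above. A plausible sketch of it, assembled from the same APIs used in the first example (MapProxyImpl, MapServiceContext, PartitionContainer); the actual helper in RecordStoreTest may differ in detail:

private static DefaultRecordStore getRecordStore(IMap<Object, Object> map, Object key) {
    MapServiceContext mapServiceContext = ((MapService) ((MapProxyImpl) map).getService()).getMapServiceContext();
    // resolve the partition that owns the key, then look up the map's record store in it
    int partitionId = mapServiceContext.getNodeEngine().getPartitionService().getPartitionId(key);
    PartitionContainer container = mapServiceContext.getPartitionContainer(partitionId);
    return (DefaultRecordStore) container.getRecordStore(map.getName());
}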
use of com.hazelcast.map.impl.recordstore.DefaultRecordStore in project hazelcast by hazelcast.
the class EntryLoaderSimpleTest method testLoadEntryAtCurrentTime.
@Test
public void testLoadEntryAtCurrentTime() {
    testEntryLoader.putExternally("key", "value", 42);
    MapService service = getNodeEngineImpl(instances[0]).getService(MapService.SERVICE_NAME);
    MapServiceContext mapServiceContext = service.getMapServiceContext();
    Config config = mapServiceContext.getNodeEngine().getConfig();
    MapContainer mapContainer = new MapContainer("anyName", config, mapServiceContext);
    Data key = mapServiceContext.toData("key");
    DefaultRecordStore recordStore = new DefaultRecordStore(mapContainer, 0, mock(MapKeyLoader.class), mock(ILogger.class));
    assertNull(recordStore.loadRecordOrNull(key, false, null));
}
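For contrast, a hedged sketch of the complementary case that could sit in the same test method, assuming the third argument of putExternally is an absolute expiration timestamp in milliseconds since the epoch (so the 42 above denotes an already-expired entry): an entry whose expiration lies in the future should still be loadable.

// hypothetical continuation inside the same test method
testEntryLoader.putExternally("freshKey", "value", System.currentTimeMillis() + TimeUnit.HOURS.toMillis(1));
Data freshKey = mapServiceContext.toData("freshKey");
assertNotNull(recordStore.loadRecordOrNull(freshKey, false, null));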
use of com.hazelcast.map.impl.recordstore.DefaultRecordStore in project hazelcast by hazelcast.
the class MapSplitBrainHandlerService method onStoreCollection.
/**
 * Clears indexes inside the partition thread while collecting merge
 * tasks. Otherwise, if we do this cleanup upon join of the merging node,
 * concurrently running merge and migration operations can cause
 * inconsistency over shared index objects between record stores.
 */
@Override
protected void onStoreCollection(RecordStore recordStore) {
    assertRunningOnPartitionThread();
    DefaultRecordStore defaultRecordStore = (DefaultRecordStore) recordStore;
    defaultRecordStore.getMapDataStore().reset();
    defaultRecordStore.getIndexingObserver().onDestroy(false, true);
}
use of com.hazelcast.map.impl.recordstore.DefaultRecordStore in project hazelcast by hazelcast.
the class MapLoaderFuturesTest method loadingFutureCount.
private static int loadingFutureCount(String mapName, HazelcastInstance node) {
    int count = 0;
    NodeEngineImpl nodeEngine = getNode(node).getNodeEngine();
    MapService mapService = nodeEngine.getService(MapService.SERVICE_NAME);
    MapServiceContext mapServiceContext = mapService.getMapServiceContext();
    int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
    for (int i = 0; i < partitionCount; i++) {
        RecordStore recordStore = mapServiceContext.getExistingRecordStore(i, mapName);
        if (recordStore != null) {
            count += ((DefaultRecordStore) recordStore).getLoadingFutures().size();
        }
    }
    return count;
}
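A sketch of how a test might use this helper, under the assumption that it runs after MapLoader-driven loading has been triggered; assertTrueEventually is Hazelcast's test-support retry assertion, and the map name is illustrative:

// hypothetical assertion: all loading futures are eventually drained
assertTrueEventually(() -> assertEquals(0, loadingFutureCount("loadedMap", node)));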