Usage of com.hazelcast.map.impl.querycache.QueryCacheContext in project hazelcast by hazelcast:
class AccumulatorSweeper, method flushAllAccumulators.
/**
 * Drains every accumulator known to the given publisher context and, after each
 * drain, publishes an end-of-sequence event for the accumulator's partition.
 *
 * @param publisherContext publisher-side context whose accumulators are flushed
 */
public static void flushAllAccumulators(PublisherContext publisherContext) {
    QueryCacheContext queryCacheContext = publisherContext.getContext();
    EventPublisherAccumulatorProcessor eventProcessor =
            new EventPublisherAccumulatorProcessor(queryCacheContext.getQueryCacheEventService());
    PublisherAccumulatorHandler accumulatorHandler =
            new PublisherAccumulatorHandler(queryCacheContext, eventProcessor);

    MapPublisherRegistry registryOfMaps = publisherContext.getMapPublisherRegistry();
    for (PublisherRegistry registryOfCaches : registryOfMaps.getAll().values()) {
        for (PartitionAccumulatorRegistry registryOfPartitions : registryOfCaches.getAll().values()) {
            for (Map.Entry<Integer, Accumulator> partitionToAccumulator
                    : registryOfPartitions.getAll().entrySet()) {
                Accumulator accumulator = partitionToAccumulator.getValue();
                eventProcessor.setInfo(accumulator.getInfo());

                // A zero delay makes poll() hand over every buffered event immediately.
                accumulator.poll(accumulatorHandler, 0, TimeUnit.SECONDS);

                // Tell subscribers the stream for this partition is complete.
                QueryCacheEventData endEvent = createEndOfSequenceEvent(partitionToAccumulator.getKey());
                eventProcessor.process(endEvent);
            }
        }
    }
}
Usage of com.hazelcast.map.impl.querycache.QueryCacheContext in project hazelcast by hazelcast:
class ClientQueryCacheEventLostListenerTest, method setTestSequencer.
/**
 * Swaps the map's subscriber context for a test double that simulates event
 * loss after {@code eventCount} events.
 *
 * @param map        client map proxy under test (cast to {@code ClientMapProxy})
 * @param eventCount number of events the test sequencer emits before losing one
 */
private void setTestSequencer(IMap map, int eventCount) {
    QueryCacheContext context = ((ClientMapProxy) map).getQueryContext();
    context.setSubscriberContext(new TestClientSubscriberContext(context, eventCount, true));
}
Usage of com.hazelcast.map.impl.querycache.QueryCacheContext in project hazelcast by hazelcast:
class PublisherCreateOperation, method getPartitionIdsOfAccumulators.
/**
 * Returns the partition IDs for which this operation's (map, cache) pair
 * currently has an accumulator registered.
 *
 * @return partition IDs backing the accumulators of {@code info}'s cache
 */
private Collection<Integer> getPartitionIdsOfAccumulators() {
    QueryCacheContext context = getContext();
    return QueryCacheUtil
            .getAccumulators(context, info.getMapName(), info.getCacheName())
            .keySet();
}
Usage of com.hazelcast.map.impl.querycache.QueryCacheContext in project hazelcast by hazelcast:
class PostJoinMapOperation, method createQueryCaches.
/**
 * Re-creates the publisher-side query-cache structures on this (newly joined)
 * member for every accumulator described in {@code infoList}, and registers a
 * no-op marker listener per map.
 */
private void createQueryCaches() {
    MapService mapService = getService();
    MapServiceContext mapServiceContext = mapService.getMapServiceContext();
    QueryCacheContext queryCacheContext = mapServiceContext.getQueryCacheContext();
    MapPublisherRegistry mapPublisherRegistry =
            queryCacheContext.getPublisherContext().getMapPublisherRegistry();

    for (AccumulatorInfo accumulatorInfo : infoList) {
        addAccumulatorInfo(queryCacheContext, accumulatorInfo);

        // Ensure registries exist for this map/cache pair.
        mapPublisherRegistry
                .getOrCreate(accumulatorInfo.getMapName())
                .getOrCreate(accumulatorInfo.getCacheName());

        // Marker listener: intentionally does nothing on events.
        ListenerAdapter<IMapEvent> markerListener = new ListenerAdapter<IMapEvent>() {
            @Override
            public void onEvent(IMapEvent event) {
                // no-op by design
            }
        };
        mapServiceContext.addLocalListenerAdapter(markerListener, accumulatorInfo.getMapName());
    }
}
Usage of com.hazelcast.map.impl.querycache.QueryCacheContext in project hazelcast by hazelcast:
class MapMigrationAwareService, method commitMigration.
@Override
public void commitMigration(PartitionMigrationEvent event) {
    // Commit-time work for a finished partition migration. The ordering below is
    // deliberate: indexes first, then endpoint-specific cleanup, then record-store
    // loading, then partition-ownership refresh, then query-cache accumulator flush.
    migrateIndex(event);
    if (SOURCE == event.getMigrationEndpoint()) {
        // This member gave the partition away: drop map data whose configured backup
        // count no longer covers the new replica index, and forget its uuid/sequence.
        clearMapsHavingLesserBackupCountThan(event.getPartitionId(), event.getNewReplicaIndex());
        getMetaDataGenerator().removeUuidAndSequence(event.getPartitionId());
    } else if (DESTINATION == event.getMigrationEndpoint()) {
        if (event.getNewReplicaIndex() != 0) {
            // Became a backup (not owner) for this partition: issue a fresh uuid.
            getMetaDataGenerator().regenerateUuid(event.getPartitionId());
        }
    }
    PartitionContainer partitionContainer = mapServiceContext.getPartitionContainer(event.getPartitionId());
    for (RecordStore recordStore : partitionContainer.getAllRecordStores()) {
        // in case the record store has been created without loading during migration trigger again
        // if loading has been already started this call will do nothing
        recordStore.startLoading();
    }
    // Refresh this member's view of which partitions it now owns.
    mapServiceContext.reloadOwnedPartitions();
    QueryCacheContext queryCacheContext = mapServiceContext.getQueryCacheContext();
    PublisherContext publisherContext = queryCacheContext.getPublisherContext();
    if (event.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
        // Partition left this member: push out any buffered query-cache events for
        // it, then discard its accumulator.
        int partitionId = event.getPartitionId();
        flushAccumulator(publisherContext, partitionId);
        removeAccumulator(publisherContext, partitionId);
    }
}
Aggregations