use of org.infinispan.factories.annotations.Start in project infinispan by infinispan.
the class LocalPublisherManagerImpl method start.
@Start
public void start() {
   // We need to unwrap the cache as a local stream should only deal with BOXED values
   // Any mappings will be provided by the originator node in their intermediate operation stack in the operation itself.
   this.remoteCache = AbstractDelegatingCache.unwrapCache(cacheComponentRef.running()).getAdvancedCache();
   // The iteration caches should only deal with local entries.
   this.cache = remoteCache.withFlags(Flag.CACHE_MODE_LOCAL);
   ClusteringConfiguration clusteringConfiguration = cache.getCacheConfiguration().clustering();
   this.maxSegment = clusteringConfiguration.hash().numSegments();
   updateStrategy(configuration.persistence().usingSegmentedStore());
   persistenceManager.addStoreListener(storeChangeListener);
}
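The same pattern can be reproduced in a small, self-contained component. The sketch below is hypothetical (class and field names are illustrative, not Infinispan's): the component registry injects the cache, then invokes the @Start method, which derives a local-only view with Flag.CACHE_MODE_LOCAL just as LocalPublisherManagerImpl does.
import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;

@Scope(Scopes.NAMED_CACHE)
public class LocalOnlyReader<K, V> {
   @Inject AdvancedCache<K, V> cache;       // injected before the start phase
   private AdvancedCache<K, V> localCache;  // derived view, built once at start

   @Start
   void start() {
      // Decorate the injected cache so operations through this view stay on the local node.
      localCache = cache.withFlags(Flag.CACHE_MODE_LOCAL);
   }

   public V getLocal(K key) {
      return localCache.get(key);
   }
}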
use of org.infinispan.factories.annotations.Start in project infinispan by infinispan.
the class DistributionManagerImpl method start.
// Start before RpcManagerImpl
@Start(priority = 8)
@SuppressWarnings("unused")
void start() throws Exception {
   if (log.isTraceEnabled())
      log.tracef("starting distribution manager on %s", getAddress());
   cacheMode = configuration.clustering().cacheMode();
   // We need an extended topology for preload, before the start of StateTransferManagerImpl
   Address localAddress = transport == null ? LocalModeAddress.INSTANCE : transport.getAddress();
   extendedTopology = makeSingletonTopology(cacheMode, keyPartitioner, configuration.clustering().hash().numSegments(), localAddress);
}
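The priority element is what backs the "Start before RpcManagerImpl" comment: components whose @Start method has a lower priority value are started earlier. A minimal sketch (hypothetical component, assuming that ordering convention):
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;

public class EarlyComponent {
   @Start(priority = 8)   // assumption: lower than the default, so this runs before most components
   void start() {
      // prepare state that later-starting components (e.g. the RPC layer) rely on
   }

   @Stop
   void stop() {
      // release that state during shutdown
   }
}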
use of org.infinispan.factories.annotations.Start in project infinispan by infinispan.
the class CacheNotifierImpl method start.
@Start(priority = 9)
public void start() {
   if (!config.simpleCache()) {
      clusterExecutor = SecurityActions.getClusterExecutor(cache.wired());
   }
   Collection<FilterIndexingServiceProvider> providers = ServiceFinder.load(FilterIndexingServiceProvider.class);
   filterIndexingServiceProviders = new ArrayList<>(providers.size());
   for (FilterIndexingServiceProvider provider : providers) {
      componentRegistry.wireDependencies(provider, false);
      provider.start();
      filterIndexingServiceProviders.add(provider);
   }
}
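The discovery step follows the standard Java service-provider pattern; ServiceFinder is Infinispan's ServiceLoader-style utility. A reduced sketch using plain java.util.ServiceLoader (the provider interface is the real one, the wrapper class is illustrative and the componentRegistry wiring is omitted):
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;

import org.infinispan.notifications.cachelistener.filter.FilterIndexingServiceProvider;

public class ProviderDiscovery {
   private final List<FilterIndexingServiceProvider> started = new ArrayList<>();

   public void discover() {
      for (FilterIndexingServiceProvider provider : ServiceLoader.load(FilterIndexingServiceProvider.class)) {
         provider.start();       // same lifecycle call CacheNotifierImpl makes
         started.add(provider);  // kept so stop() can be invoked on shutdown
      }
   }
}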
use of org.infinispan.factories.annotations.Start in project infinispan by infinispan.
the class MetricsCollector method start.
@Start
protected void start() {
   baseRegistry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);
   baseRegistry.config().meterFilter(new BaseFilter());
   new BaseAdditionalMetrics().bindTo(baseRegistry);
   vendorRegistry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);
   vendorRegistry.config().meterFilter(new VendorFilter());
   new VendorAdditionalMetrics().bindTo(vendorRegistry);
   Transport transport = transportRef.running();
   String nodeName = transport != null ? transport.getAddress().toString() : globalConfig.transport().nodeName();
   if (nodeName == null) {
      // TODO [anistor] Maybe we should just ensure a unique node name was set in all tests and also in real life usage, even for local cache managers
      nodeName = generateRandomName();
      // throw new CacheConfigurationException("Node name must always be specified in configuration if metrics are enabled.");
   }
   nodeTag = Tag.of(NODE_TAG_NAME, nodeName);
   if (globalConfig.metrics().namesAsTags()) {
      cacheManagerTag = Tag.of(CACHE_MANAGER_TAG_NAME, globalConfig.cacheManagerName());
   }
}
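The registries here come from Micrometer. A minimal, self-contained sketch of the same building blocks (metric names and tag values are illustrative, not Infinispan's): create a PrometheusMeterRegistry, attach a common tag the way MetricsCollector attaches the node tag, register a meter, and obtain the scrape payload.
import java.util.List;

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.Tag;
import io.micrometer.prometheus.PrometheusConfig;
import io.micrometer.prometheus.PrometheusMeterRegistry;

public class MetricsSketch {
   public static void main(String[] args) {
      PrometheusMeterRegistry registry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);
      // Common tags are added to every meter, similar to the node/cache-manager tags above.
      registry.config().commonTags(List.of(Tag.of("node", "node-a")));

      Counter requests = registry.counter("requests.total");
      requests.increment();

      // Prometheus text exposition format, ready to serve on a /metrics endpoint.
      System.out.println(registry.scrape());
   }
}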
use of org.infinispan.factories.annotations.Start in project infinispan by infinispan.
the class LocalTopologyManagerImpl method doHandleTopologyUpdate.
/**
 * Update the cache topology in the LocalCacheStatus and pass it to the CacheTopologyHandler.
 *
 * @return {@code true} if the topology was applied, {@code false} if it was ignored.
 */
private CompletionStage<Boolean> doHandleTopologyUpdate(String cacheName, CacheTopology cacheTopology,
                                                        AvailabilityMode availabilityMode, int viewId, Address sender,
                                                        LocalCacheStatus cacheStatus) {
   CacheTopology existingTopology;
   synchronized (cacheStatus) {
      if (cacheTopology == null) {
         // Still, return true because we don't want to re-send the join request.
         return CompletableFutures.completedTrue();
      }
      // Register all persistent UUIDs locally
      registerPersistentUUID(cacheTopology);
      existingTopology = cacheStatus.getCurrentTopology();
      if (existingTopology != null && cacheTopology.getTopologyId() <= existingTopology.getTopologyId()) {
         log.debugf("Ignoring late consistent hash update for cache %s, current topology is %s: %s",
                    cacheName, existingTopology.getTopologyId(), cacheTopology);
         return CompletableFutures.completedFalse();
      }
      if (!updateCacheTopology(cacheName, cacheTopology, viewId, sender, cacheStatus))
         return CompletableFutures.completedFalse();
   }
   CacheTopologyHandler handler = cacheStatus.getHandler();
   ConsistentHash currentCH = cacheTopology.getCurrentCH();
   ConsistentHash pendingCH = cacheTopology.getPendingCH();
   ConsistentHash unionCH;
   if (pendingCH != null) {
      ConsistentHashFactory chf = cacheStatus.getJoinInfo().getConsistentHashFactory();
      switch (cacheTopology.getPhase()) {
         case READ_NEW_WRITE_ALL:
            // When removing members from topology, we have to make sure that the unionCH has
            // owners from pendingCH (which is used as the readCH in this phase) before
            // owners from currentCH, as primary owners must match in readCH and writeCH.
            unionCH = chf.union(pendingCH, currentCH);
            break;
         default:
            unionCH = chf.union(currentCH, pendingCH);
      }
   } else {
      unionCH = null;
   }
   List<PersistentUUID> persistentUUIDs = persistentUUIDManager.mapAddresses(cacheTopology.getActualMembers());
   CacheTopology unionTopology = new CacheTopology(cacheTopology.getTopologyId(), cacheTopology.getRebalanceId(),
                                                   currentCH, pendingCH, unionCH, cacheTopology.getPhase(),
                                                   cacheTopology.getActualMembers(), persistentUUIDs);
   boolean updateAvailabilityModeFirst = availabilityMode != AvailabilityMode.AVAILABLE;
   CompletionStage<Void> stage = resetLocalTopologyBeforeRebalance(cacheName, cacheTopology, existingTopology, handler);
   stage = stage.thenCompose(ignored -> {
      unionTopology.logRoutingTableInformation(cacheName);
      if (updateAvailabilityModeFirst && availabilityMode != null) {
         return cacheStatus.getPartitionHandlingManager().setAvailabilityMode(availabilityMode);
      }
      return CompletableFutures.completedNull();
   });
   stage = stage.thenCompose(ignored -> {
      boolean startConflictResolution = cacheTopology.getPhase() == CacheTopology.Phase.CONFLICT_RESOLUTION;
      if (!startConflictResolution && unionCH != null &&
            (existingTopology == null || existingTopology.getRebalanceId() != cacheTopology.getRebalanceId())) {
         // This CH_UPDATE command was sent after a REBALANCE_START command, but arrived first.
         // We will start the rebalance now and ignore the REBALANCE_START command when it arrives.
         log.tracef("This topology update has a pending CH, starting the rebalance now");
         return handler.rebalance(unionTopology);
      } else {
         return handler.updateConsistentHash(unionTopology);
      }
   });
   if (!updateAvailabilityModeFirst) {
      stage = stage.thenCompose(ignored -> cacheStatus.getPartitionHandlingManager().setAvailabilityMode(availabilityMode));
   }
   return stage.thenApply(ignored -> true);
}
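The guard inside the synchronized block is what makes stale updates harmless: a topology is only applied when its id is strictly greater than the one already installed. A reduced sketch (hypothetical names) of that rule in isolation:
final class TopologyGuard {
   private int currentTopologyId = -1;

   synchronized boolean tryApply(int incomingTopologyId) {
      if (incomingTopologyId <= currentTopologyId) {
         return false;   // late or duplicate update, mirrors completedFalse() above
      }
      currentTopologyId = incomingTopologyId;
      return true;       // accept and proceed to rebalance/updateConsistentHash
   }
}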