Use of org.apache.ignite.internal.processors.metastorage.DistributedMetastorageLifecycleListener in the Apache Ignite project.
From the class IgniteExchangeLatchManagerDiscoHistoryTest, method testProperException.
/**
 * Verifies that exhausting the discovery history while a partition map exchange
 * is blocked results in a descriptive {@code IgniteException} delivered to the
 * failure handler (with a hint about {@code IGNITE_DISCOVERY_HISTORY_SIZE})
 * instead of a {@code NullPointerException}.
 *
 * @throws Exception If failed.
 */
@Test
@WithSystemProperty(key = IGNITE_DISCOVERY_HISTORY_SIZE, value = DISCO_HISTORY_SIZE)
public void testProperException() throws Exception {
// Coordinator node; the whole cluster runs with a short discovery history (see @WithSystemProperty above).
final IgniteEx crd = startGrid(0);
// Blocks the victim node's initial exchange until the discovery history is exhausted.
final CountDownLatch exchangeLatch = new CountDownLatch(1);
// Released once the victim's exchange has started, so additional servers can be launched.
final CountDownLatch startSrvsLatch = new CountDownLatch(1);
// First unexpected exception observed by any asynchronous participant of the test.
final AtomicReference<Exception> err = new AtomicReference<>();
// Lifecycle bean that is used to register PartitionsExchangeAware listener.
lifecycleBean = new LifecycleBean() {
/**
 * Ignite instance.
 */
@IgniteInstanceResource
IgniteEx ignite;
/**
 * {@inheritDoc}
 */
@Override
public void onLifecycleEvent(LifecycleEventType evt) throws IgniteException {
if (evt == LifecycleEventType.BEFORE_NODE_START) {
// The goal is registering PartitionsExchangeAware listener before the discovery manager is started.
ignite.context().internalSubscriptionProcessor().registerDistributedMetastorageListener(new DistributedMetastorageLifecycleListener() {
@Override
public void onReadyForRead(ReadableDistributedMetaStorage metastorage) {
ignite.context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {
/**
 * {@inheritDoc}
 */
@Override
public void onInitBeforeTopologyLock(GridDhtPartitionsExchangeFuture fut) {
try {
// Let's start nodes.
startSrvsLatch.countDown();
// Blocks the initial exchange and waits for other nodes.
exchangeLatch.await(DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
} catch (Exception e) {
// Record the failure for the final assertion instead of swallowing it on this thread.
err.compareAndSet(null, e);
}
}
});
}
});
}
}
};
// Start server node with short topology history.
// NOTE(review): 'victim' presumably marks the next starting node as the one whose
// discovery history entries will be dropped — confirm against the test class fields.
victim = true;
GridTestUtils.runAsync(() -> startGrid(1));
// Waits for the initial exchange.
startSrvsLatch.await(DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
victim = false;
// Subsequently started nodes don't need the exchange-blocking lifecycle bean.
lifecycleBean = null;
List<IgniteInternalFuture> srvFuts = new ArrayList<>(TOPOLOGY_HISTORY_SIZE);
try {
// Major topology version that is corresponding to the start of the node with short topology history.
final long topVer = 2;
// Starting server nodes to exhaust the topology history.
// The 3 * TOPOLOGY_HISTORY_SIZE bound is a safety cap so the loop cannot run forever.
for (int i = 2; i < 3 * TOPOLOGY_HISTORY_SIZE && !disco.isEmptyTopologyHistory(topVer); ++i) {
final int currNodeIdx = i;
final int joinedNodesCnt = disco.totalJoinedNodes();
srvFuts.add(GridTestUtils.runAsync(() -> startGrid(currNodeIdx)));
// Ensure each node actually joins before starting the next one.
assertTrue("Failed to wait for a new server node [joinedNodesCnt=" + joinedNodesCnt + "]", GridTestUtils.waitForCondition(() -> disco.totalJoinedNodes() >= (joinedNodesCnt + 1), DEFAULT_TIMEOUT));
}
assertTrue("Disco cache history is not empty for the topology [majorTopVer=" + topVer + ']', disco.isEmptyTopologyHistory(topVer));
// Let's continue the ongoing exchange.
exchangeLatch.countDown();
// NOTE(review): cpFailureCtx is presumably populated by the failure handler configured
// elsewhere in this test class — confirm.
boolean failureHnd = GridTestUtils.waitForCondition(() -> cpFailureCtx.get() != null, DEFAULT_TIMEOUT);
assertNull("Unexpected exception (probably, the topology history still exists [err=" + err + ']', err.get());
assertTrue("Failure handler was not triggered.", failureHnd);
// Check that IgniteException was thrown instead of NullPointerException.
assertTrue("IgniteException must be thrown.", X.hasCause(cpFailureCtx.get().error(), IgniteException.class));
// Check that message contains a hint to fix the issue.
GridTestUtils.assertContains(log, cpFailureCtx.get().error().getMessage(), "Consider increasing IGNITE_DISCOVERY_HISTORY_SIZE property. Current value is " + DISCO_HISTORY_SIZE);
} finally {
// Stop the victim node first; it may still be stuck on its blocked exchange.
IgnitionEx.stop(getTestIgniteInstanceName(1), true, ShutdownPolicy.IMMEDIATE, true);
// Await all asynchronously started servers, recording any start failure.
srvFuts.forEach(f -> {
try {
f.get(DEFAULT_TIMEOUT);
} catch (IgniteCheckedException e) {
err.compareAndSet(null, e);
}
});
}
assertNull("Unexpected exception [err=" + err.get() + ']', err.get());
}
Use of org.apache.ignite.internal.processors.metastorage.DistributedMetastorageLifecycleListener in the Apache Ignite project.
From the class PerformanceStatisticsProcessor, method start.
/** {@inheritDoc} */
@Override
public void start() throws IgniteCheckedException {
super.start();
// Subscribe to distributed metastorage lifecycle: pick up the persisted
// performance-statistics flag and react to cluster-wide changes of it.
DistributedMetastorageLifecycleListener lsnr = new DistributedMetastorageLifecycleListener() {
@Override
public void onReadyForRead(ReadableDistributedMetaStorage ms) {
ms.listen(PERF_STAT_KEY::equals, (k, oldVal, newVal) -> {
// Skip history on local join.
if (ctx.discovery().localJoinFuture().isDone())
onMetastorageUpdate((boolean) newVal);
});
}
@Override
public void onReadyForWrite(DistributedMetaStorage ms) {
PerformanceStatisticsProcessor.this.metastorage = ms;
try {
// Apply the persisted flag, if any, once the metastorage is writable.
Boolean enabled = ms.read(PERF_STAT_KEY);
if (enabled != null)
onMetastorageUpdate(enabled);
} catch (IgniteCheckedException e) {
throw new IgniteException(e);
}
}
};
ctx.internalSubscriptionProcessor().registerDistributedMetastorageListener(lsnr);
// Distributed process that rotates the statistics writer on every node.
rotateProc = new DistributedProcess<>(ctx, PERFORMANCE_STATISTICS_ROTATE,
req -> ctx.closure().callLocalSafe(() -> {
rotateWriter();
return null;
}),
(id, res, err) -> {
// No-op: nothing to do when the distributed rotate process completes.
});
}
Use of org.apache.ignite.internal.processors.metastorage.DistributedMetastorageLifecycleListener in the Apache Ignite project.
From the class DistributedConfigurationProcessor, method start.
/** {@inheritDoc} */
@Override
public void start() throws IgniteCheckedException {
GridInternalSubscriptionProcessor subscriptionProc = ctx.internalSubscriptionProcessor();
subscriptionProc.registerDistributedMetastorageListener(new DistributedMetastorageLifecycleListener() {
@Override
public void onReadyForRead(ReadableDistributedMetaStorage ms) {
distributedMetastorage = ctx.distributedMetastorage();
// Listener for handling of cluster wide change of specific properties. Do local update.
distributedMetastorage.listen(key -> key.startsWith(DIST_CONF_PREFIX), (key, oldVal, newVal) -> {
DistributedChangeableProperty prop = props.get(toPropertyKey(key));
if (prop == null)
return;
prop.localUpdate(newVal);
});
// Switch to actualize action and actualize already registered properties.
switchCurrentActionTo(ACTUALIZE);
// Register and actualize properties waited for this service.
subscriptionProc.getDistributedConfigurationListeners()
.forEach(lsnr -> lsnr.onReadyToRegister(DistributedConfigurationProcessor.this));
}
@Override
public void onReadyForWrite(DistributedMetaStorage ms) {
// Switch to cluster wide update action and do it on already registered properties.
switchCurrentActionTo(CLUSTER_WIDE_UPDATE);
subscriptionProc.getDistributedConfigurationListeners()
.forEach(DistributedConfigurationLifecycleListener::onReadyToWrite);
}
});
}
Use of org.apache.ignite.internal.processors.metastorage.DistributedMetastorageLifecycleListener in the Apache Ignite project.
From the class GridMetricManager, method start.
/** {@inheritDoc} */
@Override
public void start() throws IgniteCheckedException {
for (MetricExporterSpi spi : getSpis())
spi.setMetricRegistry(this);
startSpi();
// In case standalone kernal start.
if (ctx.internalSubscriptionProcessor() == null)
return;
ctx.internalSubscriptionProcessor().registerDistributedMetastorageListener(new DistributedMetastorageLifecycleListener() {
/** {@inheritDoc} */
@Override
public void onReadyForRead(ReadableDistributedMetaStorage ms) {
roMetastorage = ms;
// Offsets past "<prefix>." used to strip the prefix from stored metric names.
int hitRateOff = HITRATE_CFG_PREFIX.length() + 1;
int histogramOff = HISTOGRAM_CFG_PREFIX.length() + 1;
try {
// Replay the persisted metric configuration.
ms.iterate(HITRATE_CFG_PREFIX, (name, val) ->
onHitRateConfigChanged(name.substring(hitRateOff), (Long) val));
ms.iterate(HISTOGRAM_CFG_PREFIX, (name, val) ->
onHistogramConfigChanged(name.substring(histogramOff), (long[]) val));
} catch (IgniteCheckedException e) {
throw new IgniteException(e);
}
// Track further cluster-wide configuration changes.
ms.listen(name -> name.startsWith(HITRATE_CFG_PREFIX), (name, oldVal, newVal) ->
onHitRateConfigChanged(name.substring(hitRateOff), (Long) newVal));
ms.listen(name -> name.startsWith(HISTOGRAM_CFG_PREFIX), (name, oldVal, newVal) ->
onHistogramConfigChanged(name.substring(histogramOff), (long[]) newVal));
}
/** {@inheritDoc} */
@Override
public void onReadyForWrite(DistributedMetaStorage ms) {
GridMetricManager.this.metastorage = ms;
}
});
}
Use of org.apache.ignite.internal.processors.metastorage.DistributedMetastorageLifecycleListener in the Apache Ignite project.
From the class BaselineAutoAdjustTest, method testExchangeMerge.
/**
 * Tests that merging exchanges properly triggers baseline changing.
 *
 * @throws Exception If failed.
 */
@Test
public void testExchangeMerge() throws Exception {
// Latch that waits for PME (intTopVer == 3.0)
CountDownLatch exchangeWorkerLatch = new CountDownLatch(1);
// Lifecycle bean is needed in order to register EVT_NODE_JOINED listener that is called
// right after GridCachePartitionExchangeManager and before GridClusterStateProcessor.
lifecycleBean = new LifecycleBean() {
/**
 * Ignite instance.
 */
@IgniteInstanceResource
IgniteEx ignite;
/**
 * {@inheritDoc}
 */
@Override
public void onLifecycleEvent(LifecycleEventType evt) throws IgniteException {
if (evt == LifecycleEventType.BEFORE_NODE_START) {
// Register the discovery listener before discovery starts, so it fires for the join event.
ignite.context().internalSubscriptionProcessor().registerDistributedMetastorageListener(new DistributedMetastorageLifecycleListener() {
@Override
public void onReadyForRead(ReadableDistributedMetaStorage metastorage) {
ignite.context().event().addDiscoveryEventListener((evt, disco) -> {
if (evt.type() == EVT_NODE_JOINED && evt.topologyVersion() == 3) {
try {
// Let's wait for exchange worker starts PME
// that related to the first node joined the cluster.
exchangeWorkerLatch.await(getTestTimeout(), MILLISECONDS);
} catch (InterruptedException e) {
throw new IgniteException("exchangeWorkerLatch has been interrupted.", e);
}
}
}, EVT_NODE_JOINED);
}
});
}
}
};
// Start the coordinator node.
IgniteEx crd = startGrid(0);
// This bean is only required on the coordinator node.
lifecycleBean = null;
// Latch indicates that EVT_NODE_JOINED (topVer == 4.0) was processed by all listeners.
CountDownLatch nodeJoinLatch = new CountDownLatch(1);
// This listener is the last one in the queue of handlers.
crd.context().event().addDiscoveryEventListener((evt, disco) -> {
if (evt.type() == EVT_NODE_JOINED && evt.topologyVersion() == 4)
nodeJoinLatch.countDown();
}, EVT_NODE_JOINED);
IgniteEx nonCrd = startGrid(1);
crd.cluster().state(ACTIVE);
crd.cluster().baselineAutoAdjustEnabled(true);
crd.cluster().baselineAutoAdjustTimeout(autoAdjustTimeout);
awaitPartitionMapExchange(false, true, null);
// Delay PME completion by blocking the non-coordinator's single message.
TestRecordingCommunicationSpi spi1 = TestRecordingCommunicationSpi.spi(nonCrd);
spi1.blockMessages((node, msg) -> msg instanceof GridDhtPartitionsSingleMessage);
// Target major topology version (4 nodes)
long targetTopVer = 4;
// Let's block exchange process in order to merge two following exchanges (3.0 && 4.0).
crd.context().cache().context().exchange().mergeExchangesTestWaitVersion(new AffinityTopologyVersion(targetTopVer, 0), null);
// Start two more nodes concurrently so their exchanges (3.0 and 4.0) can be merged.
AtomicInteger cnt = new AtomicInteger(G.allGrids().size());
runMultiThreadedAsync(new Callable<Void>() {
@Override
public Void call() throws Exception {
startGrid(cnt.getAndIncrement());
return null;
}
}, 2, "async-grid-starter");
// Make sure that PME is in progress.
assertTrue("Failed to wait for PME [topVer=3]", spi1.waitForBlocked(1, getTestTimeout()));
assertTrue("Failed to wait for the first started exchange.", waitForCondition(() -> {
GridDhtPartitionsExchangeFuture fut = crd.context().cache().context().exchange().lastTopologyFuture();
return fut.initialVersion().topologyVersion() == 3;
}, getTestTimeout()));
// This guarantees that BaselineAutoAdjustData listens to real GridDhtPartitionsExchangeFuture
// instead of readyAffinityFuture.
exchangeWorkerLatch.countDown();
assertTrue("Failed to wait for processing node join event [topVer=3]", nodeJoinLatch.await(getTestTimeout(), MILLISECONDS));
// Unblock PME
spi1.stopBlock();
// After the merged exchange completes, auto-adjust must include all 4 nodes in the baseline.
assertTrue("Failed to wait for changing baseline in " + autoAdjustTimeout * 2 + " ms.", waitForCondition(() -> crd.cluster().currentBaselineTopology().size() == targetTopVer, autoAdjustTimeout * 2));
}
Aggregations