Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class IgniteH2Indexing, method serverTopologyChanged.
/**
 * @param readyVer Ready topology version.
 * @return {@code true} if a pending distributed exchange exists because the server topology has changed.
 */
public boolean serverTopologyChanged(AffinityTopologyVersion readyVer) {
    GridDhtPartitionsExchangeFuture fut = ctx.cache().context().exchange().lastTopologyFuture();

    if (fut.isDone())
        return false;

    AffinityTopologyVersion initVer = fut.initialVersion();

    return initVer.compareTo(readyVer) > 0 && !CU.clientNode(fut.firstEvent().node());
}
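AffinityTopologyVersion orders cluster states by a major version (bumped when nodes join or leave) and a minor version (bumped for in-place changes such as dynamic cache starts), and the check above relies on that natural ordering. The following is a minimal standalone sketch of the comparison; the class name and the sample version numbers are illustrative, and only the ignite-core dependency is assumed.

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;

public class TopologyVersionCompareSketch {
    public static void main(String[] args) {
        // Version the pending exchange was initiated on (major=5, minor=0).
        AffinityTopologyVersion initVer = new AffinityTopologyVersion(5, 0);

        // Version the current operation considers ready (major=4, minor=2).
        AffinityTopologyVersion readyVer = new AffinityTopologyVersion(4, 2);

        // compareTo() orders by major version first, then by minor version,
        // which is the ordering the serverTopologyChanged() check relies on.
        boolean pendingIsNewer = initVer.compareTo(readyVer) > 0;

        System.out.println("Pending exchange targets a newer topology: " + pendingIsNewer);
    }
}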
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class WalRecoveryTxLogicalRecordsTest, method testHistoricalRebalanceIterator.
/**
 * @throws Exception If failed.
 */
public void testHistoricalRebalanceIterator() throws Exception {
    System.setProperty(IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD, "0");

    extraCcfg = new CacheConfiguration(CACHE_NAME + "2");
    extraCcfg.setAffinity(new RendezvousAffinityFunction(false, PARTS));

    Ignite ignite = startGrid();

    try {
        ignite.cluster().active(true);

        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) ((IgniteEx) ignite).context().cache().context().database();

        dbMgr.waitForCheckpoint("test");

        // This number depends on wal history size.
        int entries = 25;

        IgniteCache<Integer, Integer> cache = ignite.cache(CACHE_NAME);
        IgniteCache<Integer, Integer> cache2 = ignite.cache(CACHE_NAME + "2");

        for (int i = 0; i < entries; i++) {
            // Put to partition 0.
            cache.put(i * PARTS, i * PARTS);

            // Put to partition 1.
            cache.put(i * PARTS + 1, i * PARTS + 1);

            // Put to another cache.
            cache2.put(i, i);

            dbMgr.waitForCheckpoint("test");
        }

        for (int i = 0; i < entries; i++) {
            assertEquals((Integer) (i * PARTS), cache.get(i * PARTS));
            assertEquals((Integer) (i * PARTS + 1), cache.get(i * PARTS + 1));
            assertEquals((Integer) (i), cache2.get(i));
        }

        CacheGroupContext grp = ((IgniteEx) ignite).context().cache().cacheGroup(CU.cacheId(CACHE_NAME));
        IgniteCacheOffheapManager offh = grp.offheap();
        AffinityTopologyVersion topVer = grp.affinity().lastVersion();

        IgniteDhtDemandedPartitionsMap map;

        for (int i = 0; i < entries; i++) {
            map = new IgniteDhtDemandedPartitionsMap();
            map.addHistorical(0, i, Long.MAX_VALUE, entries);

            try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
                assertNotNull(it);
                assertTrue("Not historical for iteration: " + i, it.historical(0));

                for (int j = i; j < entries; j++) {
                    assertTrue("i=" + i + ", j=" + j, it.hasNextX());

                    CacheDataRow row = it.next();

                    assertEquals(j * PARTS, (int) row.key().value(grp.cacheObjectContext(), false));
                    assertEquals(j * PARTS, (int) row.value().value(grp.cacheObjectContext(), false));
                }

                assertFalse(it.hasNext());
            }

            map = new IgniteDhtDemandedPartitionsMap();
            map.addHistorical(1, i, Long.MAX_VALUE, entries);

            try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
                assertNotNull(it);
                assertTrue("Not historical for iteration: " + i, it.historical(1));

                for (int j = i; j < entries; j++) {
                    assertTrue(it.hasNextX());

                    CacheDataRow row = it.next();

                    assertEquals(j * PARTS + 1, (int) row.key().value(grp.cacheObjectContext(), false));
                    assertEquals(j * PARTS + 1, (int) row.value().value(grp.cacheObjectContext(), false));
                }

                assertFalse(it.hasNext());
            }
        }

        stopAllGrids();

        // Check that iterator is valid after restart.
        ignite = startGrid();

        ignite.cluster().active(true);

        grp = ((IgniteEx) ignite).context().cache().cacheGroup(CU.cacheId(CACHE_NAME));
        offh = grp.offheap();
        topVer = grp.affinity().lastVersion();

        for (int i = 0; i < entries; i++) {
            long start = System.currentTimeMillis();

            map = new IgniteDhtDemandedPartitionsMap();
            map.addHistorical(0, i, Long.MAX_VALUE, entries);

            try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
                long end = System.currentTimeMillis();

                info("Time to get iterator: " + (end - start));

                assertTrue("Not historical for iteration: " + i, it.historical(0));
                assertNotNull(it);

                start = System.currentTimeMillis();

                for (int j = i; j < entries; j++) {
                    assertTrue("i=" + i + ", j=" + j, it.hasNextX());

                    CacheDataRow row = it.next();

                    assertEquals(j * PARTS, (int) row.key().value(grp.cacheObjectContext(), false));
                    assertEquals(j * PARTS, (int) row.value().value(grp.cacheObjectContext(), false));
                }

                end = System.currentTimeMillis();

                info("Time to iterate: " + (end - start));

                assertFalse(it.hasNext());
            }

            map = new IgniteDhtDemandedPartitionsMap();
            map.addHistorical(1, i, Long.MAX_VALUE, entries);

            try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
                assertNotNull(it);
                assertTrue("Not historical for iteration: " + i, it.historical(1));

                for (int j = i; j < entries; j++) {
                    assertTrue(it.hasNextX());

                    CacheDataRow row = it.next();

                    assertEquals(j * PARTS + 1, (int) row.key().value(grp.cacheObjectContext(), false));
                    assertEquals(j * PARTS + 1, (int) row.value().value(grp.cacheObjectContext(), false));
                }

                assertFalse(it.hasNext());
            }
        }
    } finally {
        stopAllGrids();

        System.clearProperty(IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD);
    }
}
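The "Put to partition 0 / partition 1" comments above assume a particular mapping of integer keys to partitions by RendezvousAffinityFunction. That assumption can be checked through the public Affinity API without touching internals; the sketch below is illustrative (cache name, partition count and key range are made up) and only requires ignite-core.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
import org.apache.ignite.configuration.CacheConfiguration;

public class KeyToPartitionSketch {
    public static void main(String[] args) {
        int parts = 32; // hypothetical partition count standing in for PARTS

        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>("demo");
            ccfg.setAffinity(new RendezvousAffinityFunction(false, parts));

            ignite.getOrCreateCache(ccfg);

            Affinity<Integer> aff = ignite.affinity("demo");

            // Print where keys of the form i * parts and i * parts + 1 land,
            // so the "Put to partition 0 / 1" comments can be verified.
            for (int i = 0; i < 5; i++) {
                System.out.println("key=" + (i * parts) + " -> partition " + aff.partition(i * parts));
                System.out.println("key=" + (i * parts + 1) + " -> partition " + aff.partition(i * parts + 1));
            }
        }
    }
}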
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class CacheContinuousQueryFailoverAbstractSelfTest, method waitRebalanceFinished.
/**
 * @param ignite Ignite.
 * @param topVer Major topology version.
 * @param minorVer Minor topology version.
 * @throws Exception If failed.
 */
private void waitRebalanceFinished(Ignite ignite, long topVer, int minorVer) throws Exception {
    final AffinityTopologyVersion topVer0 = new AffinityTopologyVersion(topVer, minorVer);

    final GridDhtPartitionTopology top = ((IgniteKernal) ignite).context().cache().context().cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).topology();

    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return top.rebalanceFinished(topVer0);
        }
    }, 5000);

    assertTrue(top.rebalanceFinished(topVer0));
}
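GridAbsPredicate declares a single apply() method, so on Java 8+ the same wait can be written with a lambda. This is only a sketch of that variant, assuming the same test-class context (DEFAULT_CACHE_NAME, GridTestUtils and the inherited asserts) as the method above.

private void waitRebalanceFinished(Ignite ignite, long topVer, int minorVer) throws Exception {
    final AffinityTopologyVersion topVer0 = new AffinityTopologyVersion(topVer, minorVer);

    final GridDhtPartitionTopology top = ((IgniteKernal) ignite).context().cache().context().cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).topology();

    // Same condition as the anonymous GridAbsPredicate, expressed as a lambda.
    GridTestUtils.waitForCondition(() -> top.rebalanceFinished(topVer0), 5000);

    assertTrue(top.rebalanceFinished(topVer0));
}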
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class GridContinuousProcessor, method start.
/** {@inheritDoc} */
@Override public void start() throws IgniteCheckedException {
    if (ctx.config().isDaemon())
        return;

    retryDelay = ctx.config().getNetworkSendRetryDelay();
    retryCnt = ctx.config().getNetworkSendRetryCount();

    marsh = ctx.config().getMarshaller();

    ctx.event().addLocalEventListener(new DiscoveryListener(), EVT_NODE_LEFT, EVT_NODE_FAILED);

    ctx.event().addLocalEventListener(new GridLocalEventListener() {
        @Override public void onEvent(Event evt) {
            cancelFutures(new IgniteCheckedException("Topology segmented"));
        }
    }, EVT_NODE_SEGMENTED);

    ctx.discovery().setCustomEventListener(StartRoutineDiscoveryMessage.class, new CustomEventListener<StartRoutineDiscoveryMessage>() {
        @Override public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, StartRoutineDiscoveryMessage msg) {
            if (ctx.isStopping())
                return;

            processStartRequest(snd, msg);
        }
    });

    ctx.discovery().setCustomEventListener(StartRoutineAckDiscoveryMessage.class, new CustomEventListener<StartRoutineAckDiscoveryMessage>() {
        @Override public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, StartRoutineAckDiscoveryMessage msg) {
            if (ctx.isStopping())
                return;

            processStartAckRequest(topVer, msg);
        }
    });

    ctx.discovery().setCustomEventListener(StopRoutineDiscoveryMessage.class, new CustomEventListener<StopRoutineDiscoveryMessage>() {
        @Override public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, StopRoutineDiscoveryMessage msg) {
            if (ctx.isStopping())
                return;

            processStopRequest(snd, msg);
        }
    });

    ctx.discovery().setCustomEventListener(StopRoutineAckDiscoveryMessage.class, new CustomEventListener<StopRoutineAckDiscoveryMessage>() {
        @Override public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, StopRoutineAckDiscoveryMessage msg) {
            if (ctx.isStopping())
                return;

            processStopAckRequest(msg);
        }
    });

    ctx.io().addMessageListener(TOPIC_CONTINUOUS, new GridMessageListener() {
        @Override public void onMessage(UUID nodeId, Object obj, byte plc) {
            GridContinuousMessage msg = (GridContinuousMessage) obj;

            if (msg.data() == null && msg.dataBytes() != null) {
                try {
                    msg.data(U.unmarshal(marsh, msg.dataBytes(), U.resolveClassLoader(ctx.config())));
                } catch (IgniteCheckedException e) {
                    U.error(log, "Failed to process message (ignoring): " + msg, e);

                    return;
                }
            }

            switch (msg.type()) {
                case MSG_EVT_NOTIFICATION:
                    processNotification(nodeId, msg);

                    break;

                case MSG_EVT_ACK:
                    processMessageAck(msg);

                    break;

                default:
                    assert false : "Unexpected message received: " + msg.type();
            }
        }
    });

    ctx.cacheObjects().onContinuousProcessorStarted(ctx);

    ctx.service().onContinuousProcessorStarted(ctx);

    if (log.isDebugEnabled())
        log.debug("Continuous processor started.");
}
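The StartRoutine/StopRoutine discovery messages handled above are what the public continuous-query API is built on: registering a ContinuousQuery on a cache starts a routine through this processor. Below is a minimal sketch of that public-API side; the cache name, listener body and sleep are illustrative, not part of the processor code.

import javax.cache.event.CacheEntryEvent;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;

public class ContinuousQuerySketch {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("demo");

            ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();

            // Local listener invoked for every matching update; the routine
            // registration itself travels over discovery, which is what the
            // StartRoutineDiscoveryMessage listener above processes.
            qry.setLocalListener(evts -> {
                for (CacheEntryEvent<? extends Integer, ? extends String> e : evts)
                    System.out.println("Updated: " + e.getKey() + " -> " + e.getValue());
            });

            try (QueryCursor<?> cur = cache.query(qry)) {
                cache.put(1, "one");

                Thread.sleep(1_000); // give the asynchronous notification time to arrive
            }
        }
    }
}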
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class AbstractAffinityFunctionSelfTest, method checkRandomReassignment.
/**
 * @param backups Number of backups.
 */
protected void checkRandomReassignment(int backups) {
    AffinityFunction aff = affinityFunction();

    Random rnd = new Random();

    int maxNodes = 50;

    List<ClusterNode> nodes = new ArrayList<>(maxNodes);

    List<List<ClusterNode>> prev = null;

    int state = 0;
    int i = 0;

    while (true) {
        boolean add;

        if (nodes.size() < 2) {
            // Returned back to one node?
            if (state == 1)
                return;

            add = true;
        } else if (nodes.size() == maxNodes) {
            if (state == 0)
                state = 1;

            add = false;
        } else {
            // Nodes size in [2, maxNodes - 1].
            if (state == 0)
                // 66% to add, 33% to remove.
                add = rnd.nextInt(3) != 0;
            else
                // 33% to add, 66% to remove.
                add = rnd.nextInt(3) == 0;
        }

        DiscoveryEvent discoEvt;

        if (add) {
            ClusterNode addedNode = new GridTestNode(UUID.randomUUID());

            nodes.add(addedNode);

            discoEvt = new DiscoveryEvent(addedNode, "", EventType.EVT_NODE_JOINED, addedNode);
        } else {
            ClusterNode rmvNode = nodes.remove(rnd.nextInt(nodes.size()));

            discoEvt = new DiscoveryEvent(rmvNode, "", EventType.EVT_NODE_LEFT, rmvNode);
        }

        info("======================================");
        info("Assigning partitions [iter=" + i + ", discoEvt=" + discoEvt + ", nodesSize=" + nodes.size() + ']');
        info("======================================");

        List<List<ClusterNode>> assignment = aff.assignPartitions(new GridAffinityFunctionContextImpl(nodes, prev, discoEvt, new AffinityTopologyVersion(i), backups));

        verifyAssignment(assignment, backups, aff.partitions(), nodes.size());

        prev = assignment;

        i++;
    }
}
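The test above drives AffinityFunction.assignPartitions() directly with mock nodes and synthetic discovery events; on a live node the resulting assignment can be observed through the public Affinity API instead. A standalone sketch of that follows (cache name, backup and partition counts are illustrative); with a single node the backup slots are simply left unfilled.

import java.util.Collection;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.configuration.CacheConfiguration;

public class AssignmentInspectionSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>("demo");
            ccfg.setBackups(1);
            ccfg.setAffinity(new RendezvousAffinityFunction(false, 16));

            ignite.getOrCreateCache(ccfg);

            Affinity<Integer> aff = ignite.affinity("demo");

            // Primary and backup owners per partition for the current topology version.
            for (int p = 0; p < aff.partitions(); p++) {
                Collection<ClusterNode> owners = aff.mapPartitionToPrimaryAndBackups(p);

                System.out.println("partition " + p + " -> " + owners.size() + " owner(s)");
            }
        }
    }
}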