Use of org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter in project ignite by apache.
The class PlatformConfigurationUtils, method writeAffinityBackupFilter.
/**
 * Writes affinity backup filter.
 *
 * @param out Stream.
 * @param filter Filter.
 */
private static void writeAffinityBackupFilter(BinaryRawWriter out, Object filter) {
    if (filter instanceof ClusterNodeAttributeAffinityBackupFilter) {
        ClusterNodeAttributeAffinityBackupFilter backupFilter = (ClusterNodeAttributeAffinityBackupFilter)filter;

        String[] attrs = backupFilter.getAttributeNames();

        out.writeInt(attrs.length);

        for (String attr : attrs)
            out.writeString(attr);
    }
    else
        out.writeInt(-1);
}
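For reference, here is a minimal sketch of the matching read side, assuming it consumes exactly what writeAffinityBackupFilter produced: an int count of attribute names followed by the names themselves, with -1 signalling that no ClusterNodeAttributeAffinityBackupFilter was configured. The helper name readAffinityBackupFilter is hypothetical; the actual read path in the project is inlined in readAffinityFunction, shown in the next snippet.

// Hypothetical helper, not part of the class above: reads back what
// writeAffinityBackupFilter wrote. A negative count means no filter was written.
private static ClusterNodeAttributeAffinityBackupFilter readAffinityBackupFilter(BinaryRawReader in) {
    int attrCnt = in.readInt();

    if (attrCnt < 0)
        return null;

    String[] attrs = new String[attrCnt];

    for (int i = 0; i < attrCnt; i++)
        attrs[i] = in.readString();

    return new ClusterNodeAttributeAffinityBackupFilter(attrs);
}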
Use of org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter in project ignite by apache.
The class PlatformConfigurationUtils, method readAffinityFunction.
/**
 * Reads the affinity function.
 *
 * @param in Stream.
 * @return Affinity function.
 */
public static PlatformAffinityFunction readAffinityFunction(BinaryRawReaderEx in) {
    byte plcTyp = in.readByte();

    if (plcTyp == 0)
        return null;

    int partitions = in.readInt();
    boolean exclNeighbours = in.readBoolean();
    byte overrideFlags = in.readByte();
    Object userFunc = in.readObjectDetached();

    AffinityFunction baseFunc = null;

    switch (plcTyp) {
        case 1: {
            throw new IllegalStateException("FairAffinityFunction");
        }

        case 2: {
            RendezvousAffinityFunction f = new RendezvousAffinityFunction();

            f.setPartitions(partitions);
            f.setExcludeNeighbors(exclNeighbours);

            baseFunc = f;

            int attrCnt = in.readInt();

            if (attrCnt > 0) {
                String[] attrs = new String[attrCnt];

                for (int i = 0; i < attrCnt; i++)
                    attrs[i] = in.readString();

                f.setAffinityBackupFilter(new ClusterNodeAttributeAffinityBackupFilter(attrs));
            }

            break;
        }

        default:
            assert plcTyp == 3;
    }

    return new PlatformAffinityFunction(userFunc, partitions, overrideFlags, baseFunc);
}
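Taken together with the writer above, the policy type byte read at the start appears to distinguish four cases: 0 means no affinity function is configured (null is returned), 1 corresponds to the removed FairAffinityFunction and is rejected with an exception, 2 is RendezvousAffinityFunction with an optional attribute list that becomes a ClusterNodeAttributeAffinityBackupFilter, and 3 (asserted in the default branch) wraps only the user-provided function in the resulting PlatformAffinityFunction.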
Use of org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter in project ignite by apache.
The class BackupFilter, method backupFilter.
@Test
void backupFilter() {
    // tag::backup-filter[]
    CacheConfiguration<Integer, String> cacheCfg = new CacheConfiguration<Integer, String>("myCache");
    cacheCfg.setBackups(1);

    RendezvousAffinityFunction af = new RendezvousAffinityFunction();
    af.setAffinityBackupFilter(new ClusterNodeAttributeAffinityBackupFilter("AVAILABILITY_ZONE"));

    cacheCfg.setAffinity(af);
    // end::backup-filter[]
}
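The filter only has an effect if the cluster nodes actually carry the attribute it compares. Below is a minimal node-side sketch, assuming a hypothetical zone value "zone-a" and reusing cacheCfg from the snippet above; the attribute is set through IgniteConfiguration.setUserAttributes (org.apache.ignite.configuration.IgniteConfiguration, with Ignition and java.util.Collections assumed to be imported) before the node starts.

// Each node advertises its availability zone as a user attribute, so that
// ClusterNodeAttributeAffinityBackupFilter("AVAILABILITY_ZONE") can place primary
// and backup copies in different zones. The value "zone-a" is an assumption.
IgniteConfiguration igniteCfg = new IgniteConfiguration()
    .setUserAttributes(Collections.singletonMap("AVAILABILITY_ZONE", "zone-a"));

try (Ignite ignite = Ignition.start(igniteCfg)) {
    // Apply the cache configuration built in the example above.
    ignite.getOrCreateCache(cacheCfg);
}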
Use of org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter in project ignite by apache.
The class IgniteWalRebalanceTest, method testMultipleNodesFailHistoricalRebalance.
/**
 * Tests that the demander switches to full rebalance if two of the three previously chosen suppliers
 * for a group have failed to perform historical rebalance due to an unexpected error.
 *
 * @throws Exception If failed.
 */
@Test
@WithSystemProperty(key = "IGNITE_DISABLE_WAL_DURING_REBALANCING", value = "true")
public void testMultipleNodesFailHistoricalRebalance() throws Exception {
    backups = 1;

    int node_cnt = 4;
    int demanderId = node_cnt - 1;

    // Start a new cluster with 3 suppliers.
    startGrids(node_cnt - 1);

    // Start demander node.
    userAttrs.put("TEST_ATTR", "TEST_ATTR");
    startGrid(node_cnt - 1);

    grid(0).cluster().state(ACTIVE);

    // Create a new cache that places a full set of partitions on demander node.
    RendezvousAffinityFunction aff = new RendezvousAffinityFunction(false, PARTS_CNT);
    aff.setAffinityBackupFilter(new ClusterNodeAttributeAffinityBackupFilter("TEST_ATTR"));

    String cacheName = "test-cache-1";

    IgniteCache<Integer, IndexedObject> cache0 = grid(0).getOrCreateCache(
        new CacheConfiguration<Integer, IndexedObject>(cacheName)
            .setBackups(backups)
            .setAffinity(aff)
            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC));

    // Fill initial data and force checkpoint.
    final int entryCnt = PARTS_CNT * 200;
    final int preloadEntryCnt = PARTS_CNT * 201;

    int val = 0;

    for (int k = 0; k < preloadEntryCnt; k++)
        cache0.put(k, new IndexedObject(val++));

    forceCheckpoint();

    // Stop demander node.
    stopGrid(demanderId);

    // Rewrite data to trigger further rebalance.
    for (int k = 0; k < entryCnt; k++) {
        // Leave one partition (12) untouched, even though the corresponding RebalanceFuture will be cancelled.
        if (grid(0).affinity(cacheName).partition(k) != 12)
            cache0.put(k, new IndexedObject(val++));
    }

    // Upload additional data to a particular partition (a primary partition belonging to the coordinator, for instance)
    // in order to trigger full rebalance for that partition instead of a historical one.
    int[] primaries0 = grid(0).affinity(cacheName).primaryPartitions(grid(0).localNode());

    for (int i = 0; i < preloadEntryCnt; ++i)
        cache0.put(primaries0[0], new IndexedObject(val++));

    forceCheckpoint();

    // Delay rebalance process for specified group.
    blockMsgPred = (node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage)msg;

            return msg0.groupId() == CU.cacheId(cacheName);
        }

        return false;
    };

    Queue<RecordedDemandMessage> recorderedMsgs = new ConcurrentLinkedQueue<>();

    // Record demand messages for specified group.
    recordMsgPred = (node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage)msg;

            if (msg0.groupId() == CU.cacheId(cacheName)) {
                recorderedMsgs.add(new RecordedDemandMessage(
                    node.id(),
                    msg0.groupId(),
                    msg0.partitions().hasFull(),
                    msg0.partitions().hasHistorical()));
            }
        }

        return false;
    };

    // Corrupt WAL on all suppliers except one.
    injectFailingIOFactory(grid(0));
    injectFailingIOFactory(grid(1));

    // Trigger rebalance process from suppliers.
    IgniteEx restartedDemander = startGrid(node_cnt - 1);

    TestRecordingCommunicationSpi demanderSpi = TestRecordingCommunicationSpi.spi(restartedDemander);

    // Wait until demander starts historical rebalancing.
    demanderSpi.waitForBlocked();

    final IgniteInternalFuture<Boolean> preloadFut =
        restartedDemander.cachex(cacheName).context().group().preloader().rebalanceFuture();

    // Unblock messages and start tracking demand and supply messages.
    demanderSpi.stopBlock();

    // Wait until rebalancing is cancelled for both suppliers.
    assertTrue(
        "Rebalance future was not cancelled [fut=" + preloadFut + ']',
        GridTestUtils.waitForCondition(preloadFut::isDone, getTestTimeout()));

    Assert.assertEquals("Rebalance should be cancelled on demander node: " + preloadFut, false, preloadFut.get());

    awaitPartitionMapExchange(true, true, null);

    // Check data consistency.
    assertPartitionsSame(idleVerify(restartedDemander, cacheName));

    // Check that historical rebalance switched to full for suppliers 1 and 2, and stayed historical for supplier 3.
    IgnitePredicate<RecordedDemandMessage> histPred = msg -> msg.hasHistorical() && !msg.hasFull();
    IgnitePredicate<RecordedDemandMessage> fullPred = msg -> !msg.hasHistorical() && msg.hasFull();
    IgnitePredicate<RecordedDemandMessage> mixedPred = msg -> msg.hasHistorical() && msg.hasFull();

    IgniteBiInClosure<UUID, Boolean> supplierChecker = (supplierId, mixed) -> {
        List<RecordedDemandMessage> demandMsgsForSupplier = recorderedMsgs.stream()
            .filter(msg -> msg.supplierId().equals(supplierId))
            .filter(msg -> msg.groupId() == CU.cacheId(cacheName))
            .filter(msg -> msg.hasFull() || msg.hasHistorical())
            .collect(toList());

        assertEquals("There should be only two demand messages [supplierId=" + supplierId + ']', 2, demandMsgsForSupplier.size());

        assertTrue(
            "The first message should require " + (mixed ? "mixed" : "historical") + " rebalance [msg=" + demandMsgsForSupplier.get(0) + ']',
            mixed ? mixedPred.apply(demandMsgsForSupplier.get(0)) : histPred.apply(demandMsgsForSupplier.get(0)));

        assertTrue(
            "The second message should require full rebalance [msg=" + demandMsgsForSupplier.get(1) + ']',
            fullPred.apply(demandMsgsForSupplier.get(1)));
    };

    supplierChecker.apply(grid(0).cluster().localNode().id(), true);
    supplierChecker.apply(grid(1).cluster().localNode().id(), false);

    // Check supplier 3.
    List<RecordedDemandMessage> demandMsgsForSupplier = recorderedMsgs.stream()
        .filter(msg -> msg.supplierId().equals(grid(2).cluster().localNode().id()))
        .filter(msg -> msg.groupId() == CU.cacheId(cacheName))
        .filter(msg -> msg.hasFull() || msg.hasHistorical())
        .collect(toList());

    assertEquals("There should be only one demand message.", 1, demandMsgsForSupplier.size());

    assertTrue(
        "The first message should require historical rebalance [msg=" + demandMsgsForSupplier.get(0) + ']',
        histPred.apply(demandMsgsForSupplier.get(0)));
}