Example use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class IgniteWalReaderTest, method testIteratorWithCurrentKernelContext.
/**
 * Tests WAL iterator which uses shared cache context of currently started Ignite node.
 * Puts a batch of sequentially-keyed records, then replays the WAL twice: once with
 * deserialized values and once in keep-binary mode, asserting every record is seen in order.
 */
@Test
public void testIteratorWithCurrentKernelContext() throws Exception {
    IgniteEx ignite = startGrid(0);

    // Use the ClusterState-based activation API for consistency with the rest of
    // the project; cluster().active(boolean) is deprecated.
    ignite.cluster().state(org.apache.ignite.cluster.ClusterState.ACTIVE);

    int cntEntries = 100;

    putDummyRecords(ignite, cntEntries);

    String workDir = U.defaultWorkDirectory();

    IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);

    // Metadata/marshaller dirs are nulled out on purpose: type resolution must go
    // through the shared context of the running node rather than files on disk.
    IteratorParametersBuilder iterParametersBuilder =
        createIteratorParametersBuilder(workDir, genDbSubfolderName(ignite, 0))
            .filesOrDirs(workDir)
            .binaryMetadataFileStoreDir(null)
            .marshallerMappingFileStoreDir(null)
            .sharedContext(ignite.context().cache().context());

    AtomicInteger cnt = new AtomicInteger();

    // Records were written with sequential integer keys, so each key must equal
    // both the value's iVal field and the running counter.
    IgniteBiInClosure<Object, Object> objConsumer = (key, val) -> {
        if (val instanceof IndexedObject) {
            assertEquals(key, ((IndexedObject)val).iVal);
            assertEquals(key, cnt.getAndIncrement());
        }
    };

    iterateAndCountDataRecord(factory.iterator(iterParametersBuilder.copy()), objConsumer, null);

    assertEquals(cntEntries, cnt.get());

    // Test without converting non primary types.
    iterParametersBuilder.keepBinary(true);

    cnt.set(0);

    // Keep-binary pass: values arrive as BinaryObject, fields read by name.
    IgniteBiInClosure<Object, Object> binObjConsumer = (key, val) -> {
        if (val instanceof BinaryObject) {
            assertEquals(key, ((BinaryObject)val).field("iVal"));
            assertEquals(key, cnt.getAndIncrement());
        }
    };

    iterateAndCountDataRecord(factory.iterator(iterParametersBuilder.copy()), binObjConsumer, null);

    assertEquals(cntEntries, cnt.get());
}
Example use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class IgniteWalReaderTest, method testFillWalWithDifferentTypes.
/**
 * Fills the WAL with entries of many key/value types (primitives, enums, Serializable,
 * Externalizable, POJOs), then reads the log back twice — once with deserialized objects
 * and once in keep-binary mode — verifying every written pair is found and that
 * {@code DataRecord.toString()} prints the original object representations.
 *
 * @throws Exception if failed.
 */
@Test
public void testFillWalWithDifferentTypes() throws Exception {
    Ignite ig = startGrid();

    // ClusterState-based API instead of the deprecated cluster().active(true).
    ig.cluster().state(org.apache.ignite.cluster.ClusterState.ACTIVE);

    IgniteCache<Object, Object> addlCache = ig.getOrCreateCache(CACHE_ADDL_NAME);

    // Populate keys/values spanning the different marshalling paths: strings,
    // boxed primitives, enums, Serializable, Externalizable and an indexed POJO.
    addlCache.put("1", "2");
    addlCache.put(1, 2);
    addlCache.put(1L, 2L);
    addlCache.put(TestEnum.A, "Enum_As_Key");
    addlCache.put("Enum_As_Value", TestEnum.B);
    addlCache.put(TestEnum.C, TestEnum.C);
    addlCache.put("Serializable", new TestSerializable(42));
    addlCache.put(new TestSerializable(42), "Serializable_As_Key");
    addlCache.put("Externalizable", new TestExternalizable(42));
    addlCache.put(new TestExternalizable(42), "Externalizable_As_Key");
    addlCache.put(292, new IndexedObject(292));

    String search1 = "SomeUnexpectedStringValueAsKeyToSearch";

    // Substrings that must appear in DataRecord.toString() during the first pass.
    Collection<String> ctrlStringsToSearch = new HashSet<>();
    ctrlStringsToSearch.add(search1);

    // Substrings expected in the keep-binary pass (raw field data only).
    Collection<String> ctrlStringsForBinaryObjSearch = new HashSet<>();
    ctrlStringsForBinaryObjSearch.add(search1);

    addlCache.put(search1, "SearchKey");

    String search2 = "SomeTestStringContainerToBePrintedLongLine";

    TestStringContainerToBePrinted val = new TestStringContainerToBePrinted(search2);

    // will validate original toString() was called
    ctrlStringsToSearch.add("v = [ " + val.getClass().getSimpleName() + "{data='" + search2 + "'}]");
    ctrlStringsForBinaryObjSearch.add(search2);

    addlCache.put("SearchValue", val);

    String search3 = "SomeTestStringContainerToBePrintedLongLine2";

    TestStringContainerToBePrinted key = new TestStringContainerToBePrinted(search3);

    // will validate original toString() was called
    ctrlStringsToSearch.add("k = " + key.getClass().getSimpleName() + "{data='" + search3 + "'}");

    // validate only string itself
    ctrlStringsForBinaryObjSearch.add(search3);

    addlCache.put(key, "SearchKey");

    int cntEntries = addlCache.size();

    // Control map for the deserializing pass: every WAL entry removes its pair.
    Map<Object, Object> ctrlMap = new HashMap<>();

    for (Cache.Entry<Object, Object> next : addlCache)
        ctrlMap.put(next.getKey(), next.getValue());

    // Independent copy of the same content for the keep-binary pass.
    Map<Object, Object> ctrlMapForBinaryObjects = new HashMap<>();

    for (Cache.Entry<Object, Object> next : addlCache)
        ctrlMapForBinaryObjects.put(next.getKey(), next.getValue());

    String subfolderName = genDbSubfolderName(ig, 0);

    // Wait async allocation wal segment file by archiver.
    // NOTE(review): fixed sleep may be flaky; a condition-based wait would be more robust.
    Thread.sleep(1000);

    stopGrid("node0", false);

    String workDir = U.defaultWorkDirectory();

    IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);

    IteratorParametersBuilder params0 = createIteratorParametersBuilder(workDir, subfolderName);

    params0.filesOrDirs(workDir);

    // First pass: values must arrive fully deserialized (never as BinaryObject).
    IgniteBiInClosure<Object, Object> objConsumer = (key12, val1) -> {
        log.info("K: [" + key12 + ", " + (key12 != null ? key12.getClass().getName() : "?") + "]" + " V: [" + val1 + ", " + (val1 != null ? val1.getClass().getName() : "?") + "]");

        boolean rmv = remove(ctrlMap, key12, val1);

        if (!rmv) {
            String msg = "Unable to remove pair from control map " + "K: [" + key12 + "] V: [" + val1 + "]";

            log.error(msg);
        }

        assertFalse(val1 instanceof BinaryObject);
    };

    // Checks DataRecord.toString() contains the expected original representations;
    // each match is consumed so the control set must end up empty.
    IgniteInClosure<DataRecord> toStrChecker = record -> {
        String strRepresentation = record.toString();

        for (Iterator<String> iter = ctrlStringsToSearch.iterator(); iter.hasNext(); ) {
            final String next = iter.next();

            if (strRepresentation.contains(next)) {
                iter.remove();

                break;
            }
        }
    };

    scanIterateAndCount(factory, params0, cntEntries, 0, objConsumer, toStrChecker);

    assertTrue(" Control Map is not empty after reading entries: " + ctrlMap, ctrlMap.isEmpty());
    assertTrue(" Control Map for strings in entries is not empty after" + " reading records: " + ctrlStringsToSearch, ctrlStringsToSearch.isEmpty());

    // Second pass: keep-binary keys need manual unwrapping (by binary type name)
    // before they can be matched against the control map's deserialized keys.
    IgniteBiInClosure<Object, Object> binObjConsumer = (key13, val12) -> {
        log.info("K(KeepBinary): [" + key13 + ", " + (key13 != null ? key13.getClass().getName() : "?") + "]" + " V(KeepBinary): [" + val12 + ", " + (val12 != null ? val12.getClass().getName() : "?") + "]");

        boolean rmv = remove(ctrlMapForBinaryObjects, key13, val12);

        if (!rmv) {
            if (key13 instanceof BinaryObject) {
                BinaryObject keyBinObj = (BinaryObject)key13;

                String binaryObjTypeName = keyBinObj.type().typeName();

                if (Objects.equals(TestStringContainerToBePrinted.class.getName(), binaryObjTypeName)) {
                    String data = keyBinObj.field("data");

                    rmv = ctrlMapForBinaryObjects.remove(new TestStringContainerToBePrinted(data)) != null;
                }
                else if (Objects.equals(TestSerializable.class.getName(), binaryObjTypeName)) {
                    Integer iVal = keyBinObj.field("iVal");

                    rmv = ctrlMapForBinaryObjects.remove(new TestSerializable(iVal)) != null;
                }
                else if (Objects.equals(TestEnum.class.getName(), binaryObjTypeName)) {
                    TestEnum key1 = TestEnum.values()[keyBinObj.enumOrdinal()];

                    rmv = ctrlMapForBinaryObjects.remove(key1) != null;
                }
            }
            else if (val12 instanceof BinaryObject) {
                // don't compare BO values, just remove by key
                rmv = ctrlMapForBinaryObjects.remove(key13) != null;
            }
        }

        if (!rmv)
            log.error("Unable to remove pair from control map " + "K: [" + key13 + "] V: [" + val12 + "]");

        // Spot-check field content of the binary-form IndexedObject value.
        if (val12 instanceof BinaryObject) {
            BinaryObject binaryObj = (BinaryObject)val12;

            String binaryObjTypeName = binaryObj.type().typeName();

            if (Objects.equals(IndexedObject.class.getName(), binaryObjTypeName)) {
                assertEquals(binaryObj.field("iVal").toString(), binaryObj.field("jVal").toString());

                byte[] data = binaryObj.field("data");

                for (byte datum : data)
                    assertTrue(datum >= 'A' && datum <= 'A' + 10);
            }
        }
    };

    // Keep-binary pass checks only the raw strings, not the owners' toString().
    IgniteInClosure<DataRecord> binObjToStrChecker = record -> {
        String strRepresentation = record.toString();

        for (Iterator<String> iter = ctrlStringsForBinaryObjSearch.iterator(); iter.hasNext(); ) {
            final String next = iter.next();

            if (strRepresentation.contains(next)) {
                iter.remove();

                break;
            }
        }
    };

    IteratorParametersBuilder params1 = createIteratorParametersBuilder(workDir, subfolderName);

    params1.filesOrDirs(workDir).keepBinary(true);

    // Validate same WAL log with flag binary objects only
    IgniteWalIteratorFactory keepBinFactory = new IgniteWalIteratorFactory(log);

    scanIterateAndCount(keepBinFactory, params1, cntEntries, 0, binObjConsumer, binObjToStrChecker);

    assertTrue(" Control Map is not empty after reading entries: " + ctrlMapForBinaryObjects, ctrlMapForBinaryObjects.isEmpty());
    assertTrue(" Control Map for strings in entries is not empty after" + " reading records: " + ctrlStringsForBinaryObjSearch, ctrlStringsForBinaryObjSearch.isEmpty());
}
Example use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class GridCommandHandlerTest, method testBaselineAddOnNotActiveCluster.
/**
 * Test baseline add items works via control.sh
 *
 * @throws Exception If failed.
 */
@Test
public void testBaselineAddOnNotActiveCluster() throws Exception {
    Ignite ignite = startGrid(1);

    // Cluster must not be activated yet: baseline changes are rejected while
    // inactive. state() replaces the deprecated cluster().active() check.
    assertEquals(org.apache.ignite.cluster.ClusterState.INACTIVE, ignite.cluster().state());

    String consistentIDs = getTestIgniteInstanceName(1);

    injectTestSystemOut();

    // Adding a baseline node on an inactive cluster must fail with an error.
    assertEquals(EXIT_CODE_UNEXPECTED_ERROR, execute("--baseline", "add", consistentIDs));

    assertContains(log, testOut.toString(), "Changing BaselineTopology on inactive cluster is not allowed.");

    // Nodes 2 and 3 are never started, so their consistent IDs cannot be resolved.
    consistentIDs = getTestIgniteInstanceName(1) + ", " + getTestIgniteInstanceName(2) + "," + getTestIgniteInstanceName(3);

    assertEquals(EXIT_CODE_INVALID_ARGUMENTS, execute("--baseline", "add", consistentIDs));

    String testOutStr = testOut.toString();

    // Ignite instance 1 may be logged only in the arguments list: true when no
    // "Arguments:" line of the output mentions instance 1.
    boolean instance1MissingFromArgs = Arrays.stream(testOutStr.split("\n"))
        .filter(s -> s.contains("Arguments:"))
        .noneMatch(s -> s.contains(getTestIgniteInstanceName() + "1"));

    assertTrue(testOutStr, testOutStr.contains("Node not found for consistent ID:"));
    assertFalse(testOutStr, instance1MissingFromArgs);
}
Example use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class GridClusterStateProcessor, method autoAdjustInMemoryClusterState.
/**
 * Update baseline locally if cluster is not persistent and baseline autoadjustment is enabled with zero timeout.
 *
 * @param nodeId Id of the node that initiated the operation (joined/left/failed).
 * @param topSnapshot Topology snapshot from the discovery message.
 * @param discoCache Discovery cache from the discovery manager.
 * @param topVer Topology version.
 * @param minorTopVer Minor topology version.
 * @return {@code true} if baseline was changed and discovery cache recalculation is required.
 */
public boolean autoAdjustInMemoryClusterState(UUID nodeId, Collection<ClusterNode> topSnapshot, DiscoCache discoCache, long topVer, int minorTopVer) {
IgniteClusterImpl cluster = ctx.cluster().get();
// Snapshot of the current global state; reused below so all checks see one consistent view.
DiscoveryDataClusterState oldState = globalState;
// In-memory check scans all current nodes' configurations via the JDK marshaller.
boolean isInMemoryCluster = CU.isInMemoryCluster(ctx.discovery().allNodes(), ctx.marshallerContext().jdkMarshaller(), U.resolveClassLoader(ctx.config()));
// Adjust only when: pure in-memory cluster, state is active, no state transition is
// in progress, and auto-adjust is enabled with a zero timeout (i.e. immediate mode).
boolean autoAdjustBaseline = isInMemoryCluster && oldState.state().active() && !oldState.transition() && cluster.isBaselineAutoAdjustEnabled() && cluster.baselineAutoAdjustTimeout() == 0L;
if (autoAdjustBaseline) {
BaselineTopology oldBlt = oldState.baselineTopology();
// New baseline is built from server nodes only: clients and daemons are excluded.
Collection<ClusterNode> bltNodes = topSnapshot.stream().filter(n -> !n.isClient() && !n.isDaemon()).collect(Collectors.toList());
if (!bltNodes.isEmpty()) {
// Reuses the previous baseline id (0 for the very first baseline).
// NOTE(review): the id is not incremented here — presumably BaselineTopology.build
// handles versioning internally; confirm against BaselineTopology.
int newBltId = oldBlt == null ? 0 : oldBlt.id();
BaselineTopology newBlt = BaselineTopology.build(bltNodes, newBltId);
// The state-change message is applied locally below rather than sent over discovery;
// the cluster state itself is kept as-is, only the baseline is forced.
ChangeGlobalStateMessage changeGlobalStateMsg = new ChangeGlobalStateMessage(nodeId, nodeId, null, oldState.state(), true, newBlt, true, System.currentTimeMillis());
AffinityTopologyVersion ver = new AffinityTopologyVersion(topVer, minorTopVer);
// Apply the change and immediately finish it — both phases run synchronously here.
onStateChangeMessage(ver, changeGlobalStateMsg, discoCache);
ChangeGlobalStateFinishMessage finishMsg = new ChangeGlobalStateFinishMessage(nodeId, oldState.state(), true);
onStateFinishMessage(finishMsg);
// Mark that the new (post-finish) global state came from a local auto-adjustment.
globalState.localBaselineAutoAdjustment(true);
return true;
}
}
return false;
}
Example use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class IgniteClusterSnapshotSelfTest, method testSnapshotPrimaryBackupsTheSame.
/**
 * Checks that primary and backup partitions captured in a snapshot carry the same
 * update counters: after restoring from the snapshot and activating, no rebalancing
 * (no partition demand messages) should be triggered.
 *
 * @throws Exception If fails.
 */
@Test
public void testSnapshotPrimaryBackupsTheSame() throws Exception {
    int grids = 3;
    AtomicInteger cacheKey = new AtomicInteger();

    IgniteEx ignite = startGridsWithCache(grids, dfltCacheCfg, CACHE_KEYS_RANGE);

    // Concurrent transactional load. (Renamed: this future was misleadingly called
    // atLoadFut although it drives the TX cache — see the "tx-cache-put-" thread prefix.)
    IgniteInternalFuture<Long> txLoadFut = GridTestUtils.runMultiThreadedAsync(() -> {
        while (!Thread.currentThread().isInterrupted()) {
            ThreadLocalRandom rnd = ThreadLocalRandom.current();

            int gId = rnd.nextInt(grids);

            IgniteCache<Integer, Integer> txCache = grid(gId).getOrCreateCache(dfltCacheCfg.getName());

            try (Transaction tx = grid(gId).transactions().txStart()) {
                txCache.put(cacheKey.incrementAndGet(), 0);
                txCache.put(cacheKey.incrementAndGet(), 1);

                tx.commit();
            }
        }
    }, 5, "tx-cache-put-");

    // Concurrent atomic load (was misleadingly named txLoadFut).
    IgniteInternalFuture<Long> atomicLoadFut = GridTestUtils.runMultiThreadedAsync(() -> {
        while (!Thread.currentThread().isInterrupted()) {
            ThreadLocalRandom rnd = ThreadLocalRandom.current();

            IgniteCache<Integer, Integer> atomicCache = grid(rnd.nextInt(grids)).getOrCreateCache(atomicCcfg);

            atomicCache.put(cacheKey.incrementAndGet(), 0);
        }
    }, 5, "atomic-cache-put-");

    try {
        // Take the snapshot while both loads are running.
        IgniteFuture<Void> fut = ignite.snapshot().createSnapshot(SNAPSHOT_NAME);

        fut.get();
    }
    finally {
        atomicLoadFut.cancel();
        txLoadFut.cancel();
    }

    stopAllGrids();

    IgniteEx snpIg0 = startGridsFromSnapshot(grids, cfg -> resolveSnapshotWorkDirectory(cfg).getAbsolutePath(), SNAPSHOT_NAME, false);

    // Block whole rebalancing.
    for (Ignite g : G.allGrids())
        TestRecordingCommunicationSpi.spi(g).blockMessages((node, msg) -> msg instanceof GridDhtPartitionDemandMessage);

    snpIg0.cluster().state(ACTIVE);

    // No demand message should ever be blocked: counters must already be equal.
    assertFalse("Primary and backup in snapshot must have the same counters. Rebalance must not happen.",
        GridTestUtils.waitForCondition(() -> {
            boolean hasMsgs = false;

            for (Ignite g : G.allGrids())
                hasMsgs |= TestRecordingCommunicationSpi.spi(g).hasBlockedMessages();

            return hasMsgs;
        }, REBALANCE_AWAIT_TIME));

    TestRecordingCommunicationSpi.stopBlockAll();

    // Final cross-node consistency check over both caches.
    assertPartitionsSame(idleVerify(snpIg0, dfltCacheCfg.getName(), atomicCcfg.getName()));
}
Aggregations