Use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class GridCommandHandlerTest, method testConnectivityCommandWithNodeExit.
/**
 * Tests that the connectivity command does not fail when a node exits topology
 * during the connectivity check.
 *
 * Description:
 * 1. Start three nodes.
 * 2. Execute connectivity check.
 * 3. When the third node receives the connectivity check compute task, it stops itself.
 * 4. The command should exit with code OK.
 *
 * @throws Exception If failed.
 */
@Test
public void testConnectivityCommandWithNodeExit() throws Exception {
    // Holder so the local SPI class can reference the third node before it is started.
    IgniteEx[] killedNode = new IgniteEx[1];

    /** Communication SPI that shuts down the third node when a connection check arrives. */
    class KillNode3CommunicationSpi extends TcpCommunicationSpi {
        /** Whether to fail the check-connection request and stop the third node. */
        private final boolean fail;

        /** @param fail {@code true} to stop the third node on connection check. */
        public KillNode3CommunicationSpi(boolean fail) {
            this.fail = fail;
        }

        /** {@inheritDoc} */
        @Override public IgniteFuture<BitSet> checkConnection(List<ClusterNode> nodes) {
            if (!fail)
                return super.checkConnection(nodes);

            // Stop the node asynchronously and report no result for this check.
            runAsync(killedNode[0]::close);

            return null;
        }
    }

    IgniteEx crd = startGrid(1, (UnaryOperator<IgniteConfiguration>)cfg -> {
        cfg.setCommunicationSpi(new KillNode3CommunicationSpi(false));

        return cfg;
    });

    startGrid(2, (UnaryOperator<IgniteConfiguration>)cfg -> {
        cfg.setCommunicationSpi(new KillNode3CommunicationSpi(false));

        return cfg;
    });

    killedNode[0] = startGrid(3, (UnaryOperator<IgniteConfiguration>)cfg -> {
        cfg.setCommunicationSpi(new KillNode3CommunicationSpi(true));

        return cfg;
    });

    assertFalse(crd.cluster().state().active());

    crd.cluster().state(ACTIVE);

    assertEquals(3, crd.cluster().nodes().size());

    injectTestSystemOut();

    // The command must complete with OK even though the third node dies mid-check.
    IgniteInternalFuture<?> connFut = runAsync(() ->
        assertEquals(EXIT_CODE_OK, execute("--diagnostic", "connectivity")));

    connFut.get();
}
Use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class SystemViewCommandTest, method testPagesList.
/**
 * Checks free-list page system views ({@code CACHE_GRP_PAGE_LIST_VIEW} and
 * {@code DATA_REGION_PAGE_LIST_VIEW}), including filtering and re-creation of
 * the data region view after cluster deactivation/activation.
 */
@Test
public void testPagesList() throws Exception {
    String cacheName = "cacheFL";

    IgniteCache<Integer, byte[]> cache = ignite0.getOrCreateCache(
        new CacheConfiguration<Integer, byte[]>()
            .setName(cacheName)
            .setAffinity(new RendezvousAffinityFunction().setPartitions(1)));

    GridCacheDatabaseSharedManager dbMgr =
        (GridCacheDatabaseSharedManager)ignite0.context().cache().context().database();

    int pageSize = dbMgr.pageSize();

    try {
        dbMgr.enableCheckpoints(false).get();

        int key = 0;

        // Fill up different free-list buckets.
        for (int i = 0; i < pageSize / 2; i++)
            cache.put(key++, new byte[i + 1]);

        // Put some pages to one bucket to overflow pages cache.
        for (int i = 0; i < 1000; i++)
            cache.put(key++, new byte[pageSize / 2]);

        List<List<String>> grpView = systemView(ignite0, CACHE_GRP_PAGE_LIST_VIEW);
        List<List<String>> regionView = systemView(ignite0, DATA_REGION_PAGE_LIST_VIEW);

        String cacheId = Integer.toString(cacheId(cacheName));

        // Test filtering by 3 columns.
        assertTrue(grpView.stream().anyMatch(
            row -> cacheId.equals(row.get(0)) && "0".equals(row.get(1)) && "0".equals(row.get(3))));

        // Test filtering with invalid cache group id.
        assertTrue(grpView.stream().noneMatch(row -> "-1".equals(row.get(0))));

        // Test filtering with invalid partition id.
        assertTrue(grpView.stream().noneMatch(row -> "-1".equals(row.get(1))));

        // Test filtering with invalid bucket number.
        assertTrue(grpView.stream().noneMatch(row -> "-1".equals(row.get(3))));

        // Columns 4-6 (page counters) must be positive for at least one row of this cache.
        assertTrue(grpView.stream().anyMatch(row -> Integer.parseInt(row.get(4)) > 0 && cacheId.equals(row.get(0))));
        assertTrue(grpView.stream().anyMatch(row -> Integer.parseInt(row.get(5)) > 0 && cacheId.equals(row.get(0))));
        assertTrue(grpView.stream().anyMatch(row -> Integer.parseInt(row.get(6)) > 0 && cacheId.equals(row.get(0))));

        assertTrue(regionView.stream().anyMatch(row -> row.get(0).startsWith(DATA_REGION_NAME)));

        assertTrue(regionView.stream().noneMatch(
            row -> row.get(0).startsWith(DATA_REGION_NAME) && Integer.parseInt(row.get(4)) > 0));
    }
    finally {
        dbMgr.enableCheckpoints(true).get();
    }

    ignite0.cluster().state(INACTIVE);
    ignite0.cluster().state(ACTIVE);

    IgniteCache<Integer, Integer> cacheInMemory = ignite0.getOrCreateCache(
        new CacheConfiguration<Integer, Integer>()
            .setName("cacheFLInMemory")
            .setDataRegionName(DATA_REGION_NAME));

    cacheInMemory.put(0, 0);

    // After activation/deactivation new view for data region pages lists should be created, check that new view
    // correctly reflects changes in free-lists.
    assertTrue(systemView(ignite0, DATA_REGION_PAGE_LIST_VIEW).stream().noneMatch(
        row -> row.get(0).startsWith(DATA_REGION_NAME) && Integer.parseInt(row.get(4)) > 0));
}
Use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class GridCommandHandlerTest, method startGridAndPutNodeToMaintenance.
/**
 * Starts cluster of two nodes and prepares situation of corrupted PDS on node2
 * so it enters maintenance mode on restart.
 *
 * @param cachesToStart Configurations of caches that should be started in cluster.
 * @param cacheToCorrupt Function determining should cache with given name be corrupted or not.
 *      If {@code null}, all started caches are corrupted.
 * @return Persistence directory of the node that will enter maintenance mode.
 * @throws Exception If failed.
 */
private File startGridAndPutNodeToMaintenance(CacheConfiguration[] cachesToStart,
    @Nullable Function<String, Boolean> cacheToCorrupt) throws Exception {
    assert cachesToStart != null && cachesToStart.length > 0;

    // Fix: parameter is declared @Nullable but was dereferenced unconditionally (NPE).
    // A null filter now means "corrupt every started cache".
    Function<String, Boolean> corruptFilter = cacheToCorrupt != null ? cacheToCorrupt : name -> true;

    IgniteEx ig0 = startGrid(0);
    IgniteEx ig1 = startGrid(1);

    String ig1Folder = ig1.context().pdsFolderResolver().resolveFolders().folderName();
    File dbDir = U.resolveWorkDirectory(ig1.configuration().getWorkDirectory(), "db", false);

    File ig1LfsDir = new File(dbDir, ig1Folder);

    ig0.cluster().baselineAutoAdjustEnabled(false);
    ig0.cluster().state(ACTIVE);

    IgniteCache dfltCache = ig0.getOrCreateCache(cachesToStart[0]);

    // Start the remaining caches (loop is a no-op for a single-element array).
    for (int i = 1; i < cachesToStart.length; i++)
        ig0.getOrCreateCache(cachesToStart[i]);

    for (int k = 0; k < 1000; k++)
        dfltCache.put(k, k);

    GridCacheDatabaseSharedManager dbMgr0 = (GridCacheDatabaseSharedManager)ig0.context().cache().context().database();
    GridCacheDatabaseSharedManager dbMgr1 = (GridCacheDatabaseSharedManager)ig1.context().cache().context().database();

    // Flush everything written so far to the page store before WAL is disabled.
    dbMgr0.forceCheckpoint("cp").futureFor(CheckpointState.FINISHED).get();
    dbMgr1.forceCheckpoint("cp").futureFor(CheckpointState.FINISHED).get();

    // Disable WAL for the selected caches, then write more data so their PDS becomes inconsistent.
    Arrays.stream(cachesToStart)
        .map(CacheConfiguration::getName)
        .filter(name -> corruptFilter.apply(name))
        .forEach(name -> ig0.cluster().disableWal(name));

    for (int k = 1000; k < 2000; k++)
        dfltCache.put(k, k);

    stopGrid(1);

    // Delete checkpoint END markers so node 1 sees an incomplete checkpoint on restart.
    File[] cpMarkers = new File(ig1LfsDir, "cp").listFiles();

    // Fix: listFiles() returns null if the directory is missing or unreadable.
    if (cpMarkers != null) {
        for (File cpMark : cpMarkers) {
            if (cpMark.getName().contains("-END"))
                cpMark.delete();
        }
    }

    // Restart must fail: the node enters maintenance mode because of corrupted PDS.
    assertThrows(log, () -> startGrid(1), Exception.class, null);

    return ig1LfsDir;
}
Use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class GridCommandHandlerTest, method testConnectivityCommandWithFailedNodes.
/**
 * Test connectivity command works via control.sh with one node failing.
 *
 * @throws Exception If failed.
 */
@Test
public void testConnectivityCommandWithFailedNodes() throws Exception {
    UUID okId = UUID.randomUUID();
    UUID failingId = UUID.randomUUID();

    // Shared configuration step: SPI that reports the failing node as unreachable.
    UnaryOperator<IgniteConfiguration> operator = cfg -> {
        cfg.setCommunicationSpi(new TcpCommunicationSpi() {
            /** {@inheritDoc} */
            @Override public IgniteFuture<BitSet> checkConnection(List<ClusterNode> nodes) {
                BitSet reachable = new BitSet();

                int i = 0;

                // Mark every node except the failing one as reachable.
                for (ClusterNode node : nodes) {
                    if (!node.id().equals(failingId))
                        reachable.set(i);

                    i++;
                }

                return new IgniteFinishedFutureImpl<>(reachable);
            }
        });

        return cfg;
    };

    IgniteEx ignite = startGrid("normal", cfg -> {
        operator.apply(cfg);

        cfg.setConsistentId(okId);
        cfg.setNodeId(okId);

        return cfg;
    });

    IgniteEx failure = startGrid("failure", cfg -> {
        operator.apply(cfg);

        cfg.setConsistentId(failingId);
        cfg.setNodeId(failingId);

        return cfg;
    });

    ignite.cluster().state(ACTIVE);
    failure.cluster().state(ACTIVE);

    injectTestSystemOut();

    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "connectivity"));

    String out = testOut.toString();
    String expLine = "There is no connectivity between the following nodes";

    // Strip punctuation/whitespace before comparing to tolerate table formatting in the output.
    assertContains(log, out.replaceAll("[\\W_]+", "").trim(), expLine.replaceAll("[\\W_]+", "").trim());
}
Use of org.apache.ignite.cluster.ClusterState.ACTIVE in the Apache Ignite project:
class GridCommandHandlerTest, method testDiagnosticPageLocksTracker.
/**
 * Test execution of --diagnostic command.
 *
 * @throws Exception If failed.
 */
@Test
public void testDiagnosticPageLocksTracker() throws Exception {
    Ignite ignite = startGrid(0, (UnaryOperator<IgniteConfiguration>)cfg -> cfg.setConsistentId("node0/dump"));
    startGrid(1, (UnaryOperator<IgniteConfiguration>)cfg -> cfg.setConsistentId("node1/dump"));
    startGrid(2, (UnaryOperator<IgniteConfiguration>)cfg -> cfg.setConsistentId("node2/dump"));
    startGrid(3, (UnaryOperator<IgniteConfiguration>)cfg -> cfg.setConsistentId("node3/dump"));

    Collection<ClusterNode> nodes = ignite.cluster().nodes();

    List<ClusterNode> nodes0 = new ArrayList<>(nodes);

    ClusterNode node0 = nodes0.get(0);
    ClusterNode node1 = nodes0.get(1);
    ClusterNode node2 = nodes0.get(2);
    ClusterNode node3 = nodes0.get(3);

    // Fix: use state(ACTIVE) instead of the deprecated cluster().active(true),
    // consistent with the other tests in this class.
    ignite.cluster().state(ACTIVE);

    assertEquals(EXIT_CODE_OK, execute("--diagnostic"));
    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "help"));

    // Dump locks only on connected node to default path.
    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump"));

    // Check file dump in default path.
    checkNumberFiles(defaultDiagnosticDir, 1);

    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump_log"));

    // Dump locks only on connected node to specific path.
    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump", "--path", customDiagnosticDir.getAbsolutePath()));

    // Check file dump in specific path.
    checkNumberFiles(customDiagnosticDir, 1);

    // Dump locks only all nodes.
    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump", "--all"));

    // Current cluster 4 nodes -> 4 files + 1 from previous operation.
    checkNumberFiles(defaultDiagnosticDir, 5);

    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump_log", "--all"));

    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump", "--path", customDiagnosticDir.getAbsolutePath(), "--all"));

    // Current cluster 4 nodes -> 4 files + 1 from previous operation.
    checkNumberFiles(customDiagnosticDir, 5);

    // Dump locks only 2 nodes use nodeIds as arg.
    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump", "--nodes", node0.id().toString() + "," + node2.id().toString()));

    // Dump locks only for 2 nodes -> 2 files + 5 from previous operation.
    checkNumberFiles(defaultDiagnosticDir, 7);

    // Dump locks only for 2 nodes use constIds as arg.
    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump", "--nodes", node0.consistentId().toString() + "," + node2.consistentId().toString()));

    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump_log", "--nodes", node1.id().toString() + "," + node3.id().toString()));

    assertEquals(EXIT_CODE_OK, execute("--diagnostic", "pageLocks", "dump", "--path", customDiagnosticDir.getAbsolutePath(), "--nodes", node1.consistentId().toString() + "," + node3.consistentId().toString()));

    // Dump locks only for 2 nodes -> 2 files + 5 from previous operation.
    checkNumberFiles(customDiagnosticDir, 7);
}
Aggregations