Use of org.apache.ignite.internal.IgniteKernal in project ignite by apache.
The class GridCachePutAllFailoverSelfTest, method checkPutAllFailover:
/**
 * Tests the putAll() method along with failover and cache backups.
 *
 * Checks that the resulting primary cache size is the same as expected.
 *
 * @param near Near cache enabled flag.
 * @param workerCnt Worker count.
 * @param shutdownCnt Shutdown count.
 * @throws Exception If failed.
 */
public void checkPutAllFailover(boolean near, int workerCnt, int shutdownCnt) throws Exception {
    nearEnabled = near;
    backups = shutdownCnt;

    Collection<Integer> testKeys = generateTestKeys();

    final Ignite master = startGrid(MASTER);

    List<Ignite> workers = new ArrayList<>(workerCnt);

    for (int i = 1; i <= workerCnt; i++)
        workers.add(startGrid("worker" + i));

    info("Master: " + master.cluster().localNode().id());

    List<Ignite> runningWorkers = new ArrayList<>(workerCnt);

    for (int i = 1; i <= workerCnt; i++) {
        UUID id = workers.get(i - 1).cluster().localNode().id();

        info(String.format("Worker%d - %s", i, id));

        runningWorkers.add(workers.get(i - 1));
    }

    try {
        // Dummy call to fetch affinity function from remote node.
        master.affinity(CACHE_NAME).mapKeyToNode("Dummy");

        Random rnd = new Random();

        Collection<Integer> dataChunk = new ArrayList<>(DATA_CHUNK_SIZE);
        int entryCntr = 0;
        int chunkCntr = 0;

        final AtomicBoolean jobFailed = new AtomicBoolean(false);

        int failoverPushGap = 0;

        final CountDownLatch emptyLatch = new CountDownLatch(1);

        final AtomicBoolean inputExhausted = new AtomicBoolean();

        IgniteCompute comp = compute(master.cluster().forPredicate(workerNodesFilter));

        for (Integer key : testKeys) {
            dataChunk.add(key);

            entryCntr++;

            if (entryCntr == DATA_CHUNK_SIZE) { // Time to send data.
                chunkCntr++;

                assert dataChunk.size() == DATA_CHUNK_SIZE;

                log.info("Pushing data chunk [chunkNo=" + chunkCntr + "]");

                ComputeTaskFuture<Void> fut = comp.executeAsync(
                    new GridCachePutAllTask(
                        runningWorkers.get(rnd.nextInt(runningWorkers.size())).cluster().localNode().id(),
                        CACHE_NAME),
                    dataChunk);

                resQueue.put(fut); // Blocks if queue is full.

                fut.listen(new CI1<IgniteFuture<Void>>() {
                    @Override public void apply(IgniteFuture<Void> f) {
                        ComputeTaskFuture<?> taskFut = (ComputeTaskFuture<?>)f;

                        try {
                            // If something went wrong, we'll get the exception here.
                            taskFut.get();
                        }
                        catch (IgniteException e) {
                            log.error("Job failed", e);

                            jobFailed.set(true);
                        }

                        // Remove the completed future from the queue to allow other jobs to proceed.
                        resQueue.remove(taskFut);

                        if (inputExhausted.get() && resQueue.isEmpty())
                            emptyLatch.countDown();
                    }
                });

                entryCntr = 0;
                dataChunk = new ArrayList<>(DATA_CHUNK_SIZE);

                if (chunkCntr >= FAIL_ON_CHUNK_NO) {
                    if (workerCnt - runningWorkers.size() < shutdownCnt) {
                        if (failoverPushGap > 0)
                            failoverPushGap--;
                        else {
                            Ignite victim = runningWorkers.remove(0);

                            info("Shutting down node: " + victim.cluster().localNode().id());

                            stopGrid(victim.name());

                            // Fail next node after some jobs have been pushed.
                            failoverPushGap = FAILOVER_PUSH_GAP;
                        }
                    }
                }
            }
        }

        inputExhausted.set(true);

        if (resQueue.isEmpty())
            emptyLatch.countDown();

        assert chunkCntr == TEST_MAP_SIZE / DATA_CHUNK_SIZE;

        // Wait for the queue to empty.
        log.info("Waiting for empty queue...");

        boolean failedWait = false;

        if (!emptyLatch.await(AWAIT_TIMEOUT_SEC, TimeUnit.SECONDS)) {
            info(">>> Failed to wait for queue to empty.");

            failedWait = true;
        }

        if (!failedWait)
            assertFalse("One or more jobs have failed.", jobFailed.get());

        Collection<Integer> absentKeys = findAbsentKeys(runningWorkers.get(0), testKeys);

        if (!failedWait && !absentKeys.isEmpty()) {
            // Give some time to the preloader.
            U.sleep(20000);

            absentKeys = findAbsentKeys(runningWorkers.get(0), testKeys);
        }

        info(">>> Absent keys: " + absentKeys);

        if (!F.isEmpty(absentKeys)) {
            for (Ignite g : runningWorkers) {
                IgniteKernal k = (IgniteKernal)g;

                info(">>>> Entries on node: " + k.getLocalNodeId());

                GridCacheAdapter<Object, Object> cache = k.internalCache("partitioned");

                for (Integer key : absentKeys) {
                    GridCacheEntryEx entry = cache.peekEx(key);

                    if (entry != null)
                        info(" >>> " + entry);
                    if (cache.context().isNear()) {
                        // Also check the underlying DHT cache entry for near caches.
                        GridCacheEntryEx entry0 = cache.context().near().dht().peekEx(key);

                        if (entry0 != null)
                            info(" >>> " + entry0);
                    }
                }

                info("");
            }
        }

        assertTrue(absentKeys.isEmpty());

        // Actual primary cache size.
        int primaryCacheSize = 0;

        for (Ignite g : runningWorkers) {
            info("Cache size [node=" + g.name() +
                ", localSize=" + g.cache(CACHE_NAME).localSize() +
                ", localPrimarySize=" + g.cache(CACHE_NAME).localSize(PRIMARY) + ']');

            primaryCacheSize += ((IgniteKernal)g).internalCache(CACHE_NAME).primarySize();
        }

        assertEquals(TEST_MAP_SIZE, primaryCacheSize);

        for (Ignite g : runningWorkers)
            assertEquals(TEST_MAP_SIZE, g.cache(CACHE_NAME).size(PRIMARY));
    }
    finally {
        stopAllGrids();
    }
}
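The core of the loop above is a backpressure pattern: at most a fixed number of putAll task futures may be in flight, enforced by the bounded resQueue, and emptyLatch fires once the input is exhausted and the queue has drained. Below is a minimal standalone sketch of the same pattern, with CompletableFuture and an ExecutorService standing in for IgniteCompute; the names CHUNK_SIZE, MAX_IN_FLIGHT and sendChunk are illustrative, not Ignite API.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;

public class ChunkedSubmitSketch {
    static final int CHUNK_SIZE = 100;   // Plays the role of DATA_CHUNK_SIZE.
    static final int MAX_IN_FLIGHT = 8;  // Bounded queue capacity gives backpressure.

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        BlockingQueue<CompletableFuture<Void>> resQueue = new LinkedBlockingQueue<>(MAX_IN_FLIGHT);

        AtomicBoolean jobFailed = new AtomicBoolean();
        AtomicBoolean inputExhausted = new AtomicBoolean();
        CountDownLatch emptyLatch = new CountDownLatch(1);

        List<Integer> chunk = new ArrayList<>(CHUNK_SIZE);

        for (int key = 0; key < 10_000; key++) {
            chunk.add(key);

            if (chunk.size() == CHUNK_SIZE) {
                List<Integer> data = chunk;
                chunk = new ArrayList<>(CHUNK_SIZE);

                CompletableFuture<Void> fut = CompletableFuture.runAsync(() -> sendChunk(data), pool);

                resQueue.put(fut); // Blocks while MAX_IN_FLIGHT chunks are pending.

                fut.whenComplete((res, err) -> {
                    if (err != null)
                        jobFailed.set(true);

                    resQueue.remove(fut); // Frees a slot so the producer can continue.

                    if (inputExhausted.get() && resQueue.isEmpty())
                        emptyLatch.countDown();
                });
            }
        }

        inputExhausted.set(true);

        if (resQueue.isEmpty())
            emptyLatch.countDown();

        emptyLatch.await();
        pool.shutdown();

        System.out.println("jobFailed=" + jobFailed.get());
    }

    static void sendChunk(List<Integer> data) {
        // A real implementation would call cache.putAll(...) here.
    }
}

The bounded LinkedBlockingQueue is what throttles the producer: put() blocks until a completed future's callback removes an earlier entry, exactly as resQueue does in the test.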
Use of org.apache.ignite.internal.IgniteKernal in project ignite by apache.
The class GridCachePartitionedGetSelfTest, method prepare:
/**
 * Puts a value to the primary node and registers a listener
 * that sets the {@link #received} flag to {@code true}
 * if a {@link GridNearSingleGetRequest} is received on the primary node.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("deprecation")
private void prepare() throws Exception {
    for (int i = 0; i < GRID_CNT; i++) {
        Ignite g = grid(i);

        if (g.affinity(DEFAULT_CACHE_NAME).isPrimary(g.cluster().localNode(), KEY)) {
            info("Primary node: " + g.cluster().localNode().id());

            // Put value.
            g.cache(DEFAULT_CACHE_NAME).put(KEY, VAL);

            // Register listener.
            ((IgniteKernal)g).context().io().addMessageListener(TOPIC_CACHE, new GridMessageListener() {
                @Override public void onMessage(UUID nodeId, Object msg) {
                    info("Received message from node [nodeId=" + nodeId + ", msg=" + msg + ']');

                    if (msg instanceof GridNearSingleGetRequest) {
                        info("Setting flag: " + System.identityHashCode(received));

                        received.set(true);
                    }
                }
            });

            break;
        }
    }
}
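A hedged sketch of how the received flag set in prepare() might then be exercised; this checkGet() is illustrative, not the test's actual code. The read is issued from a node that does not own the key, so the get has to travel to the primary as a near get request, and the listener flips the flag.

private void checkGet() throws Exception {
    for (int i = 0; i < GRID_CNT; i++) {
        Ignite g = grid(i);

        // Read from a node that does NOT own the key.
        if (!g.affinity(DEFAULT_CACHE_NAME).isPrimary(g.cluster().localNode(), KEY)) {
            assertEquals(VAL, g.cache(DEFAULT_CACHE_NAME).get(KEY));

            break;
        }
    }

    // The listener fires on the primary's message thread, so poll briefly;
    // waitForCondition retries the predicate until it holds or the timeout expires.
    assertTrue(GridTestUtils.waitForCondition(received::get, 5_000));
}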
Use of org.apache.ignite.internal.IgniteKernal in project ignite by apache.
The class GridCacheNestedTxAbstractTest, method testTwoTx:
/**
 * Checks that a transaction started on the test thread is not
 * visible to closures executed on other threads.
 *
 * @throws Exception If failed.
 */
public void testTwoTx() throws Exception {
    final IgniteCache<String, Integer> c = grid(0).cache(DEFAULT_CACHE_NAME);

    GridKernalContext ctx = ((IgniteKernal)grid(0)).context();

    c.put(CNTR_KEY, 0);

    for (int i = 0; i < 10; i++) {
        try (Transaction tx = grid(0).transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            c.get(CNTR_KEY);

            // The closure runs on a pool thread, which must not see the user transaction.
            ctx.closure().callLocalSafe(new Callable<Boolean>() {
                @Override public Boolean call() throws Exception {
                    assertFalse(((GridCacheAdapter)c).context().tm().inUserTx());
                    assertNull(((GridCacheAdapter)c).context().tm().userTx());

                    return true;
                }
            }, true);

            tx.commit();
        }
    }
}
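The assertions inside the closure pass because Ignite transactions are bound to the thread that started them. The same thread-locality can be observed through the public API alone; a hedged sketch, assuming an Ignite instance ignite and the cache c from the test above, inside a method declared to throw Exception:

try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
    c.get(CNTR_KEY);

    ExecutorService exec = Executors.newSingleThreadExecutor();

    // transactions().tx() returns the transaction bound to the calling thread, if any.
    Future<Transaction> other = exec.submit(() -> ignite.transactions().tx());

    assertNull(other.get());                   // No transaction on the foreign thread.
    assertNotNull(ignite.transactions().tx()); // Still active on this thread.

    tx.commit();

    exec.shutdown();
}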
Use of org.apache.ignite.internal.IgniteKernal in project ignite by apache.
The class IgniteDynamicCacheStartSelfTest, method testClientCache:
/**
 * @throws Exception If failed.
 */
public void testClientCache() throws Exception {
    try {
        testAttribute = false;

        startGrid(nodeCount());

        final IgniteEx kernal = grid(0);

        CacheConfiguration ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME);

        ccfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
        ccfg.setName(DYNAMIC_CACHE_NAME);
        ccfg.setNodeFilter(NODE_FILTER);

        kernal.createCache(ccfg);

        // The filtered-out node must not host the cache internally.
        GridTestUtils.assertThrows(log, new Callable<Object>() {
            @Override public Object call() throws Exception {
                IgniteKernal ignite = (IgniteKernal)grid(nodeCount());

                return ignite.getCache(DYNAMIC_CACHE_NAME);
            }
        }, IllegalArgumentException.class, null);

        // Should obtain client cache on new node.
        IgniteCache<Object, Object> clientCache = ignite(nodeCount()).cache(DYNAMIC_CACHE_NAME);

        clientCache.put("1", "1");

        for (int g = 0; g < nodeCount() + 1; g++)
            assertEquals("1", ignite(g).cache(DYNAMIC_CACHE_NAME).get("1"));

        kernal.destroyCache(DYNAMIC_CACHE_NAME);
    }
    finally {
        stopGrid(nodeCount());
    }
}
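NODE_FILTER is what keeps the cache off the newly started node: the cache is only deployed where the filter matches, so IgniteKernal.getCache(...) throws IllegalArgumentException there, while the public cache(...) call still returns a client-side view. A hedged sketch of what such a filter might look like; the attribute name "test.attribute" and the cache name are illustrative, not the test's actual constants:

// Admit only nodes that carry a marker attribute, set via
// IgniteConfiguration.setUserAttributes(...) at node startup.
IgnitePredicate<ClusterNode> nodeFilter = new IgnitePredicate<ClusterNode>() {
    @Override public boolean apply(ClusterNode n) {
        return Boolean.TRUE.equals(n.attribute("test.attribute"));
    }
};

CacheConfiguration<Object, Object> ccfg = new CacheConfiguration<>("dynamicCache");

ccfg.setNodeFilter(nodeFilter);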
Use of org.apache.ignite.internal.IgniteKernal in project ignite by apache.
The class IgniteDynamicCacheStartSelfTest, method testDeployFilter:
/**
 * @throws Exception If failed.
 */
public void testDeployFilter() throws Exception {
    try {
        testAttribute = false;

        startGrid(nodeCount());

        final IgniteEx kernal = grid(0);

        CacheConfiguration ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME);

        ccfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
        ccfg.setName(DYNAMIC_CACHE_NAME);
        ccfg.setNodeFilter(NODE_FILTER);

        kernal.createCache(ccfg);

        startGrid(nodeCount() + 1);

        for (int i = 0; i < 100; i++)
            grid(0).cache(DYNAMIC_CACHE_NAME).put(i, i);

        for (int i = 0; i < 100; i++)
            assertEquals(i, grid(1).cache(DYNAMIC_CACHE_NAME).get(i));

        info("Affinity nodes: " + grid(0).affinity(DYNAMIC_CACHE_NAME).mapKeyToPrimaryAndBackups(0));

        // The filtered-out nodes must never be primary or backup for any key.
        for (int g = 0; g < nodeCount(); g++) {
            for (int i = 0; i < 100; i++) {
                assertFalse(grid(g).affinity(DYNAMIC_CACHE_NAME).mapKeyToPrimaryAndBackups(i)
                    .contains(grid(nodeCount()).cluster().localNode()));
                assertFalse(grid(g).affinity(DYNAMIC_CACHE_NAME).mapKeyToPrimaryAndBackups(i)
                    .contains(grid(nodeCount() + 1).cluster().localNode()));
            }
        }

        // Check that the cache is not deployed on the new nodes after undeploy.
        for (int g = 0; g < nodeCount() + 2; g++) {
            final IgniteKernal kernal0 = (IgniteKernal)grid(g);

            for (IgniteInternalFuture f : kernal0.context().cache().context().exchange().exchangeFutures())
                f.get();

            if (g < nodeCount())
                assertNotNull(grid(g).cache(DYNAMIC_CACHE_NAME));
            else
                GridTestUtils.assertThrows(log, new Callable<Object>() {
                    @Override public Object call() throws Exception {
                        return kernal0.getCache(DYNAMIC_CACHE_NAME);
                    }
                }, IllegalArgumentException.class, null);
        }

        kernal.destroyCache(DYNAMIC_CACHE_NAME);

        stopGrid(nodeCount() + 1);
        stopGrid(nodeCount());
    }
    finally {
        testAttribute = true;
    }
}
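Rather than draining exchange futures through IgniteKernal internals, the set of nodes that host a cache can also be checked through the public cluster-group API. A hedged near-equivalent of the affinity assertions above, reusing the test's grid helpers:

// Nodes on which the cache (respecting its node filter) is deployed.
Collection<ClusterNode> cacheNodes = grid(0).cluster().forCacheNodes(DYNAMIC_CACHE_NAME).nodes();

assertFalse(cacheNodes.contains(grid(nodeCount()).cluster().localNode()));
assertFalse(cacheNodes.contains(grid(nodeCount() + 1).cluster().localNode()));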