Use of org.apache.ignite.internal.processors.cache.GridCacheProcessor in project ignite by apache.
The class AffinityDistributionLoggingTest, method runAndGetExchangeLog.
/**
* Starts a specified number of Ignite nodes and logs the partition map exchange during the last node's startup.
*
* @param testClientNode Whether the exchange log should be obtained from a client node.
* @return Log of the latest partition map exchange.
* @throws Exception In case of an error.
*/
private String runAndGetExchangeLog(boolean testClientNode) throws Exception {
    assert nodes > 1;

    IgniteEx ignite = startGrids(nodes - 1);

    awaitPartitionMapExchange();

    GridCacheProcessor proc = ignite.context().cache();

    GridCacheContext cctx = proc.context().cacheContext(CU.cacheId(DEFAULT_CACHE_NAME));

    // Inject a string logger into the affinity assignment cache via reflection to capture its output.
    final GridStringLogger log = new GridStringLogger(false, this.log);

    GridAffinityAssignmentCache aff = GridTestUtils.getFieldValue(cctx.affinity(), "aff");

    GridTestUtils.setFieldValue(aff, "log", log);

    if (testClientNode)
        startClientGrid(getConfiguration("client"));
    else
        startGrid(nodes);

    awaitPartitionMapExchange();

    return log.toString();
}
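A helper like this would typically be invoked from individual test methods that assert on the captured exchange log. The sketch below is a hypothetical usage assuming the fixture above; the test name and the deliberately weak assertion are illustrative, not taken from the source:

@Test
public void testExchangeLogIsCaptured() throws Exception {
    String exchangeLog = runAndGetExchangeLog(false);

    // Placeholder check: real tests would assert on concrete affinity-distribution messages.
    assertFalse(exchangeLog.isEmpty());
}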
Use of org.apache.ignite.internal.processors.cache.GridCacheProcessor in project ignite by apache.
The class RestorePartitionStateDuringCheckpointTest, method test.
/**
* @throws Exception If failed.
*/
@Test
public void test() throws Exception {
    IgniteEx ignite0 = startGrids(1);

    ignite0.cluster().state(ClusterState.ACTIVE);

    IgniteCache cache = ignite0.createCache(DEFAULT_CACHE_NAME);

    int partId = ignite0.affinity(DEFAULT_CACHE_NAME).allPartitions(ignite0.localNode())[0];

    log.info("Local partition was determined [node=" + ignite0.name() + ", partId=" + partId + ']');

    int key = F.first(partitionKeys(cache, partId, 1, 0));

    cache.put(key, key);

    GridCacheProcessor cacheProcessor = ignite0.context().cache();

    cacheProcessor.dynamicDestroyCaches(Collections.singleton(DEFAULT_CACHE_NAME), false, false).get();

    assertNull(ignite0.cache(DEFAULT_CACHE_NAME));

    DataRegion region = cacheProcessor.context().database().dataRegion(DataStorageConfiguration.DFLT_DATA_REG_DEFAULT_NAME);

    // Spy on page memory so a checkpoint can be forced while the partition meta page is being write-locked.
    PageMemoryEx pageMemorySpy = spy((PageMemoryEx)region.pageMemory());

    long partMetaId = PageIdUtils.pageId(partId, PageIdAllocator.FLAG_DATA, 0);
    int grpId = CU.cacheId(DEFAULT_CACHE_NAME);

    AtomicBoolean checkpointTriggered = new AtomicBoolean(false);

    doAnswer(invocation -> {
        IgniteCacheOffheapManager.CacheDataStore partDataStore =
            cacheProcessor.cacheGroup(grpId).topology().localPartition(partId).dataStore();

        assertNotNull(partDataStore);

        if (partDataStore.rowStore() != null && checkpointTriggered.compareAndSet(false, true)) {
            info("Write lock is about to be acquired on the partition meta page [pageId=" + invocation.getArgument(2) + ']');

            GridTestUtils.runAsync(() -> {
                try {
                    forceCheckpoint();
                }
                catch (IgniteCheckedException e) {
                    log.error("Checkpoint failed", e);
                }
            });

            doSleep(200);
        }

        return invocation.callRealMethod();
    }).when(pageMemorySpy).writeLock(eq(grpId), eq(partMetaId), anyLong());

    GridTestUtils.setFieldValue(region, "pageMem", pageMemorySpy);

    IgniteInternalFuture startCacheFut = GridTestUtils.runAsync(() -> {
        ignite0.createCache(DEFAULT_CACHE_NAME);
    });

    startCacheFut.get();

    assertTrue(checkpointTriggered.get());

    assertSame(GridDhtPartitionState.OWNING, cacheProcessor.cacheGroup(grpId).topology().localPartition(partId).state());
}
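The doAnswer/spy construction above is plain Mockito rather than an Ignite API: the spy delegates to the real PageMemoryEx, and the answer runs a side effect (kicking off a checkpoint) before calling the real writeLock. A stripped-down, self-contained illustration of the same pattern on an ordinary collection (all names here are placeholders, not Ignite types):

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import java.util.ArrayList;
import java.util.List;

public class SpyInterceptionDemo {
    public static void main(String[] args) {
        List<String> spyList = spy(new ArrayList<String>());

        doAnswer(invocation -> {
            // The side effect runs first, then the call is delegated to the real ArrayList.
            System.out.println("add() intercepted: " + invocation.getArgument(0));

            return invocation.callRealMethod();
        }).when(spyList).add(anyString());

        spyList.add("value"); // prints the message and really adds the element

        System.out.println(spyList); // [value]
    }
}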
Use of org.apache.ignite.internal.processors.cache.GridCacheProcessor in project ignite by apache.
The class PageMemoryTracker, method comparePages.
/**
* Compares page contents.
*
* @param fullPageId Full page ID.
* @param expPage Expected page.
* @param actualPageAddr Actual page address.
* @return {@code True} if pages are equal, {@code False} otherwise.
* @throws IgniteCheckedException If failed.
*/
private boolean comparePages(FullPageId fullPageId, DirectMemoryPage expPage, long actualPageAddr) throws IgniteCheckedException {
    long expPageAddr = expPage.address();

    GridCacheProcessor cacheProc = gridCtx.cache();

    ByteBuffer locBuf = GridUnsafe.wrapPointer(expPageAddr, pageSize);
    ByteBuffer rmtBuf = GridUnsafe.wrapPointer(actualPageAddr, pageSize);

    PageIO pageIo = PageIO.getPageIO(actualPageAddr);

    if (pageIo.getType() == T_DATA_REF_MVCC_LEAF || pageIo.getType() == T_CACHE_ID_DATA_REF_MVCC_LEAF) {
        assert cacheProc.cacheGroup(fullPageId.groupId()).mvccEnabled();

        AbstractDataLeafIO io = (AbstractDataLeafIO)pageIo;

        int cnt = io.getMaxCount(actualPageAddr, pageSize);

        // Reset lock info as there is no sense to log it into WAL.
        for (int i = 0; i < cnt; i++) {
            io.setMvccLockCoordinatorVersion(expPageAddr, i, io.getMvccLockCoordinatorVersion(actualPageAddr, i));
            io.setMvccLockCounter(expPageAddr, i, io.getMvccLockCounter(actualPageAddr, i));
        }
    }

    // Compare only meaningful data.
    if (pageIo instanceof CompactablePageIO) {
        tmpBuf1.clear();
        tmpBuf2.clear();

        ((CompactablePageIO)pageIo).compactPage(locBuf, tmpBuf1, pageSize);
        ((CompactablePageIO)pageIo).compactPage(rmtBuf, tmpBuf2, pageSize);

        locBuf = tmpBuf1;
        rmtBuf = tmpBuf2;
    }

    if (!locBuf.equals(rmtBuf)) {
        log.error("Page buffers are not equal [fullPageId=" + fullPageId + ", pageIo=" + pageIo + ']');

        dumpDiff(locBuf, rmtBuf);
        dumpHistory(expPage);

        return false;
    }

    return true;
}
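The final check leans on standard java.nio semantics: two byte buffers are equal only if their remaining bytes match pointwise, which is why both sides are first reduced to the compacted, meaningful region. A minimal plain-Java illustration of that equality contract (unrelated to the Ignite page layout):

import java.nio.ByteBuffer;

public class BufferEqualityDemo {
    public static void main(String[] args) {
        ByteBuffer a = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
        ByteBuffer b = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});

        // equals() compares only the remaining bytes (position..limit), not capacity or mark.
        System.out.println(a.equals(b)); // true

        b.position(1);

        System.out.println(a.equals(b)); // false: the remaining regions now differ
    }
}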
Use of org.apache.ignite.internal.processors.cache.GridCacheProcessor in project ignite by apache.
The class WarmUpSelfTest, method testAvailableWarmUpStrategies.
/**
* Test verifies that the available warm-up strategies are correct.
* <p>
* Steps:
* 1) Start a node without plugins;
* 2) Check that only the basic strategies are available;
* 3) Restart the node with a test plugin that contributes additional strategies;
* 4) Check that the basic strategies plus those from the plugin are available.
*
* @throws Exception If failed.
*/
@Test
public void testAvailableWarmUpStrategies() throws Exception {
    IgniteEx n = startGrid(getConfiguration(getTestIgniteInstanceName(0)).setPluginProviders());

    GridCacheProcessor cacheProc = n.context().cache();

    Map<Class<? extends WarmUpConfiguration>, WarmUpStrategy> expStrats =
        Stream.of(new NoOpWarmUpStrategy(), new LoadAllWarmUpStrategy(log, cacheProc::cacheGroups))
            .collect(toMap(WarmUpStrategy::configClass, identity()));

    Map<Class<? extends WarmUpConfiguration>, WarmUpStrategy> actStrats = CU.warmUpStrategies(n.context());

    assertEquals(expStrats, actStrats);

    stopAllGrids();

    n = startGrid(0);

    WarmUpTestPluginProvider pluginProvider = (WarmUpTestPluginProvider)n.configuration().getPluginProviders()[0];

    pluginProvider.strats.forEach(strat -> assertNull(expStrats.put(strat.configClass(), strat)));

    actStrats = CU.warmUpStrategies(n.context());

    assertEquals(expStrats, actStrats);
}
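Building expStrats above uses a common java.util.stream idiom: key a stream of objects by a derived property and keep the objects themselves as values via Collectors.toMap with Function.identity. A small generic demonstration with plain strings (not Ignite types):

import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toMap;

import java.util.Map;
import java.util.stream.Stream;

public class ToMapIdentityDemo {
    public static void main(String[] args) {
        // Key each name by its length; toMap throws on duplicate keys, so keys must be distinct.
        Map<Integer, String> byLength = Stream.of("no-op", "load-all")
            .collect(toMap(String::length, identity()));

        System.out.println(byLength); // {5=no-op, 8=load-all}
    }
}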
Use of org.apache.ignite.internal.processors.cache.GridCacheProcessor in project ignite by apache.
The class WalRecoveryTxLogicalRecordsTest, method getFreeListData.
/**
* @param ignite Node.
* @param cacheName Cache name.
* @return Cache free-list data (partition number to a pair of the bucket-to-tail-IDs map and per-bucket sizes).
* @throws IgniteCheckedException If failed.
*/
private Map<Integer, T2<Map<Integer, long[]>, int[]>> getFreeListData(Ignite ignite, String cacheName) throws IgniteCheckedException {
    GridCacheProcessor cacheProc = ((IgniteEx)ignite).context().cache();

    GridCacheContext ctx = cacheProc.cache(cacheName).context();

    List<GridDhtLocalPartition> parts = ctx.topology().localPartitions();

    assertTrue(!parts.isEmpty());
    assertEquals(ctx.affinity().partitions(), parts.size());

    Map<Integer, T2<Map<Integer, long[]>, int[]>> res = new HashMap<>();

    boolean foundNonEmpty = false;
    boolean foundTails = false;

    cacheProc.context().database().checkpointReadLock();

    try {
        for (GridDhtLocalPartition part : parts) {
            AbstractFreeList freeList = (AbstractFreeList)part.dataStore().rowStore().freeList();

            if (freeList == null)
                // Lazy store.
                continue;

            // Flush free-list onheap cache to page memory.
            freeList.saveMetadata(IoStatisticsHolderNoOp.INSTANCE);

            AtomicReferenceArray<PagesList.Stripe[]> buckets = getFieldValue(freeList, AbstractFreeList.class, "buckets");
            AtomicLongArray bucketsSize = getFieldValue(freeList, PagesList.class, "bucketsSize");

            assertNotNull(buckets);
            assertNotNull(bucketsSize);
            assertTrue(buckets.length() > 0);
            assertEquals(bucketsSize.length(), buckets.length());

            Map<Integer, long[]> tailsPerBucket = new HashMap<>();

            for (int i = 0; i < buckets.length(); i++) {
                PagesList.Stripe[] tails = buckets.get(i);

                long[] ids = null;

                if (tails != null) {
                    ids = new long[tails.length];

                    for (int j = 0; j < tails.length; j++)
                        ids[j] = tails[j].tailId;
                }

                tailsPerBucket.put(i, ids);

                if (tails != null) {
                    assertTrue(tails.length > 0);

                    foundTails = true;
                }
            }

            int[] cntsPerBucket = new int[bucketsSize.length()];

            for (int i = 0; i < bucketsSize.length(); i++) {
                cntsPerBucket[i] = (int)bucketsSize.get(i);

                if (cntsPerBucket[i] > 0)
                    foundNonEmpty = true;
            }

            res.put(part.id(), new T2<>(tailsPerBucket, cntsPerBucket));
        }
    }
    finally {
        cacheProc.context().database().checkpointReadUnlock();
    }

    assertTrue(foundNonEmpty);
    assertTrue(foundTails);

    return res;
}
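A plausible way for a WAL-recovery test to use this helper is to snapshot the free-list state, restart the node so logical WAL records are replayed, and compare the snapshots. The flow below is a hedged sketch, not the actual test body; CACHE_NAME is a placeholder, and the long[] bucket tails would need element-wise comparison because Map.equals compares arrays by reference:

Map<Integer, T2<Map<Integer, long[]>, int[]>> before = getFreeListData(ignite, CACHE_NAME);

stopGrid(0);

Ignite restarted = startGrid(0); // logical WAL records are replayed during startup

Map<Integer, T2<Map<Integer, long[]>, int[]>> after = getFreeListData(restarted, CACHE_NAME);

assertEquals(before.keySet(), after.keySet());

for (Integer partId : before.keySet())
    // Compare the per-bucket size counters explicitly; arrays are not compared by content in Map.equals.
    assertTrue(Arrays.equals(before.get(partId).get2(), after.get(partId).get2()));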