use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
the class DurableBackgroundCleanupIndexTreeTask method execute.
/**
 * Task execution.
 *
 * @param ctx Kernal context.
 */
private void execute(GridKernalContext ctx) {
    List<InlineIndexTree> trees0 = trees;

    if (trees0 == null) {
        trees0 = new ArrayList<>(rootPages.size());

        GridCacheContext cctx = ctx.cache().context().cacheContext(CU.cacheId(cacheName));

        int grpId = CU.cacheGroupId(cacheName, cacheGrpName);

        CacheGroupContext grpCtx = ctx.cache().cacheGroup(grpId);

        // If group context is null, it means that group doesn't exist and we don't need this task anymore.
        if (grpCtx == null)
            return;

        IgniteCacheOffheapManager offheap = grpCtx.offheap();

        if (treeName != null) {
            ctx.cache().context().database().checkpointReadLock();

            try {
                int cacheId = CU.cacheId(cacheName);

                for (int segment = 0; segment < rootPages.size(); segment++) {
                    try {
                        RootPage rootPage = offheap.findRootPageForIndex(cacheId, treeName, segment);

                        if (rootPage != null && rootPages.get(segment) == rootPage.pageId().pageId())
                            offheap.dropRootPageForIndex(cacheId, treeName, segment);
                    }
                    catch (IgniteCheckedException e) {
                        throw new IgniteException(e);
                    }
                }
            }
            finally {
                ctx.cache().context().database().checkpointReadUnlock();
            }
        }

        IoStatisticsHolderIndex stats = new IoStatisticsHolderIndex(
            SORTED_INDEX, cctx.name(), idxName, cctx.kernalContext().metric());

        PageMemory pageMem = grpCtx.dataRegion().pageMemory();

        for (int i = 0; i < rootPages.size(); i++) {
            Long rootPage = rootPages.get(i);

            assert rootPage != null;

            if (skipDeletedRoot(grpId, pageMem, rootPage)) {
                ctx.log(getClass()).warning(S.toString("Skipping deletion of the index tree",
                    "cacheGrpName", cacheGrpName, false,
                    "cacheName", cacheName, false,
                    "idxName", idxName, false,
                    "segment", i, false,
                    "rootPageId", PageIdUtils.toDetailString(rootPage), false));

                continue;
            }
            // Create a stub tree over the existing root page, because we are just going to free
            // the memory pages occupied by the tree structure.
            try {
                String treeName = "deletedTree_" + i + "_" + name();

                InlineIndexTree tree = new InlineIndexTree(
                    null,
                    grpCtx,
                    treeName,
                    cctx.offheap(),
                    cctx.offheap().reuseListForIndex(treeName),
                    cctx.dataRegion().pageMemory(),
                    PageIoResolver.DEFAULT_PAGE_IO_RESOLVER,
                    rootPage,
                    false,
                    0,
                    0,
                    new IndexKeyTypeSettings(),
                    null,
                    stats,
                    new NoopRowHandlerFactory(),
                    null);

                trees0.add(tree);
            }
            catch (IgniteCheckedException e) {
                throw new IgniteException(e);
            }
        }
    }

    ctx.cache().context().database().checkpointReadLock();

    try {
        for (int i = 0; i < trees0.size(); i++) {
            BPlusTree tree = trees0.get(i);

            try {
                tree.destroy(null, true);
            }
            catch (IgniteCheckedException e) {
                throw new IgniteException(e);
            }
        }
    }
    finally {
        ctx.cache().context().database().checkpointReadUnlock();
    }
}
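For reference, the group lookup at the top of this method is the standard way to resolve a CacheGroupContext from a cache name and an optional group name. Below is a minimal sketch of that pattern in isolation, assuming a GridKernalContext ctx is available; the class and helper names are made up for illustration.

import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.jetbrains.annotations.Nullable;

class CacheGroupLookup {
    /**
     * Resolves the group context for the given cache, or returns {@code null}
     * if the group no longer exists (e.g. the cache has been destroyed).
     */
    @Nullable static CacheGroupContext resolveGroup(
        GridKernalContext ctx,
        String cacheName,
        @Nullable String cacheGrpName
    ) {
        // Group id is derived from the group name if one is set, otherwise from the cache name.
        int grpId = CU.cacheGroupId(cacheName, cacheGrpName);

        // A null result means the group is already gone, so pending cleanup work can be dropped,
        // which is exactly how the task above bails out early.
        return ctx.cache().cacheGroup(grpId);
    }
}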
use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
the class GridDhtTopologyFutureAdapter method validateCache.
/**
 * {@inheritDoc}
 */
@Override @Nullable public final CacheInvalidStateException validateCache(
    GridCacheContext cctx,
    boolean recovery,
    boolean read,
    @Nullable Object key,
    @Nullable Collection<?> keys
) {
    assert isDone() : this;

    Throwable err = error();

    if (err != null)
        return new CacheInvalidStateException(err);

    if (!clusterIsActive) {
        return new CacheInvalidStateException(
            "Failed to perform cache operation (cluster is not activated): " + cctx.name());
    }

    if (cctx.cache() == null)
        return new CacheInvalidStateException("Failed to perform cache operation (cache is stopped): " + cctx.name());

    CacheGroupContext grp = cctx.group();

    if (cctx.shared().readOnlyMode() && !read && !isSystemCache(cctx.name())) {
        return new CacheInvalidStateException(
            new IgniteClusterReadOnlyException(format(CLUSTER_READ_ONLY_ERROR_MSG, grp.name(), cctx.name())));
    }

    CacheGroupValidation validation = grpValidRes.get(grp.groupId());

    if (validation == null)
        return null;

    if (!read && !validation.isValid()) {
        return new CacheInvalidStateException("Failed to perform cache operation " +
            "(cache topology is not valid): " + cctx.name());
    }

    if (!validation.hasLostPartitions())
        return null;

    PartitionLossPolicy lossPlc = grp.config().getPartitionLossPolicy();

    if (!read && (lossPlc == READ_ONLY_SAFE || lossPlc == READ_ONLY_ALL)) {
        return new CacheInvalidStateException(
            "Failed to write to cache (cache is moved to a read-only state): " + cctx.name());
    }

    // Reads from any partition are allowed in recovery mode.
    if (read && recovery)
        return null;

    if (key != null)
        return validateKey(cctx, key, validation.lostPartitions());

    if (keys != null) {
        for (Object key0 : keys) {
            final CacheInvalidStateException res = validateKey(cctx, key0, validation.lostPartitions());

            if (res != null)
                return res;
        }

        return null;
    }

    return new CacheInvalidStateException("Failed to perform a cache operation " +
        "(the cache has lost partitions [cacheGrp=" + cctx.group().name() + ", cache=" + cctx.name() + ']');
}
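The partition-loss branch above reduces to one question per operation: is this a write against a group whose loss policy only permits reads? A minimal sketch of that check in isolation is shown below, assuming a CacheGroupContext obtained as in the snippet; the class and helper names are made up.

import org.apache.ignite.cache.PartitionLossPolicy;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;

import static org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_ALL;
import static org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_SAFE;

class LossPolicyCheck {
    /**
     * Returns {@code true} if a write must be rejected because the group has lost
     * partitions and its partition loss policy only allows reads.
     */
    static boolean writesBlockedByLossPolicy(CacheGroupContext grp, boolean read, boolean hasLostParts) {
        // Reads are never blocked by this rule, and groups without lost partitions are unaffected.
        if (read || !hasLostParts)
            return false;

        PartitionLossPolicy lossPlc = grp.config().getPartitionLossPolicy();

        return lossPlc == READ_ONLY_SAFE || lossPlc == READ_ONLY_ALL;
    }
}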
use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
the class UpgradePendingTreeToPerPartitionTask method call.
/**
 * {@inheritDoc}
 */
@Override public Boolean call() throws IgniteException {
    GridCacheSharedContext<Object, Object> sharedCtx = node.context().cache().context();

    for (CacheGroupContext grp : sharedCtx.cache().cacheGroups()) {
        if (!grp.persistenceEnabled() || !grp.affinityNode()) {
            if (!grp.persistenceEnabled())
                log.info("Skip pending tree upgrade for non-persistent cache group: [grpId=" + grp.groupId() +
                    ", grpName=" + grp.name() + ']');
            else
                log.info("Skip pending tree upgrade on non-affinity node for cache group: [grpId=" + grp.groupId() +
                    ", grpName=" + grp.name() + ']');

            continue;
        }

        try {
            processCacheGroup(grp);
        }
        catch (Exception ex) {
            if (Thread.interrupted() || X.hasCause(ex, InterruptedException.class))
                log.info("Upgrade pending tree has been cancelled.");
            else
                log.warning("Failed to upgrade pending tree for cache group: [grpId=" + grp.groupId() +
                    ", grpName=" + grp.name() + ']', ex);

            return false;
        }

        if (Thread.interrupted()) {
            log.info("Upgrade pending tree has been cancelled.");

            return false;
        }
    }

    log.info("All pending trees upgraded successfully.");

    return true;
}
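The same group-filtering pattern can be reused on its own: iterate every CacheGroupContext and keep only persistent groups that the local node actually hosts. A minimal sketch follows, assuming an IgniteEx instance is at hand; the class and helper names are made up.

import java.util.ArrayList;
import java.util.List;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;

class GroupFilter {
    /** Returns cache groups that are persistent and have partitions assigned to the local node. */
    static List<CacheGroupContext> persistentLocalGroups(IgniteEx ignite) {
        List<CacheGroupContext> res = new ArrayList<>();

        for (CacheGroupContext grp : ignite.context().cache().cacheGroups()) {
            // Same filter as in the snippet: skip in-memory groups and non-affinity nodes.
            if (grp.persistenceEnabled() && grp.affinityNode())
                res.add(grp);
        }

        return res;
    }
}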
use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
the class FailureProcessor method process.
/**
 * Processes a failure according to the given failure handler.
 *
 * @param failureCtx Failure context.
 * @param hnd Failure handler.
 * @return {@code True} if this call led to Ignite node invalidation.
 */
public synchronized boolean process(FailureContext failureCtx, FailureHandler hnd) {
    assert failureCtx != null;
    assert hnd != null;

    // Node already terminating, no reason to process more errors.
    if (this.failureCtx != null)
        return false;

    if (failureTypeIgnored(failureCtx, hnd)) {
        U.quietAndWarn(ignite.log(), IGNORED_FAILURE_LOG_MSG +
            "[hnd=" + hnd + ", failureCtx=" + failureCtx + ']', failureCtx.error());
    }
    else {
        U.error(ignite.log(), FAILURE_LOG_MSG +
            "[hnd=" + hnd + ", failureCtx=" + failureCtx + ']', failureCtx.error());
    }

    if (reserveBuf != null && X.hasCause(failureCtx.error(), OutOfMemoryError.class))
        reserveBuf = null;

    CorruptedDataStructureException corruptedDataStructureEx =
        X.cause(failureCtx.error(), CorruptedDataStructureException.class);

    if (corruptedDataStructureEx != null) {
        CacheGroupContext grpCtx = ctx.cache().cacheGroup(corruptedDataStructureEx.groupId());

        if (grpCtx != null && grpCtx.dataRegion() != null) {
            if (grpCtx.dataRegion().config().isPersistenceEnabled()) {
                log.error("A critical problem with persistence data structures was detected." +
                    " Please make backup of persistence storage and WAL files for further analysis." +
                    " Persistence storage path: " + ctx.config().getDataStorageConfiguration().getStoragePath() +
                    " WAL path: " + ctx.config().getDataStorageConfiguration().getWalPath() +
                    " WAL archive path: " + ctx.config().getDataStorageConfiguration().getWalArchivePath());
            }
            else
                log.error("A critical problem with in-memory data structures was detected.");
        }
    }

    if (igniteDumpThreadsOnFailure && !throttleThreadDump(failureCtx.type()))
        U.dumpThreads(log, !failureTypeIgnored(failureCtx, hnd));

    DiagnosticProcessor diagnosticProcessor = ctx.diagnostic();

    if (diagnosticProcessor != null)
        diagnosticProcessor.onFailure(failureCtx);

    boolean invalidated = hnd.onFailure(ignite, failureCtx);

    if (invalidated) {
        this.failureCtx = failureCtx;

        log.error("Ignite node is in invalid state due to a critical failure.");
    }

    return invalidated;
}
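The corruption branch above hinges on whether the affected group lives in a persistent data region, since only then is a storage/WAL backup worth suggesting. A minimal sketch of that check, assuming a GridKernalContext and a group id taken from the corruption exception; the class and helper names are made up.

import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;

class CorruptionDiagnostics {
    /**
     * Returns {@code true} if the group with the given id exists and is backed by
     * a persistent data region, i.e. its pages survive restarts and are worth backing up.
     */
    static boolean groupIsPersistent(GridKernalContext ctx, int grpId) {
        CacheGroupContext grpCtx = ctx.cache().cacheGroup(grpId);

        return grpCtx != null
            && grpCtx.dataRegion() != null
            && grpCtx.dataRegion().config().isPersistenceEnabled();
    }
}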
use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
the class GridCommandHandlerClusterByClassTest method testCacheIdleVerifyDumpForCorruptedDataOnSystemCache.
/**
 * Tests that idle verify prints partition info for system caches.
 *
 * @throws Exception If failed.
 */
@Test
public void testCacheIdleVerifyDumpForCorruptedDataOnSystemCache() throws Exception {
    int parts = 32;

    atomicConfiguration = new AtomicConfiguration()
        .setAffinity(new RendezvousAffinityFunction(false, parts))
        .setBackups(2);

    IgniteEx ignite = crd;

    injectTestSystemOut();

    // Adding some assignments without deployments.
    for (int i = 0; i < 100; i++) {
        ignite.semaphore("s" + i, i, false, true);
        ignite.atomicSequence("sq" + i, 0, true).incrementAndGet();
    }

    CacheGroupContext storedSysCacheCtx = ignite.context().cache().cacheGroup(CU.cacheId("default-ds-group"));

    assertNotNull(storedSysCacheCtx);

    corruptDataEntry(storedSysCacheCtx.caches().get(0),
        new GridCacheInternalKeyImpl("sq0", "default-ds-group"), true, false);

    corruptDataEntry(storedSysCacheCtx.caches().get(0),
        new GridCacheInternalKeyImpl("sq" + parts / 2, "default-ds-group"), false, true);

    CacheGroupContext memoryVolatileCacheCtx = ignite.context().cache().cacheGroup(
        CU.cacheId("default-volatile-ds-group@volatileDsMemPlc"));

    assertNotNull(memoryVolatileCacheCtx);
    assertEquals("volatileDsMemPlc", memoryVolatileCacheCtx.dataRegion().config().getName());
    assertEquals(false, memoryVolatileCacheCtx.dataRegion().config().isPersistenceEnabled());

    corruptDataEntry(memoryVolatileCacheCtx.caches().get(0),
        new GridCacheInternalKeyImpl("s0", "default-volatile-ds-group@volatileDsMemPlc"), true, false);

    corruptDataEntry(memoryVolatileCacheCtx.caches().get(0),
        new GridCacheInternalKeyImpl("s" + parts / 2, "default-volatile-ds-group@volatileDsMemPlc"), false, true);

    assertEquals(EXIT_CODE_OK, execute("--cache", "idle_verify", "--dump", "--cache-filter", "SYSTEM"));

    Matcher fileNameMatcher = dumpFileNameMatcher();

    if (fileNameMatcher.find()) {
        String dumpWithConflicts = new String(Files.readAllBytes(Paths.get(fileNameMatcher.group(1))));

        U.log(log, dumpWithConflicts);

        // Non-persistent caches do not have counter conflicts.
        assertContains(log, dumpWithConflicts,
            "conflict partitions has been found: [counterConflicts=2, " + "hashConflicts=4]");
    }
    else
        fail("Should be found dump with conflicts");
}
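The test repeatedly reaches the first cache of a system group via grpCtx.caches().get(0) before corrupting entries. A minimal sketch of that access pattern, assuming an IgniteEx instance and a group name; the class and helper names are made up.

import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.util.typedef.internal.CU;

class SysGroupAccess {
    /**
     * Looks up a cache group by name and returns the context of its first cache,
     * mirroring how the test obtains a target for corruptDataEntry().
     */
    static GridCacheContext<?, ?> firstCacheOfGroup(IgniteEx ignite, String grpName) {
        CacheGroupContext grpCtx = ignite.context().cache().cacheGroup(CU.cacheId(grpName));

        if (grpCtx == null)
            throw new IllegalStateException("Cache group not found: " + grpName);

        // caches() lists the contexts of all caches that belong to this group.
        return grpCtx.caches().get(0);
    }
}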